author     James Saryerwinnie <js@jamesls.com>  2013-04-18 04:14:08 -0700
committer  James Saryerwinnie <js@jamesls.com>  2013-04-18 04:14:08 -0700
commit     89f4947000587e12042e5b35c4557871b21137b9 (patch)
tree       0606c64058f8402e74b29c621105b74292ff366c
parent     b5852b0aa5ac91f462b28ac9decee33d872dec4d (diff)
parent     699d861f453aff8a398f9cd5a8de91ec8e36a8cf (diff)
download   boto-2.9.0.tar.gz
Merge branch 'release-2.9.0' (tag: 2.9.0)
* release-2.9.0: (158 commits)
  Bump version to 2.9.0
  Added underlying DynamoDB v2 support.
  Add redshift to setup.py/docs index
  Updated requests to something more modern.
  Only use 2 metadata service calls to get credentials
  Fix #1146: return response from custom url opener
  Fixed missing import.
  Add metadata_service_num_attempts config option
  Added cleanup for the snapshots created.
  Added support for redshift.
  Let total attempts by 1 + num_retries
  Add more diagnostics to debug logs
  Change GS calls to make_request to always convert to utf-8 bytes.
  Allow kwargs to be passed through to uplaoder
  Remove whitespace, fix long line lengths
  Improve VPC and VPN support
  Added sleeps to allow amazon time to propogate
  Added error handling for out of space during downloads
  Initial integration tests for idempotent subscribe
  Removed dead code from resumable upload handler
  ...
-rw-r--r--  .gitignore | 3
-rw-r--r--  README.rst | 2
-rwxr-xr-x  bin/dynamodb_dump | 75
-rwxr-xr-x  bin/dynamodb_load | 109
-rwxr-xr-x  bin/mturk | 2
-rwxr-xr-x  bin/sdbadmin | 10
-rw-r--r--  boto/__init__.py | 69
-rw-r--r--  boto/auth.py | 6
-rw-r--r--  boto/beanstalk/exception.py | 2
-rw-r--r--  boto/beanstalk/layer1.py | 620
-rw-r--r--  boto/beanstalk/response.py | 2
-rw-r--r--  boto/cacerts/cacerts.txt | 1564
-rw-r--r--  boto/cloudsearch/__init__.py | 3
-rw-r--r--  boto/cloudsearch/document.py | 34
-rw-r--r--  boto/cloudsearch/layer1.py | 2
-rw-r--r--  boto/cloudsearch/search.py | 2
-rw-r--r--  boto/connection.py | 11
-rw-r--r--  boto/core/credentials.py | 2
-rw-r--r--  boto/datapipeline/layer1.py | 2
-rw-r--r--  boto/dynamodb/__init__.py | 3
-rw-r--r--  boto/dynamodb/item.py | 6
-rw-r--r--  boto/dynamodb/layer2.py | 4
-rw-r--r--  boto/dynamodb/table.py | 2
-rw-r--r--  boto/dynamodb2/__init__.py | 63
-rw-r--r--  boto/dynamodb2/exceptions.py | 50
-rw-r--r--  boto/dynamodb2/layer1.py | 1407
-rw-r--r--  boto/ec2/attributes.py | 71
-rw-r--r--  boto/ec2/blockdevicemapping.py | 25
-rw-r--r--  boto/ec2/connection.py | 117
-rw-r--r--  boto/ec2/image.py | 61
-rw-r--r--  boto/ec2/instance.py | 4
-rw-r--r--  boto/elasticache/__init__.py | 2
-rw-r--r--  boto/elasticache/layer1.py | 7
-rw-r--r--  boto/elastictranscoder/layer1.py | 33
-rw-r--r--  boto/emr/connection.py | 2
-rw-r--r--  boto/exception.py | 4
-rwxr-xr-x  boto/file/key.py | 41
-rw-r--r--  boto/fps/connection.py | 108
-rw-r--r--  boto/glacier/concurrent.py | 21
-rw-r--r--  boto/glacier/vault.py | 11
-rwxr-xr-x  boto/gs/acl.py | 16
-rw-r--r--  boto/gs/bucket.py | 140
-rwxr-xr-x  boto/gs/connection.py | 8
-rw-r--r--  boto/gs/key.py | 267
-rw-r--r--  boto/gs/resumable_upload_handler.py | 89
-rw-r--r--  boto/handler.py | 12
-rw-r--r--  boto/https_connection.py | 2
-rw-r--r--  boto/opsworks/__init__.py | 0
-rw-r--r--  boto/opsworks/exceptions.py | 30
-rw-r--r--  boto/opsworks/layer1.py | 1457
-rw-r--r--  boto/provider.py | 14
-rw-r--r--  boto/rds/dbsecuritygroup.py | 41
-rw-r--r--  boto/redshift/__init__.py | 50
-rw-r--r--  boto/redshift/exceptions.py | 182
-rw-r--r--  boto/redshift/layer1.py | 2076
-rw-r--r--  boto/route53/connection.py | 2
-rw-r--r--  boto/s3/__init__.py | 3
-rw-r--r--  boto/s3/bucket.py | 54
-rw-r--r--  boto/s3/connection.py | 57
-rw-r--r--  boto/s3/key.py | 224
-rw-r--r--  boto/s3/keyfile.py | 12
-rw-r--r--  boto/s3/resumable_download_handler.py | 27
-rw-r--r--  boto/s3/website.py | 166
-rw-r--r--  boto/sns/connection.py | 22
-rw-r--r--  boto/sqs/connection.py | 8
-rw-r--r--  boto/sqs/queue.py | 4
-rwxr-xr-x  boto/storage_uri.py | 200
-rw-r--r--  boto/swf/__init__.py | 21
-rw-r--r--  boto/swf/layer2.py | 4
-rw-r--r--  boto/utils.py | 49
-rw-r--r--  boto/vpc/__init__.py | 43
-rw-r--r--  boto/vpc/vpc.py | 32
-rw-r--r--  boto/vpc/vpnconnection.py | 153
-rw-r--r--  docs/source/autoscale_tut.rst | 11
-rw-r--r--  docs/source/boto_config_tut.rst | 288
-rw-r--r--  docs/source/cloudsearch_tut.rst | 141
-rw-r--r--  docs/source/cloudwatch_tut.rst | 6
-rw-r--r--  docs/source/dynamodb_tut.rst | 679
-rw-r--r--  docs/source/ec2_tut.rst | 118
-rw-r--r--  docs/source/elb_tut.rst | 57
-rw-r--r--  docs/source/emr_tut.rst | 19
-rw-r--r--  docs/source/getting_started.rst | 177
-rw-r--r--  docs/source/index.rst | 19
-rw-r--r--  docs/source/rds_tut.rst | 108
-rw-r--r--  docs/source/ref/cloudsearch.rst | 2
-rw-r--r--  docs/source/ref/dynamodb2.rst | 26
-rw-r--r--  docs/source/ref/index.rst | 1
-rw-r--r--  docs/source/ref/redshift.rst | 26
-rw-r--r--  docs/source/s3_tut.rst | 330
-rw-r--r--  docs/source/ses_tut.rst | 11
-rw-r--r--  docs/source/simpledb_tut.rst | 7
-rw-r--r--  docs/source/sqs_tut.rst | 8
-rw-r--r--  docs/source/vpc_tut.rst | 11
-rw-r--r--  requirements.txt | 7
-rw-r--r--  setup.py | 6
-rw-r--r--  tests/integration/dynamodb2/__init__.py | 0
-rw-r--r--  tests/integration/dynamodb2/test_cert_verification.py | 40
-rw-r--r--  tests/integration/dynamodb2/test_layer1.py | 244
-rw-r--r--  tests/integration/ec2/elb/test_connection.py | 102
-rw-r--r--  tests/integration/elastictranscoder/test_layer1.py | 5
-rw-r--r--  tests/integration/gs/test_basic.py | 2
-rw-r--r--  tests/integration/gs/test_generation_conditionals.py | 38
-rw-r--r--  tests/integration/gs/test_resumable_downloads.py | 6
-rw-r--r--  tests/integration/gs/test_resumable_uploads.py | 2
-rw-r--r--  tests/integration/gs/test_storage_uri.py | 158
-rw-r--r--  tests/integration/gs/test_versioning.py | 9
-rw-r--r--  tests/integration/gs/util.py | 22
-rw-r--r--  tests/integration/opsworks/__init__.py | 0
-rw-r--r--  tests/integration/opsworks/test_layer1.py | 40
-rw-r--r--  tests/integration/redshift/__init__.py | 0
-rw-r--r--  tests/integration/redshift/test_cert_verification.py | 35
-rw-r--r--  tests/integration/redshift/test_layer1.py | 134
-rw-r--r--  tests/integration/s3/mock_storage_service.py | 31
-rw-r--r--  tests/integration/s3/test_connection.py | 3
-rw-r--r--  tests/integration/sns/test_sns_sqs_subscription.py | 101
-rw-r--r--  tests/integration/sqs/test_connection.py | 76
-rwxr-xr-x  tests/test.py | 5
-rw-r--r--  tests/unit/beanstalk/test_layer1.py | 15
-rw-r--r--  tests/unit/cloudsearch/__init__.py | 1
-rw-r--r--  tests/unit/cloudsearch/test_connection.py | 241
-rw-r--r--  tests/unit/cloudsearch/test_document.py | 324
-rw-r--r--  tests/unit/cloudsearch/test_search.py | 325
-rw-r--r--  tests/unit/ec2/test_connection.py | 86
-rw-r--r--  tests/unit/glacier/test_concurrent.py | 56
-rw-r--r--  tests/unit/glacier/test_vault.py | 10
-rw-r--r--  tests/unit/provider/test_provider.py | 53
-rw-r--r--  tests/unit/s3/test_bucket.py | 48
-rw-r--r--  tests/unit/s3/test_keyfile.py | 6
-rw-r--r--  tests/unit/s3/test_uri.py | 257
-rw-r--r--  tests/unit/s3/test_website.py | 72
-rw-r--r--  tests/unit/test_connection.py | 206
-rw-r--r--  tests/unit/test_exception.py | 78
-rw-r--r--  tests/unit/utils/__init__.py | 0
-rw-r--r--  tests/unit/utils/test_utils.py | 81
-rw-r--r--  tests/unit/vpc/__init__.py | 0
-rw-r--r--  tests/unit/vpc/test_vpc.py | 40
-rw-r--r--  tests/unit/vpc/test_vpnconnection.py | 123
137 files changed, 13362 insertions, 1798 deletions
diff --git a/.gitignore b/.gitignore
index ecf63f94..f5685bd5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,4 +10,5 @@ MANIFEST
.idea
.tox
.coverage
-*flymake.py
\ No newline at end of file
+*flymake.py
+venv
diff --git a/README.rst b/README.rst
index 65763144..e1b9fa37 100644
--- a/README.rst
+++ b/README.rst
@@ -137,7 +137,7 @@ Getting Started with Boto
*************************
Your credentials can be passed into the methods that create
-connections. Alternatively, boto will check for the existance of the
+connections. Alternatively, boto will check for the existence of the
following environment variables to ascertain your credentials:
**AWS_ACCESS_KEY_ID** - Your AWS Access Key ID
diff --git a/bin/dynamodb_dump b/bin/dynamodb_dump
new file mode 100755
index 00000000..8b6aada7
--- /dev/null
+++ b/bin/dynamodb_dump
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+
+import argparse
+import errno
+import os
+
+import boto
+from boto.compat import json
+
+
+DESCRIPTION = """Dump the contents of one or more DynamoDB tables to the local filesystem.
+
+Each table is dumped into two files:
+ - {table_name}.metadata stores the table's name, schema and provisioned
+ throughput.
+ - {table_name}.data stores the table's actual contents.
+
+Both files are created in the current directory. To write them somewhere else,
+use the --out-dir parameter (the target directory will be created if needed).
+"""
+
+
+def dump_table(table, out_dir):
+ metadata_file = os.path.join(out_dir, "%s.metadata" % table.name)
+ data_file = os.path.join(out_dir, "%s.data" % table.name)
+
+ with open(metadata_file, "w") as metadata_fd:
+ json.dump(
+ {
+ "name": table.name,
+ "schema": table.schema.dict,
+ "read_units": table.read_units,
+ "write_units": table.write_units,
+ },
+ metadata_fd
+ )
+
+ with open(data_file, "w") as data_fd:
+ for item in table.scan():
+ # JSON can't serialize sets -- convert those to lists.
+ data = {}
+ for k, v in item.iteritems():
+ if isinstance(v, (set, frozenset)):
+ data[k] = list(v)
+ else:
+ data[k] = v
+
+ data_fd.write(json.dumps(data))
+ data_fd.write("\n")
+
+
+def dynamodb_dump(tables, out_dir):
+ try:
+ os.makedirs(out_dir)
+ except OSError as e:
+ # We don't care if the dir already exists.
+ if e.errno != errno.EEXIST:
+ raise
+
+ conn = boto.connect_dynamodb()
+ for t in tables:
+ dump_table(conn.get_table(t), out_dir)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ prog="dynamodb_dump",
+ description=DESCRIPTION
+ )
+ parser.add_argument("--out-dir", default=".")
+ parser.add_argument("tables", metavar="TABLES", nargs="+")
+
+ namespace = parser.parse_args()
+
+ dynamodb_dump(namespace.tables, namespace.out_dir)
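To make the dump format above concrete, here is a minimal sketch (not part of the patch) that reads back a dump of a hypothetical table named "users": the .metadata file holds one JSON document with the table's name, schema and throughput, and the .data file holds one JSON-encoded item per line.

    # Sketch only; assumes a table called "users" was previously dumped with dynamodb_dump.
    from boto.compat import json

    with open("users.metadata") as fd:
        meta = json.load(fd)
    print meta["name"], meta["read_units"], meta["write_units"]

    with open("users.data") as fd:
        # One JSON object per line; sets were stored as lists by dump_table().
        items = [json.loads(line) for line in fd if line.strip()]
    print len(items), "items read back"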
diff --git a/bin/dynamodb_load b/bin/dynamodb_load
new file mode 100755
index 00000000..21dfa176
--- /dev/null
+++ b/bin/dynamodb_load
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+
+import argparse
+import os
+
+import boto
+from boto.compat import json
+from boto.dynamodb.schema import Schema
+
+
+DESCRIPTION = """Load data into one or more DynamoDB tables.
+
+For each table, data is read from two files:
+ - {table_name}.metadata for the table's name, schema and provisioned
+ throughput (only required if creating the table).
+ - {table_name}.data for the table's actual contents.
+
+Both files are searched for in the current directory. To read them from
+somewhere else, use the --in-dir parameter.
+
+This program does not wipe the tables prior to loading data. However, any
+items present in the data files will overwrite the table's contents.
+"""
+
+
+def _json_iterload(fd):
+ """Lazily load newline-separated JSON objects from a file-like object."""
+ buffer = ""
+ eof = False
+ while not eof:
+ try:
+ # Add a line to the buffer
+ buffer += fd.next()
+ except StopIteration:
+ # We can't let that exception bubble up, otherwise the last
+ # object in the file will never be decoded.
+ eof = True
+ try:
+ # Try to decode a JSON object.
+ json_object = json.loads(buffer.strip())
+
+ # Success: clear the buffer (everything was decoded).
+ buffer = ""
+ except ValueError:
+ if eof and buffer.strip():
+ # No more lines to load and the buffer contains something other
+ # than whitespace: the file is, in fact, malformed.
+ raise
+ # We couldn't decode a complete JSON object: load more lines.
+ continue
+
+ yield json_object
+
+
+def create_table(metadata_fd):
+ """Create a table from a metadata file-like object."""
+
+
+def load_table(table, in_fd):
+ """Load items into a table from a file-like object."""
+ for i in _json_iterload(in_fd):
+ # Convert lists back to sets.
+ data = {}
+ for k, v in i.iteritems():
+ if isinstance(v, list):
+ data[k] = set(v)
+ else:
+ data[k] = v
+        table.new_item(attrs=data).put()
+
+
+def dynamodb_load(tables, in_dir, create_tables):
+ conn = boto.connect_dynamodb()
+ for t in tables:
+ metadata_file = os.path.join(in_dir, "%s.metadata" % t)
+ data_file = os.path.join(in_dir, "%s.data" % t)
+ if create_tables:
+ with open(metadata_file) as meta_fd:
+ metadata = json.load(meta_fd)
+ table = conn.create_table(
+ name=t,
+ schema=Schema(metadata["schema"]),
+ read_units=metadata["read_units"],
+ write_units=metadata["write_units"],
+ )
+ table.refresh(wait_for_active=True)
+ else:
+ table = conn.get_table(t)
+
+ with open(data_file) as in_fd:
+ load_table(table, in_fd)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ prog="dynamodb_load",
+ description=DESCRIPTION
+ )
+ parser.add_argument(
+ "--create-tables",
+ action="store_true",
+ help="Create the tables if they don't exist already (without this flag, attempts to load data into non-existing tables fail)."
+ )
+ parser.add_argument("--in-dir", default=".")
+ parser.add_argument("tables", metavar="TABLES", nargs="+")
+
+ namespace = parser.parse_args()
+
+ dynamodb_load(namespace.tables, namespace.in_dir, namespace.create_tables)
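As a quick illustration of the newline-delimited JSON handling in _json_iterload above, the sketch below (assuming that helper is in scope, e.g. pasted into a Python 2 shell) feeds it an in-memory stream; the field names and values are made up. Note that an object may span several lines and is only yielded once it parses.

    # Sketch only; _json_iterload is the helper defined in bin/dynamodb_load above.
    from StringIO import StringIO  # Python 2, matching the script

    fd = StringIO('{"id": 1, "tags": ["a", "b"]}\n'
                  '{"id": 2,\n'
                  ' "name": "spans two lines"}\n')
    for obj in _json_iterload(fd):
        print obj
    # Prints {u'id': 1, u'tags': [u'a', u'b']} and then
    # {u'id': 2, u'name': u'spans two lines'} (key order may vary).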
diff --git a/bin/mturk b/bin/mturk
index 84707409..e0b4bab4 100755
--- a/bin/mturk
+++ b/bin/mturk
@@ -25,8 +25,8 @@ import os.path
import string
import inspect
import datetime, calendar
-import json
import boto.mturk.connection, boto.mturk.price, boto.mturk.question, boto.mturk.qualification
+from boto.compat import json
# --------------------------------------------------
# Globals
diff --git a/bin/sdbadmin b/bin/sdbadmin
index 7e87c7b3..3fbd3f44 100755
--- a/bin/sdbadmin
+++ b/bin/sdbadmin
@@ -26,15 +26,7 @@ VERSION = "%prog version 1.0"
import boto
import time
from boto import sdb
-
-# Allow support for JSON
-try:
- import simplejson as json
-except:
- try:
- import json
- except:
- json = False
+from boto.compat import json
def choice_input(options, default=None, title=None):
"""
diff --git a/boto/__init__.py b/boto/__init__.py
index 076476af..1c114296 100644
--- a/boto/__init__.py
+++ b/boto/__init__.py
@@ -2,6 +2,7 @@
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# Copyright (c) 2010, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
@@ -35,12 +36,20 @@ import logging.config
import urlparse
from boto.exception import InvalidUriError
-__version__ = '2.8.0'
+__version__ = '2.9.0'
Version = __version__ # for backware compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
config = Config()
+# Regex to disallow buckets violating charset or not [3..255] chars total.
+BUCKET_NAME_RE = re.compile(r'^[a-z0-9][a-z0-9\._-]{1,253}[a-z0-9]$')
+# Regex to disallow buckets with individual DNS labels longer than 63.
+TOO_LONG_DNS_NAME_COMP = re.compile(r'[-_a-z0-9]{64}')
+GENERATION_RE = re.compile(r'(?P<versionless_uri_str>.+)'
+ r'#(?P<generation>[0-9]+)$')
+VERSION_RE = re.compile('(?P<versionless_uri_str>.+)#(?P<version_id>.+)$')
+
def init_logging():
for file in BotoConfigLocations:
@@ -655,9 +664,19 @@ def connect_elastictranscoder(aws_access_key_id=None,
**kwargs)
+def connect_opsworks(aws_access_key_id=None,
+ aws_secret_access_key=None,
+ **kwargs):
+ from boto.opsworks.layer1 import OpsWorksConnection
+ return OpsWorksConnection(
+ aws_access_key_id=aws_access_key_id,
+ aws_secret_access_key=aws_secret_access_key,
+ **kwargs)
+
+
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
- suppress_consec_slashes=True):
+ suppress_consec_slashes=True, is_latest=False):
"""
Instantiate a StorageUri from a URI string.
@@ -673,6 +692,9 @@ def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
:param bucket_storage_uri_class: Allows mocking for unit tests.
:param suppress_consec_slashes: If provided, controls whether
consecutive slashes will be suppressed in key paths.
+ :type is_latest: bool
+ :param is_latest: whether this versioned object represents the
+ current version.
We allow validate to be disabled to allow caller
to implement bucket-level wildcarding (outside the boto library;
@@ -684,14 +706,17 @@ def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
``uri_str`` must be one of the following formats:
* gs://bucket/name
+ * gs://bucket/name#ver
* s3://bucket/name
* gs://bucket
* s3://bucket
* filename (which could be a Unix path like /a/b/c or a Windows path like
C:\a\b\c)
- The last example uses the default scheme ('file', unless overridden)
+ The last example uses the default scheme ('file', unless overridden).
"""
+ version_id = None
+ generation = None
# Manually parse URI components instead of using urlparse.urlparse because
# what we're calling URIs don't really fit the standard syntax for URIs
@@ -708,7 +733,8 @@ def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
if not (platform.system().lower().startswith('windows')
and colon_pos == 1
and drive_char >= 'a' and drive_char <= 'z'):
- raise InvalidUriError('"%s" contains ":" instead of "://"' % uri_str)
+ raise InvalidUriError('"%s" contains ":" instead of "://"' %
+ uri_str)
scheme = default_scheme.lower()
path = uri_str
else:
@@ -727,23 +753,38 @@ def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
else:
path_parts = path.split('/', 1)
bucket_name = path_parts[0]
- if (validate and bucket_name and
- # Disallow buckets violating charset or not [3..255] chars total.
- (not re.match('^[a-z0-9][a-z0-9\._-]{1,253}[a-z0-9]$', bucket_name)
- # Disallow buckets with individual DNS labels longer than 63.
- or re.search('[-_a-z0-9]{64}', bucket_name))):
- raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str)
- # If enabled, ensure the bucket name is valid, to avoid possibly
- # confusing other parts of the code. (For example if we didn't
+ object_name = ''
+ # If validate enabled, ensure the bucket name is valid, to avoid
+ # possibly confusing other parts of the code. (For example if we didn't
# catch bucket names containing ':', when a user tried to connect to
# the server with that name they might get a confusing error about
# non-integer port numbers.)
- object_name = ''
+ if (validate and bucket_name and
+ (not BUCKET_NAME_RE.match(bucket_name)
+ or TOO_LONG_DNS_NAME_COMP.search(bucket_name))):
+ raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str)
+ if scheme == 'gs':
+ match = GENERATION_RE.search(path)
+ if match:
+ md = match.groupdict()
+ versionless_uri_str = md['versionless_uri_str']
+ path_parts = versionless_uri_str.split('/', 1)
+ generation = int(md['generation'])
+ elif scheme == 's3':
+ match = VERSION_RE.search(path)
+ if match:
+ md = match.groupdict()
+ versionless_uri_str = md['versionless_uri_str']
+ path_parts = versionless_uri_str.split('/', 1)
+ version_id = md['version_id']
+ else:
+ raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
if len(path_parts) > 1:
object_name = path_parts[1]
return bucket_storage_uri_class(
scheme, bucket_name, object_name, debug,
- suppress_consec_slashes=suppress_consec_slashes)
+ suppress_consec_slashes=suppress_consec_slashes,
+ version_id=version_id, generation=generation, is_latest=is_latest)
def storage_uri_for_key(key):
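The practical effect of the GENERATION_RE/VERSION_RE handling added above is that a specific object version can be addressed directly in the URI string. A minimal sketch, with made-up bucket, object and version values:

    # Sketch only; bucket/object names and version numbers are invented.
    import boto

    gs_uri = boto.storage_uri('gs://mybucket/myobject#1360383693690000')
    print gs_uri.bucket_name, gs_uri.object_name, gs_uri.generation
    # mybucket myobject 1360383693690000

    s3_uri = boto.storage_uri('s3://mybucket/myobject#3HL4kqtJlcpXroDTDmJ')
    print s3_uri.version_id
    # 3HL4kqtJlcpXroDTDmJ

No network call is made here; parsing happens locally, and the version or generation is simply carried on the resulting StorageUri.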
diff --git a/boto/auth.py b/boto/auth.py
index 91c48640..c5ff8fe8 100644
--- a/boto/auth.py
+++ b/boto/auth.py
@@ -164,9 +164,9 @@ class HmacAuthV1Handler(AuthHandler, HmacKeys):
boto.log.debug('StringToSign:\n%s' % string_to_sign)
b64_hmac = self.sign_string(string_to_sign)
auth_hdr = self._provider.auth_header
- headers['Authorization'] = ("%s %s:%s" %
- (auth_hdr,
- self._provider.access_key, b64_hmac))
+ auth = ("%s %s:%s" % (auth_hdr, self._provider.access_key, b64_hmac))
+ boto.log.debug('Signature:\n%s' % auth)
+ headers['Authorization'] = auth
class HmacAuthV2Handler(AuthHandler, HmacKeys):
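For reference, the Authorization value logged and set above takes the form "<auth_header> <access_key>:<b64_hmac>". A tiny sketch with invented values:

    # Sketch only; the access key and signature below are made up.
    auth_hdr = 'AWS'
    access_key = 'AKIAIOSFODNN7EXAMPLE'
    b64_hmac = 'frJIUN8DYpKDtOLCwo//yllqDzg='
    print "%s %s:%s" % (auth_hdr, access_key, b64_hmac)
    # AWS AKIAIOSFODNN7EXAMPLE:frJIUN8DYpKDtOLCwo//yllqDzg=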
diff --git a/boto/beanstalk/exception.py b/boto/beanstalk/exception.py
index c209cefc..f6f9ffad 100644
--- a/boto/beanstalk/exception.py
+++ b/boto/beanstalk/exception.py
@@ -1,5 +1,5 @@
import sys
-import json
+from boto.compat import json
from boto.exception import BotoServerError
diff --git a/boto/beanstalk/layer1.py b/boto/beanstalk/layer1.py
index d67d2eb7..e63f70e7 100644
--- a/boto/beanstalk/layer1.py
+++ b/boto/beanstalk/layer1.py
@@ -21,10 +21,10 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
-import json
import boto
import boto.jsonresponse
+from boto.compat import json
from boto.regioninfo import RegionInfo
from boto.connection import AWSQueryConnection
@@ -54,7 +54,7 @@ class Layer1(AWSQueryConnection):
security_token)
def _required_auth_capability(self):
- return ['sign-v2']
+ return ['hmac-v4']
def _encode_bool(self, v):
v = bool(v)
@@ -75,7 +75,7 @@ class Layer1(AWSQueryConnection):
:type cname_prefix: string
:param cname_prefix: The prefix used when this CNAME is
- reserved.
+ reserved.
"""
params = {'CNAMEPrefix': cname_prefix}
return self._get_response('CheckDNSAvailability', params)
@@ -87,9 +87,9 @@ class Layer1(AWSQueryConnection):
:type application_name: string
:param application_name: The name of the application.
- Constraint: This name must be unique within your account. If the
- specified name already exists, the action returns an
- InvalidParameterValue error.
+ Constraint: This name must be unique within your account. If the
+ specified name already exists, the action returns an
+ InvalidParameterValue error.
:type description: string
:param description: Describes the application.
@@ -108,37 +108,34 @@ class Layer1(AWSQueryConnection):
:type application_name: string
:param application_name: The name of the application. If no
- application is found with this name, and AutoCreateApplication
- is false, returns an InvalidParameterValue error.
+ application is found with this name, and AutoCreateApplication is
+ false, returns an InvalidParameterValue error.
:type version_label: string
- :param version_label: A label identifying this
- version.Constraint: Must be unique per application. If an
- application version already exists with this label for the
- specified application, AWS Elastic Beanstalk returns an
- InvalidParameterValue error.
+ :param version_label: A label identifying this version. Constraint:
+ Must be unique per application. If an application version already
+ exists with this label for the specified application, AWS Elastic
+ Beanstalk returns an InvalidParameterValue error.
:type description: string
:param description: Describes this version.
:type s3_bucket: string
- :param s3_bucket: The Amazon S3 bucket where the data is
- located.
+ :param s3_bucket: The Amazon S3 bucket where the data is located.
:type s3_key: string
- :param s3_key: The Amazon S3 key where the data is located.
- Both s3_bucket and s3_key must be specified in order to use
- a specific source bundle. If both of these values are not specified
- the sample application will be used.
+ :param s3_key: The Amazon S3 key where the data is located. Both
+ s3_bucket and s3_key must be specified in order to use a specific
+ source bundle. If both of these values are not specified the
+ sample application will be used.
:type auto_create_application: boolean
- :param auto_create_application: Determines how the system
- behaves if the specified application for this version does not
- already exist: true: Automatically creates the specified
- application for this version if it does not already exist.
- false: Returns an InvalidParameterValue if the specified
- application for this version does not already exist. Default:
- false Valid Values: true | false
+ :param auto_create_application: Determines how the system behaves if
+ the specified application for this version does not already exist:
+ true: Automatically creates the specified application for this
+ version if it does not already exist. false: Returns an
+ InvalidParameterValue if the specified application for this version
+ does not already exist. Default: false Valid Values: true | false
:raises: TooManyApplicationsException,
TooManyApplicationVersionsException,
@@ -171,52 +168,49 @@ class Layer1(AWSQueryConnection):
configuration settings.
:type application_name: string
- :param application_name: The name of the application to
- associate with this configuration template. If no application is
- found with this name, AWS Elastic Beanstalk returns an
- InvalidParameterValue error.
+ :param application_name: The name of the application to associate with
+ this configuration template. If no application is found with this
+ name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
:type template_name: string
- :param template_name: The name of the configuration
- template.Constraint: This name must be unique per application.
- Default: If a configuration template already exists with this
- name, AWS Elastic Beanstalk returns an InvalidParameterValue
- error.
+ :param template_name: The name of the configuration template.
+ Constraint: This name must be unique per application. Default: If
+ a configuration template already exists with this name, AWS Elastic
+ Beanstalk returns an InvalidParameterValue error.
:type solution_stack_name: string
- :param solution_stack_name: The name of the solution stack used
- by this configuration. The solution stack specifies the
- operating system, architecture, and application server for a
- configuration template. It determines the set of configuration
- options as well as the possible and default values. Use
- ListAvailableSolutionStacks to obtain a list of available
- solution stacks. Default: If the SolutionStackName is not
- specified and the source configuration parameter is blank, AWS
- Elastic Beanstalk uses the default solution stack. If not
- specified and the source configuration parameter is specified,
- AWS Elastic Beanstalk uses the same solution stack as the source
- configuration template.
+ :param solution_stack_name: The name of the solution stack used by this
+ configuration. The solution stack specifies the operating system,
+ architecture, and application server for a configuration template.
+ It determines the set of configuration options as well as the
+ possible and default values. Use ListAvailableSolutionStacks to
+ obtain a list of available solution stacks. Default: If the
+ SolutionStackName is not specified and the source configuration
+ parameter is blank, AWS Elastic Beanstalk uses the default solution
+ stack. If not specified and the source configuration parameter is
+ specified, AWS Elastic Beanstalk uses the same solution stack as
+ the source configuration template.
:type source_configuration_application_name: string
:param source_configuration_application_name: The name of the
- application associated with the configuration.
+ application associated with the configuration.
:type source_configuration_template_name: string
:param source_configuration_template_name: The name of the
- configuration template.
+ configuration template.
:type environment_id: string
:param environment_id: The ID of the environment used with this
- configuration template.
+ configuration template.
:type description: string
:param description: Describes this configuration.
:type option_settings: list
- :param option_settings: If specified, AWS Elastic Beanstalk sets
- the specified configuration option to the requested value. The
- new value overrides the value obtained from the solution stack
- or the source configuration template.
+ :param option_settings: If specified, AWS Elastic Beanstalk sets the
+ specified configuration option to the requested value. The new
+ value overrides the value obtained from the solution stack or the
+ source configuration template.
:raises: InsufficientPrivilegesException,
TooManyConfigurationTemplatesException
@@ -247,73 +241,72 @@ class Layer1(AWSQueryConnection):
"""Launches an environment for the application using a configuration.
:type application_name: string
- :param application_name: The name of the application that
- contains the version to be deployed. If no application is found
- with this name, CreateEnvironment returns an
- InvalidParameterValue error.
+ :param application_name: The name of the application that contains the
+ version to be deployed. If no application is found with this name,
+ CreateEnvironment returns an InvalidParameterValue error.
:type version_label: string
- :param version_label: The name of the application version to
- deploy. If the specified application has no associated
- application versions, AWS Elastic Beanstalk UpdateEnvironment
- returns an InvalidParameterValue error. Default: If not
- specified, AWS Elastic Beanstalk attempts to launch the most
- recently created application version.
+ :param version_label: The name of the application version to deploy. If
+ the specified application has no associated application versions,
+ AWS Elastic Beanstalk UpdateEnvironment returns an
+ InvalidParameterValue error. Default: If not specified, AWS
+ Elastic Beanstalk attempts to launch the most recently created
+ application version.
:type environment_name: string
- :param environment_name: A unique name for the deployment
- environment. Used in the application URL. Constraint: Must be
- from 4 to 23 characters in length. The name can contain only
- letters, numbers, and hyphens. It cannot start or end with a
- hyphen. This name must be unique in your account. If the
- specified name already exists, AWS Elastic Beanstalk returns an
- InvalidParameterValue error. Default: If the CNAME parameter is
- not specified, the environment name becomes part of the CNAME,
- and therefore part of the visible URL for your application.
+ :param environment_name: A unique name for the deployment environment.
+ Used in the application URL. Constraint: Must be from 4 to 23
+ characters in length. The name can contain only letters, numbers,
+ and hyphens. It cannot start or end with a hyphen. This name must
+ be unique in your account. If the specified name already exists,
+ AWS Elastic Beanstalk returns an InvalidParameterValue error.
+ Default: If the CNAME parameter is not specified, the environment
+ name becomes part of the CNAME, and therefore part of the visible
+ URL for your application.
:type template_name: string
:param template_name: The name of the configuration template to
- use in deployment. If no configuration template is found with
- this name, AWS Elastic Beanstalk returns an
- InvalidParameterValue error. Condition: You must specify either
- this parameter or a SolutionStackName, but not both. If you
- specify both, AWS Elastic Beanstalk returns an
- InvalidParameterCombination error. If you do not specify either,
- AWS Elastic Beanstalk returns a MissingRequiredParameter error.
+ use in deployment. If no configuration template is found with this
+ name, AWS Elastic Beanstalk returns an InvalidParameterValue error.
+ Condition: You must specify either this parameter or a
+ SolutionStackName, but not both. If you specify both, AWS Elastic
+ Beanstalk returns an InvalidParameterCombination error. If you do
+ not specify either, AWS Elastic Beanstalk returns a
+ MissingRequiredParameter error.
:type solution_stack_name: string
- :param solution_stack_name: This is an alternative to specifying
- a configuration name. If specified, AWS Elastic Beanstalk sets
- the configuration values to the default values associated with
- the specified solution stack. Condition: You must specify
- either this or a TemplateName, but not both. If you specify
- both, AWS Elastic Beanstalk returns an
- InvalidParameterCombination error. If you do not specify either,
- AWS Elastic Beanstalk returns a MissingRequiredParameter error.
+ :param solution_stack_name: This is an alternative to specifying a
+ configuration name. If specified, AWS Elastic Beanstalk sets the
+ configuration values to the default values associated with the
+ specified solution stack. Condition: You must specify either this
+ or a TemplateName, but not both. If you specify both, AWS Elastic
+ Beanstalk returns an InvalidParameterCombination error. If you do
+ not specify either, AWS Elastic Beanstalk returns a
+ MissingRequiredParameter error.
:type cname_prefix: string
- :param cname_prefix: If specified, the environment attempts to
- use this value as the prefix for the CNAME. If not specified,
- the environment uses the environment name.
+ :param cname_prefix: If specified, the environment attempts to use this
+ value as the prefix for the CNAME. If not specified, the
+ environment uses the environment name.
:type description: string
:param description: Describes this environment.
:type option_settings: list
- :param option_settings: If specified, AWS Elastic Beanstalk sets
- the specified configuration options to the requested value in
- the configuration set for the new environment. These override
- the values obtained from the solution stack or the configuration
- template. Each element in the list is a tuple of (Namespace,
- OptionName, Value), for example::
+ :param option_settings: If specified, AWS Elastic Beanstalk sets the
+ specified configuration options to the requested value in the
+ configuration set for the new environment. These override the
+ values obtained from the solution stack or the configuration
+ template. Each element in the list is a tuple of (Namespace,
+ OptionName, Value), for example::
- [('aws:autoscaling:launchconfiguration',
- 'Ec2KeyName', 'mykeypair')]
+ [('aws:autoscaling:launchconfiguration',
+ 'Ec2KeyName', 'mykeypair')]
:type options_to_remove: list
- :param options_to_remove: A list of custom user-defined
- configuration options to remove from the configuration set for
- this new environment.
+ :param options_to_remove: A list of custom user-defined configuration
+ options to remove from the configuration set for this new
+ environment.
:raises: TooManyEnvironmentsException, InsufficientPrivilegesException
@@ -363,7 +356,7 @@ class Layer1(AWSQueryConnection):
:type terminate_env_by_force: boolean
:param terminate_env_by_force: When set to true, running
- environments will be terminated before deleting the application.
+ environments will be terminated before deleting the application.
:raises: OperationInProgressException
@@ -380,14 +373,15 @@ class Layer1(AWSQueryConnection):
:type application_name: string
:param application_name: The name of the application to delete
- releases from.
+ releases from.
:type version_label: string
:param version_label: The label of the version to delete.
:type delete_source_bundle: boolean
:param delete_source_bundle: Indicates whether to delete the
- associated source bundle from Amazon S3. Valid Values: true | false
+ associated source bundle from Amazon S3. Valid Values: true |
+ false
:raises: SourceBundleDeletionException,
InsufficientPrivilegesException,
@@ -406,11 +400,11 @@ class Layer1(AWSQueryConnection):
:type application_name: string
:param application_name: The name of the application to delete
- the configuration template from.
+ the configuration template from.
:type template_name: string
:param template_name: The name of the configuration template to
- delete.
+ delete.
:raises: OperationInProgressException
@@ -434,11 +428,11 @@ class Layer1(AWSQueryConnection):
:type application_name: string
:param application_name: The name of the application the
- environment is associated with.
+ environment is associated with.
:type environment_name: string
:param environment_name: The name of the environment to delete
- the draft configuration from.
+ the draft configuration from.
"""
params = {'ApplicationName': application_name,
@@ -450,14 +444,14 @@ class Layer1(AWSQueryConnection):
"""Returns descriptions for existing application versions.
:type application_name: string
- :param application_name: If specified, AWS Elastic Beanstalk
- restricts the returned descriptions to only include ones that
- are associated with the specified application.
+ :param application_name: If specified, AWS Elastic Beanstalk restricts
+ the returned descriptions to only include ones that are associated
+ with the specified application.
:type version_labels: list
:param version_labels: If specified, restricts the returned
- descriptions to only include ones that have the specified
- version labels.
+ descriptions to only include ones that have the specified version
+ labels.
"""
params = {}
@@ -472,9 +466,9 @@ class Layer1(AWSQueryConnection):
"""Returns the descriptions of existing applications.
:type application_names: list
- :param application_names: If specified, AWS Elastic Beanstalk
- restricts the returned descriptions to only include those with
- the specified names.
+ :param application_names: If specified, AWS Elastic Beanstalk restricts
+ the returned descriptions to only include those with the specified
+ names.
"""
params = {}
@@ -497,26 +491,26 @@ class Layer1(AWSQueryConnection):
is changed.
:type application_name: string
- :param application_name: The name of the application associated
- with the configuration template or environment. Only needed if
- you want to describe the configuration options associated with
- either the configuration template or environment.
+ :param application_name: The name of the application associated with
+ the configuration template or environment. Only needed if you want
+ to describe the configuration options associated with either the
+ configuration template or environment.
:type template_name: string
- :param template_name: The name of the configuration template
- whose configuration options you want to describe.
+ :param template_name: The name of the configuration template whose
+ configuration options you want to describe.
:type environment_name: string
:param environment_name: The name of the environment whose
- configuration options you want to describe.
+ configuration options you want to describe.
:type solution_stack_name: string
:param solution_stack_name: The name of the solution stack whose
- configuration options you want to describe.
+ configuration options you want to describe.
:type options: list
:param options: If specified, restricts the descriptions to only
- the specified options.
+ the specified options.
"""
params = {}
if application_name:
@@ -547,23 +541,22 @@ class Layer1(AWSQueryConnection):
:type application_name: string
:param application_name: The application for the environment or
- configuration template.
+ configuration template.
:type template_name: string
:param template_name: The name of the configuration template to
- describe. Conditional: You must specify either this parameter
- or an EnvironmentName, but not both. If you specify both, AWS
- Elastic Beanstalk returns an InvalidParameterCombination error.
- If you do not specify either, AWS Elastic Beanstalk returns a
- MissingRequiredParameter error.
+ describe. Conditional: You must specify either this parameter or
+ an EnvironmentName, but not both. If you specify both, AWS Elastic
+ Beanstalk returns an InvalidParameterCombination error. If you do
+ not specify either, AWS Elastic Beanstalk returns a
+ MissingRequiredParameter error.
:type environment_name: string
- :param environment_name: The name of the environment to
- describe. Condition: You must specify either this or a
- TemplateName, but not both. If you specify both, AWS Elastic
- Beanstalk returns an InvalidParameterCombination error. If you
- do not specify either, AWS Elastic Beanstalk returns
- MissingRequiredParameter error.
+ :param environment_name: The name of the environment to describe.
+ Condition: You must specify either this or a TemplateName, but not
+ both. If you specify both, AWS Elastic Beanstalk returns an
+ InvalidParameterCombination error. If you do not specify either,
+ AWS Elastic Beanstalk returns MissingRequiredParameter error.
"""
params = {'ApplicationName': application_name}
if template_name:
@@ -578,15 +571,15 @@ class Layer1(AWSQueryConnection):
:type environment_id: string
:param environment_id: The ID of the environment to retrieve AWS
- resource usage data. Condition: You must specify either this or
- an EnvironmentName, or both. If you do not specify either, AWS
- Elastic Beanstalk returns MissingRequiredParameter error.
+ resource usage data. Condition: You must specify either this or an
+ EnvironmentName, or both. If you do not specify either, AWS Elastic
+ Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to retrieve
- AWS resource usage data. Condition: You must specify either
- this or an EnvironmentId, or both. If you do not specify either,
- AWS Elastic Beanstalk returns MissingRequiredParameter error.
+ AWS resource usage data. Condition: You must specify either this
+ or an EnvironmentId, or both. If you do not specify either, AWS
+ Elastic Beanstalk returns MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
@@ -604,35 +597,35 @@ class Layer1(AWSQueryConnection):
"""Returns descriptions for existing environments.
:type application_name: string
- :param application_name: If specified, AWS Elastic Beanstalk
- restricts the returned descriptions to include only those that
- are associated with this application.
+ :param application_name: If specified, AWS Elastic Beanstalk restricts
+ the returned descriptions to include only those that are associated
+ with this application.
:type version_label: string
- :param version_label: If specified, AWS Elastic Beanstalk
- restricts the returned descriptions to include only those that
- are associated with this application version.
+ :param version_label: If specified, AWS Elastic Beanstalk restricts the
+ returned descriptions to include only those that are associated
+ with this application version.
:type environment_ids: list
- :param environment_ids: If specified, AWS Elastic Beanstalk
- restricts the returned descriptions to include only those that
- have the specified IDs.
+ :param environment_ids: If specified, AWS Elastic Beanstalk restricts
+ the returned descriptions to include only those that have the
+ specified IDs.
:type environment_names: list
- :param environment_names: If specified, AWS Elastic Beanstalk
- restricts the returned descriptions to include only those that
- have the specified names.
+ :param environment_names: If specified, AWS Elastic Beanstalk restricts
+ the returned descriptions to include only those that have the
+ specified names.
:type include_deleted: boolean
:param include_deleted: Indicates whether to include deleted
- environments: true: Environments that have been deleted after
- IncludedDeletedBackTo are displayed. false: Do not include
- deleted environments.
+ environments: true: Environments that have been deleted after
+ IncludedDeletedBackTo are displayed. false: Do not include deleted
+ environments.
:type included_deleted_back_to: timestamp
- :param included_deleted_back_to: If specified when
- IncludeDeleted is set to true, then environments deleted after
- this date are displayed.
+ :param included_deleted_back_to: If specified when IncludeDeleted is
+ set to true, then environments deleted after this date are
+ displayed.
"""
params = {}
if application_name:
@@ -659,57 +652,55 @@ class Layer1(AWSQueryConnection):
"""Returns event descriptions matching criteria up to the last 6 weeks.
:type application_name: string
- :param application_name: If specified, AWS Elastic Beanstalk
- restricts the returned descriptions to include only those
- associated with this application.
+ :param application_name: If specified, AWS Elastic Beanstalk restricts
+ the returned descriptions to include only those associated with
+ this application.
:type version_label: string
- :param version_label: If specified, AWS Elastic Beanstalk
- restricts the returned descriptions to those associated with
- this application version.
+ :param version_label: If specified, AWS Elastic Beanstalk restricts the
+ returned descriptions to those associated with this application
+ version.
:type template_name: string
- :param template_name: If specified, AWS Elastic Beanstalk
- restricts the returned descriptions to those that are associated
- with this environment configuration.
+ :param template_name: If specified, AWS Elastic Beanstalk restricts the
+ returned descriptions to those that are associated with this
+ environment configuration.
:type environment_id: string
- :param environment_id: If specified, AWS Elastic Beanstalk
- restricts the returned descriptions to those associated with
- this environment.
+ :param environment_id: If specified, AWS Elastic Beanstalk restricts
+ the returned descriptions to those associated with this
+ environment.
:type environment_name: string
- :param environment_name: If specified, AWS Elastic Beanstalk
- restricts the returned descriptions to those associated with
- this environment.
+ :param environment_name: If specified, AWS Elastic Beanstalk restricts
+ the returned descriptions to those associated with this
+ environment.
:type request_id: string
- :param request_id: If specified, AWS Elastic Beanstalk restricts
- the described events to include only those associated with this
- request ID.
+ :param request_id: If specified, AWS Elastic Beanstalk restricts the
+ described events to include only those associated with this request
+ ID.
:type severity: string
- :param severity: If specified, limits the events returned from
- this call to include only those with the specified severity or
- higher.
+ :param severity: If specified, limits the events returned from this
+ call to include only those with the specified severity or higher.
:type start_time: timestamp
- :param start_time: If specified, AWS Elastic Beanstalk restricts
- the returned descriptions to those that occur on or after this
- time.
+ :param start_time: If specified, AWS Elastic Beanstalk restricts the
+ returned descriptions to those that occur on or after this time.
:type end_time: timestamp
- :param end_time: If specified, AWS Elastic Beanstalk restricts
- the returned descriptions to those that occur up to, but not
- including, the EndTime.
+ :param end_time: If specified, AWS Elastic Beanstalk restricts the
+ returned descriptions to those that occur up to, but not including,
+ the EndTime.
:type max_records: integer
- :param max_records: Specifies the maximum number of events that
- can be returned, beginning with the most recent event.
+ :param max_records: Specifies the maximum number of events that can be
+ returned, beginning with the most recent event.
:type next_token: string
- :param next_token: Pagination token. If specified, the events
- return the next batch of results.
+ :param next_token: Pagination token. If specified, the events return
+ the next batch of results.
"""
params = {}
if application_name:
@@ -748,15 +739,15 @@ class Layer1(AWSQueryConnection):
:type environment_id: string
:param environment_id: The ID of the environment to rebuild.
- Condition: You must specify either this or an EnvironmentName,
- or both. If you do not specify either, AWS Elastic Beanstalk
- returns MissingRequiredParameter error.
+ Condition: You must specify either this or an EnvironmentName, or
+ both. If you do not specify either, AWS Elastic Beanstalk returns
+ MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to rebuild.
- Condition: You must specify either this or an EnvironmentId, or
- both. If you do not specify either, AWS Elastic Beanstalk
- returns MissingRequiredParameter error.
+ Condition: You must specify either this or an EnvironmentId, or
+ both. If you do not specify either, AWS Elastic Beanstalk returns
+ MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
@@ -781,19 +772,19 @@ class Layer1(AWSQueryConnection):
:type environment_id: string
:param environment_id: The ID of the environment of the
- requested data. If no such environment is found,
- RequestEnvironmentInfo returns an InvalidParameterValue error.
- Condition: You must specify either this or an EnvironmentName,
- or both. If you do not specify either, AWS Elastic Beanstalk
- returns MissingRequiredParameter error.
+ requested data. If no such environment is found,
+ RequestEnvironmentInfo returns an InvalidParameterValue error.
+ Condition: You must specify either this or an EnvironmentName, or
+ both. If you do not specify either, AWS Elastic Beanstalk returns
+ MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment of the
- requested data. If no such environment is found,
- RequestEnvironmentInfo returns an InvalidParameterValue error.
- Condition: You must specify either this or an EnvironmentId, or
- both. If you do not specify either, AWS Elastic Beanstalk
- returns MissingRequiredParameter error.
+ requested data. If no such environment is found,
+ RequestEnvironmentInfo returns an InvalidParameterValue error.
+ Condition: You must specify either this or an EnvironmentId, or
+ both. If you do not specify either, AWS Elastic Beanstalk returns
+ MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
@@ -808,16 +799,16 @@ class Layer1(AWSQueryConnection):
server running on each Amazon EC2 instance.
:type environment_id: string
- :param environment_id: The ID of the environment to restart the
- server for. Condition: You must specify either this or an
- EnvironmentName, or both. If you do not specify either, AWS
- Elastic Beanstalk returns MissingRequiredParameter error.
+ :param environment_id: The ID of the environment to restart the server
+ for. Condition: You must specify either this or an
+ EnvironmentName, or both. If you do not specify either, AWS Elastic
+ Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
- :param environment_name: The name of the environment to restart
- the server for. Condition: You must specify either this or an
- EnvironmentId, or both. If you do not specify either, AWS
- Elastic Beanstalk returns MissingRequiredParameter error.
+ :param environment_name: The name of the environment to restart the
+ server for. Condition: You must specify either this or an
+ EnvironmentId, or both. If you do not specify either, AWS Elastic
+ Beanstalk returns MissingRequiredParameter error.
"""
params = {}
if environment_id:
@@ -836,18 +827,18 @@ class Layer1(AWSQueryConnection):
:param info_type: The type of information to retrieve.
:type environment_id: string
- :param environment_id: The ID of the data's environment. If no
- such environment is found, returns an InvalidParameterValue
- error. Condition: You must specify either this or an
- EnvironmentName, or both. If you do not specify either, AWS
- Elastic Beanstalk returns MissingRequiredParameter error.
+ :param environment_id: The ID of the data's environment. If no such
+ environment is found, returns an InvalidParameterValue error.
+ Condition: You must specify either this or an EnvironmentName, or
+ both. If you do not specify either, AWS Elastic Beanstalk returns
+ MissingRequiredParameter error.
:type environment_name: string
- :param environment_name: The name of the data's environment. If
- no such environment is found, returns an InvalidParameterValue
- error. Condition: You must specify either this or an
- EnvironmentId, or both. If you do not specify either, AWS
- Elastic Beanstalk returns MissingRequiredParameter error.
+ :param environment_name: The name of the data's environment. If no such
+ environment is found, returns an InvalidParameterValue error.
+ Condition: You must specify either this or an EnvironmentId, or
+ both. If you do not specify either, AWS Elastic Beanstalk returns
+ MissingRequiredParameter error.
"""
params = {'InfoType': info_type}
if environment_id:
@@ -864,31 +855,31 @@ class Layer1(AWSQueryConnection):
:type source_environment_id: string
:param source_environment_id: The ID of the source environment.
- Condition: You must specify at least the SourceEnvironmentID or
- the SourceEnvironmentName. You may also specify both. If you
- specify the SourceEnvironmentId, you must specify the
- DestinationEnvironmentId.
+ Condition: You must specify at least the SourceEnvironmentID or the
+ SourceEnvironmentName. You may also specify both. If you specify
+ the SourceEnvironmentId, you must specify the
+ DestinationEnvironmentId.
:type source_environment_name: string
- :param source_environment_name: The name of the source
- environment. Condition: You must specify at least the
- SourceEnvironmentID or the SourceEnvironmentName. You may also
- specify both. If you specify the SourceEnvironmentName, you must
- specify the DestinationEnvironmentName.
+ :param source_environment_name: The name of the source environment.
+ Condition: You must specify at least the SourceEnvironmentID or the
+ SourceEnvironmentName. You may also specify both. If you specify
+ the SourceEnvironmentName, you must specify the
+ DestinationEnvironmentName.
:type destination_environment_id: string
:param destination_environment_id: The ID of the destination
- environment. Condition: You must specify at least the
- DestinationEnvironmentID or the DestinationEnvironmentName. You
- may also specify both. You must specify the SourceEnvironmentId
- with the DestinationEnvironmentId.
+ environment. Condition: You must specify at least the
+ DestinationEnvironmentID or the DestinationEnvironmentName. You may
+ also specify both. You must specify the SourceEnvironmentId with
+ the DestinationEnvironmentId.
:type destination_environment_name: string
:param destination_environment_name: The name of the destination
- environment. Condition: You must specify at least the
- DestinationEnvironmentID or the DestinationEnvironmentName. You
- may also specify both. You must specify the
- SourceEnvironmentName with the DestinationEnvironmentName.
+ environment. Condition: You must specify at least the
+ DestinationEnvironmentID or the DestinationEnvironmentName. You may
+ also specify both. You must specify the SourceEnvironmentName with
+ the DestinationEnvironmentName.
"""
params = {}
if source_environment_id:
@@ -907,25 +898,25 @@ class Layer1(AWSQueryConnection):
:type environment_id: string
:param environment_id: The ID of the environment to terminate.
- Condition: You must specify either this or an EnvironmentName,
- or both. If you do not specify either, AWS Elastic Beanstalk
- returns MissingRequiredParameter error.
+ Condition: You must specify either this or an EnvironmentName, or
+ both. If you do not specify either, AWS Elastic Beanstalk returns
+ MissingRequiredParameter error.
:type environment_name: string
- :param environment_name: The name of the environment to
- terminate. Condition: You must specify either this or an
- EnvironmentId, or both. If you do not specify either, AWS
- Elastic Beanstalk returns MissingRequiredParameter error.
+ :param environment_name: The name of the environment to terminate.
+ Condition: You must specify either this or an EnvironmentId, or
+ both. If you do not specify either, AWS Elastic Beanstalk returns
+ MissingRequiredParameter error.
:type terminate_resources: boolean
:param terminate_resources: Indicates whether the associated AWS
- resources should shut down when the environment is terminated:
- true: (default) The user AWS resources (for example, the Auto
- Scaling group, LoadBalancer, etc.) are terminated along with the
- environment. false: The environment is removed from the AWS
- Elastic Beanstalk but the AWS resources continue to operate.
- For more information, see the AWS Elastic Beanstalk User Guide.
- Default: true Valid Values: true | false
+ resources should shut down when the environment is terminated:
+ true: (default) The user AWS resources (for example, the Auto
+ Scaling group, LoadBalancer, etc.) are terminated along with the
+ environment. false: The environment is removed from the AWS
+ Elastic Beanstalk but the AWS resources continue to operate. For
+ more information, see the AWS Elastic Beanstalk User Guide.
+ Default: true Valid Values: true | false
:raises: InsufficientPrivilegesException
"""
@@ -946,13 +937,13 @@ class Layer1(AWSQueryConnection):
:type application_name: string
:param application_name: The name of the application to update.
- If no such application is found, UpdateApplication returns an
- InvalidParameterValue error.
+ If no such application is found, UpdateApplication returns an
+ InvalidParameterValue error.
:type description: string
- :param description: A new description for the application.
- Default: If not specified, AWS Elastic Beanstalk does not update
- the description.
+ :param description: A new description for the application. Default: If
+ not specified, AWS Elastic Beanstalk does not update the
+ description.
"""
params = {'ApplicationName': application_name}
if description:
@@ -964,14 +955,14 @@ class Layer1(AWSQueryConnection):
"""Updates the application version to have the properties.
:type application_name: string
- :param application_name: The name of the application associated
- with this version. If no application is found with this name,
- UpdateApplication returns an InvalidParameterValue error.
+ :param application_name: The name of the application associated with
+ this version. If no application is found with this name,
+ UpdateApplication returns an InvalidParameterValue error.
:type version_label: string
:param version_label: The name of the version to update. If no
- application version is found with this label, UpdateApplication
- returns an InvalidParameterValue error.
+ application version is found with this label, UpdateApplication
+ returns an InvalidParameterValue error.
:type description: string
:param description: A new description for this release.
@@ -990,28 +981,27 @@ class Layer1(AWSQueryConnection):
specified properties or configuration option values.
:type application_name: string
- :param application_name: The name of the application associated
- with the configuration template to update. If no application is
- found with this name, UpdateConfigurationTemplate returns an
- InvalidParameterValue error.
+ :param application_name: The name of the application associated with
+ the configuration template to update. If no application is found
+ with this name, UpdateConfigurationTemplate returns an
+ InvalidParameterValue error.
:type template_name: string
- :param template_name: The name of the configuration template to
- update. If no configuration template is found with this name,
- UpdateConfigurationTemplate returns an InvalidParameterValue
- error.
+ :param template_name: The name of the configuration template to update.
+ If no configuration template is found with this name,
+ UpdateConfigurationTemplate returns an InvalidParameterValue error.
:type description: string
:param description: A new description for the configuration.
:type option_settings: list
- :param option_settings: A list of configuration option settings
- to update with the new specified option value.
+ :param option_settings: A list of configuration option settings to
+ update with the new specified option value.
:type options_to_remove: list
- :param options_to_remove: A list of configuration options to
- remove from the configuration set. Constraint: You can remove
- only UserDefined configuration options.
+ :param options_to_remove: A list of configuration options to remove
+ from the configuration set. Constraint: You can remove only
+ UserDefined configuration options.
:raises: InsufficientPrivilegesException
"""
@@ -1045,47 +1035,43 @@ class Layer1(AWSQueryConnection):
setting descriptions with different DeploymentStatus values.
:type environment_id: string
- :param environment_id: The ID of the environment to update. If
- no environment with this ID exists, AWS Elastic Beanstalk
- returns an InvalidParameterValue error. Condition: You must
- specify either this or an EnvironmentName, or both. If you do
- not specify either, AWS Elastic Beanstalk returns
- MissingRequiredParameter error.
+ :param environment_id: The ID of the environment to update. If no
+ environment with this ID exists, AWS Elastic Beanstalk returns an
+ InvalidParameterValue error. Condition: You must specify either
+ this or an EnvironmentName, or both. If you do not specify either,
+ AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
- :param environment_name: The name of the environment to update.
- If no environment with this name exists, AWS Elastic Beanstalk
- returns an InvalidParameterValue error. Condition: You must
- specify either this or an EnvironmentId, or both. If you do not
- specify either, AWS Elastic Beanstalk returns
- MissingRequiredParameter error.
+ :param environment_name: The name of the environment to update. If no
+ environment with this name exists, AWS Elastic Beanstalk returns an
+ InvalidParameterValue error. Condition: You must specify either
+ this or an EnvironmentId, or both. If you do not specify either,
+ AWS Elastic Beanstalk returns MissingRequiredParameter error.
:type version_label: string
- :param version_label: If this parameter is specified, AWS
- Elastic Beanstalk deploys the named application version to the
- environment. If no such application version is found, returns an
- InvalidParameterValue error.
+ :param version_label: If this parameter is specified, AWS Elastic
+ Beanstalk deploys the named application version to the environment.
+ If no such application version is found, returns an
+ InvalidParameterValue error.
:type template_name: string
- :param template_name: If this parameter is specified, AWS
- Elastic Beanstalk deploys this configuration template to the
- environment. If no such configuration template is found, AWS
- Elastic Beanstalk returns an InvalidParameterValue error.
+ :param template_name: If this parameter is specified, AWS Elastic
+ Beanstalk deploys this configuration template to the environment.
+ If no such configuration template is found, AWS Elastic Beanstalk
+ returns an InvalidParameterValue error.
:type description: string
:param description: If this parameter is specified, AWS Elastic
- Beanstalk updates the description of this environment.
+ Beanstalk updates the description of this environment.
:type option_settings: list
- :param option_settings: If specified, AWS Elastic Beanstalk
- updates the configuration set associated with the running
- environment and sets the specified configuration options to the
- requested value.
+ :param option_settings: If specified, AWS Elastic Beanstalk updates the
+ configuration set associated with the running environment and sets
+ the specified configuration options to the requested value.
:type options_to_remove: list
- :param options_to_remove: A list of custom user-defined
- configuration options to remove from the configuration set for
- this environment.
+ :param options_to_remove: A list of custom user-defined configuration
+ options to remove from the configuration set for this environment.
:raises: InsufficientPrivilegesException
"""
@@ -1121,21 +1107,21 @@ class Layer1(AWSQueryConnection):
:type application_name: string
:param application_name: The name of the application that the
- configuration template or environment belongs to.
+ configuration template or environment belongs to.
:type template_name: string
:param template_name: The name of the configuration template to
- validate the settings against. Condition: You cannot specify
- both this and an environment name.
+ validate the settings against. Condition: You cannot specify both
+ this and an environment name.
:type environment_name: string
- :param environment_name: The name of the environment to validate
- the settings against. Condition: You cannot specify both this
- and a configuration template name.
+ :param environment_name: The name of the environment to validate the
+ settings against. Condition: You cannot specify both this and a
+ configuration template name.
:type option_settings: list
- :param option_settings: A list of the options and desired values
- to evaluate.
+ :param option_settings: A list of the options and desired values to
+ evaluate.
:raises: InsufficientPrivilegesException
"""
diff --git a/boto/beanstalk/response.py b/boto/beanstalk/response.py
index 22bc1028..2d071bc9 100644
--- a/boto/beanstalk/response.py
+++ b/boto/beanstalk/response.py
@@ -175,7 +175,7 @@ class EnvironmentDescription(BaseObject):
class EnvironmentInfoDescription(BaseObject):
def __init__(self, response):
- EnvironmentInfoDescription(Response, self).__init__()
+ super(EnvironmentInfoDescription, self).__init__()
self.ec2_instance_id = str(response['Ec2InstanceId'])
self.info_type = str(response['InfoType'])
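
The one-line fix above replaces a call that tried to construct a new EnvironmentInfoDescription instead of initialising the parent class, using the standard Python 2 super() delegation. A minimal standalone sketch of the corrected pattern, with BaseObject standing in for boto.beanstalk.response.BaseObject:

class BaseObject(object):
    def __init__(self):
        # parent initialisation that the subclass must not skip
        self.initialized = True

class EnvironmentInfoDescription(BaseObject):
    def __init__(self, response):
        # Delegate to the parent class rather than re-instantiating ourselves,
        # mirroring the corrected line in the patch above.
        super(EnvironmentInfoDescription, self).__init__()
        self.ec2_instance_id = str(response['Ec2InstanceId'])
        self.info_type = str(response['InfoType'])
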
diff --git a/boto/cacerts/cacerts.txt b/boto/cacerts/cacerts.txt
index e65f21dd..f6e0ee60 100644
--- a/boto/cacerts/cacerts.txt
+++ b/boto/cacerts/cacerts.txt
@@ -631,3 +631,1567 @@ hvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyNEwr75Ji174z4xRAN
95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9n9cd
2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI=
-----END CERTIFICATE-----
+
+GTE CyberTrust Global Root
+==========================
+
+-----BEGIN CERTIFICATE-----
+MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYD
+VQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv
+bHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv
+b3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEzMjM1OTAwWjB1MQswCQYDVQQGEwJV
+UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
+cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds
+b2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrH
+iM3dFw4usJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTS
+r41tiGeA5u2ylc9yMcqlHHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X4
+04Wqk2kmhXBIgD8SFcd5tB8FLztimQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAG3r
+GwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMWM4ETCJ57NE7fQMh017l9
+3PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OFNMQkpw0P
+lZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/
+-----END CERTIFICATE-----
+
+GlobalSign Root CA
+==================
+
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
+b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
+MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
+YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
+jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
+xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
+snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
+U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
+AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
+yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
+38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
+AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
+HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+
+GlobalSign Root CA - R2
+=======================
+
+-----BEGIN CERTIFICATE-----
+MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
+MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
+v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
+eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
+tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
+C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
+zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
+mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
+V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
+bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
+3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
+J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
+291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
+ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
+AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
+TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
+-----END CERTIFICATE-----
+
+ValiCert Class 1 VA
+===================
+
+-----BEGIN CERTIFICATE-----
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIyMjM0OFoXDTE5MDYy
+NTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9Y
+LqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIiGQj4/xEjm84H9b9pGib+
+TunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCmDuJWBQ8Y
+TfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0
+LBwGlN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLW
+I8sogTLDAHkY7FkXicnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPw
+nXS3qT6gpf+2SQMT2iLM7XGCK5nPOrf1LXLI
+-----END CERTIFICATE-----
+
+ValiCert Class 2 VA
+===================
+
+-----BEGIN CERTIFICATE-----
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMTk1NFoXDTE5MDYy
+NjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDOOnHK5avIWZJV16vY
+dA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVCCSRrCl6zfN1SLUzm1NZ9
+WlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7RfZHM047QS
+v4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9v
+UJSZSWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTu
+IYEZoDJJKPTEjlbVUjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwC
+W/POuZ6lcg5Ktz885hZo+L7tdEy8W9ViH0Pd
+-----END CERTIFICATE-----
+
+RSA Root Certificate 1
+======================
+
+-----BEGIN CERTIFICATE-----
+MIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0
+IFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz
+BgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y
+aXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG
+9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy
+NjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y
+azEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs
+YXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw
+Oi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl
+cnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD
+cnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs
+2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY
+JJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE
+Zwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ
+n0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A
+PhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu
+-----END CERTIFICATE-----
+
+Entrust.net Premium 2048 Secure Server CA
+=========================================
+
+-----BEGIN CERTIFICATE-----
+MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0xOTEy
+MjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo3QwcjARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGA
+vtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdERgL7YibkIozH5oSQJFrlwMB0G
+CSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEA
+WUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo
+oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQ
+h7A6tcOdBTcSo8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18
+f3v/rxzP5tsHrV7bhZ3QKw0z2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfN
+B/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjXOP/swNlQ8C5LWK5Gb9Auw2DaclVy
+vUxFnmG6v4SBkgPR0ml8xQ==
+-----END CERTIFICATE-----
+
+Baltimore CyberTrust Root
+=========================
+
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
+RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
+VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
+DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
+ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
+VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
+mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
+IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
+mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
+XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
+jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
+BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
+DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
+R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+
+AddTrust Low-Value Services Root
+================================
+
+-----BEGIN CERTIFICATE-----
+MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3
+b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMw
+MTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML
+QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYD
+VQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ul
+CDtbKRY654eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6n
+tGO0/7Gcrjyvd7ZWxbWroulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyl
+dI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1Zmne3yzxbrww2ywkEtvrNTVokMsAsJch
+PXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJuiGMx1I4S+6+JNM3GOGvDC
++Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8wHQYDVR0O
+BBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBl
+MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFk
+ZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENB
+IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxtZBsfzQ3duQH6lmM0MkhHma6X
+7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0PhiVYrqW9yTkkz
+43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY
+eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJl
+pz/+0WatC7xrmYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOA
+WiFeIc9TVPC6b4nbqKqVz4vjccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk=
+-----END CERTIFICATE-----
+
+AddTrust External Root
+======================
+
+-----BEGIN CERTIFICATE-----
+MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
+IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
+MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
+FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
+bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
+dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
+H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
+uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
+mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
+a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
+E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
+WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
+VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
+Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
+cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
+IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
+AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
+YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
+6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
+Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
+c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
+mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
+-----END CERTIFICATE-----
+
+AddTrust Public Services Root
+=============================
+
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3
+b3JrMSAwHgYDVQQDExdBZGRUcnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAx
+MDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtB
+ZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIDAeBgNV
+BAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV
+6tsfSlbunyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nX
+GCwwfQ56HmIexkvA/X1id9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnP
+dzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSGAa2Il+tmzV7R/9x98oTaunet3IAIx6eH
+1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAwHM+A+WD+eeSI8t0A65RF
+62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0GA1UdDgQW
+BBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUw
+AwEB/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDEL
+MAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRU
+cnVzdCBUVFAgTmV0d29yazEgMB4GA1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJv
+b3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4JNojVhaTdt02KLmuG7jD8WS6
+IBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL+YPoRNWyQSW/
+iHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao
+GEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh
+4SINhwBk/ox9Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQm
+XiLsks3/QppEIW1cxeMiHV9HEufOX1362KqxMy3ZdvJOOjMMK7MtkAY=
+-----END CERTIFICATE-----
+
+AddTrust Qualified Certificates Root
+====================================
+
+-----BEGIN CERTIFICATE-----
+MIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3
+b3JrMSMwIQYDVQQDExpBZGRUcnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1
+MzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcxCzAJBgNVBAYTAlNFMRQwEgYDVQQK
+EwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIzAh
+BgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwq
+xBb/4Oxx64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G
+87B4pfYOQnrjfxvM0PC3KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i
+2O+tCBGaKZnhqkRFmhJePp1tUvznoD1oL/BLcHwTOK28FSXx1s6rosAx1i+f4P8U
+WfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GRwVY18BTcZTYJbqukB8c1
+0cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HUMIHRMB0G
+A1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6Fr
+pGkwZzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQL
+ExRBZGRUcnVzdCBUVFAgTmV0d29yazEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlm
+aWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBABmrder4i2VhlRO6aQTv
+hsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxGGuoYQ992zPlm
+hpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X
+dgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3
+P6CxB9bpT9zeRXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9Y
+iQBCYz95OdBEsIJuQRno3eDBiFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5no
+xqE=
+-----END CERTIFICATE-----
+
+Entrust Root Certification Authority
+====================================
+
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+GeoTrust Global CA
+==================
+
+-----BEGIN CERTIFICATE-----
+MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i
+YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg
+R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9
+9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq
+fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv
+iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU
+1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+
+bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW
+MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA
+ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l
+uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn
+Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS
+tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF
+PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un
+hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV
+5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==
+-----END CERTIFICATE-----
+
+GeoTrust Global CA 2
+====================
+
+-----BEGIN CERTIFICATE-----
+MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFs
+IENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQG
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3Qg
+R2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDvPE1A
+PRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/NTL8
+Y2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hL
+TytCOb1kLUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL
+5mkWRxHCJ1kDs6ZgwiFAVvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7
+S4wMcoKK+xfNAGw6EzywhIdLFnopsk/bHdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe
+2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE
+FHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNHK266ZUap
+EBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6td
+EPx7srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv
+/NgdRN3ggX+d6YvhZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywN
+A0ZF66D0f0hExghAzN4bcLUprbqLOzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0
+abby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkCx1YAzUm5s2x7UwQa4qjJqhIF
+I8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqFH4z1Ir+rzoPz
+4iIprn2DQKi6bA==
+-----END CERTIFICATE-----
+
+GeoTrust Universal CA
+=====================
+
+-----BEGIN CERTIFICATE-----
+MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0
+IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV
+VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8
+cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT
+QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh
+F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v
+c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w
+mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd
+VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX
+teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ
+f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe
+Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+
+nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB
+/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY
+MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG
+9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc
+aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX
+IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn
+ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z
+uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN
+Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja
+QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW
+koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9
+ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt
+DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm
+bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw=
+-----END CERTIFICATE-----
+
+GeoTrust Universal CA 2
+=======================
+
+-----BEGIN CERTIFICATE-----
+MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD
+VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1
+c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81
+WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG
+FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq
+XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL
+se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb
+KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd
+IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73
+y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt
+hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc
+QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4
+Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV
+HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ
+KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z
+dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ
+L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr
+Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo
+ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY
+T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz
+GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m
+1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV
+OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH
+6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX
+QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
+-----END CERTIFICATE-----
+
+America Online Root Certification Authority 1
+=============================================
+
+-----BEGIN CERTIFICATE-----
+MIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc
+MBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP
+bmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2
+MDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft
+ZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg
+Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lk
+hsmj76CGv2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym
+1BW32J/X3HGrfpq/m44zDyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsW
+OqMFf6Dch9Wc/HKpoH145LcxVR5lu9RhsCFg7RAycsWSJR74kEoYeEfffjA3PlAb
+2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP8c9GsEsPPt2IYriMqQko
+O3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAU
+AK3Zo/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB
+BQUAA4IBAQB8itEfGDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkF
+Zu90821fnZmv9ov761KyBZiibyrFVL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAb
+LjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft3OJvx8Fi8eNy1gTIdGcL+oir
+oQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43gKd8hdIaC2y+C
+MMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds
+sPmuujz9dLQR6FgNgLzTqIA6me11zEZ7
+-----END CERTIFICATE-----
+
+America Online Root Certification Authority 2
+=============================================
+
+-----BEGIN CERTIFICATE-----
+MIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc
+MBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP
+bmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2
+MDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft
+ZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg
+Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC
+206B89enfHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFci
+KtZHgVdEglZTvYYUAQv8f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2
+JxhP7JsowtS013wMPgwr38oE18aO6lhOqKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9
+BoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JNRvCAOVIyD+OEsnpD8l7e
+Xz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0gBe4lL8B
+PeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67
+Xnfn6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEq
+Z8A9W6Wa6897GqidFEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZ
+o2C7HK2JNDJiuEMhBnIMoVxtRsX6Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3
++L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnjB453cMor9H124HhnAgMBAAGj
+YzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3OpaaEg5+31IqEj
+FNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE
+AwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmn
+xPBUlgtk87FYT15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2
+LHo1YGwRgJfMqZJS5ivmae2p+DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzccc
+obGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXgJXUjhx5c3LqdsKyzadsXg8n33gy8
+CNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//ZoyzH1kUQ7rVyZ2OuMe
+IjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgOZtMA
+DjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2F
+AjgQ5ANh1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUX
+Om/9riW99XJZZLF0KjhfGEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPb
+AZO1XB4Y3WRayhgoPmMEEf0cjQAPuDffZ4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQl
+Zvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuPcX/9XhmgD0uRuMRUvAaw
+RY8mkaKO/qk=
+-----END CERTIFICATE-----
+
+Comodo AAA Services root
+========================
+
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
+YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
+GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
+BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
+3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
+rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
+ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
+oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
+QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
+b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
+AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
+G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
+l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
+smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+
+Comodo Secure Services root
+===========================
+
+-----BEGIN CERTIFICATE-----
+MIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRp
+ZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVow
+fjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAiBgNV
+BAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPM
+cm3ye5drswfxdySRXyWP9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3S
+HpR7LZQdqnXXs5jLrLxkU0C8j6ysNstcrbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996
+CF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rCoznl2yY4rYsK7hljxxwk
+3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3Vp6ea5EQz
+6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNV
+HQ4EFgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
+EwEB/wQFMAMBAf8wgYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2Rv
+Y2EuY29tL1NlY3VyZUNlcnRpZmljYXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRw
+Oi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmww
+DQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm4J4oqF7Tt/Q0
+5qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj
+Z55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtI
+gKvcnDe4IRRLDXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJ
+aD61JlfutuC23bkpgHl9j6PwpCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDl
+izeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1HRR3B7Hzs/Sk=
+-----END CERTIFICATE-----
+
+Comodo Trusted Services root
+============================
+
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0
+aWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEwMDAwMDBaFw0yODEyMzEyMzU5NTla
+MH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO
+BgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUwIwYD
+VQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWW
+fnJSoBVC21ndZHoa0Lh73TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMt
+TGo87IvDktJTdyR0nAducPy9C1t2ul/y/9c3S0pgePfw+spwtOpZqqPOSC+pw7IL
+fhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6juljatEPmsbS9Is6FARW
+1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsSivnkBbA7
+kUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0G
+A1UdDgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21v
+ZG9jYS5jb20vVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRo
+dHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMu
+Y3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8NtwuleGFTQQuS9/
+HrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32
+pSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxIS
+jBc/lDb+XbDABHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+
+xqFx7D+gIIxmOom0jtTYsU0lR+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/Atyjcn
+dBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O9y5Xt5hwXsjEeLBi
+-----END CERTIFICATE-----
+
+UTN DATACorp SGC Root CA
+========================
+
+-----BEGIN CERTIFICATE-----
+MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB
+kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw
+IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD
+VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu
+dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6
+E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ
+D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK
+4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq
+lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW
+bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB
+o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT
+MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js
+LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr
+BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB
+AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft
+Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj
+j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH
+KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv
+2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3
+mfnGV/TJVTl4uix5yaaIK/QI
+-----END CERTIFICATE-----
+
+UTN USERFirst Hardware Root CA
+==============================
+
+-----BEGIN CERTIFICATE-----
+MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB
+lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt
+SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe
+MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v
+d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh
+cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn
+0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ
+M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a
+MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd
+oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI
+DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy
+oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD
+VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0
+dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy
+bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF
+BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM
+//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli
+CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE
+CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t
+3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS
+KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA==
+-----END CERTIFICATE-----
+
+XRamp Global CA Root
+====================
+
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
+gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
+UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
+NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
+dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
+dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
+KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
+DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
+qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
+JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
+BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
+eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
+ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
+vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
+IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
+O+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+
+Go Daddy Class 2 CA
+===================
+
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
+MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
+YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
+MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
+ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
+MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
+ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
+PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
+wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
+EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
+avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
+sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
+/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
+IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
+OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
+TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
+ReYNnyicsbkqWletNw+vHX/bvZ8=
+-----END CERTIFICATE-----
+
+Starfield Class 2 CA
+====================
+
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
+MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
+U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
+NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
+ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
+ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
+DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
+X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
+1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
+A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
+zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
+YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
+bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
+L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
+WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+
+StartCom Certification Authority
+================================
+
+-----BEGIN CERTIFICATE-----
+MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
+MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
+cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
+pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
+OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
+Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
+Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
+HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
+Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
+Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
+AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
+FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
+ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
+LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
+BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
+Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
+dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
+cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
+YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
+dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
+bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
+YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
+TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
+9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
+jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
+FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
+ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
+ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
+EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
+L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
+yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
+O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
+um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
+NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
+-----END CERTIFICATE-----
+
+DigiCert Assured ID Root CA
+===========================
+
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
+JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
+mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
+VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
+AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
+dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
+fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
+-----END CERTIFICATE-----
+
+DigiCert Global Root CA
+=======================
+
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
+nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
+43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
+T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
+gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
+DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
+06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
+YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
+-----END CERTIFICATE-----
+
+DigiCert High Assurance EV Root CA
+==================================
+
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
+ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
+LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
+RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
+PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
+xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
+hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
+EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
+nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
+hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
++OkuE6N36B9K
+-----END CERTIFICATE-----
+
+GeoTrust Primary Certification Authority
+========================================
+
+-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY
+MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo
+R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx
+MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
+Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9
+AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA
+ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0
+7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W
+kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI
+mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ
+KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1
+6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl
+4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K
+oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj
+UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU
+AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
+-----END CERTIFICATE-----
+
+COMODO Certification Authority
+==============================
+
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+
+Network Solutions Certificate Authority
+=======================================
+
+-----BEGIN CERTIFICATE-----
+MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi
+MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
+MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp
+dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV
+UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO
+ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz
+c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP
+OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl
+mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF
+BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4
+qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw
+gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB
+BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu
+bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp
+dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8
+6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/
+h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH
+/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
+wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN
+pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
+-----END CERTIFICATE-----
+
+COMODO ECC Certification Authority
+==================================
+
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+TC TrustCenter Class 2 CA II
+============================
+
+-----BEGIN CERTIFICATE-----
+MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjEL
+MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV
+BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0
+Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYwMTEyMTQzODQzWhcNMjUxMjMxMjI1
+OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i
+SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UEAxMc
+VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jf
+tMjWQ+nEdVl//OEd+DFwIxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKg
+uNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2J
+XjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQXa7pIXSSTYtZgo+U4+lK
+8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7uSNQZu+99
+5OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3
+kUrL84J6E1wIqzCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy
+dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6
+Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz
+JTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290
+Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u
+TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iS
+GNn3Bzn1LL4GdXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprt
+ZjluS5TmVfwLG4t3wVMTZonZKNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8
+au0WOB9/WIFaGusyiC2y8zl3gK9etmF1KdsjTYjKUCjLhdLTEKJZbtOTVAB6okaV
+hgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kPJOzHdiEoZa5X6AeI
+dUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfkvQ==
+-----END CERTIFICATE-----
+
+TC TrustCenter Class 3 CA II
+============================
+
+-----BEGIN CERTIFICATE-----
+MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjEL
+MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV
+BAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0
+Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYwMTEyMTQ0MTU3WhcNMjUxMjMxMjI1
+OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i
+SDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UEAxMc
+VEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJW
+Ht4bNwcwIi9v8Qbxq63WyKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+Q
+Vl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo6SI7dYnWRBpl8huXJh0obazovVkdKyT2
+1oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZuV3bOx4a+9P/FRQI2Alq
+ukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk2ZyqBwi1
+Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NX
+XAek0CSnwPIA1DCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy
+dXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6
+Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz
+JTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290
+Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u
+TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlN
+irTzwppVMXzEO2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8
+TtXqluJucsG7Kv5sbviRmEb8yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6
+g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9IJqDnxrcOfHFcqMRA/07QlIp2+gB
+95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal092Y+tTmBvTwtiBj
+S+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc5A==
+-----END CERTIFICATE-----
+
+TC TrustCenter Universal CA I
+=============================
+
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTEL
+MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV
+BAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1
+c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcNMDYwMzIyMTU1NDI4WhcNMjUxMjMx
+MjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIg
+R21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYwJAYD
+VQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSR
+JJZ4Hgmgm5qVSkr1YnwCqMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3T
+fCZdzHd55yx4Oagmcw6iXSVphU9VDprvxrlE4Vc93x9UIuVvZaozhDrzznq+VZeu
+jRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtwag+1m7Z3W0hZneTvWq3z
+wZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9OgdwZu5GQ
+fezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYD
+VR0jBBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0G
+CSqGSIb3DQEBBQUAA4IBAQAo0uCG1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X1
+7caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/CyvwbZ71q+s2IhtNerNXxTPqYn
+8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3ghUJGooWMNjs
+ydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT
+ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/
+2TYcuiUaUj0a7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY
+-----END CERTIFICATE-----
+
+Cybertrust Global Root
+======================
+
+-----BEGIN CERTIFICATE-----
+MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG
+A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh
+bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS
+b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5
+7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS
+J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y
+HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP
+t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz
+FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY
+XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/
+MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw
+hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js
+MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA
+A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj
+Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx
+XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o
+omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc
+A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW
+WL1WMRJOEcgh4LMRkWXbtKaIOM5V
+-----END CERTIFICATE-----
+
+GeoTrust Primary Certification Authority - G3
+=============================================
+
+-----BEGIN CERTIFICATE-----
+MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
+mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
+MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
+eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
+cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
+BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
+BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
+hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
+5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
+JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
+DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
+huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
+HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
+AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
+zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
+kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
+AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
+SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
+spki4cErx5z481+oghLrGREt
+-----END CERTIFICATE-----
+
+thawte Primary Root CA - G2
+===========================
+
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
+IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
+MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
+d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
+YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
+dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
+BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
+papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
+DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
+KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
+XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
+-----END CERTIFICATE-----
+
+thawte Primary Root CA - G3
+===========================
+
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
+rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
+BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
+Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
+LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
+MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
+ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
+gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
+YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
+b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
+9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
+zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
+OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
+2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
+oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
+t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
+KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
+m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
+MdRAGmI0Nj81Aa6sY6A=
+-----END CERTIFICATE-----
+
+GeoTrust Primary Certification Authority - G2
+=============================================
+
+-----BEGIN CERTIFICATE-----
+MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
+MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
+KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
+MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
+BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
+NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
+BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
+MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
+So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
+tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
+CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
+qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
+rD6ogRLQy7rQkgu2npaqBA+K
+-----END CERTIFICATE-----
+
+VeriSign Universal Root Certification Authority
+===============================================
+
+-----BEGIN CERTIFICATE-----
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
+vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
+ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
+MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
+IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
+IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
+bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
+9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
+H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
+/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
+rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
+WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
+exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
+sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
+seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
+4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
+lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
+7M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+VeriSign Class 3 Public Primary Certification Authority - G4
+============================================================
+
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
+U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
+SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
+biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
+GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
+fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
+aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
+aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
+kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
+4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
+FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+GlobalSign Root CA - R3
+=======================
+
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
+RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
+gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
+KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
+QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
+LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
+RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
+6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
+mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+WD9f
+-----END CERTIFICATE-----
+
+TC TrustCenter Universal CA III
+===============================
+
+-----BEGIN CERTIFICATE-----
+MIID4TCCAsmgAwIBAgIOYyUAAQACFI0zFQLkbPQwDQYJKoZIhvcNAQEFBQAwezEL
+MAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV
+BAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEoMCYGA1UEAxMfVEMgVHJ1
+c3RDZW50ZXIgVW5pdmVyc2FsIENBIElJSTAeFw0wOTA5MDkwODE1MjdaFw0yOTEy
+MzEyMzU5NTlaMHsxCzAJBgNVBAYTAkRFMRwwGgYDVQQKExNUQyBUcnVzdENlbnRl
+ciBHbWJIMSQwIgYDVQQLExtUQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0ExKDAm
+BgNVBAMTH1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQSBJSUkwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDC2pxisLlxErALyBpXsq6DFJmzNEubkKLF
+5+cvAqBNLaT6hdqbJYUtQCggbergvbFIgyIpRJ9Og+41URNzdNW88jBmlFPAQDYv
+DIRlzg9uwliT6CwLOunBjvvya8o84pxOjuT5fdMnnxvVZ3iHLX8LR7PH6MlIfK8v
+zArZQe+f/prhsq75U7Xl6UafYOPfjdN/+5Z+s7Vy+EutCHnNaYlAJ/Uqwa1D7KRT
+yGG299J5KmcYdkhtWyUB0SbFt1dpIxVbYYqt8Bst2a9c8SaQaanVDED1M4BDj5yj
+dipFtK+/fz6HP3bFzSreIMUWWMv5G/UPyw0RUmS40nZid4PxWJ//AgMBAAGjYzBh
+MB8GA1UdIwQYMBaAFFbn4VslQ4Dg9ozhcbyO5YAvxEjiMA8GA1UdEwEB/wQFMAMB
+Af8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRW5+FbJUOA4PaM4XG8juWAL8RI
+4jANBgkqhkiG9w0BAQUFAAOCAQEAg8ev6n9NCjw5sWi+e22JLumzCecYV42Fmhfz
+dkJQEw/HkG8zrcVJYCtsSVgZ1OK+t7+rSbyUyKu+KGwWaODIl0YgoGhnYIg5IFHY
+aAERzqf2EQf27OysGh+yZm5WZ2B6dF7AbZc2rrUNXWZzwCUyRdhKBgePxLcHsU0G
+DeGl6/R1yrqc0L2z0zIkTO5+4nYES0lT2PLpVDP85XEfPRRclkvxOvIAu2y0+pZV
+CIgJwcyRGSmwIC3/yzikQOEXvnlhgP8HA4ZMTnsGnxGGjYnuJ8Tb4rwZjgvDwxPH
+LQNjO9Po5KIqwoIIlBZU8O8fJ5AluA0OKBtHd0e9HKgl8ZS0Zg==
+-----END CERTIFICATE-----
+
+Go Daddy Root Certificate Authority - G2
+========================================
+
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
+EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
+NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
+EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
+AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
+E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
+/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
+DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
+GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
+tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
+AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
+WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
+9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
+gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
+2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
+4uJEvlz36hz1
+-----END CERTIFICATE-----
+
+Starfield Root Certificate Authority - G2
+=========================================
+
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
+ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
+MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
+aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
+Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
+nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
+Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
+dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
+HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
+CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
+sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
+4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
+8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
+mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+
+Starfield Services Root Certificate Authority - G2
+==================================================
+
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+
+AffirmTrust Commercial
+======================
+
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+AffirmTrust Networking
+======================
+
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+AffirmTrust Premium
+===================
+
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+AffirmTrust Premium ECC
+=======================
+
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+StartCom Certification Authority
+================================
+
+-----BEGIN CERTIFICATE-----
+MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
+Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9
+MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
+U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
+cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
+pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
+OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
+Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
+Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
+HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
+Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
+Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
+26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
+AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul
+F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC
+ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w
+ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk
+aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0
+YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg
+c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93
+d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG
+CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1
+dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF
+wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS
+Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst
+0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc
+pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl
+CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF
+P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK
+1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm
+KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE
+JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ
+8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm
+fyWl8kgAwKQB2j8=
+-----END CERTIFICATE-----
+
+StartCom Certification Authority G2
+===================================
+
+-----BEGIN CERTIFICATE-----
+MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1
+OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG
+A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ
+JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD
+vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo
+D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/
+Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW
+RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK
+HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN
+nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM
+0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i
+UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9
+Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg
+TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE
+AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL
+BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K
+2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX
+UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl
+6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK
+9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ
+HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI
+wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY
+XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l
+IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo
+hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr
+so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI
+-----END CERTIFICATE-----
diff --git a/boto/cloudsearch/__init__.py b/boto/cloudsearch/__init__.py
index 9c8157a1..5ba1060e 100644
--- a/boto/cloudsearch/__init__.py
+++ b/boto/cloudsearch/__init__.py
@@ -35,6 +35,9 @@ def regions():
return [RegionInfo(name='us-east-1',
endpoint='cloudsearch.us-east-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='eu-west-1',
+ endpoint='cloudsearch.eu-west-1.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
]
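As a rough sketch of picking up the new eu-west-1 entry, the RegionInfo returned by regions() can be connected directly; the key values below are placeholders and can instead come from the environment or the boto config file:

    import boto.cloudsearch

    # Locate the newly added eu-west-1 RegionInfo and open a Layer1 connection
    # against its endpoint.
    region = next(r for r in boto.cloudsearch.regions() if r.name == 'eu-west-1')
    conn = region.connect(aws_access_key_id='AKIA...',          # placeholder
                          aws_secret_access_key='...')          # placeholder
    print(conn.host)  # cloudsearch.eu-west-1.amazonaws.com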
diff --git a/boto/cloudsearch/document.py b/boto/cloudsearch/document.py
index 7807c08f..c799d707 100644
--- a/boto/cloudsearch/document.py
+++ b/boto/cloudsearch/document.py
@@ -117,7 +117,7 @@ class DocumentServiceConnection(object):
"""
Schedule a document to be removed from the CloudSearch service
- The document will not actually be scheduled for removal until .commit() is called
+ The document will not actually be scheduled for removal until :func:`commit` is called
:type _id: string
:param _id: The unique ID of this document.
@@ -145,7 +145,7 @@ class DocumentServiceConnection(object):
"""
Clear the working documents from this DocumentServiceConnection
- This should be used after a commit() if the connection will be reused
+ This should be used after :func:`commit` if the connection will be reused
for another set of documents.
"""
@@ -187,15 +187,19 @@ class DocumentServiceConnection(object):
url = "http://%s/2011-02-01/documents/batch" % (self.endpoint)
- request_config = {
- 'pool_connections': 20,
- 'keep_alive': True,
- 'max_retries': 5,
- 'pool_maxsize': 50
- }
-
- r = requests.post(url, data=sdf, config=request_config,
- headers={'Content-Type': 'application/json'})
+ # Keep-alive is automatic in a post-1.0 requests world.
+ session = requests.Session()
+ adapter = requests.adapters.HTTPAdapter(
+ pool_connections=20,
+ pool_maxsize=50
+ )
+ # Now kludge in the right number of retries.
+ # Once we're requiring ``requests>=1.2.1``, this can become an
+ # initialization parameter above.
+ adapter.max_retries = 5
+ session.mount('http://', adapter)
+ session.mount('https://', adapter)
+ r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
return CommitResponse(r, self, sdf)
@@ -206,12 +210,14 @@ class CommitResponse(object):
:type response: :class:`requests.models.Response`
:param response: Response from Cloudsearch /documents/batch API
- :type doc_service: :class:`exfm.cloudsearch.DocumentServiceConnection`
+ :type doc_service: :class:`boto.cloudsearch.document.DocumentServiceConnection`
:param doc_service: Object containing the documents posted and methods to
retry
:raises: :class:`boto.exception.BotoServerError`
- :raises: :class:`exfm.cloudsearch.SearchServiceException`
+ :raises: :class:`boto.cloudsearch.document.SearchServiceException`
+ :raises: :class:`boto.cloudsearch.document.EncodingError`
+ :raises: :class:`boto.cloudsearch.document.ContentTooLongError`
"""
def __init__(self, response, doc_service, sdf):
self.response = response
@@ -252,7 +258,7 @@ class CommitResponse(object):
:type response_num: int
:param response_num: Number of adds or deletes in the response.
- :raises: :class:`exfm.cloudsearch.SearchServiceException`
+ :raises: :class:`boto.cloudsearch.document.CommitMismatchError`
"""
commit_num = len([d for d in self.doc_service.documents_batch
if d['type'] == type_])
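The commit() rewrite above swaps the pre-1.0 requests config dict for a Session with an explicitly mounted HTTPAdapter. Written standalone, and assuming requests >= 1.2.1 (where max_retries is a constructor argument, as the comment anticipates), the same setup looks roughly like this; the endpoint and SDF payload are placeholders:

    import requests
    from requests.adapters import HTTPAdapter

    # Connection pooling and retries configured up front rather than patched on.
    session = requests.Session()
    adapter = HTTPAdapter(pool_connections=20, pool_maxsize=50, max_retries=5)
    session.mount('http://', adapter)
    session.mount('https://', adapter)

    # POST an SDF batch to a (placeholder) CloudSearch document endpoint.
    sdf = '[{"type": "add", "id": "doc1", "version": 1, "fields": {"title": "x"}}]'
    url = ('http://doc-example.us-east-1.cloudsearch.amazonaws.com'
           '/2011-02-01/documents/batch')
    r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})
    print(r.status_code)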
diff --git a/boto/cloudsearch/layer1.py b/boto/cloudsearch/layer1.py
index 054fc323..ff712932 100644
--- a/boto/cloudsearch/layer1.py
+++ b/boto/cloudsearch/layer1.py
@@ -82,7 +82,7 @@ class Layer1(AWSQueryConnection):
if not inner:
return None if list_marker == None else []
if isinstance(inner, list):
- return [dict(**i) for i in inner]
+ return inner
else:
return dict(**inner)
else:
diff --git a/boto/cloudsearch/search.py b/boto/cloudsearch/search.py
index e77fe1c3..813f4a40 100644
--- a/boto/cloudsearch/search.py
+++ b/boto/cloudsearch/search.py
@@ -23,8 +23,8 @@
#
from math import ceil
import time
-import json
import boto
+from boto.compat import json
import requests
diff --git a/boto/connection.py b/boto/connection.py
index 15ee4614..4d214c0f 100644
--- a/boto/connection.py
+++ b/boto/connection.py
@@ -523,9 +523,11 @@ class AWSAuthConnection(object):
# timeouts will only be applied if Python is 2.6 or greater.
self.http_connection_kwargs = {}
if (sys.version_info[0], sys.version_info[1]) >= (2, 6):
- if config.has_option('Boto', 'http_socket_timeout'):
- timeout = config.getint('Boto', 'http_socket_timeout')
- self.http_connection_kwargs['timeout'] = timeout
+ # If timeout isn't defined in boto config file, use 70 second
+ # default as recommended by
+ # http://docs.aws.amazon.com/amazonswf/latest/apireference/API_PollForActivityTask.html
+ self.http_connection_kwargs['timeout'] = config.getint(
+ 'Boto', 'http_socket_timeout', 70)
if isinstance(provider, Provider):
# Allow overriding Provider
@@ -730,6 +732,8 @@ class AWSAuthConnection(object):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.proxy, int(self.proxy_port)))
+ if "timeout" in self.http_connection_kwargs:
+ sock.settimeout(self.http_connection_kwargs["timeout"])
except:
raise
boto.log.debug("Proxy connection: CONNECT %s HTTP/1.0\r\n", host)
@@ -811,6 +815,7 @@ class AWSAuthConnection(object):
boto.log.debug('Data: %s' % request.body)
boto.log.debug('Headers: %s' % request.headers)
boto.log.debug('Host: %s' % request.host)
+ boto.log.debug('Params: %s' % request.params)
response = None
body = None
e = None
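With this change a connection gets a 70 second socket timeout whenever http_socket_timeout is not set in the boto config. A minimal sketch of overriding that default programmatically (equivalent to an http_socket_timeout entry under [Boto] in ~/.boto); it has to run before the first connection object is constructed, because the value is read at connection-creation time:

    import boto

    # Same effect as putting this in ~/.boto:
    #   [Boto]
    #   http_socket_timeout = 30
    if not boto.config.has_section('Boto'):
        boto.config.add_section('Boto')
    boto.config.set('Boto', 'http_socket_timeout', '30')

    # getint falls back to the new 70 second default when the option is absent.
    print(boto.config.getint('Boto', 'http_socket_timeout', 70))  # -> 30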
diff --git a/boto/core/credentials.py b/boto/core/credentials.py
index 1f315a32..b4b35b53 100644
--- a/boto/core/credentials.py
+++ b/boto/core/credentials.py
@@ -23,8 +23,8 @@
#
import os
from six.moves import configparser
+from boto.compat import json
import requests
-import json
class Credentials(object):
diff --git a/boto/datapipeline/layer1.py b/boto/datapipeline/layer1.py
index 9dc87ecb..1c9a789c 100644
--- a/boto/datapipeline/layer1.py
+++ b/boto/datapipeline/layer1.py
@@ -20,8 +20,8 @@
# IN THE SOFTWARE.
#
-import json
import boto
+from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
diff --git a/boto/dynamodb/__init__.py b/boto/dynamodb/__init__.py
index 4290e7f0..12204361 100644
--- a/boto/dynamodb/__init__.py
+++ b/boto/dynamodb/__init__.py
@@ -53,6 +53,9 @@ def regions():
RegionInfo(name='eu-west-1',
endpoint='dynamodb.eu-west-1.amazonaws.com',
connection_cls=boto.dynamodb.layer2.Layer2),
+ RegionInfo(name='sa-east-1',
+ endpoint='dynamodb.sa-east-1.amazonaws.com',
+ connection_cls=boto.dynamodb.layer2.Layer2),
]
diff --git a/boto/dynamodb/item.py b/boto/dynamodb/item.py
index 4d4abda3..b2b444d7 100644
--- a/boto/dynamodb/item.py
+++ b/boto/dynamodb/item.py
@@ -194,3 +194,9 @@ class Item(dict):
if self._updates is not None:
self.delete_attribute(key)
dict.__delitem__(self, key)
+
+ # Allow this item to still be pickled
+ def __getstate__(self):
+ return self.__dict__
+ def __setstate__(self, d):
+ self.__dict__.update(d)
diff --git a/boto/dynamodb/layer2.py b/boto/dynamodb/layer2.py
index 26e51ede..ec8cc51d 100644
--- a/boto/dynamodb/layer2.py
+++ b/boto/dynamodb/layer2.py
@@ -29,7 +29,7 @@ from boto.dynamodb.types import get_dynamodb_type, Dynamizer, \
LossyFloatDynamizer
-class TableGenerator:
+class TableGenerator(object):
"""
This is an object that wraps up the table_generator function.
The only real reason to have this is that we want to be able
@@ -99,7 +99,7 @@ class TableGenerator:
"""
# preserve any existing limit in case the user alters self.remaining
limit = self.kwargs.get('limit')
- if self.remaining > 0 and limit is None or limit > self.remaining:
+ if (self.remaining > 0 and (limit is None or limit > self.remaining)):
self.kwargs['limit'] = self.remaining
self._response = self.callable(**self.kwargs)
self.kwargs['limit'] = limit
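The added parentheses matter because and binds tighter than or: the old test grouped as (self.remaining > 0 and limit is None) or (limit > self.remaining), which could fire even with nothing remaining and clobber the caller's limit. A quick demonstration with stand-in integers:

    # Stand-ins: nothing left to fetch, caller asked for a limit of 5.
    remaining, limit = 0, 5

    old = remaining > 0 and limit is None or limit > remaining
    new = remaining > 0 and (limit is None or limit > remaining)

    print(old)  # True  -- old grouping would overwrite the limit with 0
    print(new)  # False -- corrected check leaves the caller's limit alone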
diff --git a/boto/dynamodb/table.py b/boto/dynamodb/table.py
index 0e11f8d5..b10ce04f 100644
--- a/boto/dynamodb/table.py
+++ b/boto/dynamodb/table.py
@@ -335,7 +335,7 @@ class Table(object):
the hash_key and range_key values of the item. You can use
these explicit parameters when calling the method, such as::
- >>> my_item = my_table.new_item(hash_key='a', range_key=1,
+ >>> my_item = my_table.new_item(hash_key='a', range_key=1,
attrs={'key1': 'val1', 'key2': 'val2'})
>>> my_item
{u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
diff --git a/boto/dynamodb2/__init__.py b/boto/dynamodb2/__init__.py
new file mode 100644
index 00000000..8cdfcace
--- /dev/null
+++ b/boto/dynamodb2/__init__.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.regioninfo import RegionInfo
+
+
+def regions():
+ """
+ Get all available regions for the Amazon DynamoDB service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.dynamodb2.layer1 import DynamoDBConnection
+ return [RegionInfo(name='us-east-1',
+ endpoint='dynamodb.us-east-1.amazonaws.com',
+ connection_cls=DynamoDBConnection),
+ RegionInfo(name='us-west-1',
+ endpoint='dynamodb.us-west-1.amazonaws.com',
+ connection_cls=DynamoDBConnection),
+ RegionInfo(name='us-west-2',
+ endpoint='dynamodb.us-west-2.amazonaws.com',
+ connection_cls=DynamoDBConnection),
+ RegionInfo(name='eu-west-1',
+ endpoint='dynamodb.eu-west-1.amazonaws.com',
+ connection_cls=DynamoDBConnection),
+ RegionInfo(name='ap-northeast-1',
+ endpoint='dynamodb.ap-northeast-1.amazonaws.com',
+ connection_cls=DynamoDBConnection),
+ RegionInfo(name='ap-southeast-1',
+ endpoint='dynamodb.ap-southeast-1.amazonaws.com',
+ connection_cls=DynamoDBConnection),
+ RegionInfo(name='sa-east-1',
+ endpoint='dynamodb.sa-east-1.amazonaws.com',
+ connection_cls=DynamoDBConnection),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
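connect_to_region() just scans regions() for a matching name and returns a connected DynamoDBConnection, or None for an unknown name. A minimal sketch; the key values are placeholders and may also come from the environment or the boto config file:

    import boto.dynamodb2

    conn = boto.dynamodb2.connect_to_region(
        'us-west-2',
        aws_access_key_id='AKIA...',       # placeholder
        aws_secret_access_key='...',       # placeholder
    )
    if conn is None:
        raise ValueError('unknown region name')
    print(conn.host)  # dynamodb.us-west-2.amazonaws.com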
diff --git a/boto/dynamodb2/exceptions.py b/boto/dynamodb2/exceptions.py
new file mode 100644
index 00000000..9821e451
--- /dev/null
+++ b/boto/dynamodb2/exceptions.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.exception import JSONResponseError
+
+
+class ProvisionedThroughputExceededException(JSONResponseError):
+ pass
+
+
+class LimitExceededException(JSONResponseError):
+ pass
+
+
+class ConditionalCheckFailedException(JSONResponseError):
+ pass
+
+
+class ResourceInUseException(JSONResponseError):
+ pass
+
+
+class ResourceNotFoundException(JSONResponseError):
+ pass
+
+
+class InternalServerError(JSONResponseError):
+ pass
+
+
+class ItemCollectionSizeLimitExceededException(JSONResponseError):
+ pass
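These classes are raised by DynamoDBConnection when the service reports the corresponding fault (see the _faults mapping in layer1.py below), so callers can catch specific DynamoDB errors. A sketch, assuming working credentials; the table name is a placeholder and, depending on how the fault name is matched against the mapping, the generic JSONResponseError base class may be raised instead:

    from boto.dynamodb2.layer1 import DynamoDBConnection
    from boto.dynamodb2 import exceptions
    from boto.exception import JSONResponseError

    conn = DynamoDBConnection()  # credentials from the environment/boto config
    try:
        conn.describe_table('no_such_table')   # placeholder table name
    except exceptions.ResourceNotFoundException as e:
        print(e.error_code)    # ResourceNotFoundException
    except JSONResponseError as e:
        print(e.error_code)    # fallback if the specific fault is not mapped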
diff --git a/boto/dynamodb2/layer1.py b/boto/dynamodb2/layer1.py
new file mode 100644
index 00000000..01f58045
--- /dev/null
+++ b/boto/dynamodb2/layer1.py
@@ -0,0 +1,1407 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import json
+import boto
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.dynamodb2 import exceptions
+
+
+class DynamoDBConnection(AWSQueryConnection):
+ """
+ Amazon DynamoDB **Overview**
+ This is the Amazon DynamoDB API Reference. This guide provides
+ descriptions and samples of the Amazon DynamoDB API.
+ """
+ APIVersion = "2012-08-10"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "dynamodb.us-east-1.amazonaws.com"
+ ServiceName = "DynamoDB"
+ TargetPrefix = "DynamoDB_20120810"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
+ "LimitExceededException": exceptions.LimitExceededException,
+ "ConditionalCheckFailedException": exceptions.ConditionalCheckFailedException,
+ "ResourceInUseException": exceptions.ResourceInUseException,
+ "ResourceNotFoundException": exceptions.ResourceNotFoundException,
+ "InternalServerError": exceptions.InternalServerError,
+ "ItemCollectionSizeLimitExceededException": exceptions.ItemCollectionSizeLimitExceededException,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+ kwargs['host'] = region.endpoint
+ AWSQueryConnection.__init__(self, **kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def batch_get_item(self, request_items, return_consumed_capacity=None):
+ """
+ The BatchGetItem operation returns the attributes for multiple
+ items from multiple tables using their primary keys. The
+ maximum number of items that can be retrieved for a single
+ operation is 100. Also, the number of items retrieved is
+ constrained by a 1 MB size limit. If the response size limit
+ is exceeded or a partial result is returned because the
+        table's provisioned throughput is exceeded, or because of an
+ internal processing failure, Amazon DynamoDB returns an
+ UnprocessedKeys value so you can retry the operation starting
+ with the next item to get. Amazon DynamoDB automatically
+ adjusts the number of items returned per page to enforce this
+        limit. For example, if you ask to retrieve 100 items, but
+ each individual item is 50 KB in size, the system returns 20
+ items and an appropriate UnprocessedKeys value so you can get
+ the next page of results. If desired, your application can
+ include its own logic to assemble the pages of results into
+ one set.
+
+ If no items could be processed because of insufficient
+ provisioned throughput on each of the tables involved in the
+ request, Amazon DynamoDB returns a
+ ProvisionedThroughputExceededException .
+
+ By default, BatchGetItem performs eventually consistent reads
+ on every table in the request. You can set ConsistentRead to
+ `True`, on a per-table basis, if you want consistent reads
+ instead.
+
+ BatchGetItem fetches items in parallel to minimize response
+ latencies.
+
+ When designing your application, keep in mind that Amazon
+ DynamoDB does not guarantee how attributes are ordered in the
+ returned response. Include the primary key values in the
+ AttributesToGet for the items in your request to help parse
+ the response by item.
+
+ If the requested items do not exist, nothing is returned in
+ the response for those items. Requests for non-existent items
+ consume the minimum read capacity units according to the type
+ of read. For more information, see `Capacity Units
+ Calculations`_ of the Amazon DynamoDB Developer Guide .
+
+ :type request_items: map
+ :param request_items:
+ A map of one or more table names and, for each table, the corresponding
+ primary keys for the items to retrieve. While requesting items,
+ each table name can be invoked only once per operation.
+
+ Each KeysAndAttributes element consists of:
+
+
+ + Keys -An array of primary key attribute values that define the items
+ and the attributes associated with the items.
+ + AttributesToGet -One or more attributes to retrieve from the table or
+ index. If AttributesToGet is not specified, then all attributes
+ will be returned. If any of the specified attributes are not found,
+ they will not appear in the result.
+ + ConsistentRead -The consistency of a read operation. If set to
+ `True`, then a strongly consistent read is used; otherwise, an
+ eventually consistent read is used.
+
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity:
+
+ """
+ params = {'RequestItems': request_items, }
+ if return_consumed_capacity is not None:
+ params['ReturnConsumedCapacity'] = return_consumed_capacity
+ return self.make_request(action='BatchGetItem',
+ body=json.dumps(params))
+
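The request_items argument mirrors the wire format described above: a map from table name to a KeysAndAttributes structure. A hedged sketch of one call; 'users', 'username', 'last_login' and the 'S' (string) type code are placeholders for whatever the real table defines, and credentials are assumed to be configured:

    from boto.dynamodb2.layer1 import DynamoDBConnection

    conn = DynamoDBConnection()

    # Two items requested by hash key from one table, read consistently.
    request_items = {
        'users': {
            'Keys': [
                {'username': {'S': 'alice'}},
                {'username': {'S': 'bob'}},
            ],
            'AttributesToGet': ['username', 'last_login'],
            'ConsistentRead': True,
        },
    }
    result = conn.batch_get_item(request_items,
                                 return_consumed_capacity='TOTAL')
    for item in result.get('Responses', {}).get('users', []):
        print(item)   # e.g. {u'username': {u'S': u'alice'}, ...}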
+ def batch_write_item(self, request_items, return_consumed_capacity=None,
+ return_item_collection_metrics=None):
+ """
+ This operation enables you to put or delete several items
+ across multiple tables in a single API call.
+
+ To upload one item, you can use the PutItem API, and to delete
+ one item, you can use the DeleteItem API. However, when you
+ want to upload or delete large amounts of data, such as
+ uploading data from Amazon Elastic MapReduce (EMR) or
+ migrating data from another database into Amazon DynamoDB,
+ this API offers an efficient alternative.
+
+ If you use a programming language that supports concurrency,
+ such as Java, you can use threads to upload items in parallel.
+ This adds complexity in your application to handle the
+ threads. With other languages that don't support threading,
+ such as PHP, you must upload or delete items one at a time. In
+ both situations, the BatchWriteItem API provides an
+ alternative where the API performs the specified put and
+ delete operations in parallel, giving you the power of the
+ thread pool approach without having to introduce complexity in
+ your application.
+
+ Note that each individual put and delete specified in a
+ BatchWriteItem operation costs the same in terms of consumed
+ capacity units; however, the API performs the specified
+ operations in parallel, giving you lower latency. Delete
+ operations on non-existent items consume 1 write capacity
+ unit.
+
+ When using this API, note the following limitations:
+
+
+ + Maximum operations in a single request- You can specify a
+ total of up to 25 put or delete operations; however, the total
+ request size cannot exceed 1 MB (the HTTP payload).
+ + You can use the BatchWriteItem operation only to put and
+ delete items. You cannot use it to update existing items.
+ + Not an atomic operation- The individual PutItem and
+ DeleteItem operations specified in BatchWriteItem are atomic;
+ however, BatchWriteItem as a whole is a "best-effort" operation
+ and not an atomic operation. That is, in a BatchWriteItem
+ request, some operations might succeed and others might fail.
+ The failed operations are returned in UnprocessedItems in the
+ response. Some of these failures might be because you exceeded
+ the provisioned throughput configured for the table or a
+ transient failure such as a network error. You can investigate
+ and optionally resend the requests. Typically, you call
+ BatchWriteItem in a loop and in each iteration check for
+ unprocessed items, and submit a new BatchWriteItem request
+ with those unprocessed items.
+ + Does not return any items- The BatchWriteItem is designed
+ for uploading large amounts of data efficiently. It does not
+ provide some of the sophistication offered by APIs such as
+ PutItem and DeleteItem . For example, the DeleteItem API
+ supports ReturnValues in the request body to request the
+ deleted item in the response. The BatchWriteItem operation
+ does not return any items in the response.
+ + Unlike the PutItem and DeleteItem APIs, BatchWriteItem does
+ not allow you to specify conditions on individual write
+ requests in the operation.
+ + Attribute values must not be null; string and binary type
+ attributes must have lengths greater than zero; and set type
+ attributes must not be empty. Requests that have empty values
+ will be rejected with a ValidationException .
+
+
+ Amazon DynamoDB rejects the entire batch write operation if
+ any one of the following is true:
+
+ + If one or more tables specified in the BatchWriteItem
+ request do not exist.
+ + If primary key attributes specified on an item in the
+ request do not match the corresponding table's primary key
+ schema.
+ + If you try to perform multiple operations on the same item
+ in the same BatchWriteItem request. For example, you cannot
+ put and delete the same item in the same BatchWriteItem
+ request.
+ + If the total request size exceeds the 1 MB request size (the
+ HTTP payload) limit.
+ + If any individual item in a batch exceeds the 64 KB item
+ size limit.
+
+ :type request_items: map
+ :param request_items:
+ A map of one or more table names and, for each table, a list of
+ operations to perform ( DeleteRequest or PutRequest ).
+
+
+ + DeleteRequest -Perform a DeleteItem operation on the specified item.
+ The item to be deleted is identified by:
+
+ + Key -A map of primary key attribute values that uniquely identify the
+ item. Each entry in this map consists of an attribute name and an
+ attribute value.
+
+ + PutRequest -Perform a PutItem operation on the specified item. The
+ item to be put is identified by:
+
+ + Item -A map of attributes and their values. Each entry in this map
+ consists of an attribute name and an attribute value. If you
+ specify any attributes that are part of an index key, then the data
+ types for those attributes must match those of the schema in the
+ table's attribute definition.
+
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity:
+
+ :type return_item_collection_metrics: string
+ :param return_item_collection_metrics: Indicates whether to return
+ statistics about item collections, if any, that were modified
+ during the operation. The default for ReturnItemCollectionMetrics
+ is `NONE`, meaning that no statistics will be returned. To obtain
+ the statistics, set ReturnItemCollectionMetrics to `SIZE`.
+
+ """
+ params = {'RequestItems': request_items, }
+ if return_consumed_capacity is not None:
+ params['ReturnConsumedCapacity'] = return_consumed_capacity
+ if return_item_collection_metrics is not None:
+ params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
+ return self.make_request(action='BatchWriteItem',
+ body=json.dumps(params))
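+
+ # Illustrative usage sketch (hypothetical table and attribute names;
+ # assumes `conn` is an instance of this module's connection class):
+ # one put and one delete submitted in a single batch.
+ #
+ #     response = conn.batch_write_item({
+ #         'Forum': [
+ #             {'PutRequest': {'Item': {'Name': {'S': 'Amazon EMR'},
+ #                                      'Threads': {'N': '0'}}}},
+ #             {'DeleteRequest': {'Key': {'Name': {'S': 'Amazon S3'}}}},
+ #         ]
+ #     })
+ #     # Resubmit anything reported back as unprocessed.
+ #     unprocessed = response.get('UnprocessedItems', {})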
+
+ def create_table(self, attribute_definitions, table_name, key_schema,
+ provisioned_throughput, local_secondary_indexes=None):
+ """
+ The CreateTable operation adds a new table to your account. In
+ an AWS account, table names must be unique within each region.
+ That is, you can have two tables with the same name if you create
+ the tables in different regions.
+
+ CreateTable is an asynchronous operation. Upon receiving a
+ CreateTable request, Amazon DynamoDB immediately returns a
+ response with a TableStatus of `CREATING`. After the table is
+ created, Amazon DynamoDB sets the TableStatus to `ACTIVE`. You
+ can perform read and write operations only on an `ACTIVE`
+ table.
+
+ You can use the DescribeTable API to check the table status.
+
+ :type attribute_definitions: list
+ :param attribute_definitions: An array of attributes that describe the
+ key schema for the table and indexes.
+
+ :type table_name: string
+ :param table_name: The name of the table to create.
+
+ :type key_schema: list
+ :param key_schema: Specifies the attributes that make up the primary
+ key for the table. The attributes in KeySchema must also be defined
+ in the AttributeDefinitions array. For more information, see `Data
+ Model`_ of the Amazon DynamoDB Developer Guide .
+ Each KeySchemaElement in the array is composed of:
+
+
+ + AttributeName -The name of this key attribute.
+ + KeyType -Determines whether the key attribute is `HASH` or `RANGE`.
+
+
+ For more information, see `Specifying the Primary Key`_ of the Amazon
+ DynamoDB Developer Guide .
+
+ :type local_secondary_indexes: list
+ :param local_secondary_indexes:
+ One or more secondary indexes to be created on the table. Each index is
+ scoped to a given hash key value. There is a 10 gigabyte size limit
+ per hash key; otherwise, the size of a local secondary index is
+ unconstrained.
+
+ Each secondary index in the array includes the following:
+
+
+ + IndexName -The name of the secondary index. Must be unique only for
+ this table.
+ + KeySchema -Specifies the key schema for the index. The key schema
+ must begin with the same hash key attribute as the table.
+ + Projection -Specifies attributes that are copied (projected) from the
+ table into the index. These are in addition to the primary key
+ attributes and index key attributes, which are automatically
+ projected. Each attribute specification is composed of:
+
+ + ProjectionType -One of the following:
+
+ + `ALL`-All of the table attributes are projected into the index.
+ + `KEYS_ONLY`-Only the index and primary keys are projected into the
+ index.
+ + `INCLUDE`-Only the specified table attributes are projected into the
+ index. The list of projected attributes are in NonKeyAttributes .
+
+ + NonKeyAttributes -A list of one or more non-key attribute names that
+ are projected into the index.
+
+ :type provisioned_throughput: dict
+ :param provisioned_throughput:
+
+ """
+ params = {
+ 'AttributeDefinitions': attribute_definitions,
+ 'TableName': table_name,
+ 'KeySchema': key_schema,
+ 'ProvisionedThroughput': provisioned_throughput,
+ }
+ if local_secondary_indexes is not None:
+ params['LocalSecondaryIndexes'] = local_secondary_indexes
+ return self.make_request(action='CreateTable',
+ body=json.dumps(params))
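+
+ # Illustrative usage sketch (hypothetical table and attribute names;
+ # assumes `conn` is an instance of this module's connection class):
+ # creating a table with a hash/range primary key.
+ #
+ #     conn.create_table(
+ #         attribute_definitions=[
+ #             {'AttributeName': 'ForumName', 'AttributeType': 'S'},
+ #             {'AttributeName': 'Subject', 'AttributeType': 'S'},
+ #         ],
+ #         table_name='Thread',
+ #         key_schema=[
+ #             {'AttributeName': 'ForumName', 'KeyType': 'HASH'},
+ #             {'AttributeName': 'Subject', 'KeyType': 'RANGE'},
+ #         ],
+ #         provisioned_throughput={'ReadCapacityUnits': 5,
+ #                                 'WriteCapacityUnits': 5})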
+
+ def delete_item(self, table_name, key, expected=None, return_values=None,
+ return_consumed_capacity=None,
+ return_item_collection_metrics=None):
+ """
+ Deletes a single item in a table by primary key. You can
+ perform a conditional delete operation that deletes the item
+ if it exists, or if it has an expected attribute value.
+
+ In addition to deleting an item, you can also return the
+ item's attribute values in the same operation, using the
+ ReturnValues parameter.
+
+ Unless you specify conditions, the DeleteItem is an idempotent
+ operation; running it multiple times on the same item or
+ attribute does not result in an error response.
+
+ Conditional deletes are useful for only deleting items if
+ specific conditions are met. If those conditions are met,
+ Amazon DynamoDB performs the delete. Otherwise, the item is
+ not deleted.
+
+ :type table_name: string
+ :param table_name: The name of the table from which to delete the item.
+
+ :type key: map
+ :param key: A map of attribute names to AttributeValue objects,
+ representing the primary key of the item to delete.
+
+ :type expected: map
+ :param expected: A map of attribute/condition pairs. This is the
+ conditional block for the DeleteItem operation. All the conditions
+ must be met for the operation to succeed.
+ Expected allows you to provide an attribute name, and whether or not
+ Amazon DynamoDB should check to see if the attribute value already
+ exists; or if the attribute value exists and has a particular value
+ before changing it.
+
+ Each item in Expected represents an attribute name for Amazon DynamoDB
+ to check, along with the following:
+
+
+ + Value -the attribute value for Amazon DynamoDB to check.
+ + Exists -causes Amazon DynamoDB to evaluate the value before
+ attempting a conditional operation:
+
+ + If Exists is `True`, Amazon DynamoDB will check to see if that
+ attribute value already exists in the table. If it is found, then
+ the operation succeeds. If it is not found, the operation fails
+ with a ConditionalCheckFailedException .
+ + If Exists is `False`, Amazon DynamoDB assumes that the attribute
+ value does not exist in the table. If in fact the value does not
+ exist, then the assumption is valid and the operation succeeds. If
+ the value is found, despite the assumption that it does not exist,
+ the operation fails with a ConditionalCheckFailedException .
+ The default setting for Exists is `True`. If you supply a Value all by
+ itself, Amazon DynamoDB assumes the attribute exists: You don't
+ have to set Exists to `True`, because it is implied. Amazon
+ DynamoDB returns a ValidationException if:
+
+ + Exists is `True` but there is no Value to check. (You expect a value
+ to exist, but don't specify what that value is.)
+ + Exists is `False` but you also specify a Value . (You cannot expect
+ an attribute to have a value, while also expecting it not to
+ exist.)
+
+
+
+ If you specify more than one condition for Exists , then all of the
+ conditions must evaluate to true. (In other words, the conditions
+ are ANDed together.) Otherwise, the conditional operation will
+ fail.
+
+ :type return_values: string
+ :param return_values:
+ Use ReturnValues if you want to get the item attributes as they
+ appeared before they were deleted. For DeleteItem , the valid
+ values are:
+
+
+ + `NONE`-(default) If ReturnValues is not specified, or if its value is
+ `NONE`, then nothing is returned.
+ + `ALL_OLD`-The content of the old item is returned.
+
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity:
+
+ :type return_item_collection_metrics: string
+ :param return_item_collection_metrics: Indicates whether to return
+ statistics about item collections, if any, that were modified
+ during the operation. The default for ReturnItemCollectionMetrics
+ is `NONE`, meaning that no statistics will be returned. To obtain
+ the statistics, set ReturnItemCollectionMetrics to `SIZE`.
+
+ """
+ params = {'TableName': table_name, 'Key': key, }
+ if expected is not None:
+ params['Expected'] = expected
+ if return_values is not None:
+ params['ReturnValues'] = return_values
+ if return_consumed_capacity is not None:
+ params['ReturnConsumedCapacity'] = return_consumed_capacity
+ if return_item_collection_metrics is not None:
+ params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
+ return self.make_request(action='DeleteItem',
+ body=json.dumps(params))
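+
+ # Illustrative usage sketch (hypothetical table and attribute names;
+ # assumes `conn` is an instance of this module's connection class):
+ # a conditional delete that succeeds only if 'Replies' does not exist,
+ # returning the deleted item's attributes.
+ #
+ #     conn.delete_item(
+ #         table_name='Thread',
+ #         key={'ForumName': {'S': 'Amazon DynamoDB'},
+ #              'Subject': {'S': 'How do I delete an item?'}},
+ #         expected={'Replies': {'Exists': False}},
+ #         return_values='ALL_OLD')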
+
+ def delete_table(self, table_name):
+ """
+ The DeleteTable operation deletes a table and all of its
+ items. After a DeleteTable request, the specified table is in
+ the `DELETING` state until Amazon DynamoDB completes the
+ deletion. If the table is in the `ACTIVE` state, you can
+ delete it. If a table is in `CREATING` or `UPDATING` states,
+ then Amazon DynamoDB returns a ResourceInUseException . If the
+ specified table does not exist, Amazon DynamoDB returns a
+ ResourceNotFoundException . If the table is already in the
+ `DELETING` state, no error is returned.
+
+ Amazon DynamoDB might continue to accept data read and write
+ operations, such as GetItem and PutItem , on a table in the
+ `DELETING` state until the table deletion is complete.
+
+ Tables are unique among those associated with the AWS Account
+ issuing the request, and the AWS region that receives the
+ request (such as dynamodb.us-east-1.amazonaws.com). Each
+ Amazon DynamoDB endpoint is entirely independent. For example,
+ if you have two tables called "MyTable," one in dynamodb.us-
+ east-1.amazonaws.com and one in dynamodb.us-
+ west-1.amazonaws.com, they are completely independent and do
+ not share any data; deleting one does not delete the other.
+
+ Use the DescribeTable API to check the status of the table.
+
+ :type table_name: string
+ :param table_name: The name of the table to delete.
+
+ """
+ params = {'TableName': table_name, }
+ return self.make_request(action='DeleteTable',
+ body=json.dumps(params))
+
+ def describe_table(self, table_name):
+ """
+ Returns information about the table, including the current
+ status of the table, when it was created, the primary key
+ schema, and any indexes on the table.
+
+ :type table_name: string
+ :param table_name: The name of the table to describe.
+
+ """
+ params = {'TableName': table_name, }
+ return self.make_request(action='DescribeTable',
+ body=json.dumps(params))
+
+ def get_item(self, table_name, key, attributes_to_get=None,
+ consistent_read=None, return_consumed_capacity=None):
+ """
+ The GetItem operation returns a set of attributes for the item
+ with the given primary key. If there is no matching item,
+ GetItem does not return any data.
+
+ GetItem provides an eventually consistent read by default. If
+ your application requires a strongly consistent read, set
+ ConsistentRead to `True`. Although a strongly consistent read
+ might take more time than an eventually consistent read, it
+ always returns the last updated value.
+
+ :type table_name: string
+ :param table_name: The name of the table containing the requested item.
+
+ :type key: map
+ :param key: A map of attribute names to AttributeValue objects,
+ representing the primary key of the item to retrieve.
+
+ :type attributes_to_get: list
+ :param attributes_to_get: The names of one or more attributes to
+ retrieve. If no attribute names are specified, then all attributes
+ will be returned. If any of the requested attributes are not found,
+ they will not appear in the result.
+
+ :type consistent_read: boolean
+ :param consistent_read: If set to `True`, then the operation uses
+ strongly consistent reads; otherwise, eventually consistent reads
+ are used.
+
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity:
+
+ """
+ params = {'TableName': table_name, 'Key': key, }
+ if attributes_to_get is not None:
+ params['AttributesToGet'] = attributes_to_get
+ if consistent_read is not None:
+ params['ConsistentRead'] = consistent_read
+ if return_consumed_capacity is not None:
+ params['ReturnConsumedCapacity'] = return_consumed_capacity
+ return self.make_request(action='GetItem',
+ body=json.dumps(params))
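+
+ # Illustrative usage sketch (hypothetical table and attribute names;
+ # assumes `conn` is an instance of this module's connection class):
+ # a strongly consistent read of a single item.
+ #
+ #     response = conn.get_item(
+ #         table_name='Thread',
+ #         key={'ForumName': {'S': 'Amazon DynamoDB'},
+ #              'Subject': {'S': 'How do I update an item?'}},
+ #         attributes_to_get=['LastPostedBy', 'Replies'],
+ #         consistent_read=True)
+ #     item = response.get('Item', {})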
+
+ def list_tables(self, exclusive_start_table_name=None, limit=None):
+ """
+ Returns an array of all the tables associated with the current
+ account and endpoint.
+
+ Each Amazon DynamoDB endpoint is entirely independent. For
+ example, if you have two tables called "MyTable," one in
+ dynamodb.us-east-1.amazonaws.com and one in dynamodb.us-
+ west-1.amazonaws.com , they are completely independent and do
+ not share any data. The ListTables operation returns all of
+ the table names associated with the account making the
+ request, for the endpoint that receives the request.
+
+ :type exclusive_start_table_name: string
+ :param exclusive_start_table_name: The name of the table that starts
+ the list. If you already ran a ListTables operation and received a
+ LastEvaluatedTableName value in the response, use that value here
+ to continue the list.
+
+ :type limit: integer
+ :param limit: A maximum number of table names to return.
+
+ """
+ params = {}
+ if exclusive_start_table_name is not None:
+ params['ExclusiveStartTableName'] = exclusive_start_table_name
+ if limit is not None:
+ params['Limit'] = limit
+ return self.make_request(action='ListTables',
+ body=json.dumps(params))
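+
+ # Illustrative usage sketch (assumes `conn` is an instance of this
+ # module's connection class): paging through all table names ten at a
+ # time by following LastEvaluatedTableName.
+ #
+ #     start = None
+ #     while True:
+ #         response = conn.list_tables(exclusive_start_table_name=start,
+ #                                     limit=10)
+ #         for name in response.get('TableNames', []):
+ #             print name
+ #         start = response.get('LastEvaluatedTableName')
+ #         if start is None:
+ #             break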
+
+ def put_item(self, table_name, item, expected=None, return_values=None,
+ return_consumed_capacity=None,
+ return_item_collection_metrics=None):
+ """
+ Creates a new item, or replaces an old item with a new item.
+ If an item already exists in the specified table with the same
+ primary key, the new item completely replaces the existing
+ item. You can perform a conditional put (insert a new item if
+ one with the specified primary key doesn't exist), or replace
+ an existing item if it has certain attribute values.
+
+ In addition to putting an item, you can also return the item's
+ attribute values in the same operation, using the ReturnValues
+ parameter.
+
+ When you add an item, the primary key attribute(s) are the
+ only required attributes. Attribute values cannot be null;
+ string and binary type attributes must have lengths greater
+ than zero; and set type attributes cannot be empty. Requests
+ with empty values will be rejected with a ValidationException
+ .
+
+ You can request that PutItem return either a copy of the old
+ item (before the update) or a copy of the new item (after the
+ update). For more information, see the ReturnValues
+ description.
+
+ To prevent a new item from replacing an existing item, use a
+ conditional put operation with Exists set to `False` for the
+ primary key attribute, or attributes.
+
+ For more information about using this API, see `Working with
+ Items`_ of the Amazon DynamoDB Developer Guide .
+
+ :type table_name: string
+ :param table_name: The name of the table to contain the item.
+
+ :type item: map
+ :param item: A map of attribute name/value pairs, one for each
+ attribute. Only the primary key attributes are required; you can
+ optionally provide other attribute name-value pairs for the item.
+ If you specify any attributes that are part of an index key, then the
+ data types for those attributes must match those of the schema in
+ the table's attribute definition.
+
+ For more information about primary keys, see `Primary Key`_ of the
+ Amazon DynamoDB Developer Guide .
+
+ Each element in the Item map is an AttributeValue object.
+
+ :type expected: map
+ :param expected: A map of attribute/condition pairs. This is the
+ conditional block for the PutItem operation. All the conditions
+ must be met for the operation to succeed.
+ Expected allows you to provide an attribute name, and whether or not
+ Amazon DynamoDB should check to see if the attribute value already
+ exists; or if the attribute value exists and has a particular value
+ before changing it.
+
+ Each item in Expected represents an attribute name for Amazon DynamoDB
+ to check, along with the following:
+
+
+ + Value -the attribute value for Amazon DynamoDB to check.
+ + Exists -causes Amazon DynamoDB to evaluate the value before
+ attempting a conditional operation:
+
+ + If Exists is `True`, Amazon DynamoDB will check to see if that
+ attribute value already exists in the table. If it is found, then
+ the operation succeeds. If it is not found, the operation fails
+ with a ConditionalCheckFailedException .
+ + If Exists is `False`, Amazon DynamoDB assumes that the attribute
+ value does not exist in the table. If in fact the value does not
+ exist, then the assumption is valid and the operation succeeds. If
+ the value is found, despite the assumption that it does not exist,
+ the operation fails with a ConditionalCheckFailedException .
+ The default setting for Exists is `True`. If you supply a Value all by
+ itself, Amazon DynamoDB assumes the attribute exists: You don't
+ have to set Exists to `True`, because it is implied. Amazon
+ DynamoDB returns a ValidationException if:
+
+ + Exists is `True` but there is no Value to check. (You expect a value
+ to exist, but don't specify what that value is.)
+ + Exists is `False` but you also specify a Value . (You cannot expect
+ an attribute to have a value, while also expecting it not to
+ exist.)
+
+
+
+ If you specify more than one condition for Exists , then all of the
+ conditions must evaluate to true. (In other words, the conditions
+ are ANDed together.) Otherwise, the conditional operation will
+ fail.
+
+ :type return_values: string
+ :param return_values:
+ Use ReturnValues if you want to get the item attributes as they
+ appeared before they were updated with the PutItem request. For
+ PutItem , the valid values are:
+
+
+ + `NONE`-(default) If ReturnValues is not specified, or if its value is
+ `NONE`, then nothing is returned.
+ + `ALL_OLD`-If PutItem overwrote an attribute name-value pair, then the
+ content of the old item is returned.
+
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity:
+
+ :type return_item_collection_metrics: string
+ :param return_item_collection_metrics: Indicates whether to return
+ statistics about item collections, if any, that were modified
+ during the operation. The default for ReturnItemCollectionMetrics
+ is `NONE`, meaning that no statistics will be returned. To obtain
+ the statistics, set ReturnItemCollectionMetrics to `SIZE`.
+
+ """
+ params = {'TableName': table_name, 'Item': item, }
+ if expected is not None:
+ params['Expected'] = expected
+ if return_values is not None:
+ params['ReturnValues'] = return_values
+ if return_consumed_capacity is not None:
+ params['ReturnConsumedCapacity'] = return_consumed_capacity
+ if return_item_collection_metrics is not None:
+ params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
+ return self.make_request(action='PutItem',
+ body=json.dumps(params))
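+
+ # Illustrative usage sketch (hypothetical table and attribute names;
+ # assumes `conn` is an instance of this module's connection class):
+ # a conditional put that fails if an item with this key already exists.
+ #
+ #     conn.put_item(
+ #         table_name='Thread',
+ #         item={'ForumName': {'S': 'Amazon DynamoDB'},
+ #               'Subject': {'S': 'New thread'},
+ #               'Replies': {'N': '0'}},
+ #         expected={'Subject': {'Exists': False}})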
+
+ def query(self, table_name, index_name=None, select=None,
+ attributes_to_get=None, limit=None, consistent_read=None,
+ key_conditions=None, scan_index_forward=None,
+ exclusive_start_key=None, return_consumed_capacity=None):
+ """
+ A Query operation directly accesses items from a table using
+ the table primary key, or from an index using the index key.
+ You must provide a specific hash key value. You can narrow the
+ scope of the query by using comparison operators on the range
+ key value, or on the index key. You can use the
+ ScanIndexForward parameter to get results in forward or
+ reverse order, by range key or by index key.
+
+ Queries that do not return results consume the minimum read
+ capacity units according to the type of read.
+
+ If the total number of items meeting the query criteria
+ exceeds the result set size limit of 1 MB, the query stops and
+ results are returned to the user with a LastEvaluatedKey to
+ continue the query in a subsequent operation. Unlike a Scan
+ operation, a Query operation never returns an empty result set
+ and a LastEvaluatedKey . The LastEvaluatedKey is only provided
+ if the results exceed 1 MB, or if you have used Limit .
+
+ To request a strongly consistent result, set ConsistentRead to
+ `True`.
+
+ :type table_name: string
+ :param table_name: The name of the table containing the requested
+ items.
+
+ :type index_name: string
+ :param index_name: The name of an index on the table to query.
+
+ :type select: string
+ :param select: The attributes to be returned in the result. You can
+ retrieve all item attributes, specific item attributes, the count
+ of matching items, or in the case of an index, some or all of the
+ attributes projected into the index.
+
+ + `ALL_ATTRIBUTES`: Returns all of the item attributes. For a table,
+ this is the default. For an index, this mode causes Amazon DynamoDB
+ to fetch the full item from the table for each matching item in the
+ index. If the index is configured to project all item attributes,
+ the matching items will not be fetched from the table. Fetching
+ items from the table incurs additional throughput cost and latency.
+ + `ALL_PROJECTED_ATTRIBUTES`: Allowed only when querying an index.
+ Retrieves all attributes which have been projected into the index.
+ If the index is configured to project all attributes, this is
+ equivalent to specifying ALL_ATTRIBUTES .
+ + `COUNT`: Returns the number of matching items, rather than the
+ matching items themselves.
+ + `SPECIFIC_ATTRIBUTES` : Returns only the attributes listed in
+ AttributesToGet . This is equivalent to specifying AttributesToGet
+ without specifying any value for Select . If you are querying an
+ index and only request attributes that are projected into that
+ index, the operation will consult the index and bypass the table.
+ If any of the requested attributes are not projected into the
+ index, Amazon DynamoDB will need to fetch each matching item from
+ the table. This extra fetching incurs additional throughput cost
+ and latency.
+
+
+ When neither Select nor AttributesToGet are specified, Amazon DynamoDB
+ defaults to `ALL_ATTRIBUTES` when accessing a table, and
+ `ALL_PROJECTED_ATTRIBUTES` when accessing an index. You cannot use
+ both Select and AttributesToGet together in a single request,
+ unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage
+ is equivalent to specifying AttributesToGet without any value for
+ Select .)
+
+ :type attributes_to_get: list
+ :param attributes_to_get: The names of one or more attributes to
+ retrieve. If no attribute names are specified, then all attributes
+ will be returned. If any of the requested attributes are not found,
+ they will not appear in the result.
+ If you are querying an index and only request attributes that are
+ projected into that index, the operation will consult the index and
+ bypass the table. If any of the requested attributes are not
+ projected into the index, Amazon DynamoDB will need to fetch each
+ matching item from the table. This extra fetching incurs additional
+ throughput cost and latency.
+
+ You cannot use both AttributesToGet and Select together in a Query
+ request, unless the value for Select is `SPECIFIC_ATTRIBUTES`.
+ (This usage is equivalent to specifying AttributesToGet without any
+ value for Select .)
+
+ :type limit: integer
+ :param limit: The maximum number of items to evaluate (not necessarily
+ the number of matching items). If Amazon DynamoDB processes the
+ number of items up to the limit while processing the results, it
+ stops the operation and returns the matching values up to that
+ point, and a LastEvaluatedKey to apply in a subsequent operation,
+ so that you can pick up where you left off. Also, if the processed
+ data set size exceeds 1 MB before Amazon DynamoDB reaches this
+ limit, it stops the operation and returns the matching values up to
+ the limit, and a LastEvaluatedKey to apply in a subsequent
+ operation to continue the operation. For more information see
+ `Query and Scan`_ of the Amazon DynamoDB Developer Guide .
+
+ :type consistent_read: boolean
+ :param consistent_read: If set to `True`, then the operation uses
+ strongly consistent reads; otherwise, eventually consistent reads
+ are used.
+
+ :type key_conditions: map
+ :param key_conditions:
+ The selection criteria for the query.
+
+ For a query on a table, you can only have conditions on the table
+ primary key attributes. You must specify the hash key attribute
+ name and value as an `EQ` condition. You can optionally specify a
+ second condition, referring to the range key attribute.
+
+ For a query on a secondary index, you can only have conditions on the
+ index key attributes. You must specify the index hash attribute
+ name and value as an EQ condition. You can optionally specify a
+ second condition, referring to the index key range attribute.
+
+ Multiple conditions are evaluated using "AND"; in other words, all of
+ the conditions must be met in order for an item to appear in the
+ results.
+
+ Each KeyConditions element consists of an attribute name to compare,
+ along with the following:
+
+
+ + AttributeValueList -One or more values to evaluate against the
+ supplied attribute. This list contains exactly one value, except
+ for a `BETWEEN` or `IN` comparison, in which case the list contains
+ two values. String value comparisons for greater than, equals, or
+ less than are based on ASCII character code values. For example,
+ `a` is greater than `A`, and `aa` is greater than `B`. For a list
+ of code values, see
+ `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
+ For Binary, Amazon DynamoDB treats each byte of the binary data as
+ unsigned when it compares binary values, for example when
+ evaluating query expressions.
+ + ComparisonOperator -A comparator for evaluating attributes. For
+ example, equals, greater than, less than, etc. Valid comparison
+ operators for Query: `EQ | LE | LT | GE | GT | BEGINS_WITH |
+ BETWEEN` For information on specifying data types in JSON, see
+ `JSON Data Format`_ of the Amazon DynamoDB Developer Guide . The
+ following are descriptions of each comparison operator.
+
+ + `EQ` : Equal. AttributeValueList can contain only one AttributeValue
+ of type String, Number, or Binary (not a set). If an item contains
+ an AttributeValue of a different type than the one specified in the
+ request, the value does not match. For example, `{"S":"6"}` does
+ not equal `{"N":"6"}`. Also, `{"N":"6"}` does not equal
+ `{"NS":["6", "2", "1"]}`.
+ + `LE` : Less than or equal. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `LT` : Less than. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `GE` : Greater than or equal. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `GT` : Greater than. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain
+ only one AttributeValue of type String or Binary (not a Number or a
+ set). The target attribute of the comparison must be a String or
+ Binary (not a Number or a set).
+ + `BETWEEN` : Greater than or equal to the first value, and less than
+ or equal to the second value. AttributeValueList must contain two
+ AttributeValue elements of the same type, either String, Number, or
+ Binary (not a set). A target attribute matches if the target value
+ is greater than, or equal to, the first element and less than, or
+ equal to, the second element. If an item contains an AttributeValue
+ of a different type than the one specified in the request, the
+ value does not match. For example, `{"S":"6"}` does not compare to
+ `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6",
+ "2", "1"]}`
+
+ :type scan_index_forward: boolean
+ :param scan_index_forward: Specifies ascending (true) or descending
+ (false) traversal of the index. Amazon DynamoDB returns results
+ reflecting the requested order determined by the range key: If the
+ data type is Number, the results are returned in numeric order;
+ otherwise, the results are returned in order of ASCII character
+ code values.
+ If ScanIndexForward is not specified, the results are returned in
+ ascending order.
+
+ :type exclusive_start_key: map
+ :param exclusive_start_key: The primary key of the item from which to
+ continue an earlier operation. An earlier operation might provide
+ this value as the LastEvaluatedKey if that operation was
+ interrupted before completion; either because of the result set
+ size or because of the setting for Limit . The LastEvaluatedKey can
+ be passed back in a new request to continue the operation from that
+ point.
+ The data type for ExclusiveStartKey must be String, Number or Binary.
+ No set data types are allowed.
+
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity:
+
+ """
+ params = {'TableName': table_name, }
+ if index_name is not None:
+ params['IndexName'] = index_name
+ if select is not None:
+ params['Select'] = select
+ if attributes_to_get is not None:
+ params['AttributesToGet'] = attributes_to_get
+ if limit is not None:
+ params['Limit'] = limit
+ if consistent_read is not None:
+ params['ConsistentRead'] = consistent_read
+ if key_conditions is not None:
+ params['KeyConditions'] = key_conditions
+ if scan_index_forward is not None:
+ params['ScanIndexForward'] = scan_index_forward
+ if exclusive_start_key is not None:
+ params['ExclusiveStartKey'] = exclusive_start_key
+ if return_consumed_capacity is not None:
+ params['ReturnConsumedCapacity'] = return_consumed_capacity
+ return self.make_request(action='Query',
+ body=json.dumps(params))
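+
+ # Illustrative usage sketch (hypothetical table and attribute names;
+ # assumes `conn` is an instance of this module's connection class):
+ # items under one hash key whose range key begins with a prefix,
+ # returned in descending order.
+ #
+ #     response = conn.query(
+ #         table_name='Thread',
+ #         key_conditions={
+ #             'ForumName': {
+ #                 'AttributeValueList': [{'S': 'Amazon DynamoDB'}],
+ #                 'ComparisonOperator': 'EQ'},
+ #             'Subject': {
+ #                 'AttributeValueList': [{'S': 'How do I'}],
+ #                 'ComparisonOperator': 'BEGINS_WITH'},
+ #         },
+ #         scan_index_forward=False,
+ #         limit=20)
+ #     items = response.get('Items', [])
+ #     last_key = response.get('LastEvaluatedKey')  # present if more pages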
+
+ def scan(self, table_name, attributes_to_get=None, limit=None,
+ select=None, scan_filter=None, exclusive_start_key=None,
+ return_consumed_capacity=None):
+ """
+ The Scan operation returns one or more items and item
+ attributes by accessing every item in the table. To have
+ Amazon DynamoDB return fewer items, you can provide a
+ ScanFilter .
+
+ If the total number of scanned items exceeds the maximum data
+ set size limit of 1 MB, the scan stops and results are
+ returned to the user with a LastEvaluatedKey to continue the
+ scan in a subsequent operation. The results also include the
+ number of items exceeding the limit. A scan can result in no
+ table data meeting the filter criteria.
+
+ The result set is eventually consistent.
+
+ :type table_name: string
+ :param table_name: The name of the table containing the requested
+ items.
+
+ :type attributes_to_get: list
+ :param attributes_to_get: The names of one or more attributes to
+ retrieve. If no attribute names are specified, then all attributes
+ will be returned. If any of the requested attributes are not found,
+ they will not appear in the result.
+
+ :type limit: integer
+ :param limit: The maximum number of items to evaluate (not necessarily
+ the number of matching items). If Amazon DynamoDB processes the
+ number of items up to the limit while processing the results, it
+ stops the operation and returns the matching values up to that
+ point, and a LastEvaluatedKey to apply in a subsequent operation,
+ so that you can pick up where you left off. Also, if the processed
+ data set size exceeds 1 MB before Amazon DynamoDB reaches this
+ limit, it stops the operation and returns the matching values up to
+ the limit, and a LastEvaluatedKey to apply in a subsequent
+ operation to continue the operation. For more information see
+ `Query and Scan`_ of the Amazon DynamoDB Developer Guide .
+
+ :type select: string
+ :param select: The attributes to be returned in the result. You can
+ retrieve all item attributes, specific item attributes, the count
+ of matching items, or in the case of an index, some or all of the
+ attributes projected into the index.
+
+ + `ALL_ATTRIBUTES`: Returns all of the item attributes. For a table,
+ this is the default. For an index, this mode causes Amazon DynamoDB
+ to fetch the full item from the table for each matching item in the
+ index. If the index is configured to project all item attributes,
+ the matching items will not be fetched from the table. Fetching
+ items from the table incurs additional throughput cost and latency.
+ + `ALL_PROJECTED_ATTRIBUTES`: Retrieves all attributes which have been
+ projected into the index. If the index is configured to project all
+ attributes, this is equivalent to specifying ALL_ATTRIBUTES .
+ + `COUNT`: Returns the number of matching items, rather than the
+ matching items themselves.
+ + `SPECIFIC_ATTRIBUTES` : Returns only the attributes listed in
+ AttributesToGet . This is equivalent to specifying AttributesToGet
+ without specifying any value for Select . If you are querying an
+ index and only request attributes that are projected into that
+ index, the operation will consult the index and bypass the table.
+ If any of the requested attributes are not projected into the
+ index, Amazon DynamoDB will need to fetch each matching item from
+ the table. This extra fetching incurs additional throughput cost
+ and latency.
+
+
+ When neither Select nor AttributesToGet are specified, Amazon DynamoDB
+ defaults to `ALL_ATTRIBUTES` when accessing a table, and
+ `ALL_PROJECTED_ATTRIBUTES` when accessing an index. You cannot use
+ both Select and AttributesToGet together in a single request,
+ unless the value for Select is `SPECIFIC_ATTRIBUTES`. (This usage
+ is equivalent to specifying AttributesToGet without any value for
+ Select .)
+
+ :type scan_filter: map
+ :param scan_filter:
+ Evaluates the scan results and returns only the desired values.
+ Multiple conditions are treated as "AND" operations: all conditions
+ must be met to be included in the results.
+
+ Each ScanFilter element consists of an attribute name to compare,
+ along with the following:
+
+
+ + AttributeValueList -One or more values to evaluate against the
+ supplied attribute. This list contains exactly one value, except
+ for a `BETWEEN` or `IN` comparison, in which case the list contains
+ two values. String value comparisons for greater than, equals, or
+ less than are based on ASCII character code values. For example,
+ `a` is greater than `A`, and `aa` is greater than `B`. For a list
+ of code values, see
+ `http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters`_.
+ For Binary, Amazon DynamoDB treats each byte of the binary data as
+ unsigned when it compares binary values, for example when
+ evaluating query expressions.
+ + ComparisonOperator -A comparator for evaluating attributes. For
+ example, equals, greater than, less than, etc. Valid comparison
+ operators for Scan: `EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL
+ | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN` For
+ information on specifying data types in JSON, see `JSON Data
+ Format`_ of the Amazon DynamoDB Developer Guide . The following are
+ descriptions of each comparison operator.
+
+ + `EQ` : Equal. AttributeValueList can contain only one AttributeValue
+ of type String, Number, or Binary (not a set). If an item contains
+ an AttributeValue of a different type than the one specified in the
+ request, the value does not match. For example, `{"S":"6"}` does
+ not equal `{"N":"6"}`. Also, `{"N":"6"}` does not equal
+ `{"NS":["6", "2", "1"]}`.
+ + `NE` : Not equal. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ equal `{"NS":["6", "2", "1"]}`.
+ + `LE` : Less than or equal. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `LT` : Less than. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `GE` : Greater than or equal. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `GT` : Greater than. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If an
+ item contains an AttributeValue of a different type than the one
+ specified in the request, the value does not match. For example,
+ `{"S":"6"}` does not equal `{"N":"6"}`. Also, `{"N":"6"}` does not
+ compare to `{"NS":["6", "2", "1"]}`.
+ + `NOT_NULL` : The attribute exists.
+ + `NULL` : The attribute does not exist.
+ + `CONTAINS` : checks for a subsequence, or value in a set.
+ AttributeValueList can contain only one AttributeValue of type
+ String, Number, or Binary (not a set). If the target attribute of
+ the comparison is a String, then the operation checks for a
+ substring match. If the target attribute of the comparison is
+ Binary, then the operation looks for a subsequence of the target
+ that matches the input. If the target attribute of the comparison
+ is a set ("SS", "NS", or "BS"), then the operation checks for a
+ member of the set (not as a substring).
+ + `NOT_CONTAINS` : checks for absence of a subsequence, or absence of a
+ value in a set. AttributeValueList can contain only one
+ AttributeValue of type String, Number, or Binary (not a set). If
+ the target attribute of the comparison is a String, then the
+ operation checks for the absence of a substring match. If the
+ target attribute of the comparison is Binary, then the operation
+ checks for the absence of a subsequence of the target that matches
+ the input. If the target attribute of the comparison is a set
+ ("SS", "NS", or "BS"), then the operation checks for the absence of
+ a member of the set (not as a substring).
+ + `BEGINS_WITH` : checks for a prefix. AttributeValueList can contain
+ only one AttributeValue of type String or Binary (not a Number or a
+ set). The target attribute of the comparison must be a String or
+ Binary (not a Number or a set).
+ + `IN` : checks for exact matches. AttributeValueList can contain more
+ than one AttributeValue of type String, Number, or Binary (not a
+ set). The target attribute of the comparison must be of the same
+ type and exact value to match. A String never matches a String set.
+ + `BETWEEN` : Greater than or equal to the first value, and less than
+ or equal to the second value. AttributeValueList must contain two
+ AttributeValue elements of the same type, either String, Number, or
+ Binary (not a set). A target attribute matches if the target value
+ is greater than, or equal to, the first element and less than, or
+ equal to, the second element. If an item contains an AttributeValue
+ of a different type than the one specified in the request, the
+ value does not match. For example, `{"S":"6"}` does not compare to
+ `{"N":"6"}`. Also, `{"N":"6"}` does not compare to `{"NS":["6",
+ "2", "1"]}`
+
+ :type exclusive_start_key: map
+ :param exclusive_start_key: The primary key of the item from which to
+ continue an earlier operation. An earlier operation might provide
+ this value as the LastEvaluatedKey if that operation was
+ interrupted before completion; either because of the result set
+ size or because of the setting for Limit . The LastEvaluatedKey can
+ be passed back in a new request to continue the operation from that
+ point.
+ The data type for ExclusiveStartKey must be String, Number or Binary.
+ No set data types are allowed.
+
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity:
+
+ """
+ params = {'TableName': table_name, }
+ if attributes_to_get is not None:
+ params['AttributesToGet'] = attributes_to_get
+ if limit is not None:
+ params['Limit'] = limit
+ if select is not None:
+ params['Select'] = select
+ if scan_filter is not None:
+ params['ScanFilter'] = scan_filter
+ if exclusive_start_key is not None:
+ params['ExclusiveStartKey'] = exclusive_start_key
+ if return_consumed_capacity is not None:
+ params['ReturnConsumedCapacity'] = return_consumed_capacity
+ return self.make_request(action='Scan',
+ body=json.dumps(params))
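+
+ # Illustrative usage sketch (hypothetical table and attribute names;
+ # assumes `conn` is an instance of this module's connection class):
+ # scanning for items with more than 10 replies and following
+ # LastEvaluatedKey across result pages.
+ #
+ #     start_key = None
+ #     while True:
+ #         response = conn.scan(
+ #             table_name='Thread',
+ #             scan_filter={'Replies': {
+ #                 'AttributeValueList': [{'N': '10'}],
+ #                 'ComparisonOperator': 'GT'}},
+ #             exclusive_start_key=start_key)
+ #         for item in response.get('Items', []):
+ #             pass  # process each matching item
+ #         start_key = response.get('LastEvaluatedKey')
+ #         if start_key is None:
+ #             break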
+
+ def update_item(self, table_name, key, attribute_updates=None,
+ expected=None, return_values=None,
+ return_consumed_capacity=None,
+ return_item_collection_metrics=None):
+ """
+ Edits an existing item's attributes, or inserts a new item if
+ it does not already exist. You can put, delete, or add
+ attribute values. You can also perform a conditional update
+ (insert a new attribute name-value pair if it doesn't exist,
+ or replace an existing name-value pair if it has certain
+ expected attribute values).
+
+ In addition to updating an item, you can also return the
+ item's attribute values in the same operation, using the
+ ReturnValues parameter.
+
+ :type table_name: string
+ :param table_name: The name of the table containing the item to update.
+
+ :type key: map
+ :param key: The primary key that defines the item. Each element
+ consists of an attribute name and a value for that attribute.
+
+ :type attribute_updates: map
+ :param attribute_updates: The names of attributes to be modified, the
+ action to perform on each, and the new value for each. If you are
+ updating an attribute that is an index key attribute for any
+ indexes on that table, the attribute type must match the index key
+ type defined in the AttributeDefinitions of the table description.
+ You can use UpdateItem to update any non-key attributes.
+ Attribute values cannot be null; string and binary type attributes must
+ have lengths greater than zero; and set type attributes must not be
+ empty. Requests with empty values will be rejected with a
+ ValidationException .
+
+ Each AttributeUpdates element consists of an attribute name to modify,
+ along with the following:
+
+
+ + Value -the new value, if applicable, for this attribute.
+ + Action -specifies how to perform the update. Valid values for Action
+ are `PUT`, `DELETE`, and `ADD`. The behavior depends on whether the
+ specified primary key already exists in the table. **If an item
+ with the specified Key is found in the table:**
+
+ + `PUT`-Adds the specified attribute to the item. If the attribute
+ already exists, it is replaced by the new value.
+ + `DELETE`-If no value is specified, the attribute and its value are
+ removed from the item. The data type of the specified value must
+ match the existing value's data type. If a set of values is
+ specified, then those values are subtracted from the old set. For
+ example, if the attribute value was the set `[a,b,c]` and the
+ DELETE action specified `[a,c]`, then the final attribute value
+ would be `[b]`. Specifying an empty set is an error.
+ + `ADD`-If the attribute does not already exist, then the attribute and
+ its values are added to the item. If the attribute does exist, then
+ the behavior of `ADD` depends on the data type of the attribute:
+
+ + If the existing attribute is a number, and if Value is also a number,
+ then the Value is mathematically added to the existing attribute.
+ If Value is a negative number, then it is subtracted from the
+ existing attribute. If you use `ADD` to increment or decrement a
+ number value for an item that doesn't exist before the update,
+ Amazon DynamoDB uses 0 as the initial value. In addition, if you
+ use `ADD` to update an existing item, and intend to increment or
+ decrement an attribute value which does not yet exist, Amazon
+ DynamoDB uses `0` as the initial value. For example, suppose that
+ the item you want to update does not yet have an attribute named
+ itemcount , but you decide to `ADD` the number `3` to this
+ attribute anyway, even though it currently does not exist. Amazon
+ DynamoDB will create the itemcount attribute, set its initial value
+ to `0`, and finally add `3` to it. The result will be a new
+ itemcount attribute in the item, with a value of `3`.
+ + If the existing data type is a set, and if the Value is also a set,
+ then the Value is added to the existing set. (This is a set
+ operation, not mathematical addition.) For example, if the
+ attribute value was the set `[1,2]`, and the `ADD` action specified
+ `[3]`, then the final attribute value would be `[1,2,3]`. An error
+ occurs if an Add action is specified for a set attribute and the
+ attribute type specified does not match the existing set type. Both
+ sets must have the same primitive data type. For example, if the
+ existing data type is a set of strings, the Value must also be a
+ set of strings. The same holds true for number sets and binary
+ sets.
+ This action is only valid for an existing attribute whose data type is
+ number or is a set. Do not use `ADD` for any other data types.
+ **If no item with the specified Key is found:**
+
+ + `PUT`-Amazon DynamoDB creates a new item with the specified primary
+ key, and then adds the attribute.
+ + `DELETE`-Nothing happens; there is no attribute to delete.
+ + `ADD`-Amazon DynamoDB creates an item with the supplied primary key
+ and number (or set of numbers) for the attribute value. The only
+ data types allowed are number and number set; no other data types
+ can be specified.
+
+
+
+ If you specify any attributes that are part of an index key, then the
+ data types for those attributes must match those of the schema in
+ the table's attribute definition.
+
+ :type expected: map
+ :param expected: A map of attribute/condition pairs. This is the
+ conditional block for the UpdateItem operation. All the conditions
+ must be met for the operation to succeed.
+ Expected allows you to provide an attribute name, and whether or not
+ Amazon DynamoDB should check to see if the attribute value already
+ exists; or if the attribute value exists and has a particular value
+ before changing it.
+
+ Each item in Expected represents an attribute name for Amazon DynamoDB
+ to check, along with the following:
+
+
+ + Value -the attribute value for Amazon DynamoDB to check.
+ + Exists -causes Amazon DynamoDB to evaluate the value before
+ attempting a conditional operation:
+
+ + If Exists is `True`, Amazon DynamoDB will check to see if that
+ attribute value already exists in the table. If it is found, then
+ the operation succeeds. If it is not found, the operation fails
+ with a ConditionalCheckFailedException .
+ + If Exists is `False`, Amazon DynamoDB assumes that the attribute
+ value does not exist in the table. If in fact the value does not
+ exist, then the assumption is valid and the operation succeeds. If
+ the value is found, despite the assumption that it does not exist,
+ the operation fails with a ConditionalCheckFailedException .
+ The default setting for Exists is `True`. If you supply a Value all by
+ itself, Amazon DynamoDB assumes the attribute exists: You don't
+ have to set Exists to `True`, because it is implied. Amazon
+ DynamoDB returns a ValidationException if:
+
+ + Exists is `True` but there is no Value to check. (You expect a value
+ to exist, but don't specify what that value is.)
+ + Exists is `False` but you also specify a Value . (You cannot expect
+ an attribute to have a value, while also expecting it not to
+ exist.)
+
+
+
+ If you specify more than one condition for Exists , then all of the
+ conditions must evaluate to true. (In other words, the conditions
+ are ANDed together.) Otherwise, the conditional operation will
+ fail.
+
+ :type return_values: string
+ :param return_values:
+ Use ReturnValues if you want to get the item attributes as they
+ appeared either before or after they were updated. For UpdateItem ,
+ the valid values are:
+
+
+ + `NONE`-(default) If ReturnValues is not specified, or if its value is
+ `NONE`, then nothing is returned.
+ + `ALL_OLD`-If UpdateItem overwrote an attribute name-value pair, then
+ the content of the old item is returned.
+ + `UPDATED_OLD`-The old versions of only the updated attributes are
+ returned.
+ + `ALL_NEW`-All of the attributes of the new version of the item are
+ returned.
+ + `UPDATED_NEW`-The new versions of only the updated attributes are
+ returned.
+
+ :type return_consumed_capacity: string
+ :param return_consumed_capacity:
+
+ :type return_item_collection_metrics: string
+ :param return_item_collection_metrics: Indicates whether to return
+ statistics about item collections, if any, that were modified
+ during the operation. The default for ReturnItemCollectionMetrics
+ is `NONE`, meaning that no statistics will be returned. To obtain
+ the statistics, set ReturnItemCollectionMetrics to `SIZE`.
+
+ """
+ params = {'TableName': table_name, 'Key': key, }
+ if attribute_updates is not None:
+ params['AttributeUpdates'] = attribute_updates
+ if expected is not None:
+ params['Expected'] = expected
+ if return_values is not None:
+ params['ReturnValues'] = return_values
+ if return_consumed_capacity is not None:
+ params['ReturnConsumedCapacity'] = return_consumed_capacity
+ if return_item_collection_metrics is not None:
+ params['ReturnItemCollectionMetrics'] = return_item_collection_metrics
+ return self.make_request(action='UpdateItem',
+ body=json.dumps(params))
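+
+ # Illustrative usage sketch (hypothetical table and attribute names;
+ # assumes `conn` is an instance of this module's connection class):
+ # atomically incrementing a numeric counter with `ADD` and reading
+ # back the new value.
+ #
+ #     response = conn.update_item(
+ #         table_name='Thread',
+ #         key={'ForumName': {'S': 'Amazon DynamoDB'},
+ #              'Subject': {'S': 'How do I update an item?'}},
+ #         attribute_updates={'Replies': {'Value': {'N': '1'},
+ #                                        'Action': 'ADD'}},
+ #         return_values='UPDATED_NEW')
+ #     new_count = response['Attributes']['Replies']['N']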
+
+ def update_table(self, table_name, provisioned_throughput):
+ """
+ Updates the provisioned throughput for the given table.
+ Setting the throughput for a table helps you manage
+ performance and is part of the provisioned throughput feature
+ of Amazon DynamoDB.
+
+ The provisioned throughput values can be upgraded or
+ downgraded based on the maximums and minimums listed in the
+ `Limits`_ section of the Amazon DynamoDB Developer Guide .
+
+ The table must be in the `ACTIVE` state for this operation to
+ succeed. UpdateTable is an asynchronous operation; while
+ executing the operation, the table is in the `UPDATING` state.
+ While the table is in the `UPDATING` state, the table still
+ has the provisioned throughput from before the call. The new
+ provisioned throughput setting is in effect only when the
+ table returns to the `ACTIVE` state after the UpdateTable
+ operation.
+
+ :type table_name: string
+ :param table_name: The name of the table to be updated.
+
+ :type provisioned_throughput: dict
+ :param provisioned_throughput:
+
+ """
+ params = {
+ 'TableName': table_name,
+ 'ProvisionedThroughput': provisioned_throughput,
+ }
+ return self.make_request(action='UpdateTable',
+ body=json.dumps(params))
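+
+ # Illustrative usage sketch (hypothetical table name; assumes `conn` is
+ # an instance of this module's connection class): raising a table's
+ # provisioned throughput.
+ #
+ #     conn.update_table(
+ #         table_name='Thread',
+ #         provisioned_throughput={'ReadCapacityUnits': 10,
+ #                                 'WriteCapacityUnits': 10})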
+
+ def make_request(self, action, body):
+ headers = {
+ 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/x-amz-json-1.0',
+ 'Content-Length': str(len(body)),
+ }
+ http_request = self.build_base_http_request(
+ method='POST', path='/', auth_path='/', params={},
+ headers=headers, data=body)
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10)
+ response_body = response.read()
+ boto.log.debug(response_body)
+ if response.status == 200:
+ if response_body:
+ return json.loads(response_body)
+ else:
+ json_body = json.loads(response_body)
+ fault_name = json_body.get('__type', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
+
diff --git a/boto/ec2/attributes.py b/boto/ec2/attributes.py
new file mode 100644
index 00000000..d76e5c54
--- /dev/null
+++ b/boto/ec2/attributes.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class AccountAttribute(object):
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.attribute_name = None
+ self.attribute_values = None
+
+ def startElement(self, name, attrs, connection):
+ if name == 'attributeValueSet':
+ self.attribute_values = AttributeValues()
+ return self.attribute_values
+
+ def endElement(self, name, value, connection):
+ if name == 'attributeName':
+ self.attribute_name = value
+
+
+class AttributeValues(list):
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'attributeValue':
+ self.append(value)
+
+
+class VPCAttribute(object):
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.vpc_id = None
+ self.enable_dns_hostnames = None
+ self.enable_dns_support = None
+ self._current_attr = None
+
+ def startElement(self, name, attrs, connection):
+ if name in ('enableDnsHostnames', 'enableDnsSupport'):
+ self._current_attr = name
+
+ def endElement(self, name, value, connection):
+ if name == 'vpcId':
+ self.vpc_id = value
+ elif name == 'value':
+ if value == 'true':
+ value = True
+ else:
+ value = False
+ if self._current_attr == 'enableDnsHostnames':
+ self.enable_dns_hostnames = value
+ elif self._current_attr == 'enableDnsSupport':
+ self.enable_dns_support = value
diff --git a/boto/ec2/blockdevicemapping.py b/boto/ec2/blockdevicemapping.py
index ca0e9373..df774ae9 100644
--- a/boto/ec2/blockdevicemapping.py
+++ b/boto/ec2/blockdevicemapping.py
@@ -125,17 +125,18 @@ class BlockDeviceMapping(dict):
params['%s.VirtualName' % pre] = block_dev.ephemeral_name
else:
if block_dev.no_device:
- params['%s.Ebs.NoDevice' % pre] = 'true'
- if block_dev.snapshot_id:
- params['%s.Ebs.SnapshotId' % pre] = block_dev.snapshot_id
- if block_dev.size:
- params['%s.Ebs.VolumeSize' % pre] = block_dev.size
- if block_dev.delete_on_termination:
- params['%s.Ebs.DeleteOnTermination' % pre] = 'true'
+ params['%s.NoDevice' % pre] = ''
else:
- params['%s.Ebs.DeleteOnTermination' % pre] = 'false'
- if block_dev.volume_type:
- params['%s.Ebs.VolumeType' % pre] = block_dev.volume_type
- if block_dev.iops is not None:
- params['%s.Ebs.Iops' % pre] = block_dev.iops
+ if block_dev.snapshot_id:
+ params['%s.Ebs.SnapshotId' % pre] = block_dev.snapshot_id
+ if block_dev.size:
+ params['%s.Ebs.VolumeSize' % pre] = block_dev.size
+ if block_dev.delete_on_termination:
+ params['%s.Ebs.DeleteOnTermination' % pre] = 'true'
+ else:
+ params['%s.Ebs.DeleteOnTermination' % pre] = 'false'
+ if block_dev.volume_type:
+ params['%s.Ebs.VolumeType' % pre] = block_dev.volume_type
+ if block_dev.iops is not None:
+ params['%s.Ebs.Iops' % pre] = block_dev.iops
i += 1
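# A brief sketch of the behaviour changed above: a mapping entry flagged with
# no_device now emits only the NoDevice parameter, while normal entries still
# build the Ebs.* values. The region, AMI id and device names are placeholders.
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType

mapping = BlockDeviceMapping()
mapping['/dev/sda1'] = BlockDeviceType(size=20, delete_on_termination=True)
mapping['/dev/sdb'] = BlockDeviceType(no_device=True)  # suppress this device

conn = boto.ec2.connect_to_region('us-east-1')
conn.run_instances('ami-12345678', block_device_map=mapping)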
diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py
index e74e3179..fb0cc304 100644
--- a/boto/ec2/connection.py
+++ b/boto/ec2/connection.py
@@ -1,6 +1,6 @@
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
-# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
@@ -33,7 +33,7 @@ from datetime import timedelta
import boto
from boto.connection import AWSQueryConnection
from boto.resultset import ResultSet
-from boto.ec2.image import Image, ImageAttribute
+from boto.ec2.image import Image, ImageAttribute, CopyImage
from boto.ec2.instance import Reservation, Instance
from boto.ec2.instance import ConsoleOutput, InstanceAttribute
from boto.ec2.keypair import KeyPair
@@ -58,6 +58,7 @@ from boto.ec2.vmtype import VmType
from boto.ec2.instancestatus import InstanceStatusSet
from boto.ec2.volumestatus import VolumeStatusSet
from boto.ec2.networkinterface import NetworkInterface
+from boto.ec2.attributes import AccountAttribute, VPCAttribute
from boto.exception import EC2ResponseError
#boto.set_stream_logger('ec2')
@@ -65,7 +66,7 @@ from boto.exception import EC2ResponseError
class EC2Connection(AWSQueryConnection):
- APIVersion = boto.config.get('Boto', 'ec2_version', '2012-12-01')
+ APIVersion = boto.config.get('Boto', 'ec2_version', '2013-02-01')
DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint',
'ec2.us-east-1.amazonaws.com')
@@ -147,14 +148,12 @@ class EC2Connection(AWSQueryConnection):
user ID has explicit launch permissions
:type filters: dict
- :param filters: Optional filters that can be used to limit
- the results returned. Filters are provided
- in the form of a dictionary consisting of
- filter names as the key and filter values
- as the value. The set of allowable filter
- names/values is dependent on the request
- being performed. Check the EC2 API guide
- for details.
+ :param filters: Optional filters that can be used to limit the
+ results returned. Filters are provided in the form of a
+ dictionary consisting of filter names as the key and
+ filter values as the value. The set of allowable filter
+ names/values is dependent on the request being performed.
+ Check the EC2 API guide for details.
:rtype: list
:return: A list of :class:`boto.ec2.image.Image`
@@ -299,8 +298,7 @@ class EC2Connection(AWSQueryConnection):
:type delete_snapshot: bool
:param delete_snapshot: Set to True if we should delete the
- snapshot associated with an EBS volume
- mounted at /dev/sda1
+ snapshot associated with an EBS volume mounted at /dev/sda1
:rtype: bool
:return: True if successful
@@ -333,14 +331,14 @@ class EC2Connection(AWSQueryConnection):
:type description: string
:param description: An optional human-readable string describing
- the contents and purpose of the AMI.
+ the contents and purpose of the AMI.
:type no_reboot: bool
- :param no_reboot: An optional flag indicating that the bundling process
- should not attempt to shutdown the instance before
- bundling. If this flag is True, the responsibility
- of maintaining file system integrity is left to the
- owner of the instance.
+ :param no_reboot: An optional flag indicating that the
+ bundling process should not attempt to shutdown the
+ instance before bundling. If this flag is True, the
+ responsibility of maintaining file system integrity is
+ left to the owner of the instance.
:rtype: string
:return: The new image id
@@ -365,10 +363,10 @@ class EC2Connection(AWSQueryConnection):
:type attribute: string
:param attribute: The attribute you need information about.
- Valid choices are:
- * launchPermission
- * productCodes
- * blockDeviceMapping
+ Valid choices are:
+ * launchPermission
+ * productCodes
+ * blockDeviceMapping
:rtype: :class:`boto.ec2.image.ImageAttribute`
:return: An ImageAttribute object representing the value of the
@@ -393,7 +391,7 @@ class EC2Connection(AWSQueryConnection):
:type operation: string
:param operation: Either add or remove (this is required for changing
- launchPermissions)
+ launchPermissions)
:type user_ids: list
:param user_ids: The Amazon IDs of users to add/remove attributes
@@ -403,8 +401,8 @@ class EC2Connection(AWSQueryConnection):
:type product_codes: list
:param product_codes: Amazon DevPay product code. Currently only one
- product code can be associated with an AMI. Once
- set, the product code cannot be changed or reset.
+ product code can be associated with an AMI. Once
+ set, the product code cannot be changed or reset.
"""
params = {'ImageId': image_id,
'Attribute': attribute,
@@ -1333,11 +1331,11 @@ class EC2Connection(AWSQueryConnection):
def allocate_address(self, domain=None):
"""
Allocate a new Elastic IP address and associate it with your account.
-
+
:type domain: string
- :param domain: Optional string. If domain is set to "vpc" the address
- will be allocated to VPC . Will return address
- object with allocation_id.
+ :param domain: Optional string. If domain is set to "vpc" the address
+ will be allocated to VPC . Will return address object with
+ allocation_id.
:rtype: :class:`boto.ec2.address.Address`
:return: The newly allocated Address
@@ -2773,11 +2771,9 @@ class EC2Connection(AWSQueryConnection):
single-tenant hardware and can only be launched within a VPC.
:type offering_type: string
- :param offering_type: The Reserved Instance offering type.
- Valid Values:
- * Heavy Utilization
- * Medium Utilization
- * Light Utilization
+ :param offering_type: The Reserved Instance offering type. Valid
+ Values: `"Heavy Utilization" | "Medium Utilization" | "Light
+ Utilization"`
:type include_marketplace: bool
:param include_marketplace: Include Marketplace offerings in the
@@ -3372,7 +3368,7 @@ class EC2Connection(AWSQueryConnection):
'DeviceIndex': device_index}
return self.get_status('AttachNetworkInterface', params, verb='POST')
- def detach_network_interface(self, attachement_id, force=False):
+ def detach_network_interface(self, attachment_id, force=False):
"""
Detaches a network interface from an instance.
@@ -3383,7 +3379,7 @@ class EC2Connection(AWSQueryConnection):
:param force: Set to true to force a detachment.
"""
- params = {'AttachmentId': network_interface_id}
+ params = {'AttachmentId': attachment_id}
if force:
params['Force'] = 'true'
return self.get_status('DetachNetworkInterface', params, verb='POST')
@@ -3408,4 +3404,49 @@ class EC2Connection(AWSQueryConnection):
"""
params = {}
return self.get_list('DescribeVmTypes', params, [('euca:item', VmType)], verb='POST')
-
+
+ def copy_image(self, source_region, source_image_id, name,
+ description=None, client_token=None):
+ params = {
+ 'SourceRegion': source_region,
+ 'SourceImageId': source_image_id,
+ 'Name': name
+ }
+ if description is not None:
+ params['Description'] = description
+ if client_token is not None:
+ params['ClientToken'] = client_token
+ image = self.get_object('CopyImage', params, CopyImage,
+ verb='POST')
+ return image
+
+ def describe_account_attributes(self, attribute_names=None):
+ params = {}
+ if attribute_names is not None:
+ self.build_list_params(params, attribute_names, 'AttributeName')
+ return self.get_list('DescribeAccountAttributes', params,
+ [('item', AccountAttribute)], verb='POST')
+
+ def describe_vpc_attribute(self, vpc_id, attribute=None):
+ params = {
+ 'VpcId': vpc_id
+ }
+ if attribute is not None:
+ params['Attribute'] = attribute
+ attr = self.get_object('DescribeVpcAttribute', params,
+ VPCAttribute, verb='POST')
+ return attr
+
+ def modify_vpc_attribute(self, vpc_id, enable_dns_support=None,
+ enable_dns_hostnames=None):
+ params = {
+ 'VpcId': vpc_id
+ }
+ if enable_dns_support is not None:
+ params['EnableDnsSupport.Value'] = (
+ 'true' if enable_dns_support else 'false')
+ if enable_dns_hostnames is not None:
+ params['EnableDnsHostnames.Value'] = (
+ 'true' if enable_dns_hostnames else 'false')
+ result = self.get_status('ModifyVpcAttribute', params, verb='POST')
+ return result
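# A short sketch exercising the new EC2Connection calls added above; the
# region names, AMI id, attribute name and VPC id are placeholders.
import boto.ec2

conn = boto.ec2.connect_to_region('us-west-2')

# Copy an AMI from another region into this connection's region.
copied = conn.copy_image('us-east-1', 'ami-12345678', name='copied-image')
print copied.image_id

# Account-level attributes and per-VPC DNS settings.
attrs = conn.describe_account_attributes(['supported-platforms'])
dns = conn.describe_vpc_attribute('vpc-1a2b3c4d', 'enableDnsSupport')
conn.modify_vpc_attribute('vpc-1a2b3c4d', enable_dns_hostnames=True)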
diff --git a/boto/ec2/image.py b/boto/ec2/image.py
index f00e55ab..0d42b487 100644
--- a/boto/ec2/image.py
+++ b/boto/ec2/image.py
@@ -15,7 +15,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -31,12 +31,12 @@ class ProductCodes(list):
def endElement(self, name, value, connection):
if name == 'productCode':
self.append(value)
-
+
class Image(TaggedEC2Object):
"""
Represents an EC2 Image
"""
-
+
def __init__(self, connection=None):
TaggedEC2Object.__init__(self, connection)
self.id = None
@@ -94,7 +94,7 @@ class Image(TaggedEC2Object):
else:
raise Exception(
'Unexpected value of isPublic %s for image %s'%(
- value,
+ value,
self.id
)
)
@@ -151,7 +151,7 @@ class Image(TaggedEC2Object):
raise ValueError('%s is not a valid Image ID' % self.id)
return self.state
- def run(self, min_count=1, max_count=1, key_name=None,
+ def run(self, min_count=1, max_count=1, key_name=None,
security_groups=None, user_data=None,
addressing_type=None, instance_type='m1.small', placement=None,
kernel_id=None, ramdisk_id=None,
@@ -166,46 +166,46 @@ class Image(TaggedEC2Object):
"""
Runs this instance.
-
+
:type min_count: int
:param min_count: The minimum number of instances to start
-
+
:type max_count: int
:param max_count: The maximum number of instances to start
-
+
:type key_name: string
:param key_name: The name of the keypair to run this instance with.
-
- :type security_groups:
+
+ :type security_groups:
:param security_groups:
-
- :type user_data:
+
+ :type user_data:
:param user_data:
-
- :type addressing_type:
+
+ :type addressing_type:
:param addressing_type:
-
+
:type instance_type: string
:param instance_type: The type of instance to run. Current choices are:
m1.small | m1.large | m1.xlarge | c1.medium |
c1.xlarge | m2.xlarge | m2.2xlarge |
m2.4xlarge | cc1.4xlarge
-
+
:type placement: string
:param placement: The availability zone in which to launch the instances
:type kernel_id: string
:param kernel_id: The ID of the kernel with which to launch the instances
-
+
:type ramdisk_id: string
:param ramdisk_id: The ID of the RAM disk with which to launch the instances
-
+
:type monitoring_enabled: bool
:param monitoring_enabled: Enable CloudWatch monitoring on the instance.
-
+
:type subnet_id: string
:param subnet_id: The subnet ID within which to launch the instances for VPC.
-
+
:type private_ip_address: string
:param private_ip_address: If you're using VPC, you can optionally use
this parameter to assign the instance a
@@ -236,7 +236,7 @@ class Image(TaggedEC2Object):
:param additional_info: Specifies additional information to make
available to the instance(s)
- :type security_group_ids:
+ :type security_group_ids:
:param security_group_ids:
:type instance_profile_name: string
@@ -244,7 +244,7 @@ class Image(TaggedEC2Object):
:type instance_profile_arn: string
:param instance_profile_arn: The ARN of an IAM instance profile to use.
-
+
:type tenancy: string
:param tenancy: The tenancy of the instance you want to launch. An
instance with a tenancy of 'dedicated' runs on
@@ -266,9 +266,9 @@ class Image(TaggedEC2Object):
monitoring_enabled, subnet_id,
block_device_map, disable_api_termination,
instance_initiated_shutdown_behavior,
- private_ip_address, placement_group,
+ private_ip_address, placement_group,
security_group_ids=security_group_ids,
- additional_info=additional_info,
+ additional_info=additional_info,
instance_profile_name=instance_profile_name,
instance_profile_arn=instance_profile_arn,
tenancy=tenancy)
@@ -348,3 +348,16 @@ class ImageAttribute:
self.ramdisk = value
else:
setattr(self, name, value)
+
+
+class CopyImage(object):
+ def __init__(self, parent=None):
+ self._parent = parent
+ self.image_id = None
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'imageId':
+ self.image_id = value
diff --git a/boto/ec2/instance.py b/boto/ec2/instance.py
index 30311e05..5be701f0 100644
--- a/boto/ec2/instance.py
+++ b/boto/ec2/instance.py
@@ -188,6 +188,8 @@ class Instance(TaggedEC2Object):
:ivar product_codes: A list of product codes associated with this instance.
:ivar ami_launch_index: This instances position within it's launch group.
:ivar monitored: A boolean indicating whether monitoring is enabled or not.
+ :ivar monitoring_state: A string value that contains the actual value
+ of the monitoring element returned by EC2.
:ivar spot_instance_request_id: The ID of the spot instance request
if this is a spot instance.
:ivar subnet_id: The VPC Subnet ID, if running in VPC.
@@ -223,6 +225,7 @@ class Instance(TaggedEC2Object):
self.product_codes = ProductCodes()
self.ami_launch_index = None
self.monitored = False
+ self.monitoring_state = None
self.spot_instance_request_id = None
self.subnet_id = None
self.vpc_id = None
@@ -361,6 +364,7 @@ class Instance(TaggedEC2Object):
self.ramdisk = value
elif name == 'state':
if self._in_monitoring_element:
+ self.monitoring_state = value
if value == 'enabled':
self.monitored = True
self._in_monitoring_element = False
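# With the change above, a described instance exposes both the boolean flag
# and the raw monitoring string returned by EC2 (e.g. 'enabled', 'disabled'
# or 'pending'); the region is a placeholder.
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
reservation = conn.get_all_instances()[0]
instance = reservation.instances[0]
print instance.monitored, instance.monitoring_state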
diff --git a/boto/elasticache/__init__.py b/boto/elasticache/__init__.py
index b8b88d6b..fe35d707 100644
--- a/boto/elasticache/__init__.py
+++ b/boto/elasticache/__init__.py
@@ -25,7 +25,7 @@ from boto.regioninfo import RegionInfo
def regions():
"""
- Get all available regions for the AWS Elastic Beanstalk service.
+ Get all available regions for the AWS ElastiCache service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
diff --git a/boto/elasticache/layer1.py b/boto/elasticache/layer1.py
index 67a84da1..6c50438a 100644
--- a/boto/elasticache/layer1.py
+++ b/boto/elasticache/layer1.py
@@ -20,8 +20,9 @@
# IN THE SOFTWARE.
#
-import json
+
import boto
+from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
@@ -805,7 +806,7 @@ class ElastiCacheConnection(AWSQueryConnection):
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type. Valid Values: `"Light Utilization" |
- "Medium Utilization" | "Heavy Utilization" `
+ "Medium Utilization" | "Heavy Utilization"`
:type max_records: integer
:param max_records: The maximum number of records to include in the
@@ -878,7 +879,7 @@ class ElastiCacheConnection(AWSQueryConnection):
:param offering_type: The offering type filter value. Specify this
parameter to show only the available offerings matching the
specified offering type. Valid Values: `"Light Utilization" |
- "Medium Utilization" | "Heavy Utilization" `
+ "Medium Utilization" | "Heavy Utilization"`
:type max_records: integer
:param max_records: The maximum number of records to include in the
diff --git a/boto/elastictranscoder/layer1.py b/boto/elastictranscoder/layer1.py
index e9b50cd1..293c4f04 100644
--- a/boto/elastictranscoder/layer1.py
+++ b/boto/elastictranscoder/layer1.py
@@ -97,12 +97,12 @@ class ElasticTranscoderConnection(AWSAuthConnection):
which Elastic Transcoder puts the transcoded
files.
- :type input_name: structure
+ :type input_name: dict
:param input_name: A section of the request body that provides
information about the file that is being
transcoded.
- :type output: structure
+ :type output: dict
:param output: A section of the request body that provides information
about the transcoded (target) file.
@@ -141,9 +141,10 @@ class ElasticTranscoderConnection(AWSAuthConnection):
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want to use to create the pipeline.
- :type notifications: structure
+ :type notifications: dict
:param notifications: The Amazon SNS topic that you want to notify to report job
status.
+
To receive notifications, you must also subscribe
to the new topic in the console.
@@ -200,15 +201,15 @@ class ElasticTranscoderConnection(AWSAuthConnection):
:param container: The container type for the output file. This value
must be `mp4`.
- :type video: structure
+ :type video: dict
:param video: A section of the request body that specifies the video
parameters.
- :type audio: structure
+ :type audio: dict
:param audio: A section of the request body that specifies the audio
parameters
- :type thumbnails: structure
+ :type thumbnails: dict
:param thumbnails: A section of the request body that specifies the
thumbnail parameters, if any.
@@ -229,7 +230,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def delete_pipeline(self, id):
"""
To delete a pipeline, send a DELETE request to the
- `//pipelines/ [pipelineId] ` resource.
+ `//pipelines/ [pipelineId]` resource.
You can only delete a pipeline that has never been used or
that is not currently in use (doesn't contain any active
@@ -246,7 +247,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def delete_preset(self, id):
"""
To delete a preset, send a DELETE request to the `//presets/
- [presetId] ` resource.
+ [presetId]` resource.
If the preset has been used, you cannot delete it.
@@ -262,7 +263,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
page_token=None):
"""
To get a list of the jobs currently in a pipeline, send a GET
- request to the `//jobsByPipeline/ [pipelineId] ` resource.
+ request to the `//jobsByPipeline/ [pipelineId]` resource.
Elastic Transcoder returns all of the jobs currently in the
specified pipeline. The response body contains one element for
@@ -291,12 +292,12 @@ class ElasticTranscoderConnection(AWSAuthConnection):
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
- param=params)
+ params=params)
def list_jobs_by_status(self, status, ascending=None, page_token=None):
"""
To get a list of the jobs that have a specified status, send a
- GET request to the `//jobsByStatus/ [status] ` resource.
+ GET request to the `//jobsByStatus/ [status]` resource.
Elastic Transcoder returns all of the jobs that have the
specified status. The response body contains one element for
@@ -327,7 +328,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
- param=params)
+ params=params)
def list_pipelines(self):
"""
@@ -352,7 +353,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def read_job(self, id):
"""
To get detailed information about a job, send a GET request to
- the `//jobs/ [jobId] ` resource.
+ the `//jobs/ [jobId]` resource.
:type id: string
:param id: The identifier of the job for which you want to get detailed
@@ -365,7 +366,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def read_pipeline(self, id):
"""
To get detailed information about a pipeline, send a GET
- request to the `//pipelines/ [pipelineId] ` resource.
+ request to the `//pipelines/ [pipelineId]` resource.
:type id: string
:param id: The identifier of the pipeline to read.
@@ -377,7 +378,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
def read_preset(self, id):
"""
To get detailed information about a preset, send a GET request
- to the `//presets/ [presetId] ` resource.
+ to the `//presets/ [presetId]` resource.
:type id: string
:param id: The identifier of the preset for which you want to get
@@ -441,7 +442,7 @@ class ElasticTranscoderConnection(AWSAuthConnection):
:param id: The identifier of the pipeline for which you want to change
notification settings.
- :type notifications: structure
+ :type notifications: dict
:param notifications: The Amazon SNS topic that you want to notify to report job
status.
To receive notifications, you must also subscribe
diff --git a/boto/emr/connection.py b/boto/emr/connection.py
index 0f08bfea..95083abd 100644
--- a/boto/emr/connection.py
+++ b/boto/emr/connection.py
@@ -253,7 +253,7 @@ class EmrConnection(AWSQueryConnection):
:type hadoop_version: str
:param hadoop_version: Version of Hadoop to use. This no longer
- defaults to '0.20' and now uses the AMI default.
+ defaults to '0.20' and now uses the AMI default.
:type steps: list(boto.emr.Step)
:param steps: List of steps to add with the job
diff --git a/boto/exception.py b/boto/exception.py
index ebfd982e..9beee960 100644
--- a/boto/exception.py
+++ b/boto/exception.py
@@ -83,8 +83,8 @@ class BotoServerError(StandardError):
# then just ignore the error response.
if self.body:
try:
- h = handler.XmlHandler(self, self)
- xml.sax.parseString(self.body, h)
+ h = handler.XmlHandlerWrapper(self, self)
+ h.parseString(self.body)
except (TypeError, xml.sax.SAXParseException), pe:
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
diff --git a/boto/file/key.py b/boto/file/key.py
index d39c8c65..2f20cae5 100755
--- a/boto/file/key.py
+++ b/boto/file/key.py
@@ -37,8 +37,10 @@ class Key(object):
self.full_path = name
if name == '-':
self.name = None
+ self.size = None
else:
self.name = name
+ self.size = os.stat(name).st_size
self.key_type = key_type
if key_type == self.KEY_STREAM_READABLE:
self.fp = sys.stdin
@@ -68,9 +70,9 @@ class Key(object):
:type num_cb: int
:param num_cb: ignored in this subclass.
"""
- if self.key_type & self.KEY_STREAM_READABLE:
- raise BotoClientError('Stream is not Readable')
- elif self.key_type & self.KEY_STREAM_WRITABLE:
+ if self.key_type & self.KEY_STREAM_WRITABLE:
+ raise BotoClientError('Stream is not readable')
+ elif self.key_type & self.KEY_STREAM_READABLE:
key_file = self.fp
else:
key_file = open(self.full_path, 'rb')
@@ -114,9 +116,9 @@ class Key(object):
This is the same format returned by the compute_md5 method.
:param md5: ignored in this subclass.
"""
- if self.key_type & self.KEY_STREAM_WRITABLE:
+ if self.key_type & self.KEY_STREAM_READABLE:
raise BotoClientError('Stream is not writable')
- elif self.key_type & self.KEY_STREAM_READABLE:
+ elif self.key_type & self.KEY_STREAM_WRITABLE:
key_file = self.fp
else:
if not replace and os.path.exists(self.full_path):
@@ -127,6 +129,35 @@ class Key(object):
finally:
key_file.close()
+ def get_contents_to_file(self, fp, headers=None, cb=None, num_cb=None,
+ torrent=False, version_id=None,
+ res_download_handler=None, response_headers=None):
+ """
+ Copy contents from the current file to the file pointed to by 'fp'.
+
+ :type fp: File-like object
+ :param fp:
+
+ :type headers: dict
+ :param headers: Unused in this subclass.
+
+ :type cb: function
+ :param cb: Unused in this subclass.
+
+ :type num_cb: int
+ :param num_cb: Unused in this subclass.
+
+ :type torrent: bool
+ :param torrent: Unused in this subclass.
+
+ :type res_download_handler: ResumableDownloadHandler
+ :param res_download_handler: Unused in this subclass.
+
+ :type response_headers: dict
+ :param response_headers: Unused in this subclass.
+ """
+ shutil.copyfileobj(self.fp, fp)
+
def get_contents_as_string(self, headers=None, cb=None, num_cb=10,
torrent=False):
"""
diff --git a/boto/fps/connection.py b/boto/fps/connection.py
index 3b9057e4..8f2aaee8 100644
--- a/boto/fps/connection.py
+++ b/boto/fps/connection.py
@@ -120,58 +120,65 @@ class FPSConnection(AWSQueryConnection):
'SenderTokenId', 'SettlementAmount.CurrencyCode'])
@api_action()
def settle_debt(self, action, response, **kw):
- """Allows a caller to initiate a transaction that atomically
- transfers money from a sender's payment instrument to the
- recipient, while decreasing corresponding debt balance.
+ """
+ Allows a caller to initiate a transaction that atomically transfers
+ money from a sender's payment instrument to the recipient, while
+ decreasing corresponding debt balance.
"""
return self.get_object(action, kw, response)
@requires(['TransactionId'])
@api_action()
def get_transaction_status(self, action, response, **kw):
- """Gets the latest status of a transaction.
+ """
+ Gets the latest status of a transaction.
"""
return self.get_object(action, kw, response)
@requires(['StartDate'])
@api_action()
def get_account_activity(self, action, response, **kw):
- """Returns transactions for a given date range.
+ """
+ Returns transactions for a given date range.
"""
return self.get_object(action, kw, response)
@requires(['TransactionId'])
@api_action()
def get_transaction(self, action, response, **kw):
- """Returns all details of a transaction.
+ """
+ Returns all details of a transaction.
"""
return self.get_object(action, kw, response)
@api_action()
def get_outstanding_debt_balance(self, action, response):
- """Returns the total outstanding balance for all the credit
- instruments for the given creditor account.
+ """
+ Returns the total outstanding balance for all the credit instruments
+ for the given creditor account.
"""
return self.get_object(action, {}, response)
@requires(['PrepaidInstrumentId'])
@api_action()
def get_prepaid_balance(self, action, response, **kw):
- """Returns the balance available on the given prepaid instrument.
+ """
+ Returns the balance available on the given prepaid instrument.
"""
return self.get_object(action, kw, response)
@api_action()
def get_total_prepaid_liability(self, action, response):
- """Returns the total liability held by the given account
- corresponding to all the prepaid instruments owned by the
- account.
+ """
+ Returns the total liability held by the given account corresponding to
+ all the prepaid instruments owned by the account.
"""
return self.get_object(action, {}, response)
@api_action()
def get_account_balance(self, action, response):
- """Returns the account balance for an account in real time.
+ """
+ Returns the account balance for an account in real time.
"""
return self.get_object(action, {}, response)
@@ -179,15 +186,17 @@ class FPSConnection(AWSQueryConnection):
@requires(['PaymentInstruction', 'TokenType'])
@api_action()
def install_payment_instruction(self, action, response, **kw):
- """Installs a payment instruction for caller.
+ """
+ Installs a payment instruction for caller.
"""
return self.get_object(action, kw, response)
@needs_caller_reference
@requires(['returnURL', 'pipelineName'])
def cbui_url(self, **kw):
- """Generate a signed URL for the Co-Branded service API given
- arguments as payload.
+ """
+ Generate a signed URL for the Co-Branded service API given arguments as
+ payload.
"""
sandbox = 'sandbox' in self.host and 'payments-sandbox' or 'payments'
endpoint = 'authorize.{0}.amazon.com'.format(sandbox)
@@ -220,9 +229,10 @@ class FPSConnection(AWSQueryConnection):
'TransactionAmount.CurrencyCode'])
@api_action()
def reserve(self, action, response, **kw):
- """Reserve API is part of the Reserve and Settle API conjunction
- that serve the purpose of a pay where the authorization and
- settlement have a timing difference.
+ """
+ Reserve API is part of the Reserve and Settle API conjunction that
+ serve the purpose of a pay where the authorization and settlement have
+ a timing difference.
"""
return self.get_object(action, kw, response)
@@ -232,15 +242,16 @@ class FPSConnection(AWSQueryConnection):
'TransactionAmount.CurrencyCode'])
@api_action()
def pay(self, action, response, **kw):
- """Allows calling applications to move money from a sender to
- a recipient.
+ """
+ Allows calling applications to move money from a sender to a recipient.
"""
return self.get_object(action, kw, response)
@requires(['TransactionId'])
@api_action()
def cancel(self, action, response, **kw):
- """Cancels an ongoing transaction and puts it in cancelled state.
+ """
+ Cancels an ongoing transaction and puts it in cancelled state.
"""
return self.get_object(action, kw, response)
@@ -249,8 +260,9 @@ class FPSConnection(AWSQueryConnection):
'TransactionAmount.CurrencyCode'])
@api_action()
def settle(self, action, response, **kw):
- """The Settle API is used in conjunction with the Reserve API and
- is used to settle previously reserved transaction.
+ """
+ The Settle API is used in conjunction with the Reserve API and is used
+ to settle previously reserved transaction.
"""
return self.get_object(action, kw, response)
@@ -259,50 +271,57 @@ class FPSConnection(AWSQueryConnection):
'CallerReference', 'RefundAmount.CurrencyCode'])
@api_action()
def refund(self, action, response, **kw):
- """Refunds a previously completed transaction.
+ """
+ Refunds a previously completed transaction.
"""
return self.get_object(action, kw, response)
@requires(['RecipientTokenId'])
@api_action()
def get_recipient_verification_status(self, action, response, **kw):
- """Returns the recipient status.
+ """
+ Returns the recipient status.
"""
return self.get_object(action, kw, response)
@requires(['CallerReference'], ['TokenId'])
@api_action()
def get_token_by_caller(self, action, response, **kw):
- """Returns the details of a particular token installed by this
- calling application using the subway co-branded UI.
+ """
+ Returns the details of a particular token installed by this calling
+ application using the subway co-branded UI.
"""
return self.get_object(action, kw, response)
@requires(['UrlEndPoint', 'HttpParameters'])
@api_action()
def verify_signature(self, action, response, **kw):
- """Verify the signature that FPS sent in IPN or callback urls.
+ """
+ Verify the signature that FPS sent in IPN or callback urls.
"""
return self.get_object(action, kw, response)
@api_action()
def get_tokens(self, action, response, **kw):
- """Returns a list of tokens installed on the given account.
+ """
+ Returns a list of tokens installed on the given account.
"""
return self.get_object(action, kw, response)
@requires(['TokenId'])
@api_action()
def get_token_usage(self, action, response, **kw):
- """Returns the usage of a token.
+ """
+ Returns the usage of a token.
"""
return self.get_object(action, kw, response)
@requires(['TokenId'])
@api_action()
def cancel_token(self, action, response, **kw):
- """Cancels any token installed by the calling application on
- its own account.
+ """
+ Cancels any token installed by the calling application on its own
+ account.
"""
return self.get_object(action, kw, response)
@@ -312,14 +331,16 @@ class FPSConnection(AWSQueryConnection):
'SenderTokenId', 'FundingAmount.CurrencyCode'])
@api_action()
def fund_prepaid(self, action, response, **kw):
- """Funds the prepaid balance on the given prepaid instrument.
+ """
+ Funds the prepaid balance on the given prepaid instrument.
"""
return self.get_object(action, kw, response)
@requires(['CreditInstrumentId'])
@api_action()
def get_debt_balance(self, action, response, **kw):
- """Returns the balance corresponding to the given credit instrument.
+ """
+ Returns the balance corresponding to the given credit instrument.
"""
return self.get_object(action, kw, response)
@@ -329,22 +350,25 @@ class FPSConnection(AWSQueryConnection):
'AdjustmentAmount.CurrencyCode'])
@api_action()
def write_off_debt(self, action, response, **kw):
- """Allows a creditor to write off the debt balance accumulated
- partially or fully at any time.
+ """
+ Allows a creditor to write off the debt balance accumulated partially
+ or fully at any time.
"""
return self.get_object(action, kw, response)
@requires(['SubscriptionId'])
@api_action()
def get_transactions_for_subscription(self, action, response, **kw):
- """Returns the transactions for a given subscriptionID.
+ """
+ Returns the transactions for a given subscriptionID.
"""
return self.get_object(action, kw, response)
@requires(['SubscriptionId'])
@api_action()
def get_subscription_details(self, action, response, **kw):
- """Returns the details of Subscription for a given subscriptionID.
+ """
+ Returns the details of Subscription for a given subscriptionID.
"""
return self.get_object(action, kw, response)
@@ -353,7 +377,8 @@ class FPSConnection(AWSQueryConnection):
@requires(['SubscriptionId'])
@api_action()
def cancel_subscription_and_refund(self, action, response, **kw):
- """Cancels a subscription.
+ """
+ Cancels a subscription.
"""
message = "If you specify a RefundAmount, " \
"you must specify CallerReference."
@@ -364,6 +389,7 @@ class FPSConnection(AWSQueryConnection):
@requires(['TokenId'])
@api_action()
def get_payment_instruction(self, action, response, **kw):
- """Gets the payment instruction of a token.
+ """
+ Gets the payment instruction of a token.
"""
return self.get_object(action, kw, response)
diff --git a/boto/glacier/concurrent.py b/boto/glacier/concurrent.py
index a956f066..af727ec2 100644
--- a/boto/glacier/concurrent.py
+++ b/boto/glacier/concurrent.py
@@ -96,6 +96,11 @@ class ConcurrentUploader(ConcurrentTransferer):
the archive parts. The part size must be a megabyte multiplied by
a power of two.
+ :type num_threads: int
+ :param num_threads: The number of threads to spawn for the thread pool.
+ The number of threads controls how many parts are uploaded
+ concurrently.
+
"""
super(ConcurrentUploader, self).__init__(part_size, num_threads)
self._api = api
@@ -194,13 +199,18 @@ class TransferThread(threading.Thread):
except Empty:
continue
if work is _END_SENTINEL:
+ self._cleanup()
return
result = self._process_chunk(work)
self._result_queue.put(result)
+ self._cleanup()
def _process_chunk(self, work):
pass
+ def _cleanup(self):
+ pass
+
class UploadWorkerThread(TransferThread):
def __init__(self, api, vault_name, filename, upload_id,
@@ -219,14 +229,16 @@ class UploadWorkerThread(TransferThread):
def _process_chunk(self, work):
result = None
- for _ in xrange(self._num_retries):
+ for i in xrange(self._num_retries + 1):
try:
result = self._upload_chunk(work)
break
except self._retry_exceptions, e:
log.error("Exception caught uploading part number %s for "
- "vault %s, filename: %s", work[0], self._vault_name,
- self._filename)
+ "vault %s, attempt: (%s / %s), filename: %s, "
+ "exception: %s, msg: %s",
+ work[0], self._vault_name, i + 1, self._num_retries + 1,
+ self._filename, e.__class__, e)
time.sleep(self._time_between_retries)
result = e
return result
@@ -248,6 +260,9 @@ class UploadWorkerThread(TransferThread):
response.read()
return (part_number, tree_hash_bytes)
+ def _cleanup(self):
+ self._fileobj.close()
+
class ConcurrentDownloader(ConcurrentTransferer):
"""
diff --git a/boto/glacier/vault.py b/boto/glacier/vault.py
index e037adc7..6e47bf76 100644
--- a/boto/glacier/vault.py
+++ b/boto/glacier/vault.py
@@ -237,7 +237,8 @@ class Vault(object):
return resume_file_upload(
self, upload_id, part_size, file_obj, part_hash_map)
- def concurrent_create_archive_from_file(self, filename, description):
+ def concurrent_create_archive_from_file(self, filename, description,
+ **kwargs):
"""
Create a new archive from a file and upload the given
file.
@@ -250,6 +251,12 @@ class Vault(object):
:type filename: str
:param filename: A filename to upload
+ :param kwargs: Additional kwargs to pass through to
+ :py:class:`boto.glacier.concurrent.ConcurrentUploader`.
+ You can pass any argument besides the ``api`` and
+ ``vault_name`` param (these arguments are already
+ passed to the ``ConcurrentUploader`` for you).
+
:raises: `boto.glacier.exception.UploadArchiveError` if an error
occurs during the upload process.
@@ -257,7 +264,7 @@ class Vault(object):
:return: The archive id of the newly created archive
"""
- uploader = ConcurrentUploader(self.layer1, self.name)
+ uploader = ConcurrentUploader(self.layer1, self.name, **kwargs)
archive_id = uploader.upload(filename, description)
return archive_id
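# Sketch of the pass-through added above: keyword arguments such as part_size
# and num_threads now reach the ConcurrentUploader. The region, vault name and
# filename are placeholders.
import boto.glacier

layer2 = boto.glacier.connect_to_region('us-east-1')
vault = layer2.get_vault('my-vault')
archive_id = vault.concurrent_create_archive_from_file(
    '/tmp/backup.tar', 'nightly backup',
    part_size=8 * 1024 * 1024, num_threads=4)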
diff --git a/boto/gs/acl.py b/boto/gs/acl.py
index 047254cf..57bdce1c 100755
--- a/boto/gs/acl.py
+++ b/boto/gs/acl.py
@@ -51,11 +51,12 @@ CannedACLStrings = ['private', 'public-read', 'project-private',
SupportedPermissions = ['READ', 'WRITE', 'FULL_CONTROL']
"""A list of supported ACL permissions."""
-class ACL:
+
+class ACL(object):
def __init__(self, parent=None):
self.parent = parent
- self.entries = []
+ self.entries = Entries(self)
@property
def acl(self):
@@ -125,7 +126,7 @@ class ACL:
return s
-class Entries:
+class Entries(object):
def __init__(self, parent=None):
self.parent = parent
@@ -154,15 +155,17 @@ class Entries:
setattr(self, name, value)
def to_xml(self):
+ if not self.entry_list:
+ return ''
s = '<%s>' % ENTRIES
for entry in self.entry_list:
s += entry.to_xml()
s += '</%s>' % ENTRIES
return s
-
+
# Class that represents a single (Scope, Permission) entry in an ACL.
-class Entry:
+class Entry(object):
def __init__(self, scope=None, type=None, id=None, name=None,
email_address=None, domain=None, permission=None):
@@ -219,7 +222,8 @@ class Entry:
s += '</%s>' % ENTRY
return s
-class Scope:
+
+class Scope(object):
# Map from Scope type.lower() to lower-cased list of allowed sub-elems.
ALLOWED_SCOPE_TYPE_SUB_ELEMS = {
diff --git a/boto/gs/bucket.py b/boto/gs/bucket.py
index 57850a20..96c2bdc3 100644
--- a/boto/gs/bucket.py
+++ b/boto/gs/bucket.py
@@ -33,6 +33,7 @@ from boto.gs.cors import Cors
from boto.gs.key import Key as GSKey
from boto.s3.acl import Policy
from boto.s3.bucket import Bucket as S3Bucket
+from boto.utils import get_utf8_value
# constants for http query args
DEF_OBJ_ACL = 'defaultObjectAcl'
@@ -80,6 +81,9 @@ class Bucket(S3Bucket):
with the stored object in the response. See
http://goo.gl/06N3b for details.
+ :type version_id: string
+ :param version_id: Unused in this subclass.
+
:type generation: int
:param generation: A specific generation number to fetch the key at. If
not specified, the latest generation is fetched.
@@ -123,6 +127,9 @@ class Bucket(S3Bucket):
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
+ :type version_id: string
+ :param version_id: Unused in this subclass.
+
:type storage_class: string
:param storage_class: The storage class of the new key. By
default, the new key will use the standard storage class.
@@ -156,12 +163,10 @@ class Bucket(S3Bucket):
if src_generation:
headers = headers or {}
headers['x-goog-copy-source-generation'] = str(src_generation)
- super(Bucket, self).copy_key(new_key_name, src_bucket_name,
- src_key_name, metadata=metadata,
- storage_class=storage_class,
- preserve_acl=preserve_acl,
- encrypt_key=encrypt_key, headers=headers,
- query_args=query_args)
+ return super(Bucket, self).copy_key(
+ new_key_name, src_bucket_name, src_key_name, metadata=metadata,
+ storage_class=storage_class, preserve_acl=preserve_acl,
+ encrypt_key=encrypt_key, headers=headers, query_args=query_args)
def list_versions(self, prefix='', delimiter='', marker='',
generation_marker='', headers=None):
@@ -215,6 +220,12 @@ class Bucket(S3Bucket):
:type headers: dict
:param headers: A dictionary of header name/value pairs.
+ :type version_id: string
+ :param version_id: Unused in this subclass.
+
+ :type mfa_token: tuple or list of strings
+ :param mfa_token: Unused in this subclass.
+
:type generation: int
:param generation: The generation number of the key to delete. If not
specified, the latest generation number will be deleted.
@@ -245,6 +256,9 @@ class Bucket(S3Bucket):
:type headers: dict
:param headers: Additional headers to set during the request.
+ :type version_id: string
+ :param version_id: Unused in this subclass.
+
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
@@ -263,7 +277,8 @@ class Bucket(S3Bucket):
raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
elif isinstance(acl_or_str, ACL):
self.set_xml_acl(acl_or_str.to_xml(), key_name, headers=headers,
- generation=generation, if_generation=if_generation,
+ generation=generation,
+ if_generation=if_generation,
if_metageneration=if_metageneration)
else:
self.set_canned_acl(acl_or_str, key_name, headers=headers,
@@ -316,6 +331,9 @@ class Bucket(S3Bucket):
:param dict headers: Additional headers to set during the request.
+ :type version_id: string
+ :param version_id: Unused in this subclass.
+
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned. This parameter is only valid when retrieving
@@ -325,11 +343,11 @@ class Bucket(S3Bucket):
"""
query_args = STANDARD_ACL
if generation:
- query_args += '&generation=%s' % str(generation)
+ query_args += '&generation=%s' % generation
return self._get_acl_helper(key_name, headers, query_args)
def get_xml_acl(self, key_name='', headers=None, version_id=None,
- generation=None):
+ generation=None):
"""Returns the ACL string of the bucket or an object in the bucket.
:param str key_name: The name of the object to get the ACL for. If not
@@ -337,6 +355,9 @@ class Bucket(S3Bucket):
:param dict headers: Additional headers to set during the request.
+ :type version_id: string
+ :param version_id: Unused in this subclass.
+
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned. This parameter is only valid when retrieving
@@ -346,7 +367,7 @@ class Bucket(S3Bucket):
"""
query_args = STANDARD_ACL
if generation:
- query_args += '&generation=%s' % str(generation)
+ query_args += '&generation=%s' % generation
return self._get_xml_acl_helper(key_name, headers, query_args)
def get_def_acl(self, headers=None):
@@ -370,14 +391,14 @@ class Bucket(S3Bucket):
if canned:
headers[self.connection.provider.acl_header] = acl_or_str
else:
- data = acl_or_str.encode('UTF-8')
+ data = acl_or_str
if generation:
- query_args += '&generation=%s' % str(generation)
+ query_args += '&generation=%s' % generation
if if_metageneration is not None and if_generation is None:
raise ValueError("Received if_metageneration argument with no "
- "if_generation argument. A meta-generation has no "
+ "if_generation argument. A metageneration has no "
"meaning without a content generation.")
if not key_name and (if_generation or if_metageneration):
raise ValueError("Received if_generation or if_metageneration "
@@ -387,8 +408,9 @@ class Bucket(S3Bucket):
if if_metageneration is not None:
headers['x-goog-if-metageneration-match'] = str(if_metageneration)
- response = self.connection.make_request('PUT', self.name, key_name,
- data=data, headers=headers, query_args=query_args)
+ response = self.connection.make_request(
+ 'PUT', get_utf8_value(self.name), get_utf8_value(key_name),
+ data=get_utf8_value(data), headers=headers, query_args=query_args)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
@@ -409,6 +431,9 @@ class Bucket(S3Bucket):
:type headers: dict
:param headers: Additional headers to set during the request.
+ :type version_id: string
+ :param version_id: Unused in this subclass.
+
:type query_args: str
:param query_args: The query parameters to pass with the request.
@@ -448,6 +473,9 @@ class Bucket(S3Bucket):
:type headers: dict
:param headers: Additional headers to set during the request.
+ :type version_id: string
+ :param version_id: Unused in this subclass.
+
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
@@ -484,8 +512,9 @@ class Bucket(S3Bucket):
raise ValueError("Provided canned ACL string (%s) is not valid."
% acl_str)
query_args = DEF_OBJ_ACL
- return self._set_acl_helper(acl_str, '', headers, query_args, None,
- None, None, canned=True)
+ return self._set_acl_helper(acl_str, '', headers, query_args,
+ generation=None, if_generation=None,
+ if_metageneration=None, canned=True)
def set_def_xml_acl(self, acl_str, headers=None):
"""Sets a bucket's default ACL to an XML string.
@@ -525,11 +554,9 @@ class Bucket(S3Bucket):
:param str cors: A string containing the CORS XML.
:param dict headers: Additional headers to send with the request.
"""
- cors_xml = cors.encode('UTF-8')
- response = self.connection.make_request('PUT', self.name,
- data=cors_xml,
- query_args=CORS_ARG,
- headers=headers)
+ response = self.connection.make_request(
+ 'PUT', get_utf8_value(self.name), data=get_utf8_value(cors),
+ query_args=CORS_ARG, headers=headers)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
@@ -594,7 +621,8 @@ class Bucket(S3Bucket):
# Method with same signature as boto.s3.bucket.Bucket.add_user_grant(),
# to allow polymorphic treatment at application layer.
- def add_user_grant(self, permission, user_id, recursive=False, headers=None):
+ def add_user_grant(self, permission, user_id, recursive=False,
+ headers=None):
"""
Convenience method that provides a quick way to add a canonical user
grant to a bucket. This method retrieves the current ACL, creates a new
@@ -709,6 +737,56 @@ class Bucket(S3Bucket):
self.set_subresource('logging', xml_str, headers=headers)
+ def get_logging_config_with_xml(self, headers=None):
+ """Returns the current status of logging configuration on the bucket as
+ unparsed XML.
+
+ :param dict headers: Additional headers to send with the request.
+
+ :rtype: 2-Tuple
+ :returns: 2-tuple containing:
+
+ 1) A dictionary containing the parsed XML response from GCS. The
+ overall structure is:
+
+ * Logging
+
+ * LogObjectPrefix: Prefix that is prepended to log objects.
+ * LogBucket: Target bucket for log objects.
+
+ 2) Unparsed XML describing the bucket's logging configuration.
+ """
+ response = self.connection.make_request('GET', self.name,
+ query_args='logging',
+ headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+
+ if response.status != 200:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ e = boto.jsonresponse.Element()
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e, body
+
+ def get_logging_config(self, headers=None):
+ """Returns the current status of logging configuration on the bucket.
+
+ :param dict headers: Additional headers to send with the request.
+
+ :rtype: dict
+ :returns: A dictionary containing the parsed XML response from GCS. The
+ overall structure is:
+
+ * Logging
+
+ * LogObjectPrefix: Prefix that is prepended to log objects.
+ * LogBucket: Target bucket for log objects.
+ """
+ return self.get_logging_config_with_xml(headers)[0]
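# A quick sketch of reading a bucket's logging configuration with the helpers
# above, assuming logging is already enabled on the (placeholder) bucket.
import boto

conn = boto.connect_gs()
bucket = conn.get_bucket('my-bucket')
config = bucket.get_logging_config()
logging = config['Logging']
print logging.get('LogBucket'), logging.get('LogObjectPrefix')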
+
def configure_website(self, main_page_suffix=None, error_key=None,
headers=None):
"""Configure this bucket to act as a website
@@ -739,9 +817,9 @@ class Bucket(S3Bucket):
error_frag = ''
body = self.WebsiteBody % (main_page_frag, error_frag)
- response = self.connection.make_request('PUT', self.name, data=body,
- query_args='websiteConfig',
- headers=headers)
+ response = self.connection.make_request(
+ 'PUT', get_utf8_value(self.name), data=get_utf8_value(body),
+ query_args='websiteConfig', headers=headers)
body = response.read()
if response.status == 200:
return True
@@ -755,8 +833,8 @@ class Bucket(S3Bucket):
:param dict headers: Additional headers to send with the request.
:rtype: dict
- :returns: A dictionary containing a Python representation
- of the XML response from GCS. The overall structure is:
+ :returns: A dictionary containing the parsed XML response from GCS. The
+ overall structure is:
* WebsiteConfiguration
@@ -765,7 +843,7 @@ class Bucket(S3Bucket):
* NotFoundPage: name of an object to serve when site visitors
encounter a 404.
"""
- return self.get_website_configuration_xml(self, headers)[0]
+ return self.get_website_configuration_with_xml(headers)[0]
def get_website_configuration_with_xml(self, headers=None):
"""Returns the current status of website configuration on the bucket as
@@ -776,8 +854,8 @@ class Bucket(S3Bucket):
:rtype: 2-Tuple
:returns: 2-tuple containing:
- 1) A dictionary containing a Python representation of the XML
- response from GCS. The overall structure is:
+ 1) A dictionary containing the parsed XML response from GCS. The
+ overall structure is:
* WebsiteConfiguration
diff --git a/boto/gs/connection.py b/boto/gs/connection.py
index 6d5e00af..e7f2aeb6 100755
--- a/boto/gs/connection.py
+++ b/boto/gs/connection.py
@@ -23,6 +23,7 @@ from boto.gs.bucket import Bucket
from boto.s3.connection import S3Connection
from boto.s3.connection import SubdomainCallingFormat
from boto.s3.connection import check_lowercase_bucketname
+from boto.utils import get_utf8_value
class Location:
DEFAULT = 'US'
@@ -50,7 +51,7 @@ class GSConnection(S3Connection):
storage_class='STANDARD'):
"""
Creates a new bucket. By default it's located in the USA. You can
- pass Location.EU to create an European bucket. You can also pass
+ pass Location.EU to create bucket in the EU. You can also pass
a LocationConstraint for where the bucket should be located, and
a StorageClass describing how the data should be stored.
@@ -89,8 +90,9 @@ class GSConnection(S3Connection):
storage_class_elem = ''
data = ('<CreateBucketConfiguration>%s%s</CreateBucketConfiguration>'
% (location_elem, storage_class_elem))
- response = self.make_request('PUT', bucket_name, headers=headers,
- data=data)
+ response = self.make_request(
+ 'PUT', get_utf8_value(bucket_name), headers=headers,
+ data=get_utf8_value(data))
body = response.read()
if response.status == 409:
raise self.provider.storage_create_error(
diff --git a/boto/gs/key.py b/boto/gs/key.py
index 1c45c0b5..7261b49f 100644
--- a/boto/gs/key.py
+++ b/boto/gs/key.py
@@ -27,6 +27,8 @@ import StringIO
from boto.exception import BotoClientError
from boto.s3.key import Key as S3Key
from boto.s3.keyfile import KeyFile
+from boto.utils import compute_hash
+from boto.utils import get_utf8_value
class Key(S3Key):
"""
@@ -47,17 +49,34 @@ class Key(S3Key):
:ivar last_modified: The string timestamp representing the last
time this object was modified in GS.
:ivar owner: The ID of the owner of this object.
- :ivar storage_class: The storage class of the object. Currently, one of:
+ :ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | DURABLE_REDUCED_AVAILABILITY.
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar generation: The generation number of the object.
- :ivar meta_generation: The generation number of the object metadata.
+ :ivar metageneration: The generation number of the object metadata.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
+ :ivar cloud_hashes: Dictionary of checksums as supplied by the storage
+ provider.
"""
- generation = None
- meta_generation = None
+
+ def __init__(self, bucket=None, name=None, generation=None):
+ super(Key, self).__init__(bucket=bucket, name=name)
+ self.generation = generation
+ self.metageneration = None
+ self.cloud_hashes = {}
+ self.component_count = None
+
+ def __repr__(self):
+ if self.generation and self.metageneration:
+ ver_str = '#%s.%s' % (self.generation, self.metageneration)
+ else:
+ ver_str = ''
+ if self.bucket:
+ return '<Key: %s,%s%s>' % (self.bucket.name, self.name, ver_str)
+ else:
+ return '<Key: None,%s%s>' % (self.name, ver_str)
def endElement(self, name, value, connection):
if name == 'Key':
@@ -82,25 +101,172 @@ class Key(S3Key):
elif name == 'Generation':
self.generation = value
elif name == 'MetaGeneration':
- self.meta_generation = value
+ self.metageneration = value
else:
setattr(self, name, value)
def handle_version_headers(self, resp, force=False):
- self.meta_generation = resp.getheader('x-goog-metageneration', None)
+ self.metageneration = resp.getheader('x-goog-metageneration', None)
self.generation = resp.getheader('x-goog-generation', None)
+ def handle_addl_headers(self, headers):
+ for key, value in headers:
+ if key == 'x-goog-hash':
+ for hash_pair in value.split(','):
+ alg, b64_digest = hash_pair.strip().split('=', 1)
+ self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest)
+ elif key == 'x-goog-component-count':
+ self.component_count = int(value)
+
+
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
- response_headers=None):
+ response_headers=None, hash_algs=None):
query_args = None
if self.generation:
query_args = ['generation=%s' % self.generation]
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
override_num_retries=override_num_retries,
response_headers=response_headers,
+ hash_algs=hash_algs,
query_args=query_args)
+ def get_contents_to_file(self, fp, headers=None,
+ cb=None, num_cb=10,
+ torrent=False,
+ version_id=None,
+ res_download_handler=None,
+ response_headers=None,
+ hash_algs=None):
+ """
+ Retrieve an object from GCS using the name of the Key object as the
+ key in GCS. Write the contents of the object to the file pointed
+ to by 'fp'.
+
+ :type fp: file-like object
+ :param fp: The file object to which the object's contents will be
+ written.
+
+ :type headers: dict
+ :param headers: additional HTTP headers that will be sent with
+ the GET request.
+
+ :type cb: function
+ :param cb: a callback function that will be called to report
+ progress on the download. The callback should accept two
+ integer parameters, the first representing the number of
+ bytes that have been successfully transmitted from GCS and
+ the second representing the total size of the object.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file transfer.
+
+ :type torrent: bool
+ :param torrent: If True, returns the contents of a torrent
+ file as a string.
+
+ :type res_download_handler: ResumableDownloadHandler
+ :param res_download_handler: If provided, this handler will
+ perform the download.
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP
+ headers/values that will override any headers associated
+ with the stored object in the response. See
+ http://goo.gl/sMkcC for details.
+ """
+ if self.bucket is not None:
+ if res_download_handler:
+ res_download_handler.get_file(self, fp, headers, cb, num_cb,
+ torrent=torrent,
+ version_id=version_id,
+ hash_algs=hash_algs)
+ else:
+ self.get_file(fp, headers, cb, num_cb, torrent=torrent,
+ version_id=version_id,
+ response_headers=response_headers,
+ hash_algs=hash_algs)
+
+ def compute_hash(self, fp, algorithm, size=None):
+ """
+ :type fp: file
+ :param fp: File pointer to the file to hash. The file
+ pointer will be reset to the same position before the
+ method returns.
+
+ :type algorithm: zero-argument constructor for hash objects that
+ implements update() and digest() (e.g. hashlib.md5)
+
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read
+ from the file pointer (fp). This is useful when uploading
+ a file in multiple parts where the file is being split
+ in place into different parts. Fewer bytes may be available.
+ """
+ hex_digest, b64_digest, data_size = compute_hash(
+ fp, size=size, hash_algorithm=algorithm)
+ # The internal implementation of compute_hash() needs to return the
+ # data size, but we don't want to return that value to the external
+ # caller because it changes the class interface (i.e. it might
+ # break some code), so we consume the third tuple value here and
+ # return the remainder of the tuple to the caller, thereby preserving
+ # the existing interface.
+ self.size = data_size
+ return (hex_digest, b64_digest)
+
+ def send_file(self, fp, headers=None, cb=None, num_cb=10,
+ query_args=None, chunked_transfer=False, size=None,
+ hash_algs=None):
+ """
+ Upload a file to GCS.
+
+ :type fp: file
+ :param fp: The file pointer to upload. The file pointer must
+ point at the offset from which you wish to upload.
+ i.e. if uploading the full file, it should point at the
+ start of the file. Normally when a file is opened for
+ reading, the fp will point at the first byte. See the
+ bytes parameter below for more info.
+
+ :type headers: dict
+ :param headers: The headers to pass along with the PUT request
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the granularity of
+ the callback by defining the maximum number of times the
+ callback will be called during the file
+ transfer. Providing a negative integer will cause your
+ callback to be called with each buffer read.
+
+ :type query_args: string
+ :param query_args: Arguments to pass in the query string.
+
+ :type chunked_transfer: boolean
+ :param chunked_transfer: (optional) If true, we use chunked
+ Transfer-Encoding.
+
+ :type size: int
+ :param size: (optional) The maximum number of bytes to read
+ from the file pointer (fp). This is useful when uploading
+ a file in multiple parts where you are splitting the file
+ up into different ranges to be uploaded. If not specified,
+ the default behaviour is to read all bytes from the file
+ pointer. Fewer bytes may be available.
+
+ :type hash_algs: dictionary
+ :param hash_algs: (optional) Dictionary of hash algorithms and
+ corresponding hashing class that implements update() and digest().
+ Defaults to {'md5': hashlib.md5}.
+ """
+ self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
+ query_args=query_args,
+ chunked_transfer=chunked_transfer, size=size,
+ hash_algs=hash_algs)
+
def delete(self):
return self.bucket.delete_key(self.name, version_id=self.version_id,
generation=self.generation)
@@ -279,7 +445,8 @@ class Key(S3Key):
provider = self.bucket.connection.provider
if res_upload_handler and size:
# could use size instead of file_length if provided but...
- raise BotoClientError('"size" param not supported for resumable uploads.')
+ raise BotoClientError(
+ '"size" param not supported for resumable uploads.')
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
@@ -421,22 +588,21 @@ class Key(S3Key):
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
- # Clear out any previously computed md5 hashes, since we are setting the content.
- self.md5 = None
- self.base64md5 = None
+ # Clear out any previously computed hashes, since we are setting the
+ # content.
+ self.local_hashes = {}
- fp = open(filename, 'rb')
- self.set_contents_from_file(fp, headers, replace, cb, num_cb,
- policy, md5, res_upload_handler,
- if_generation=if_generation)
- fp.close()
+ with open(filename, 'rb') as fp:
+ self.set_contents_from_file(fp, headers, replace, cb, num_cb,
+ policy, md5, res_upload_handler,
+ if_generation=if_generation)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
if_generation=None):
"""
- Store an object in S3 using the name of the Key object as the
- key in S3 and the string 's' as the contents.
+ Store an object in GCS using the name of the Key object as the
+ key in GCS and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
@@ -450,10 +616,10 @@ class Key(S3Key):
:type cb: function
:param cb: a callback function that will be called to report
- progress on the upload. The callback should accept
+ progress on the upload. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
- transmitted to S3 and the second representing the
+ transmitted to GCS and the second representing the
size of the to be transmitted object.
:type cb: int
@@ -463,19 +629,19 @@ class Key(S3Key):
the maximum number of times the callback will
be called during the file transfer.
- :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
- new key in S3.
+ new key in GCS.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the
- second element. This is the same format returned by
+ second element. This is the same format returned by
the compute_md5 method.
:param md5: If you need to compute the MD5 for any reason prior
to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values
- of the file. Otherwise, the checksum will be computed.
+ of the file. Otherwise, the checksum will be computed.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
@@ -488,9 +654,7 @@ class Key(S3Key):
self.md5 = None
self.base64md5 = None
- if isinstance(s, unicode):
- s = s.encode("utf-8")
- fp = StringIO.StringIO(s)
+ fp = StringIO.StringIO(get_utf8_value(s))
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5,
if_generation=if_generation)
@@ -540,12 +704,6 @@ class Key(S3Key):
:param policy: A canned ACL policy that will be applied to the new key
in GS.
- :type reduced_redundancy: bool
- :param reduced_redundancy: If True, this will set the storage
- class of the new Key to be REDUCED_REDUNDANCY. The Reduced
- Redundancy Storage (RRS) feature of S3, provides lower
- redundancy at lower storage cost.
-
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
@@ -565,7 +723,7 @@ class Key(S3Key):
headers = kwargs.get('headers', {})
headers['x-goog-if-generation-match'] = str(if_generation)
kwargs['headers'] = headers
- return super(Key, self).set_contents_from_stream(*args, **kwargs)
+ super(Key, self).set_contents_from_stream(*args, **kwargs)
def set_acl(self, acl_or_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
@@ -659,7 +817,7 @@ class Key(S3Key):
if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, headers=None, generation=None,
- if_generation=None, if_metageneration=None):
+ if_generation=None, if_metageneration=None):
"""Sets this objects's ACL using a predefined (canned) value.
:type acl_str: string
@@ -692,3 +850,42 @@ class Key(S3Key):
if_generation=if_generation,
if_metageneration=if_metageneration
)
+
+ def compose(self, components, content_type=None, headers=None):
+ """Create a new object from a sequence of existing objects.
+
+ The content of the object representing this Key will be the
+ concatenation of the given object sequence. For more detail, visit
+
+ https://developers.google.com/storage/docs/composite-objects
+
+ :type components: list of Keys
+ :param components: List of gs.Keys representing the component objects
+
+ :type content_type: string
+ :param content_type: (optional) Content type for the new composite
+ object.
+ """
+ compose_req = []
+ for key in components:
+ if key.bucket.name != self.bucket.name:
+ raise BotoClientError(
+ 'GCS does not support inter-bucket composing')
+
+ generation_tag = ''
+ if key.generation:
+ generation_tag = ('<Generation>%s</Generation>'
+ % str(key.generation))
+ compose_req.append('<Component><Name>%s</Name>%s</Component>' %
+ (key.name, generation_tag))
+ compose_req_xml = ('<ComposeRequest>%s</ComposeRequest>' %
+ ''.join(compose_req))
+ headers = headers or {}
+ if content_type:
+ headers['Content-Type'] = content_type
+ resp = self.bucket.connection.make_request(
+ 'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name),
+ headers=headers, query_args='compose',
+ data=get_utf8_value(compose_req_xml))
+ if resp.status < 200 or resp.status > 299:
+ raise self.bucket.connection.provider.storage_response_error(
+ resp.status, resp.reason, resp.read())
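
A minimal sketch of the new compose() method above; bucket and object names are illustrative. All components must live in the same bucket, otherwise the method raises BotoClientError:

    import boto

    conn = boto.connect_gs()
    bucket = conn.get_bucket('example-bucket')
    parts = [bucket.get_key('part-00000'), bucket.get_key('part-00001')]

    combined = bucket.new_key('combined-object')
    # Concatenates the parts server-side into 'combined-object'.
    combined.compose(parts, content_type='text/plain')
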
diff --git a/boto/gs/resumable_upload_handler.py b/boto/gs/resumable_upload_handler.py
index b9e41b01..b2ec8e8c 100644
--- a/boto/gs/resumable_upload_handler.py
+++ b/boto/gs/resumable_upload_handler.py
@@ -19,7 +19,6 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-import cgi
import errno
import httplib
import os
@@ -28,8 +27,7 @@ import re
import socket
import time
import urlparse
-import boto
-from boto import config
+from boto import config, UserAgent
from boto.connection import AWSAuthConnection
from boto.exception import InvalidUriError
from boto.exception import ResumableTransferDisposition
@@ -306,12 +304,12 @@ class ResumableUploadHandler(object):
self._save_tracker_uri_to_file()
def _upload_file_bytes(self, conn, http_conn, fp, file_length,
- total_bytes_uploaded, cb, num_cb, md5sum):
+ total_bytes_uploaded, cb, num_cb, headers):
"""
Makes one attempt to upload file bytes, using an existing resumable
upload connection.
- Returns etag from server upon success.
+ Returns (etag, generation, metageneration) from server upon success.
Raises ResumableUploadException if any problems occur.
"""
@@ -332,7 +330,10 @@ class ResumableUploadHandler(object):
# Content-Range header if the file is 0 bytes long, because the
# resumable upload protocol uses an *inclusive* end-range (so, sending
# 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
- put_headers = {}
+ if not headers:
+ put_headers = {}
+ else:
+ put_headers = headers.copy()
if file_length:
if total_bytes_uploaded == file_length:
range_header = self._build_content_range_header(
@@ -357,7 +358,8 @@ class ResumableUploadHandler(object):
http_conn.set_debuglevel(0)
while buf:
http_conn.send(buf)
- md5sum.update(buf)
+ for alg in self.digesters:
+ self.digesters[alg].update(buf)
total_bytes_uploaded += len(buf)
if cb:
i += 1
@@ -365,6 +367,7 @@ class ResumableUploadHandler(object):
cb(total_bytes_uploaded, file_length)
i = 0
buf = fp.read(self.BUFFER_SIZE)
+ http_conn.set_debuglevel(conn.debug)
if cb:
cb(total_bytes_uploaded, file_length)
if total_bytes_uploaded != file_length:
@@ -376,12 +379,14 @@ class ResumableUploadHandler(object):
(total_bytes_uploaded, file_length),
ResumableTransferDisposition.ABORT)
resp = http_conn.getresponse()
- body = resp.read()
# Restore http connection debug level.
http_conn.set_debuglevel(conn.debug)
if resp.status == 200:
- return resp.getheader('etag') # Success
+ # Success.
+ return (resp.getheader('etag'),
+ resp.getheader('x-goog-generation'),
+ resp.getheader('x-goog-metageneration'))
# Retry timeout (408) and status 500 and 503 errors after a delay.
elif resp.status in [408, 500, 503]:
disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY
@@ -393,11 +398,11 @@ class ResumableUploadHandler(object):
(resp.status, resp.reason), disposition)
def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,
- num_cb, md5sum):
+ num_cb):
"""
Attempts a resumable upload.
- Returns etag from server upon success.
+ Returns (etag, generation, metageneration) from server upon success.
Raises ResumableUploadException if any problems occur.
"""
@@ -412,9 +417,9 @@ class ResumableUploadHandler(object):
if server_end:
# If the server already has some of the content, we need to
- # update the md5 with the bytes that have already been
+ # update the digesters with the bytes that have already been
# uploaded to ensure we get a complete hash in the end.
- print 'Catching up md5 for resumed upload'
+ print 'Catching up hash digest(s) for resumed upload'
fp.seek(0)
# Read local file's bytes through position server has. For
# example, if server has (0, 3) we want to read 3-0+1=4 bytes.
@@ -423,13 +428,14 @@ class ResumableUploadHandler(object):
chunk = fp.read(min(key.BufferSize, bytes_to_go))
if not chunk:
raise ResumableUploadException(
- 'Hit end of file during resumable upload md5 '
+ 'Hit end of file during resumable upload hash '
'catchup. This should not happen under\n'
'normal circumstances, as it indicates the '
'server has more bytes of this transfer\nthan'
' the current file size. Restarting upload.',
ResumableTransferDisposition.START_OVER)
- md5sum.update(chunk)
+ for alg in self.digesters:
+ self.digesters[alg].update(chunk)
bytes_to_go -= len(chunk)
if conn.debug >= 1:
@@ -469,7 +475,8 @@ class ResumableUploadHandler(object):
# and can report that progress on next attempt.
try:
return self._upload_file_bytes(conn, http_conn, fp, file_length,
- total_bytes_uploaded, cb, num_cb, md5sum)
+ total_bytes_uploaded, cb, num_cb,
+ headers)
except (ResumableUploadException, socket.error):
resp = self._query_server_state(conn, file_length)
if resp.status == 400:
@@ -532,9 +539,9 @@ class ResumableUploadHandler(object):
else:
self.progress_less_iterations += 1
if roll_back_md5:
- # Rollback any potential md5sum updates, as we did not
+ # Rollback any potential hash updates, as we did not
# make any progress in this iteration.
- self.md5sum = self.md5sum_before_attempt
+ self.digesters = self.digesters_before_attempt
if self.progress_less_iterations > self.num_retries:
# Don't retry any longer in the current process.
@@ -551,7 +558,7 @@ class ResumableUploadHandler(object):
(self.progress_less_iterations, sleep_time_secs))
time.sleep(sleep_time_secs)
- def send_file(self, key, fp, headers, cb=None, num_cb=10):
+ def send_file(self, key, fp, headers, cb=None, num_cb=10, hash_algs=None):
"""
Upload a file to a key into a bucket on GS, using GS resumable upload
protocol.
@@ -579,6 +586,12 @@ class ResumableUploadHandler(object):
during the file transfer. Providing a negative integer will cause
your callback to be called with each buffer read.
+ :type hash_algs: dictionary
+ :param hash_algs: (optional) Dictionary mapping hash algorithm
+ descriptions to corresponding state-ful hashing objects that
+ implement update(), digest(), and copy() (e.g. hashlib.md5()).
+ Defaults to {'md5': md5()}.
+
Raises ResumableUploadException if a problem occurs during the transfer.
"""
@@ -589,20 +602,25 @@ class ResumableUploadHandler(object):
# that header.
CT = 'Content-Type'
if CT in headers and headers[CT] is None:
- del headers[CT]
+ del headers[CT]
+
+ headers['User-Agent'] = UserAgent
# Determine file size different ways for case where fp is actually a
# wrapper around a Key vs an actual file.
if isinstance(fp, KeyFile):
- file_length = fp.getkey().size
+ file_length = fp.getkey().size
else:
- fp.seek(0, os.SEEK_END)
- file_length = fp.tell()
- fp.seek(0)
+ fp.seek(0, os.SEEK_END)
+ file_length = fp.tell()
+ fp.seek(0)
debug = key.bucket.connection.debug
# Compute the MD5 checksum on the fly.
- self.md5sum = md5()
+ if hash_algs is None:
+ hash_algs = {'md5': md5}
+ self.digesters = dict(
+ (alg, hash_algs[alg]()) for alg in hash_algs or {})
# Use num-retries from constructor if one was provided; else check
# for a value specified in the boto config file; else default to 5.
@@ -612,15 +630,20 @@ class ResumableUploadHandler(object):
while True: # Retry as long as we're making progress.
server_had_bytes_before_attempt = self.server_has_bytes
- self.md5sum_before_attempt = self.md5sum.copy()
+ self.digesters_before_attempt = dict(
+ (alg, self.digesters[alg].copy())
+ for alg in self.digesters)
try:
- etag = self._attempt_resumable_upload(key, fp, file_length,
- headers, cb, num_cb,
- self.md5sum)
-
- # Get the final md5 for the uploaded content.
- hd = self.md5sum.hexdigest()
- key.md5, key.base64md5 = key.get_md5_from_hexdigest(hd)
+ # Save generation and metageneration in class state so caller
+ # can find these values, for use in preconditions of future
+ # operations on the uploaded object.
+ (etag, self.generation, self.metageneration) = (
+ self._attempt_resumable_upload(key, fp, file_length,
+ headers, cb, num_cb))
+
+ # Get the final digests for the uploaded content.
+ for alg in self.digesters:
+ key.local_hashes[alg] = self.digesters[alg].digest()
# Upload succceded, so remove the tracker file (if have one).
self._remove_tracker_file()
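
A hedged sketch of a resumable upload that exercises the reworked digest handling above (paths and object names are placeholders). On success the handler now records generation and metageneration for use as preconditions, and the final digests land in key.local_hashes:

    import boto
    from boto.gs.resumable_upload_handler import ResumableUploadHandler

    conn = boto.connect_gs()
    key = conn.get_bucket('example-bucket').new_key('large-object')

    handler = ResumableUploadHandler(
        tracker_file_name='/tmp/example-upload.tracker', num_retries=6)
    # hash_algs defaults to {'md5': md5}; the digests end up in
    # key.local_hashes once the upload completes.
    key.set_contents_from_filename('/path/to/large-file',
                                   res_upload_handler=handler)
    print handler.generation, handler.metageneration
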
diff --git a/boto/handler.py b/boto/handler.py
index 8f37dff1..df065cca 100644
--- a/boto/handler.py
+++ b/boto/handler.py
@@ -19,6 +19,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+import StringIO
import xml.sax
class XmlHandler(xml.sax.ContentHandler):
@@ -42,3 +43,14 @@ class XmlHandler(xml.sax.ContentHandler):
def characters(self, content):
self.current_text += content
+
+
+class XmlHandlerWrapper(object):
+ def __init__(self, root_node, connection):
+ self.handler = XmlHandler(root_node, connection)
+ self.parser = xml.sax.make_parser()
+ self.parser.setContentHandler(self.handler)
+ self.parser.setFeature(xml.sax.handler.feature_external_ges, 0)
+
+ def parseString(self, content):
+ return self.parser.parse(StringIO.StringIO(content))
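
A self-contained sketch of the new XmlHandlerWrapper; the Owner class below is illustrative and only follows the startElement/endElement contract boto's XML models use. The wrapper parses from a string with external general entities disabled:

    from boto.handler import XmlHandlerWrapper

    class Owner(object):
        # Minimal node implementing the SAX contract boto models use.
        def __init__(self):
            self.id = None
            self.display_name = None

        def startElement(self, name, attrs, connection):
            return None

        def endElement(self, name, value, connection):
            if name == 'ID':
                self.id = value
            elif name == 'DisplayName':
                self.display_name = value

    owner = Owner()
    XmlHandlerWrapper(owner, connection=None).parseString(
        '<Owner><ID>1234</ID><DisplayName>alice</DisplayName></Owner>')
    print owner.id, owner.display_name
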
diff --git a/boto/https_connection.py b/boto/https_connection.py
index 760e6081..4cbf5182 100644
--- a/boto/https_connection.py
+++ b/boto/https_connection.py
@@ -106,6 +106,8 @@ class CertValidatingHTTPSConnection(httplib.HTTPConnection):
def connect(self):
"Connect to a host on a given (SSL) port."
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if hasattr(self, "timeout") and self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
boto.log.debug("wrapping ssl socket; CA certificate file=%s",
self.ca_certs)
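
The added guard only applies a socket timeout when one was explicitly configured, mirroring httplib's sentinel value; a standalone sketch of the same pattern (host and port are placeholders):

    import socket

    def open_tcp_socket(host, port, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Only override the timeout if the caller supplied one.
        if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
            sock.settimeout(timeout)
        sock.connect((host, port))
        return sock
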
diff --git a/boto/opsworks/__init__.py b/boto/opsworks/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/boto/opsworks/__init__.py
diff --git a/boto/opsworks/exceptions.py b/boto/opsworks/exceptions.py
new file mode 100644
index 00000000..da23e485
--- /dev/null
+++ b/boto/opsworks/exceptions.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.exception import JSONResponseError
+
+
+class ResourceNotFoundException(JSONResponseError):
+ pass
+
+
+class ValidationException(JSONResponseError):
+ pass
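
A hedged usage sketch for the OpsWorks client added in layer1.py below; ARNs, IDs, and instance sizes are placeholders, credentials are assumed to come from the usual boto config or environment, and the response keys follow the OpsWorks JSON API, which these layer1 calls are expected to return as decoded dicts:

    from boto.opsworks.layer1 import OpsWorksConnection

    conn = OpsWorksConnection()  # SigV4-signed; us-east-1 endpoint by default
    stack = conn.create_stack(
        name='example-stack',
        region='us-east-1',
        service_role_arn='arn:aws:iam::111122223333:role/example-service-role',
        default_instance_profile_arn=(
            'arn:aws:iam::111122223333:instance-profile/example-profile'))
    instance = conn.create_instance(
        stack_id=stack['StackId'],
        layer_ids=['example-layer-id'],
        instance_type='m1.small')
    conn.start_instance(instance['InstanceId'])
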
diff --git a/boto/opsworks/layer1.py b/boto/opsworks/layer1.py
new file mode 100644
index 00000000..8177ac12
--- /dev/null
+++ b/boto/opsworks/layer1.py
@@ -0,0 +1,1457 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import json
+import boto
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.opsworks import exceptions
+
+
+class OpsWorksConnection(AWSQueryConnection):
+ """
+ AWS OpsWorks
+ """
+ APIVersion = "2013-02-18"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "opsworks.us-east-1.amazonaws.com"
+ ServiceName = "OpsWorks"
+ TargetPrefix = "OpsWorks_20130218"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "ResourceNotFoundException": exceptions.ResourceNotFoundException,
+ "ValidationException": exceptions.ValidationException,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.get('region')
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+ kwargs['host'] = region.endpoint
+ AWSQueryConnection.__init__(self, **kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def clone_stack(self, source_stack_id, service_role_arn, name=None,
+ region=None, attributes=None,
+ default_instance_profile_arn=None, default_os=None,
+ hostname_theme=None, default_availability_zone=None,
+ custom_json=None, use_custom_cookbooks=None,
+ custom_cookbooks_source=None, default_ssh_key_name=None,
+ clone_permissions=None, clone_app_ids=None):
+ """
+ Creates a clone of a specified stack.
+
+ :type source_stack_id: string
+ :param source_stack_id: The source stack ID.
+
+ :type name: string
+ :param name: The cloned stack name.
+
+ :type region: string
+ :param region: The cloned stack AWS region, such as "us-west-2". For
+ more information about AWS regions, see `Regions and Endpoints`_
+
+ :type attributes: map
+ :param attributes: A list of stack attributes and values as key/value
+ pairs to be added to the cloned stack.
+
+ :type service_role_arn: string
+ :param service_role_arn: The stack AWS Identity and Access Management
+ (IAM) role, which allows OpsWorks to work with AWS resources on
+ your behalf. You must set this parameter to the Amazon Resource
+ Name (ARN) for an existing IAM role. If you create a stack by using
+ the OpsWorks console, it creates the role for you. You can obtain
+ an existing stack's IAM ARN programmatically by calling
+ DescribePermissions. For more information about IAM ARNs, see
+ `Using Identifiers`_.
+
+ :type default_instance_profile_arn: string
+ :param default_instance_profile_arn: The ARN of an IAM profile that is
+ the default profile for all of the stack's EC2 instances. For more
+ information about IAM ARNs, see `Using Identifiers`_.
+
+ :type default_os: string
+ :param default_os: The cloned stack default operating system, which
+ must be either "Amazon Linux" or "Ubuntu 12.04 LTS".
+
+ :type hostname_theme: string
+ :param hostname_theme: The stack's host name theme, with spaces
+ replaced by underscores. The theme is used to generate hostnames
+ for the stack's instances. By default, `HostnameTheme` is set to
+ Layer_Dependent, which creates hostnames by appending integers to
+ the layer's shortname. The other themes are:
+
+ + Baked_Goods
+ + Clouds
+ + European_Cities
+ + Fruits
+ + Greek_Deities
+ + Legendary_Creatures_from_Japan
+ + Planets_and_Moons
+ + Roman_Deities
+ + Scottish_Islands
+ + US_Cities
+ + Wild_Cats
+
+
+ To obtain a generated hostname, call `GetHostNameSuggestion`, which
+ returns a hostname based on the current theme.
+
+ :type default_availability_zone: string
+ :param default_availability_zone: The cloned stack's Availability Zone.
+ For more information, see `Regions and Endpoints`_.
+
+ :type custom_json: string
+ :param custom_json:
+ A string that contains user-defined, custom JSON. It is used to
+ override the corresponding default stack configuration JSON values.
+ The string should be in the following format and must escape
+ characters such as '"':
+ `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
+
+ :type use_custom_cookbooks: boolean
+ :param use_custom_cookbooks: Whether to use custom cookbooks.
+
+ :type custom_cookbooks_source: dict
+ :param custom_cookbooks_source:
+
+ :type default_ssh_key_name: string
+ :param default_ssh_key_name: A default SSH key for the stack instances.
+ You can override this value when you create or update an instance.
+
+ :type clone_permissions: boolean
+ :param clone_permissions: Whether to clone the source stack's
+ permissions.
+
+ :type clone_app_ids: list
+ :param clone_app_ids: A list of source stack app IDs to be included in
+ the cloned stack.
+
+ """
+ params = {
+ 'SourceStackId': source_stack_id,
+ 'ServiceRoleArn': service_role_arn,
+ }
+ if name is not None:
+ params['Name'] = name
+ if region is not None:
+ params['Region'] = region
+ if attributes is not None:
+ params['Attributes'] = attributes
+ if default_instance_profile_arn is not None:
+ params['DefaultInstanceProfileArn'] = default_instance_profile_arn
+ if default_os is not None:
+ params['DefaultOs'] = default_os
+ if hostname_theme is not None:
+ params['HostnameTheme'] = hostname_theme
+ if default_availability_zone is not None:
+ params['DefaultAvailabilityZone'] = default_availability_zone
+ if custom_json is not None:
+ params['CustomJson'] = custom_json
+ if use_custom_cookbooks is not None:
+ params['UseCustomCookbooks'] = use_custom_cookbooks
+ if custom_cookbooks_source is not None:
+ params['CustomCookbooksSource'] = custom_cookbooks_source
+ if default_ssh_key_name is not None:
+ params['DefaultSshKeyName'] = default_ssh_key_name
+ if clone_permissions is not None:
+ params['ClonePermissions'] = clone_permissions
+ if clone_app_ids is not None:
+ params['CloneAppIds'] = clone_app_ids
+ return self.make_request(action='CloneStack',
+ body=json.dumps(params))
+
+ def create_app(self, stack_id, name, type, description=None,
+ app_source=None, domains=None, enable_ssl=None,
+ ssl_configuration=None, attributes=None):
+ """
+ Creates an app for a specified stack.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ :type name: string
+ :param name: The app name.
+
+ :type description: string
+ :param description: A description of the app.
+
+ :type type: string
+ :param type: The app type. Each supported type is associated with a
+ particular layer. For example, PHP applications are associated with
+ a PHP layer. OpsWorks deploys an application to those instances
+ that are members of the corresponding layer.
+
+ :type app_source: dict
+ :param app_source: A `Source` object that specifies the app repository.
+
+ :type domains: list
+ :param domains: The app virtual host settings, with multiple domains
+ separated by commas. For example: `'www.mysite.com, mysite.com'`
+
+ :type enable_ssl: boolean
+ :param enable_ssl: Whether to enable SSL for the app.
+
+ :type ssl_configuration: dict
+ :param ssl_configuration: An `SslConfiguration` object with the SSL
+ configuration.
+
+ :type attributes: map
+ :param attributes: One or more user-defined key/value pairs to be added
+ to the stack attributes bag.
+
+ """
+ params = {'StackId': stack_id, 'Name': name, 'Type': type, }
+ if description is not None:
+ params['Description'] = description
+ if app_source is not None:
+ params['AppSource'] = app_source
+ if domains is not None:
+ params['Domains'] = domains
+ if enable_ssl is not None:
+ params['EnableSsl'] = enable_ssl
+ if ssl_configuration is not None:
+ params['SslConfiguration'] = ssl_configuration
+ if attributes is not None:
+ params['Attributes'] = attributes
+ return self.make_request(action='CreateApp',
+ body=json.dumps(params))
+
+ def create_deployment(self, stack_id, command, app_id=None,
+ instance_ids=None, comment=None, custom_json=None):
+ """
+ Deploys a stack or app.
+
+
+ + App deployment generates a `deploy` event, which runs the
+ associated recipes and passes them a JSON stack configuration
+ object that includes information about the app.
+ + Stack deployment runs the `deploy` recipes but does not
+ raise an event.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ :type app_id: string
+ :param app_id: The app ID, for app deployments.
+
+ :type instance_ids: list
+ :param instance_ids: The instance IDs for the deployment targets.
+
+ :type command: dict
+ :param command: A `DeploymentCommand` object that describes details of
+ the operation.
+
+ :type comment: string
+ :param comment: A user-defined comment.
+
+ :type custom_json: string
+ :param custom_json:
+ A string that contains user-defined, custom JSON. It is used to
+ override the corresponding default stack configuration JSON values.
+ The string should be in the following format and must escape
+ characters such as '"':
+ `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
+
+ """
+ params = {'StackId': stack_id, 'Command': command, }
+ if app_id is not None:
+ params['AppId'] = app_id
+ if instance_ids is not None:
+ params['InstanceIds'] = instance_ids
+ if comment is not None:
+ params['Comment'] = comment
+ if custom_json is not None:
+ params['CustomJson'] = custom_json
+ return self.make_request(action='CreateDeployment',
+ body=json.dumps(params))
+
+ def create_instance(self, stack_id, layer_ids, instance_type,
+ auto_scaling_type=None, hostname=None, os=None,
+ ssh_key_name=None, availability_zone=None):
+ """
+ Creates an instance in a specified stack.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ :type layer_ids: list
+ :param layer_ids: An array that contains the instance layer IDs.
+
+ :type instance_type: string
+ :param instance_type:
+ The instance type, which can be one of the following:
+
+
+ + m1.small
+ + m1.medium
+ + m1.large
+ + m1.xlarge
+ + c1.medium
+ + c1.xlarge
+ + m2.xlarge
+ + m2.2xlarge
+ + m2.4xlarge
+
+ :type auto_scaling_type: string
+ :param auto_scaling_type:
+ The instance auto scaling type, which has three possible values:
+
+
+ + **AlwaysRunning**: A 24x7 instance, which is not affected by auto
+ scaling.
+ + **TimeBasedAutoScaling**: A time-based auto scaling instance, which
+ is started and stopped based on a specified schedule. To specify
+ the schedule, call SetTimeBasedAutoScaling.
+ + **LoadBasedAutoScaling**: A load-based auto scaling instance, which
+ is started and stopped based on load metrics. To use load-based
+ auto scaling, you must enable it for the instance layer and
+ configure the thresholds by calling SetLoadBasedAutoScaling.
+
+ :type hostname: string
+ :param hostname: The instance host name.
+
+ :type os: string
+ :param os: The instance operating system.
+
+ :type ssh_key_name: string
+ :param ssh_key_name: The instance SSH key name.
+
+ :type availability_zone: string
+ :param availability_zone: The instance Availability Zone. For more
+ information, see `Regions and Endpoints`_.
+
+ """
+ params = {
+ 'StackId': stack_id,
+ 'LayerIds': layer_ids,
+ 'InstanceType': instance_type,
+ }
+ if auto_scaling_type is not None:
+ params['AutoScalingType'] = auto_scaling_type
+ if hostname is not None:
+ params['Hostname'] = hostname
+ if os is not None:
+ params['Os'] = os
+ if ssh_key_name is not None:
+ params['SshKeyName'] = ssh_key_name
+ if availability_zone is not None:
+ params['AvailabilityZone'] = availability_zone
+ return self.make_request(action='CreateInstance',
+ body=json.dumps(params))
+
+ def create_layer(self, stack_id, type, name, shortname, attributes=None,
+ custom_instance_profile_arn=None,
+ custom_security_group_ids=None, packages=None,
+ volume_configurations=None, enable_auto_healing=None,
+ auto_assign_elastic_ips=None, custom_recipes=None):
+ """
+ Creates a layer.
+
+ :type stack_id: string
+ :param stack_id: The layer stack ID.
+
+ :type type: string
+ :param type: The layer type. A stack cannot have more than one layer of
+ the same type.
+
+ :type name: string
+ :param name: The layer name, which is used by the console.
+
+ :type shortname: string
+ :param shortname: The layer short name, which is used internally by
+ OpsWorks and by Chef recipes. The shortname is also used as the
+ name for the directory where your app files are installed. It can
+ have a maximum of 200 characters, which are limited to the
+ alphanumeric characters, '-', '_', and '.'.
+
+ :type attributes: map
+ :param attributes: One or more user-defined key/value pairs to be added
+ to the stack attributes bag.
+
+ :type custom_instance_profile_arn: string
+ :param custom_instance_profile_arn: The ARN of an IAM profile to
+ be used for the layer's EC2 instances. For more information about
+ IAM ARNs, see `Using Identifiers`_.
+
+ :type custom_security_group_ids: list
+ :param custom_security_group_ids: An array containing the layer custom
+ security group IDs.
+
+ :type packages: list
+ :param packages: An array of `Package` objects that describe the layer
+ packages.
+
+ :type volume_configurations: list
+ :param volume_configurations: A `VolumeConfigurations` object that
+ describes the layer Amazon EBS volumes.
+
+ :type enable_auto_healing: boolean
+ :param enable_auto_healing: Whether to enable auto healing for the
+ layer.
+
+ :type auto_assign_elastic_ips: boolean
+ :param auto_assign_elastic_ips: Whether to automatically assign an
+ `Elastic IP address`_ to the layer.
+
+ :type custom_recipes: dict
+ :param custom_recipes: A `LayerCustomRecipes` object that specifies the
+ layer custom recipes.
+
+ """
+ params = {
+ 'StackId': stack_id,
+ 'Type': type,
+ 'Name': name,
+ 'Shortname': shortname,
+ }
+ if attributes is not None:
+ params['Attributes'] = attributes
+ if custom_instance_profile_arn is not None:
+ params['CustomInstanceProfileArn'] = custom_instance_profile_arn
+ if custom_security_group_ids is not None:
+ params['CustomSecurityGroupIds'] = custom_security_group_ids
+ if packages is not None:
+ params['Packages'] = packages
+ if volume_configurations is not None:
+ params['VolumeConfigurations'] = volume_configurations
+ if enable_auto_healing is not None:
+ params['EnableAutoHealing'] = enable_auto_healing
+ if auto_assign_elastic_ips is not None:
+ params['AutoAssignElasticIps'] = auto_assign_elastic_ips
+ if custom_recipes is not None:
+ params['CustomRecipes'] = custom_recipes
+ return self.make_request(action='CreateLayer',
+ body=json.dumps(params))
+
+ def create_stack(self, name, region, service_role_arn,
+ default_instance_profile_arn, attributes=None,
+ default_os=None, hostname_theme=None,
+ default_availability_zone=None, custom_json=None,
+ use_custom_cookbooks=None, custom_cookbooks_source=None,
+ default_ssh_key_name=None):
+ """
+ Creates a new stack.
+
+ :type name: string
+ :param name: The stack name.
+
+ :type region: string
+ :param region: The stack AWS region, such as "us-west-2". For more
+ information about Amazon regions, see `Regions and Endpoints`_.
+
+ :type attributes: map
+ :param attributes: One or more user-defined key/value pairs to be added
+ to the stack attributes bag.
+
+ :type service_role_arn: string
+ :param service_role_arn: The stack AWS Identity and Access Management
+ (IAM) role, which allows OpsWorks to work with AWS resources on
+ your behalf. You must set this parameter to the Amazon Resource
+ Name (ARN) for an existing IAM role. For more information about IAM
+ ARNs, see `Using Identifiers`_.
+
+ :type default_instance_profile_arn: string
+ :param default_instance_profile_arn: The ARN of an IAM profile that is
+ the default profile for all of the stack's EC2 instances. For more
+ information about IAM ARNs, see `Using Identifiers`_.
+
+ :type default_os: string
+ :param default_os: The stack's default operating system, which
+ must be either "Amazon Linux" or "Ubuntu 12.04 LTS".
+
+ :type hostname_theme: string
+ :param hostname_theme: The stack's host name theme, with spaces
+ replaced by underscores. The theme is used to generate hostnames
+ for the stack's instances. By default, `HostnameTheme` is set to
+ Layer_Dependent, which creates hostnames by appending integers to
+ the layer's shortname. The other themes are:
+
+ + Baked_Goods
+ + Clouds
+ + European_Cities
+ + Fruits
+ + Greek_Deities
+ + Legendary_Creatures_from_Japan
+ + Planets_and_Moons
+ + Roman_Deities
+ + Scottish_Islands
+ + US_Cities
+ + Wild_Cats
+
+
+ To obtain a generated hostname, call `GetHostNameSuggestion`, which
+ returns a hostname based on the current theme.
+
+ :type default_availability_zone: string
+ :param default_availability_zone: The stack default Availability Zone.
+ For more information, see `Regions and Endpoints`_.
+
+ :type custom_json: string
+ :param custom_json:
+ A string that contains user-defined, custom JSON. It is used to
+ override the corresponding default stack configuration JSON values.
+ The string should be in the following format and must escape
+ characters such as '"':
+ `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
+
+ :type use_custom_cookbooks: boolean
+ :param use_custom_cookbooks: Whether the stack uses custom cookbooks.
+
+ :type custom_cookbooks_source: dict
+ :param custom_cookbooks_source:
+
+ :type default_ssh_key_name: string
+ :param default_ssh_key_name: A default SSH key for the stack instances.
+ You can override this value when you create or update an instance.
+
+ """
+ params = {
+ 'Name': name,
+ 'Region': region,
+ 'ServiceRoleArn': service_role_arn,
+ 'DefaultInstanceProfileArn': default_instance_profile_arn,
+ }
+ if attributes is not None:
+ params['Attributes'] = attributes
+ if default_os is not None:
+ params['DefaultOs'] = default_os
+ if hostname_theme is not None:
+ params['HostnameTheme'] = hostname_theme
+ if default_availability_zone is not None:
+ params['DefaultAvailabilityZone'] = default_availability_zone
+ if custom_json is not None:
+ params['CustomJson'] = custom_json
+ if use_custom_cookbooks is not None:
+ params['UseCustomCookbooks'] = use_custom_cookbooks
+ if custom_cookbooks_source is not None:
+ params['CustomCookbooksSource'] = custom_cookbooks_source
+ if default_ssh_key_name is not None:
+ params['DefaultSshKeyName'] = default_ssh_key_name
+ return self.make_request(action='CreateStack',
+ body=json.dumps(params))
+
+ def create_user_profile(self, iam_user_arn, ssh_username=None,
+ ssh_public_key=None):
+ """
+ Creates a new user.
+
+ :type iam_user_arn: string
+ :param iam_user_arn: The user's IAM ARN.
+
+ :type ssh_username: string
+ :param ssh_username: The user's SSH user name.
+
+ :type ssh_public_key: string
+ :param ssh_public_key: The user's public SSH key.
+
+ """
+ params = {'IamUserArn': iam_user_arn, }
+ if ssh_username is not None:
+ params['SshUsername'] = ssh_username
+ if ssh_public_key is not None:
+ params['SshPublicKey'] = ssh_public_key
+ return self.make_request(action='CreateUserProfile',
+ body=json.dumps(params))
+
+ def delete_app(self, app_id):
+ """
+ Deletes a specified app.
+
+ :type app_id: string
+ :param app_id: The app ID.
+
+ """
+ params = {'AppId': app_id, }
+ return self.make_request(action='DeleteApp',
+ body=json.dumps(params))
+
+ def delete_instance(self, instance_id, delete_elastic_ip=None,
+ delete_volumes=None):
+ """
+ Deletes a specified instance.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ :type delete_elastic_ip: boolean
+ :param delete_elastic_ip: Whether to delete the instance Elastic IP
+ address.
+
+ :type delete_volumes: boolean
+ :param delete_volumes: Whether to delete the instance Amazon EBS
+ volumes.
+
+ """
+ params = {'InstanceId': instance_id, }
+ if delete_elastic_ip is not None:
+ params['DeleteElasticIp'] = delete_elastic_ip
+ if delete_volumes is not None:
+ params['DeleteVolumes'] = delete_volumes
+ return self.make_request(action='DeleteInstance',
+ body=json.dumps(params))
+
+ def delete_layer(self, layer_id):
+ """
+ Deletes a specified layer. You must first remove all
+ associated instances.
+
+ :type layer_id: string
+ :param layer_id: The layer ID.
+
+ """
+ params = {'LayerId': layer_id, }
+ return self.make_request(action='DeleteLayer',
+ body=json.dumps(params))
+
+ def delete_stack(self, stack_id):
+ """
+ Deletes a specified stack. You must first delete all instances
+ and layers.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ """
+ params = {'StackId': stack_id, }
+ return self.make_request(action='DeleteStack',
+ body=json.dumps(params))
+
+ def delete_user_profile(self, iam_user_arn):
+ """
+ Deletes a user.
+
+ :type iam_user_arn: string
+ :param iam_user_arn: The user's IAM ARN.
+
+ """
+ params = {'IamUserArn': iam_user_arn, }
+ return self.make_request(action='DeleteUserProfile',
+ body=json.dumps(params))
+
+ def describe_apps(self, stack_id=None, app_ids=None):
+ """
+ Requests a description of a specified set of apps.
+
+ :type stack_id: string
+ :param stack_id:
+ The app stack ID.
+
+ :type app_ids: list
+ :param app_ids: An array of app IDs for the apps to be described.
+
+ """
+ params = {}
+ if stack_id is not None:
+ params['StackId'] = stack_id
+ if app_ids is not None:
+ params['AppIds'] = app_ids
+ return self.make_request(action='DescribeApps',
+ body=json.dumps(params))
+
+ def describe_commands(self, deployment_id=None, instance_id=None,
+ command_ids=None):
+ """
+ Describes the results of specified commands.
+
+ :type deployment_id: string
+ :param deployment_id: The deployment ID.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ :type command_ids: list
+ :param command_ids: An array of IDs for the commands to be described.
+
+ """
+ params = {}
+ if deployment_id is not None:
+ params['DeploymentId'] = deployment_id
+ if instance_id is not None:
+ params['InstanceId'] = instance_id
+ if command_ids is not None:
+ params['CommandIds'] = command_ids
+ return self.make_request(action='DescribeCommands',
+ body=json.dumps(params))
+
+ def describe_deployments(self, stack_id=None, app_id=None,
+ deployment_ids=None):
+ """
+ Requests a description of a specified set of deployments.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ :type app_id: string
+ :param app_id: The app ID.
+
+ :type deployment_ids: list
+ :param deployment_ids: An array of deployment IDs to be described.
+
+ """
+ params = {}
+ if stack_id is not None:
+ params['StackId'] = stack_id
+ if app_id is not None:
+ params['AppId'] = app_id
+ if deployment_ids is not None:
+ params['DeploymentIds'] = deployment_ids
+ return self.make_request(action='DescribeDeployments',
+ body=json.dumps(params))
+
+ def describe_elastic_ips(self, instance_id=None, ips=None):
+ """
+ Describes an instance's `Elastic IP addresses`_.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ :type ips: list
+ :param ips: An array of Elastic IP addresses to be described.
+
+ """
+ params = {}
+ if instance_id is not None:
+ params['InstanceId'] = instance_id
+ if ips is not None:
+ params['Ips'] = ips
+ return self.make_request(action='DescribeElasticIps',
+ body=json.dumps(params))
+
+ def describe_instances(self, stack_id=None, layer_id=None, app_id=None,
+ instance_ids=None):
+ """
+ Requests a description of a set of instances associated with a
+ specified ID or IDs.
+
+ :type stack_id: string
+ :param stack_id: A stack ID.
+
+ :type layer_id: string
+ :param layer_id: A layer ID.
+
+ :type app_id: string
+ :param app_id: An app ID.
+
+ :type instance_ids: list
+ :param instance_ids: An array of instance IDs to be described.
+
+ """
+ params = {}
+ if stack_id is not None:
+ params['StackId'] = stack_id
+ if layer_id is not None:
+ params['LayerId'] = layer_id
+ if app_id is not None:
+ params['AppId'] = app_id
+ if instance_ids is not None:
+ params['InstanceIds'] = instance_ids
+ return self.make_request(action='DescribeInstances',
+ body=json.dumps(params))
+
+ def describe_layers(self, stack_id, layer_ids=None):
+ """
+ Requests a description of one or more layers in a specified
+ stack.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ :type layer_ids: list
+ :param layer_ids: An array of layer IDs that specify the layers to be
+ described.
+
+ """
+ params = {'StackId': stack_id, }
+ if layer_ids is not None:
+ params['LayerIds'] = layer_ids
+ return self.make_request(action='DescribeLayers',
+ body=json.dumps(params))
+
+ def describe_load_based_auto_scaling(self, layer_ids):
+ """
+ Describes load-based auto scaling configurations for specified
+ layers.
+
+ :type layer_ids: list
+ :param layer_ids: An array of layer IDs.
+
+ """
+ params = {'LayerIds': layer_ids, }
+ return self.make_request(action='DescribeLoadBasedAutoScaling',
+ body=json.dumps(params))
+
+ def describe_permissions(self, iam_user_arn, stack_id):
+ """
+ Describes the permissions for a specified stack. You must
+ specify at least one of the two request values.
+
+ :type iam_user_arn: string
+ :param iam_user_arn: The user's IAM ARN. For more information about IAM
+ ARNs, see `Using Identifiers`_.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ """
+ params = {'IamUserArn': iam_user_arn, 'StackId': stack_id, }
+ return self.make_request(action='DescribePermissions',
+ body=json.dumps(params))
+
+ def describe_raid_arrays(self, instance_id=None, raid_array_ids=None):
+ """
+ Describe an instance's RAID arrays.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ :type raid_array_ids: list
+ :param raid_array_ids: An array of RAID array IDs to be described.
+
+ """
+ params = {}
+ if instance_id is not None:
+ params['InstanceId'] = instance_id
+ if raid_array_ids is not None:
+ params['RaidArrayIds'] = raid_array_ids
+ return self.make_request(action='DescribeRaidArrays',
+ body=json.dumps(params))
+
+ def describe_service_errors(self, stack_id=None, instance_id=None,
+ service_error_ids=None):
+ """
+ Describes OpsWorks service errors.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ :type service_error_ids: list
+ :param service_error_ids: An array of service error IDs to be
+ described.
+
+ """
+ params = {}
+ if stack_id is not None:
+ params['StackId'] = stack_id
+ if instance_id is not None:
+ params['InstanceId'] = instance_id
+ if service_error_ids is not None:
+ params['ServiceErrorIds'] = service_error_ids
+ return self.make_request(action='DescribeServiceErrors',
+ body=json.dumps(params))
+
+ def describe_stacks(self, stack_ids=None):
+ """
+ Requests a description of one or more stacks.
+
+ :type stack_ids: list
+ :param stack_ids: An array of stack IDs that specify the stacks to be
+ described.
+
+ """
+ params = {}
+ if stack_ids is not None:
+ params['StackIds'] = stack_ids
+ return self.make_request(action='DescribeStacks',
+ body=json.dumps(params))
+
+ def describe_time_based_auto_scaling(self, instance_ids):
+ """
+ Describes time-based auto scaling configurations for specified
+ instances.
+
+ :type instance_ids: list
+ :param instance_ids: An array of instance IDs.
+
+ """
+ params = {'InstanceIds': instance_ids, }
+ return self.make_request(action='DescribeTimeBasedAutoScaling',
+ body=json.dumps(params))
+
+ def describe_user_profiles(self, iam_user_arns):
+ """
+ Describe specified users.
+
+ :type iam_user_arns: list
+ :param iam_user_arns: An array of IAM user ARNs that identify the users
+ to be described.
+
+ """
+ params = {'IamUserArns': iam_user_arns, }
+ return self.make_request(action='DescribeUserProfiles',
+ body=json.dumps(params))
+
+ def describe_volumes(self, instance_id=None, raid_array_id=None,
+ volume_ids=None):
+ """
+ Describes an instance's Amazon EBS volumes.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ :type raid_array_id: string
+ :param raid_array_id: The RAID array ID.
+
+ :type volume_ids: list
+ :param volume_ids: An array of volume IDs to be described.
+
+ """
+ params = {}
+ if instance_id is not None:
+ params['InstanceId'] = instance_id
+ if raid_array_id is not None:
+ params['RaidArrayId'] = raid_array_id
+ if volume_ids is not None:
+ params['VolumeIds'] = volume_ids
+ return self.make_request(action='DescribeVolumes',
+ body=json.dumps(params))
+
+ def get_hostname_suggestion(self, layer_id):
+ """
+ Gets a generated hostname for the specified layer, based on
+ the current hostname theme.
+
+ :type layer_id: string
+ :param layer_id: The layer ID.
+
+ """
+ params = {'LayerId': layer_id, }
+ return self.make_request(action='GetHostnameSuggestion',
+ body=json.dumps(params))
+
+ def reboot_instance(self, instance_id):
+ """
+ Reboots a specified instance.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ """
+ params = {'InstanceId': instance_id, }
+ return self.make_request(action='RebootInstance',
+ body=json.dumps(params))
+
+ def set_load_based_auto_scaling(self, layer_id, enable=None,
+ up_scaling=None, down_scaling=None):
+ """
+ Specify the load-based auto scaling configuration for a
+ specified layer.
+
+ To use load-based auto scaling, you must create a set of load-
+ based auto scaling instances. Load-based auto scaling operates
+ only on the instances from that set, so you must ensure that
+ you have created enough instances to handle the maximum
+ anticipated load.
+
+ :type layer_id: string
+ :param layer_id: The layer ID.
+
+ :type enable: boolean
+ :param enable: Enables load-based auto scaling for the layer.
+
+ :type up_scaling: dict
+ :param up_scaling: An `AutoScalingThresholds` object with the upscaling
+ threshold configuration. If the load exceeds these thresholds for a
+ specified amount of time, OpsWorks starts a specified number of
+ instances.
+
+ :type down_scaling: dict
+ :param down_scaling: An `AutoScalingThresholds` object with the
+ downscaling threshold configuration. If the load falls below these
+ thresholds for a specified amount of time, OpsWorks stops a
+ specified number of instances.
+
+ """
+ params = {'LayerId': layer_id, }
+ if enable is not None:
+ params['Enable'] = enable
+ if up_scaling is not None:
+ params['UpScaling'] = up_scaling
+ if down_scaling is not None:
+ params['DownScaling'] = down_scaling
+ return self.make_request(action='SetLoadBasedAutoScaling',
+ body=json.dumps(params))
+
+ def set_permission(self, stack_id, iam_user_arn, allow_ssh=None,
+ allow_sudo=None):
+ """
+ Specifies a stack's permissions.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ :type iam_user_arn: string
+ :param iam_user_arn: The user's IAM ARN.
+
+ :type allow_ssh: boolean
+ :param allow_ssh: The user is allowed to use SSH to communicate with
+ the instance.
+
+ :type allow_sudo: boolean
+ :param allow_sudo: The user is allowed to use **sudo** to elevate
+ privileges.
+
+ """
+ params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, }
+ if allow_ssh is not None:
+ params['AllowSsh'] = allow_ssh
+ if allow_sudo is not None:
+ params['AllowSudo'] = allow_sudo
+ return self.make_request(action='SetPermission',
+ body=json.dumps(params))
+
+ def set_time_based_auto_scaling(self, instance_id,
+ auto_scaling_schedule=None):
+ """
+ Specify the time-based auto scaling configuration for a
+ specified instance.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ :type auto_scaling_schedule: dict
+ :param auto_scaling_schedule: An `AutoScalingSchedule` with the
+ instance schedule.
+
+ """
+ params = {'InstanceId': instance_id, }
+ if auto_scaling_schedule is not None:
+ params['AutoScalingSchedule'] = auto_scaling_schedule
+ return self.make_request(action='SetTimeBasedAutoScaling',
+ body=json.dumps(params))
+
+ def start_instance(self, instance_id):
+ """
+ Starts a specified instance.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ """
+ params = {'InstanceId': instance_id, }
+ return self.make_request(action='StartInstance',
+ body=json.dumps(params))
+
+ def start_stack(self, stack_id):
+ """
+ Starts a stack's instances.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ """
+ params = {'StackId': stack_id, }
+ return self.make_request(action='StartStack',
+ body=json.dumps(params))
+
+ def stop_instance(self, instance_id):
+ """
+ Stops a specified instance. When you stop a standard instance,
+ the data disappears and must be reinstalled when you restart
+ the instance. You can stop an Amazon EBS-backed instance
+ without losing data.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ """
+ params = {'InstanceId': instance_id, }
+ return self.make_request(action='StopInstance',
+ body=json.dumps(params))
+
+ def stop_stack(self, stack_id):
+ """
+ Stops a specified stack.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ """
+ params = {'StackId': stack_id, }
+ return self.make_request(action='StopStack',
+ body=json.dumps(params))
+
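The four start/stop calls above each take a single ID; a brief sketch with placeholder IDs (credentials assumed from the environment or a boto config file)::

    from boto.opsworks.layer1 import OpsWorksConnection

    conn = OpsWorksConnection()
    conn.start_instance('INSTANCE_ID')
    conn.start_stack('STACK_ID')
    conn.stop_instance('INSTANCE_ID')
    conn.stop_stack('STACK_ID')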
+ def update_app(self, app_id, name=None, description=None, type=None,
+ app_source=None, domains=None, enable_ssl=None,
+ ssl_configuration=None, attributes=None):
+ """
+ Updates a specified app.
+
+ :type app_id: string
+ :param app_id: The app ID.
+
+ :type name: string
+ :param name: The app name.
+
+ :type description: string
+ :param description: A description of the app.
+
+ :type type: string
+ :param type: The app type.
+
+ :type app_source: dict
+ :param app_source: A `Source` object that specifies the app repository.
+
+ :type domains: list
+ :param domains: The app's virtual host settings, with multiple domains
+ separated by commas. For example: `'www.mysite.com, mysite.com'`
+
+ :type enable_ssl: boolean
+ :param enable_ssl: Whether SSL is enabled for the app.
+
+ :type ssl_configuration: dict
+ :param ssl_configuration: An `SslConfiguration` object with the SSL
+ configuration.
+
+ :type attributes: map
+ :param attributes: One or more user-defined key/value pairs to be added
+ to the stack attributes bag.
+
+ """
+ params = {'AppId': app_id, }
+ if name is not None:
+ params['Name'] = name
+ if description is not None:
+ params['Description'] = description
+ if type is not None:
+ params['Type'] = type
+ if app_source is not None:
+ params['AppSource'] = app_source
+ if domains is not None:
+ params['Domains'] = domains
+ if enable_ssl is not None:
+ params['EnableSsl'] = enable_ssl
+ if ssl_configuration is not None:
+ params['SslConfiguration'] = ssl_configuration
+ if attributes is not None:
+ params['Attributes'] = attributes
+ return self.make_request(action='UpdateApp',
+ body=json.dumps(params))
+
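A hedged sketch of a partial update; the app ID and domain names are placeholders, and `domains` is passed as a list per the `:type domains: list` annotation above::

    from boto.opsworks.layer1 import OpsWorksConnection

    conn = OpsWorksConnection()
    conn.update_app('APP_ID',
                    name='myapp',
                    domains=['www.example.com', 'example.com'],
                    enable_ssl=False)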
+ def update_instance(self, instance_id, layer_ids=None,
+ instance_type=None, auto_scaling_type=None,
+ hostname=None, os=None, ssh_key_name=None):
+ """
+ Updates a specified instance.
+
+ :type instance_id: string
+ :param instance_id: The instance ID.
+
+ :type layer_ids: list
+ :param layer_ids: The instance's layer IDs.
+
+ :type instance_type: string
+ :param instance_type:
+ The instance type, which can be one of the following:
+
+
+ + m1.small
+ + m1.medium
+ + m1.large
+ + m1.xlarge
+ + c1.medium
+ + c1.xlarge
+ + m2.xlarge
+ + m2.2xlarge
+ + m2.4xlarge
+
+ :type auto_scaling_type: string
+ :param auto_scaling_type:
+ The instance's auto scaling type, which has three possible values:
+
+
+ + **AlwaysRunning**: A 24x7 instance, which is not affected by auto
+ scaling.
+ + **TimeBasedAutoScaling**: A time-based auto scaling instance, which
+ is started and stopped based on a specified schedule.
+ + **LoadBasedAutoScaling**: A load-based auto scaling instance, which
+ is started and stopped based on load metrics.
+
+ :type hostname: string
+ :param hostname: The instance host name.
+
+ :type os: string
+ :param os: The instance operating system.
+
+ :type ssh_key_name: string
+ :param ssh_key_name: The instance SSH key name.
+
+ """
+ params = {'InstanceId': instance_id, }
+ if layer_ids is not None:
+ params['LayerIds'] = layer_ids
+ if instance_type is not None:
+ params['InstanceType'] = instance_type
+ if auto_scaling_type is not None:
+ params['AutoScalingType'] = auto_scaling_type
+ if hostname is not None:
+ params['Hostname'] = hostname
+ if os is not None:
+ params['Os'] = os
+ if ssh_key_name is not None:
+ params['SshKeyName'] = ssh_key_name
+ return self.make_request(action='UpdateInstance',
+ body=json.dumps(params))
+
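For example (hedged; the instance ID is a placeholder, and only the keyword arguments you pass end up in the request body)::

    from boto.opsworks.layer1 import OpsWorksConnection

    conn = OpsWorksConnection()
    conn.update_instance('INSTANCE_ID',
                         instance_type='m1.medium',
                         hostname='web01',
                         auto_scaling_type='LoadBasedAutoScaling')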
+ def update_layer(self, layer_id, name=None, shortname=None,
+ attributes=None, custom_instance_profile_arn=None,
+ custom_security_group_ids=None, packages=None,
+ volume_configurations=None, enable_auto_healing=None,
+ auto_assign_elastic_ips=None, custom_recipes=None):
+ """
+ Updates a specified layer.
+
+ :type layer_id: string
+ :param layer_id: The layer ID.
+
+ :type name: string
+ :param name: The layer name, which is used by the console.
+
+ :type shortname: string
+ :param shortname: The layer short name, which is used internally by
+ OpsWorks and by Chef. The shortname is also used as the name for the
+ directory where your app files are installed. It can have a maximum
+ of 200 characters and must be in the following format:
+ /\A[a-z0-9\-\_\.]+\Z/.
+
+ :type attributes: map
+ :param attributes: One or more user-defined key/value pairs to be added
+ to the stack attributes bag.
+
+ :type custom_instance_profile_arn: string
+ :param custom_instance_profile_arn: The ARN of an IAM profile to be
+ used for all of the layer's EC2 instances. For more information
+ about IAM ARNs, see `Using Identifiers`_.
+
+ :type custom_security_group_ids: list
+ :param custom_security_group_ids: An array containing the layer's
+ custom security group IDs.
+
+ :type packages: list
+ :param packages: An array of `Package` objects that describe the
+ layer's packages.
+
+ :type volume_configurations: list
+ :param volume_configurations: A `VolumeConfigurations` object that
+ describes the layer's Amazon EBS volumes.
+
+ :type enable_auto_healing: boolean
+ :param enable_auto_healing: Whether to enable auto healing for the
+ layer.
+
+ :type auto_assign_elastic_ips: boolean
+ :param auto_assign_elastic_ips: Whether to automatically assign an
+ `Elastic IP address`_ to the layer.
+
+ :type custom_recipes: dict
+ :param custom_recipes: A `LayerCustomRecipes` object that specifies the
+ layer's custom recipes.
+
+ """
+ params = {'LayerId': layer_id, }
+ if name is not None:
+ params['Name'] = name
+ if shortname is not None:
+ params['Shortname'] = shortname
+ if attributes is not None:
+ params['Attributes'] = attributes
+ if custom_instance_profile_arn is not None:
+ params['CustomInstanceProfileArn'] = custom_instance_profile_arn
+ if custom_security_group_ids is not None:
+ params['CustomSecurityGroupIds'] = custom_security_group_ids
+ if packages is not None:
+ params['Packages'] = packages
+ if volume_configurations is not None:
+ params['VolumeConfigurations'] = volume_configurations
+ if enable_auto_healing is not None:
+ params['EnableAutoHealing'] = enable_auto_healing
+ if auto_assign_elastic_ips is not None:
+ params['AutoAssignElasticIps'] = auto_assign_elastic_ips
+ if custom_recipes is not None:
+ params['CustomRecipes'] = custom_recipes
+ return self.make_request(action='UpdateLayer',
+ body=json.dumps(params))
+
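A hedged sketch; the layer ID and recipe names are placeholders, and the `LayerCustomRecipes` keys used here (`Setup`, `Deploy`) are assumptions about that structure::

    from boto.opsworks.layer1 import OpsWorksConnection

    conn = OpsWorksConnection()
    recipes = {'Setup': ['myapp::setup'], 'Deploy': ['myapp::deploy']}
    conn.update_layer('LAYER_ID',
                      name='Web Layer',
                      shortname='web',
                      enable_auto_healing=True,
                      custom_recipes=recipes)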
+ def update_stack(self, stack_id, name=None, attributes=None,
+ service_role_arn=None,
+ default_instance_profile_arn=None, default_os=None,
+ hostname_theme=None, default_availability_zone=None,
+ custom_json=None, use_custom_cookbooks=None,
+ custom_cookbooks_source=None, default_ssh_key_name=None):
+ """
+ Updates a specified stack.
+
+ :type stack_id: string
+ :param stack_id: The stack ID.
+
+ :type name: string
+ :param name: The stack's new name.
+
+ :type attributes: map
+ :param attributes: One or more user-defined key/value pairs to be added
+ to the stack attributes bag.
+
+ :type service_role_arn: string
+ :param service_role_arn: The stack AWS Identity and Access Management
+ (IAM) role, which allows OpsWorks to work with AWS resources on
+ your behalf. You must set this parameter to the Amazon Resource
+ Name (ARN) for an existing IAM role. For more information about IAM
+ ARNs, see `Using Identifiers`_.
+
+ :type default_instance_profile_arn: string
+ :param default_instance_profile_arn: The ARN of an IAM profile that is
+ the default profile for all of the stack's EC2 instances. For more
+ information about IAM ARNs, see `Using Identifiers`_.
+
+ :type default_os: string
+ :param default_os: The stack's new default operating system, which
+ must be either "Amazon Linux" or "Ubuntu 12.04 LTS".
+
+ :type hostname_theme: string
+ :param hostname_theme: The stack's new host name theme, with spaces
+ replaced by underscores. The theme is used to generate hostnames
+ for the stack's instances. By default, `HostnameTheme` is set to
+ Layer_Dependent, which creates hostnames by appending integers to
+ the layer's shortname. The other themes are:
+
+ + Baked_Goods
+ + Clouds
+ + European_Cities
+ + Fruits
+ + Greek_Deities
+ + Legendary_Creatures_from_Japan
+ + Planets_and_Moons
+ + Roman_Deities
+ + Scottish_Islands
+ + US_Cities
+ + Wild_Cats
+
+
+ To obtain a generated hostname, call `GetHostNameSuggestion`, which
+ returns a hostname based on the current theme.
+
+ :type default_availability_zone: string
+ :param default_availability_zone: The stack's new default Availability
+ Zone. For more information, see `Regions and Endpoints`_.
+
+ :type custom_json: string
+ :param custom_json:
+ A string that contains user-defined, custom JSON. It is used to
+ override the corresponding default stack configuration JSON values.
+ The string should be in the following format and must escape
+ characters such as '"':
+ `"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
+
+ :type use_custom_cookbooks: boolean
+ :param use_custom_cookbooks: Whether the stack uses custom cookbooks.
+
+ :type custom_cookbooks_source: dict
+ :param custom_cookbooks_source:
+
+ :type default_ssh_key_name: string
+ :param default_ssh_key_name: A default SSH key for the stack instances.
+ You can override this value when you create or update an instance.
+
+ """
+ params = {'StackId': stack_id, }
+ if name is not None:
+ params['Name'] = name
+ if attributes is not None:
+ params['Attributes'] = attributes
+ if service_role_arn is not None:
+ params['ServiceRoleArn'] = service_role_arn
+ if default_instance_profile_arn is not None:
+ params['DefaultInstanceProfileArn'] = default_instance_profile_arn
+ if default_os is not None:
+ params['DefaultOs'] = default_os
+ if hostname_theme is not None:
+ params['HostnameTheme'] = hostname_theme
+ if default_availability_zone is not None:
+ params['DefaultAvailabilityZone'] = default_availability_zone
+ if custom_json is not None:
+ params['CustomJson'] = custom_json
+ if use_custom_cookbooks is not None:
+ params['UseCustomCookbooks'] = use_custom_cookbooks
+ if custom_cookbooks_source is not None:
+ params['CustomCookbooksSource'] = custom_cookbooks_source
+ if default_ssh_key_name is not None:
+ params['DefaultSshKeyName'] = default_ssh_key_name
+ return self.make_request(action='UpdateStack',
+ body=json.dumps(params))
+
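A hedged sketch; the stack ID is a placeholder, and `json.dumps` is used to produce the escaped JSON string described for `custom_json` above::

    import json
    from boto.opsworks.layer1 import OpsWorksConnection

    conn = OpsWorksConnection()
    conn.update_stack('STACK_ID',
                      name='production',
                      hostname_theme='Planets_and_Moons',
                      default_availability_zone='us-east-1d',
                      custom_json=json.dumps({'key1': 'value1'}),
                      use_custom_cookbooks=False)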
+ def update_user_profile(self, iam_user_arn, ssh_username=None,
+ ssh_public_key=None):
+ """
+ Updates a specified user's SSH name and public key.
+
+ :type iam_user_arn: string
+ :param iam_user_arn: The user IAM ARN.
+
+ :type ssh_username: string
+ :param ssh_username: The user's new SSH user name.
+
+ :type ssh_public_key: string
+ :param ssh_public_key: The user's new SSH public key.
+
+ """
+ params = {'IamUserArn': iam_user_arn, }
+ if ssh_username is not None:
+ params['SshUsername'] = ssh_username
+ if ssh_public_key is not None:
+ params['SshPublicKey'] = ssh_public_key
+ return self.make_request(action='UpdateUserProfile',
+ body=json.dumps(params))
+
+ def make_request(self, action, body):
+ headers = {
+ 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
+ 'Host': self.region.endpoint,
+ 'Content-Type': 'application/x-amz-json-1.1',
+ 'Content-Length': str(len(body)),
+ }
+ http_request = self.build_base_http_request(
+ method='POST', path='/', auth_path='/', params={},
+ headers=headers, data=body)
+ response = self._mexe(http_request, sender=None,
+ override_num_retries=10)
+ response_body = response.read()
+ boto.log.debug(response_body)
+ if response.status == 200:
+ if response_body:
+ return json.loads(response_body)
+ else:
+ json_body = json.loads(response_body)
+ fault_name = json_body.get('__type', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
+
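Because make_request maps the `__type` field of a JSON error body onto an exception class, falling back to `ResponseError` (a `JSONResponseError`), callers can trap failures generically; a hedged sketch with a placeholder instance ID::

    from boto.exception import JSONResponseError
    from boto.opsworks.layer1 import OpsWorksConnection

    conn = OpsWorksConnection()
    try:
        conn.start_instance('NONEXISTENT_INSTANCE_ID')
    except JSONResponseError as e:
        # The HTTP status, reason and parsed JSON fault body are available.
        print e.status, e.reason, e.body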
diff --git a/boto/provider.py b/boto/provider.py
index c587e020..8a990ed1 100644
--- a/boto/provider.py
+++ b/boto/provider.py
@@ -275,10 +275,16 @@ class Provider(object):
boto.log.debug("Retrieving credentials from metadata server.")
from boto.utils import get_instance_metadata
timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0)
- metadata = get_instance_metadata(timeout=timeout, num_retries=1)
- # I'm assuming there's only one role on the instance profile.
- if metadata and 'iam' in metadata:
- security = metadata['iam']['security-credentials'].values()[0]
+ attempts = config.getint('Boto', 'metadata_service_num_attempts', 1)
+ # The num_retries arg is actually the total number of attempts made,
+ # so the config options is named *_num_attempts to make this more
+ # clear to users.
+ metadata = get_instance_metadata(
+ timeout=timeout, num_retries=attempts,
+ data='meta-data/iam/security-credentials')
+ if metadata:
+ # I'm assuming there's only one role on the instance profile.
+ security = metadata.values()[0]
self._access_key = security['AccessKeyId']
self._secret_key = self._convert_key_to_str(security['SecretAccessKey'])
self._security_token = security['Token']
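The new option pairs with the existing timeout setting in the `[Boto]` section of a boto config file (for example `~/.boto`); the values shown here are illustrative::

    [Boto]
    metadata_service_timeout = 1.0
    metadata_service_num_attempts = 3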
diff --git a/boto/rds/dbsecuritygroup.py b/boto/rds/dbsecuritygroup.py
index 6a69ddb0..37836066 100644
--- a/boto/rds/dbsecuritygroup.py
+++ b/boto/rds/dbsecuritygroup.py
@@ -28,13 +28,18 @@ class DBSecurityGroup(object):
"""
Represents an RDS database security group
- Properties reference available from the AWS documentation at http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html
+ Properties reference available from the AWS documentation at
+ http://docs.amazonwebservices.com/AmazonRDS/latest/APIReference/API_DeleteDBSecurityGroup.html
- :ivar Status: The current status of the security group. Possibile values are [ active, ? ]. Reference documentation lacks specifics of possibilities
- :ivar connection: boto.rds.RDSConnection associated with the current object
+ :ivar Status: The current status of the security group. Possible values are
+ [ active, ? ]. Reference documentation lacks specifics of possibilities
+ :ivar connection: :py:class:`boto.rds.RDSConnection` associated with the current object
:ivar description: The description of the security group
- :ivar ec2_groups: List of EC2SecurityGroup objects that this security group PERMITS
- :ivar ip_ranges: List of IPRange objects (containing CIDR addresses) that this security group PERMITS
+ :ivar ec2_groups: List of :py:class:`EC2 Security Group
+ <boto.ec2.securitygroup.SecurityGroup>` objects that this security
+ group PERMITS
+ :ivar ip_ranges: List of :py:class:`boto.rds.dbsecuritygroup.IPRange`
+ objects (containing CIDR addresses) that this security group PERMITS
:ivar name: Name of the security group
:ivar owner_id: ID of the owner of the security group. Can be 'None'
"""
@@ -83,13 +88,14 @@ class DBSecurityGroup(object):
You need to pass in either a CIDR block to authorize or
and EC2 SecurityGroup.
- @type cidr_ip: string
- @param cidr_ip: A valid CIDR IP range to authorize
+ :type cidr_ip: string
+ :param cidr_ip: A valid CIDR IP range to authorize
- @type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup>`
+ :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
+ :param ec2_group: An EC2 security group to authorize
- @rtype: bool
- @return: True if successful.
+ :rtype: bool
+ :return: True if successful.
"""
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
@@ -108,13 +114,14 @@ class DBSecurityGroup(object):
You need to pass in either a CIDR block or
an EC2 SecurityGroup from which to revoke access.
- @type cidr_ip: string
- @param cidr_ip: A valid CIDR IP range to revoke
+ :type cidr_ip: string
+ :param cidr_ip: A valid CIDR IP range to revoke
- @type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup>`
+ :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
+ :param ec2_group: An EC2 security group to revoke
- @rtype: bool
- @return: True if successful.
+ :rtype: bool
+ :return: True if successful.
"""
if isinstance(ec2_group, SecurityGroup):
group_name = ec2_group.name
@@ -131,6 +138,8 @@ class DBSecurityGroup(object):
class IPRange(object):
"""
Describes a CIDR address range for use in a DBSecurityGroup
+
+ :ivar cidr_ip: IP Address range
"""
def __init__(self, parent=None):
@@ -174,4 +183,4 @@ class EC2SecurityGroup(object):
elif name == 'EC2SecurityGroupOwnerId':
self.owner_id = value
else:
- setattr(self, name, value) \ No newline at end of file
+ setattr(self, name, value)
diff --git a/boto/redshift/__init__.py b/boto/redshift/__init__.py
new file mode 100644
index 00000000..15601e78
--- /dev/null
+++ b/boto/redshift/__init__.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.regioninfo import RegionInfo
+
+
+def regions():
+ """
+ Get all available regions for the AWS Redshift service.
+
+ :rtype: list
+ :return: A list of :class:`boto.regioninfo.RegionInfo`
+ """
+ from boto.redshift.layer1 import RedshiftConnection
+ cls = RedshiftConnection
+ return [
+ RegionInfo(name='us-east-1',
+ endpoint='redshift.us-east-1.amazonaws.com',
+ connection_cls=cls),
+ RegionInfo(name='us-west-2',
+ endpoint='redshift.us-west-2.amazonaws.com',
+ connection_cls=cls),
+ ]
+
+
+def connect_to_region(region_name, **kw_params):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect(**kw_params)
+ return None
+
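Typical use of the helper just defined (hedged; credentials are assumed to be available through the environment or a boto config file, and `None` is returned for an unsupported region name)::

    from boto.redshift import connect_to_region

    conn = connect_to_region('us-west-2')
    if conn is None:
        raise ValueError('unsupported Redshift region')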
diff --git a/boto/redshift/exceptions.py b/boto/redshift/exceptions.py
new file mode 100644
index 00000000..92779d08
--- /dev/null
+++ b/boto/redshift/exceptions.py
@@ -0,0 +1,182 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.exception import JSONResponseError
+
+
+class ClusterNotFoundFault(JSONResponseError):
+ pass
+
+
+class InvalidClusterSnapshotStateFault(JSONResponseError):
+ pass
+
+
+class ClusterSnapshotNotFoundFault(JSONResponseError):
+ pass
+
+
+class ClusterSecurityGroupQuotaExceededFault(JSONResponseError):
+ pass
+
+
+class ReservedNodeOfferingNotFoundFault(JSONResponseError):
+ pass
+
+
+class InvalidSubnet(JSONResponseError):
+ pass
+
+
+class ClusterSubnetGroupQuotaExceededFault(JSONResponseError):
+ pass
+
+
+class InvalidClusterStateFault(JSONResponseError):
+ pass
+
+
+class InvalidClusterParameterGroupStateFault(JSONResponseError):
+ pass
+
+
+class ClusterParameterGroupAlreadyExistsFault(JSONResponseError):
+ pass
+
+
+class InvalidClusterSecurityGroupStateFault(JSONResponseError):
+ pass
+
+
+class InvalidRestoreFault(JSONResponseError):
+ pass
+
+
+class AuthorizationNotFoundFault(JSONResponseError):
+ pass
+
+
+class ResizeNotFoundFault(JSONResponseError):
+ pass
+
+
+class NumberOfNodesQuotaExceededFault(JSONResponseError):
+ pass
+
+
+class ClusterSnapshotAlreadyExistsFault(JSONResponseError):
+ pass
+
+
+class AuthorizationQuotaExceededFault(JSONResponseError):
+ pass
+
+
+class AuthorizationAlreadyExistsFault(JSONResponseError):
+ pass
+
+
+class ClusterSnapshotQuotaExceededFault(JSONResponseError):
+ pass
+
+
+class ReservedNodeNotFoundFault(JSONResponseError):
+ pass
+
+
+class ReservedNodeAlreadyExistsFault(JSONResponseError):
+ pass
+
+
+class ClusterSecurityGroupAlreadyExistsFault(JSONResponseError):
+ pass
+
+
+class ClusterParameterGroupNotFoundFault(JSONResponseError):
+ pass
+
+
+class ReservedNodeQuotaExceededFault(JSONResponseError):
+ pass
+
+
+class ClusterQuotaExceededFault(JSONResponseError):
+ pass
+
+
+class ClusterSubnetQuotaExceededFault(JSONResponseError):
+ pass
+
+
+class UnsupportedOptionFault(JSONResponseError):
+ pass
+
+
+class InvalidVPCNetworkStateFault(JSONResponseError):
+ pass
+
+
+class ClusterSecurityGroupNotFoundFault(JSONResponseError):
+ pass
+
+
+class InvalidClusterSubnetGroupStateFault(JSONResponseError):
+ pass
+
+
+class ClusterSubnetGroupAlreadyExistsFault(JSONResponseError):
+ pass
+
+
+class NumberOfNodesPerClusterLimitExceededFault(JSONResponseError):
+ pass
+
+
+class ClusterSubnetGroupNotFoundFault(JSONResponseError):
+ pass
+
+
+class ClusterParameterGroupQuotaExceededFault(JSONResponseError):
+ pass
+
+
+class ClusterAlreadyExistsFault(JSONResponseError):
+ pass
+
+
+class InsufficientClusterCapacityFault(JSONResponseError):
+ pass
+
+
+class InvalidClusterSubnetStateFault(JSONResponseError):
+ pass
+
+
+class SubnetAlreadyInUse(JSONResponseError):
+ pass
+
+
+class InvalidParameterCombinationFault(JSONResponseError):
+ pass
diff --git a/boto/redshift/layer1.py b/boto/redshift/layer1.py
new file mode 100644
index 00000000..f57ec0a6
--- /dev/null
+++ b/boto/redshift/layer1.py
@@ -0,0 +1,2076 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import json
+import boto
+from boto.connection import AWSQueryConnection
+from boto.regioninfo import RegionInfo
+from boto.exception import JSONResponseError
+from boto.redshift import exceptions
+
+
+class RedshiftConnection(AWSQueryConnection):
+ """
+ Amazon Redshift **Overview**
+ This is the Amazon Redshift API Reference. This guide provides
+ descriptions and samples of the Amazon Redshift API.
+
+ Amazon Redshift manages all the work of setting up, operating, and
+ scaling a data warehouse: provisioning capacity, monitoring and
+ backing up the cluster, and applying patches and upgrades to the
+ Amazon Redshift engine. You can focus on using your data to
+ acquire new insights for your business and customers.
+ **Are You a First-Time Amazon Redshift User?**
+ If you are a first-time user of Amazon Redshift, we recommend that
+ you begin by reading the following sections:
+
+
+
+ + Service Highlights and Pricing - The `product detail page`_
+ provides the Amazon Redshift value proposition, service highlights
+ and pricing.
+ + Getting Started - The `Getting Started Guide`_ includes an
+ example that walks you through the process of creating a cluster,
+ creating database tables, uploading data, and testing queries.
+
+
+
+ After you complete the Getting Started Guide, we recommend that
+ you explore one of the following guides:
+
+
+ + Cluster Management - If you are responsible for managing Amazon
+ Redshift clusters, the `Cluster Management Guide`_ shows you how
+ to create and manage Amazon Redshift clusters. If you are an
+ application developer, you can use the Amazon Redshift Query API
+ to manage clusters programmatically. Additionally, the AWS SDK
+ libraries that wrap the underlying Amazon Redshift API simplify
+ your programming tasks. If you prefer a more interactive way of
+ managing clusters, you can use the Amazon Redshift console and the
+ AWS command line interface (AWS CLI). For information about the
+    API and CLI, go to the following manuals:
+
+    + API Reference (this document)
+    + `CLI Reference`_
+
+    + Amazon Redshift Database Developer - If you are a
+ database developer, the Amazon Redshift `Database Developer
+ Guide`_ explains how to design, build, query, and maintain the
+ databases that make up your data warehouse.
+
+
+ For a list of supported AWS regions where you can provision a
+ cluster, go to the `Regions and Endpoints`_ section in the Amazon
+    Web Services Glossary.
+ """
+ APIVersion = "2012-12-01"
+ DefaultRegionName = "us-east-1"
+ DefaultRegionEndpoint = "redshift.us-east-1.amazonaws.com"
+ ResponseError = JSONResponseError
+
+ _faults = {
+ "ClusterNotFound": exceptions.ClusterNotFoundFault,
+ "InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotStateFault,
+ "ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFoundFault,
+ "ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceededFault,
+ "ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFoundFault,
+ "InvalidSubnet": exceptions.InvalidSubnet,
+ "ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceededFault,
+ "InvalidClusterState": exceptions.InvalidClusterStateFault,
+ "InvalidClusterParameterGroupState": exceptions.InvalidClusterParameterGroupStateFault,
+ "ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExistsFault,
+ "InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupStateFault,
+ "InvalidRestore": exceptions.InvalidRestoreFault,
+ "AuthorizationNotFound": exceptions.AuthorizationNotFoundFault,
+ "ResizeNotFound": exceptions.ResizeNotFoundFault,
+ "NumberOfNodesQuotaExceeded": exceptions.NumberOfNodesQuotaExceededFault,
+ "ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExistsFault,
+ "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceededFault,
+ "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExistsFault,
+ "ClusterSnapshotQuotaExceeded": exceptions.ClusterSnapshotQuotaExceededFault,
+ "ReservedNodeNotFound": exceptions.ReservedNodeNotFoundFault,
+ "ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExistsFault,
+ "ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExistsFault,
+ "ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFoundFault,
+ "ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceededFault,
+ "ClusterQuotaExceeded": exceptions.ClusterQuotaExceededFault,
+ "ClusterSubnetQuotaExceeded": exceptions.ClusterSubnetQuotaExceededFault,
+ "UnsupportedOption": exceptions.UnsupportedOptionFault,
+ "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkStateFault,
+ "ClusterSecurityGroupNotFound": exceptions.ClusterSecurityGroupNotFoundFault,
+ "InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupStateFault,
+ "ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExistsFault,
+ "NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceededFault,
+ "ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFoundFault,
+ "ClusterParameterGroupQuotaExceeded": exceptions.ClusterParameterGroupQuotaExceededFault,
+ "ClusterAlreadyExists": exceptions.ClusterAlreadyExistsFault,
+ "InsufficientClusterCapacity": exceptions.InsufficientClusterCapacityFault,
+ "InvalidClusterSubnetState": exceptions.InvalidClusterSubnetStateFault,
+ "SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
+ "InvalidParameterCombination": exceptions.InvalidParameterCombinationFault,
+ }
+
+
+ def __init__(self, **kwargs):
+ region = kwargs.pop('region', None)
+ if not region:
+ region = RegionInfo(self, self.DefaultRegionName,
+ self.DefaultRegionEndpoint)
+ kwargs['host'] = region.endpoint
+ AWSQueryConnection.__init__(self, **kwargs)
+ self.region = region
+
+ def _required_auth_capability(self):
+ return ['hmac-v4']
+
+ def authorize_cluster_security_group_ingress(self,
+ cluster_security_group_name,
+ cidrip=None,
+ ec2_security_group_name=None,
+ ec2_security_group_owner_id=None):
+ """
+ Adds an inbound (ingress) rule to an Amazon Redshift security
+ group. Depending on whether the application accessing your
+ cluster is running on the Internet or an EC2 instance, you can
+ authorize inbound access to either a Classless Interdomain
+ Routing (CIDR) IP address range or an EC2 security group. You
+ can add as many as 20 ingress rules to an Amazon Redshift
+ security group.
+ The EC2 security group must be defined in the AWS region where
+ the cluster resides.
+ For an overview of CIDR blocks, see the Wikipedia article on
+ `Classless Inter-Domain Routing`_.
+
+ You must also associate the security group with a cluster so
+ that clients running on these IP addresses or the EC2 instance
+ are authorized to connect to the cluster. For information
+ about managing security groups, go to `Working with Security
+ Groups`_ in the Amazon Redshift Management Guide .
+
+ :type cluster_security_group_name: string
+ :param cluster_security_group_name: The name of the security group to
+ which the ingress rule is added.
+
+ :type cidrip: string
+        :param cidrip: The IP range to be added to the Amazon Redshift
+            security group.
+
+ :type ec2_security_group_name: string
+        :param ec2_security_group_name: The EC2 security group to be added
+            to the Amazon Redshift security group.
+
+ :type ec2_security_group_owner_id: string
+ :param ec2_security_group_owner_id: The AWS account number of the owner
+ of the security group specified by the EC2SecurityGroupName
+ parameter. The AWS Access Key ID is not an acceptable value.
+ Example: `111122223333`
+
+ """
+ params = {
+ 'ClusterSecurityGroupName': cluster_security_group_name,
+ }
+ if cidrip is not None:
+ params['CIDRIP'] = cidrip
+ if ec2_security_group_name is not None:
+ params['EC2SecurityGroupName'] = ec2_security_group_name
+ if ec2_security_group_owner_id is not None:
+ params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
+ return self._make_request(
+ action='AuthorizeClusterSecurityGroupIngress',
+ verb='POST',
+ path='/', params=params)
+
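For example (hedged; the security group name and CIDR block are placeholders)::

    from boto.redshift import connect_to_region

    conn = connect_to_region('us-east-1')
    conn.authorize_cluster_security_group_ingress(
        'examplesecuritygroup', cidrip='192.0.2.0/24')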
+ def copy_cluster_snapshot(self, source_snapshot_identifier,
+ target_snapshot_identifier):
+ """
+ Copies the specified automated cluster snapshot to a new
+ manual cluster snapshot. The source must be an automated
+ snapshot and it must be in the available state.
+
+ When you delete a cluster, Amazon Redshift deletes any
+ automated snapshots of the cluster. Also, when the retention
+ period of the snapshot expires, Amazon Redshift automatically
+ deletes it. If you want to keep an automated snapshot for a
+ longer period, you can make a manual copy of the snapshot.
+ Manual snapshots are retained until you delete them.
+
+ For more information about working with snapshots, go to
+ `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+ Guide .
+
+ :type source_snapshot_identifier: string
+ :param source_snapshot_identifier:
+ The identifier for the source snapshot.
+
+ Constraints:
+
+
+ + Must be the identifier for a valid automated snapshot whose state is
+ "available".
+
+ :type target_snapshot_identifier: string
+ :param target_snapshot_identifier:
+ The identifier given to the new manual snapshot.
+
+ Constraints:
+
+
+ + Cannot be null, empty, or blank.
+ + Must contain from 1 to 255 alphanumeric characters or hyphens.
+ + First character must be a letter.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+ + Must be unique for the AWS account that is making the request.
+
+ """
+ params = {
+ 'SourceSnapshotIdentifier': source_snapshot_identifier,
+ 'TargetSnapshotIdentifier': target_snapshot_identifier,
+ }
+ return self._make_request(
+ action='CopyClusterSnapshot',
+ verb='POST',
+ path='/', params=params)
+
+ def create_cluster(self, cluster_identifier, node_type, master_username,
+ master_user_password, db_name=None, cluster_type=None,
+ cluster_security_groups=None,
+ vpc_security_group_ids=None,
+ cluster_subnet_group_name=None,
+ availability_zone=None,
+ preferred_maintenance_window=None,
+ cluster_parameter_group_name=None,
+ automated_snapshot_retention_period=None, port=None,
+ cluster_version=None, allow_version_upgrade=None,
+ number_of_nodes=None, publicly_accessible=None,
+ encrypted=None):
+ """
+        Creates a new cluster. To create the cluster in a virtual
+        private cloud (VPC), you must provide a cluster subnet group
+        name. If you don't provide a cluster subnet group name or the
+        cluster security group parameter, Amazon Redshift creates a
+        non-VPC cluster and associates the default cluster security
+        group with the cluster. For more information about managing
+        clusters, go to `Amazon Redshift Clusters`_ in the Amazon
+        Redshift Management Guide.
+
+ :type db_name: string
+ :param db_name:
+ The name of the first database to be created when the cluster is
+ created.
+
+ To create additional databases after the cluster is created, connect to
+ the cluster with a SQL client and use SQL commands to create a
+ database. For more information, go to `Create a Database`_ in the
+ Amazon Redshift Developer Guide.
+
+ Default: `dev`
+
+ Constraints:
+
+
+ + Must contain 1 to 64 alphanumeric characters.
+ + Must contain only lowercase letters.
+ + Cannot be a word that is reserved by the service. A list of reserved
+ words can be found in `Reserved Words`_ in the Amazon Redshift
+ Developer Guide.
+
+ :type cluster_identifier: string
+ :param cluster_identifier: A unique identifier for the cluster. You use
+ this identifier to refer to the cluster for any subsequent cluster
+ operations such as deleting or modifying. The identifier also
+ appears in the Amazon Redshift console.
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens.
+ + Alphabetic characters must be lowercase.
+ + First character must be a letter.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+ + Must be unique for all clusters within an AWS account.
+
+
+ Example: `myexamplecluster`
+
+ :type cluster_type: string
+ :param cluster_type: The type of the cluster. When cluster type is
+ specified as
+
+ + `single-node`, the **NumberOfNodes** parameter is not required.
+ + `multi-node`, the **NumberOfNodes** parameter is required.
+
+
+ Valid Values: `multi-node` | `single-node`
+
+ Default: `multi-node`
+
+ :type node_type: string
+ :param node_type: The node type to be provisioned for the cluster. For
+        information about node types, go to `Working with Clusters`_ in
+ the Amazon Redshift Management Guide .
+ Valid Values: `dw.hs1.xlarge` | `dw.hs1.8xlarge`.
+
+ :type master_username: string
+ :param master_username:
+ The user name associated with the master user account for the cluster
+ that is being created.
+
+ Constraints:
+
+
+ + Must be 1 - 128 alphanumeric characters.
+ + First character must be a letter.
+ + Cannot be a reserved word. A list of reserved words can be found in
+ `Reserved Words`_ in the Amazon Redshift Developer Guide.
+
+ :type master_user_password: string
+ :param master_user_password:
+ The password associated with the master user account for the cluster
+ that is being created.
+
+ Constraints:
+
+
+ + Must be between 8 and 64 characters in length.
+ + Must contain at least one uppercase letter.
+ + Must contain at least one lowercase letter.
+ + Must contain one number.
+
+ :type cluster_security_groups: list
+ :param cluster_security_groups: A list of security groups to be
+ associated with this cluster.
+ Default: The default cluster security group for Amazon Redshift.
+
+ :type vpc_security_group_ids: list
+ :param vpc_security_group_ids: A list of Virtual Private Cloud (VPC)
+ security groups to be associated with the cluster.
+ Default: The default VPC security group is associated with the cluster.
+
+ :type cluster_subnet_group_name: string
+ :param cluster_subnet_group_name: The name of a cluster subnet group to
+ be associated with this cluster.
+        If this parameter is not provided, the resulting cluster will be
+        deployed outside of a virtual private cloud (VPC).
+
+ :type availability_zone: string
+ :param availability_zone: The EC2 Availability Zone (AZ) in which you
+ want Amazon Redshift to provision the cluster. For example, if you
+ have several EC2 instances running in a specific Availability Zone,
+ then you might want the cluster to be provisioned in the same zone
+ in order to decrease network latency.
+ Default: A random, system-chosen Availability Zone in the region that
+ is specified by the endpoint.
+
+ Example: `us-east-1d`
+
+ Constraint: The specified Availability Zone must be in the same region
+ as the current endpoint.
+
+ :type preferred_maintenance_window: string
+ :param preferred_maintenance_window: The weekly time range (in UTC)
+ during which automated cluster maintenance can occur.
+ Format: `ddd:hh24:mi-ddd:hh24:mi`
+
+ Default: A 30-minute window selected at random from an 8-hour block of
+ time per region, occurring on a random day of the week. The
+ following list shows the time blocks for each region from which the
+ default maintenance windows are assigned.
+
+
+ + **US-East (Northern Virginia) Region:** 03:00-11:00 UTC
+ + **US-West (Northern California) Region:** 06:00-14:00 UTC
+ + **EU (Ireland) Region:** 22:00-06:00 UTC
+ + **Asia Pacific (Singapore) Region:** 14:00-22:00 UTC
+        + **Asia Pacific (Tokyo) Region:** 17:00-03:00 UTC
+
+
+ Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
+
+ Constraints: Minimum 30-minute window.
+
+ :type cluster_parameter_group_name: string
+ :param cluster_parameter_group_name:
+ The name of the parameter group to be associated with this cluster.
+
+ Default: The default Amazon Redshift cluster parameter group. For
+ information about the default parameter group, go to `Working with
+ Amazon Redshift Parameter Groups`_
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters or hyphens.
+ + First character must be a letter.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+
+ :type automated_snapshot_retention_period: integer
+ :param automated_snapshot_retention_period: The number of days that
+ automated snapshots are retained. If the value is 0, automated
+ snapshots are disabled. Even if automated snapshots are disabled,
+ you can still create manual snapshots when you want with
+ CreateClusterSnapshot.
+ Default: `1`
+
+ Constraints: Must be a value from 0 to 35.
+
+ :type port: integer
+ :param port: The port number on which the cluster accepts incoming
+ connections.
+ The cluster is accessible only via the JDBC and ODBC connection
+ strings. Part of the connection string requires the port on which
+ the cluster will listen for incoming connections.
+
+ Default: `5439`
+
+ Valid Values: `1150-65535`
+
+ :type cluster_version: string
+ :param cluster_version: The version of the Amazon Redshift engine
+ software that you want to deploy on the cluster.
+ The version selected runs on all the nodes in the cluster.
+
+ Constraints: Only version 1.0 is currently available.
+
+ Example: `1.0`
+
+ :type allow_version_upgrade: boolean
+ :param allow_version_upgrade: If `True`, upgrades can be applied during
+ the maintenance window to the Amazon Redshift engine that is
+ running on the cluster.
+ When a new version of the Amazon Redshift engine is released, you can
+ request that the service automatically apply upgrades during the
+ maintenance window to the Amazon Redshift engine that is running on
+ your cluster.
+
+ Default: `True`
+
+ :type number_of_nodes: integer
+ :param number_of_nodes: The number of compute nodes in the cluster.
+ This parameter is required when the **ClusterType** parameter is
+ specified as `multi-node`.
+        For information about determining how many nodes you need, go to
+        `Working with Clusters`_ in the Amazon Redshift Management Guide.
+
+ If you don't specify this parameter, you get a single-node cluster.
+ When requesting a multi-node cluster, you must specify the number
+ of nodes that you want in the cluster.
+
+ Default: `1`
+
+ Constraints: Value must be at least 1 and no more than 100.
+
+ :type publicly_accessible: boolean
+ :param publicly_accessible: If `True`, the cluster can be accessed from
+ a public network.
+
+ :type encrypted: boolean
+        :param encrypted: If `True`, the cluster data is encrypted at rest.
+ Default: false
+
+ """
+ params = {
+ 'ClusterIdentifier': cluster_identifier,
+ 'NodeType': node_type,
+ 'MasterUsername': master_username,
+ 'MasterUserPassword': master_user_password,
+ }
+ if db_name is not None:
+ params['DBName'] = db_name
+ if cluster_type is not None:
+ params['ClusterType'] = cluster_type
+ if cluster_security_groups is not None:
+ self.build_list_params(params,
+ cluster_security_groups,
+ 'ClusterSecurityGroups.member')
+ if vpc_security_group_ids is not None:
+ self.build_list_params(params,
+ vpc_security_group_ids,
+ 'VpcSecurityGroupIds.member')
+ if cluster_subnet_group_name is not None:
+ params['ClusterSubnetGroupName'] = cluster_subnet_group_name
+ if availability_zone is not None:
+ params['AvailabilityZone'] = availability_zone
+ if preferred_maintenance_window is not None:
+ params['PreferredMaintenanceWindow'] = preferred_maintenance_window
+ if cluster_parameter_group_name is not None:
+ params['ClusterParameterGroupName'] = cluster_parameter_group_name
+ if automated_snapshot_retention_period is not None:
+ params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period
+ if port is not None:
+ params['Port'] = port
+ if cluster_version is not None:
+ params['ClusterVersion'] = cluster_version
+ if allow_version_upgrade is not None:
+ params['AllowVersionUpgrade'] = str(
+ allow_version_upgrade).lower()
+ if number_of_nodes is not None:
+ params['NumberOfNodes'] = number_of_nodes
+ if publicly_accessible is not None:
+ params['PubliclyAccessible'] = str(
+ publicly_accessible).lower()
+ if encrypted is not None:
+ params['Encrypted'] = str(
+ encrypted).lower()
+ return self._make_request(
+ action='CreateCluster',
+ verb='POST',
+ path='/', params=params)
+
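A hedged sketch of a minimal multi-node cluster request; the identifier, user name and password are placeholders, and the node type is one of the documented values above::

    from boto.redshift import connect_to_region

    conn = connect_to_region('us-east-1')
    conn.create_cluster('myexamplecluster',
                        node_type='dw.hs1.xlarge',
                        master_username='admin',
                        master_user_password='Password1',
                        cluster_type='multi-node',
                        number_of_nodes=2,
                        db_name='dev')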
+ def create_cluster_parameter_group(self, parameter_group_name,
+ parameter_group_family, description):
+ """
+ Creates an Amazon Redshift parameter group.
+
+ Creating parameter groups is independent of creating clusters.
+ You can associate a cluster with a parameter group when you
+ create the cluster. You can also associate an existing cluster
+ with a parameter group after the cluster is created by using
+ ModifyCluster.
+
+ Parameters in the parameter group define specific behavior
+ that applies to the databases you create on the cluster. For
+ more information about managing parameter groups, go to
+ `Amazon Redshift Parameter Groups`_ in the Amazon Redshift
+ Management Guide .
+
+ :type parameter_group_name: string
+ :param parameter_group_name:
+ The name of the cluster parameter group.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters or hyphens
+ + First character must be a letter.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+        + Must be unique within your AWS account.
+
+
+ This value is stored as a lower-case string.
+
+ :type parameter_group_family: string
+ :param parameter_group_family: The Amazon Redshift engine version to
+ which the cluster parameter group applies. The cluster engine
+ version determines the set of parameters.
+ To get a list of valid parameter group family names, you can call
+ DescribeClusterParameterGroups. By default, Amazon Redshift returns
+ a list of all the parameter groups that are owned by your AWS
+ account, including the default parameter groups for each Amazon
+ Redshift engine version. The parameter group family names
+ associated with the default parameter groups provide you the valid
+ values. For example, a valid family name is "redshift-1.0".
+
+ :type description: string
+ :param description: A description of the parameter group.
+
+ """
+ params = {
+ 'ParameterGroupName': parameter_group_name,
+ 'ParameterGroupFamily': parameter_group_family,
+ 'Description': description,
+ }
+ return self._make_request(
+ action='CreateClusterParameterGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def create_cluster_security_group(self, cluster_security_group_name,
+ description):
+ """
+ Creates a new Amazon Redshift security group. You use security
+ groups to control access to non-VPC clusters.
+
+        For information about managing security groups, go to `Amazon
+ Redshift Cluster Security Groups`_ in the Amazon Redshift
+ Management Guide .
+
+ :type cluster_security_group_name: string
+ :param cluster_security_group_name: The name for the security group.
+ Amazon Redshift stores the value as a lowercase string.
+ Constraints:
+
+
+ + Must contain no more than 255 alphanumeric characters or hyphens.
+ + Must not be "Default".
+ + Must be unique for all security groups that are created by your AWS
+ account.
+
+
+ Example: `examplesecuritygroup`
+
+ :type description: string
+ :param description: A description for the security group.
+
+ """
+ params = {
+ 'ClusterSecurityGroupName': cluster_security_group_name,
+ 'Description': description,
+ }
+ return self._make_request(
+ action='CreateClusterSecurityGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def create_cluster_snapshot(self, snapshot_identifier,
+ cluster_identifier):
+ """
+ Creates a manual snapshot of the specified cluster. The
+ cluster must be in the "available" state.
+
+ For more information about working with snapshots, go to
+ `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+ Guide .
+
+ :type snapshot_identifier: string
+ :param snapshot_identifier: A unique identifier for the snapshot that
+ you are requesting. This identifier must be unique for all
+ snapshots within the AWS account.
+ Constraints:
+
+
+ + Cannot be null, empty, or blank
+ + Must contain from 1 to 255 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+
+ Example: `my-snapshot-id`
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The cluster identifier for which you want a
+ snapshot.
+
+ """
+ params = {
+ 'SnapshotIdentifier': snapshot_identifier,
+ 'ClusterIdentifier': cluster_identifier,
+ }
+ return self._make_request(
+ action='CreateClusterSnapshot',
+ verb='POST',
+ path='/', params=params)
+
+ def create_cluster_subnet_group(self, cluster_subnet_group_name,
+ description, subnet_ids):
+ """
+ Creates a new Amazon Redshift subnet group. You must provide a
+ list of one or more subnets in your existing Amazon Virtual
+ Private Cloud (Amazon VPC) when creating Amazon Redshift
+        Private Cloud (Amazon VPC) when creating an Amazon Redshift
+        subnet group.
+        For information about subnet groups, go to `Amazon Redshift
+ Cluster Subnet Groups`_ in the Amazon Redshift Management
+ Guide .
+
+ :type cluster_subnet_group_name: string
+ :param cluster_subnet_group_name: The name for the subnet group. Amazon
+ Redshift stores the value as a lowercase string.
+ Constraints:
+
+
+ + Must contain no more than 255 alphanumeric characters or hyphens.
+ + Must not be "Default".
+ + Must be unique for all subnet groups that are created by your AWS
+ account.
+
+
+ Example: `examplesubnetgroup`
+
+ :type description: string
+ :param description: A description for the subnet group.
+
+ :type subnet_ids: list
+ :param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets
+ can be modified in a single request.
+
+ """
+ params = {
+ 'ClusterSubnetGroupName': cluster_subnet_group_name,
+ 'Description': description,
+ }
+ self.build_list_params(params,
+ subnet_ids,
+ 'SubnetIds.member')
+ return self._make_request(
+ action='CreateClusterSubnetGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_cluster(self, cluster_identifier,
+ skip_final_cluster_snapshot=None,
+ final_cluster_snapshot_identifier=None):
+ """
+ Deletes a previously provisioned cluster. A successful
+ response from the web service indicates that the request was
+ received correctly. If a final cluster snapshot is requested
+ the status of the cluster will be "final-snapshot" while the
+ snapshot is being taken, then it's "deleting" once Amazon
+ Redshift begins deleting the cluster. Use DescribeClusters to
+ monitor the status of the deletion. The delete operation
+ cannot be canceled or reverted once submitted. For more
+ information about managing clusters, go to `Amazon Redshift
+ Clusters`_ in the Amazon Redshift Management Guide .
+
+ :type cluster_identifier: string
+ :param cluster_identifier:
+ The identifier of the cluster to be deleted.
+
+ Constraints:
+
+
+ + Must contain lowercase characters.
+ + Must contain from 1 to 63 alphanumeric characters or hyphens.
+ + First character must be a letter.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+
+ :type skip_final_cluster_snapshot: boolean
+ :param skip_final_cluster_snapshot: Determines whether a final snapshot
+ of the cluster is created before Amazon Redshift deletes the
+ cluster. If `True`, a final cluster snapshot is not created. If
+ `False`, a final cluster snapshot is created before the cluster is
+ deleted.
+ The FinalClusterSnapshotIdentifier parameter must be specified if
+ SkipFinalClusterSnapshot is `False`.
+
+ Default: `False`
+
+ :type final_cluster_snapshot_identifier: string
+ :param final_cluster_snapshot_identifier:
+ The identifier of the final snapshot that is to be created immediately
+ before deleting the cluster. If this parameter is provided,
+ SkipFinalClusterSnapshot must be `False`.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters.
+ + First character must be a letter.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+
+ """
+ params = {'ClusterIdentifier': cluster_identifier, }
+ if skip_final_cluster_snapshot is not None:
+ params['SkipFinalClusterSnapshot'] = str(
+ skip_final_cluster_snapshot).lower()
+ if final_cluster_snapshot_identifier is not None:
+ params['FinalClusterSnapshotIdentifier'] = final_cluster_snapshot_identifier
+ return self._make_request(
+ action='DeleteCluster',
+ verb='POST',
+ path='/', params=params)
+
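For example, deleting a cluster while keeping a final snapshot (hedged; the identifiers are placeholders, and `skip_final_cluster_snapshot=False` makes the snapshot identifier required, as noted above)::

    from boto.redshift import connect_to_region

    conn = connect_to_region('us-east-1')
    conn.delete_cluster('myexamplecluster',
                        skip_final_cluster_snapshot=False,
                        final_cluster_snapshot_identifier='final-snap')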
+ def delete_cluster_parameter_group(self, parameter_group_name):
+ """
+ Deletes a specified Amazon Redshift parameter group. You
+ cannot delete a parameter group if it is associated with a
+ cluster.
+
+ :type parameter_group_name: string
+ :param parameter_group_name:
+ The name of the parameter group to be deleted.
+
+ Constraints:
+
+
+ + Must be the name of an existing cluster parameter group.
+ + Cannot delete a default cluster parameter group.
+
+ """
+ params = {'ParameterGroupName': parameter_group_name, }
+ return self._make_request(
+ action='DeleteClusterParameterGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_cluster_security_group(self, cluster_security_group_name):
+ """
+ Deletes an Amazon Redshift security group.
+ You cannot delete a security group that is associated with any
+ clusters. You cannot delete the default security group.
+        For information about managing security groups, go to `Amazon
+ Redshift Cluster Security Groups`_ in the Amazon Redshift
+ Management Guide .
+
+ :type cluster_security_group_name: string
+ :param cluster_security_group_name: The name of the cluster security
+ group to be deleted.
+
+ """
+ params = {
+ 'ClusterSecurityGroupName': cluster_security_group_name,
+ }
+ return self._make_request(
+ action='DeleteClusterSecurityGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_cluster_snapshot(self, snapshot_identifier):
+ """
+ Deletes the specified manual snapshot. The snapshot must be in
+ the "available" state.
+
+ Unlike automated snapshots, manual snapshots are retained even
+ after you delete your cluster. Amazon Redshift does not delete
+        your manual snapshots. You must delete manual snapshots
+ explicitly to avoid getting charged.
+
+ :type snapshot_identifier: string
+ :param snapshot_identifier: The unique identifier of the manual
+ snapshot to be deleted.
+ Constraints: Must be the name of an existing snapshot that is in the
+ `available` state.
+
+ """
+ params = {'SnapshotIdentifier': snapshot_identifier, }
+ return self._make_request(
+ action='DeleteClusterSnapshot',
+ verb='POST',
+ path='/', params=params)
+
+ def delete_cluster_subnet_group(self, cluster_subnet_group_name):
+ """
+ Deletes the specified cluster subnet group.
+
+ :type cluster_subnet_group_name: string
+ :param cluster_subnet_group_name: The name of the cluster subnet group
+            to be deleted.
+
+ """
+ params = {
+ 'ClusterSubnetGroupName': cluster_subnet_group_name,
+ }
+ return self._make_request(
+ action='DeleteClusterSubnetGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_cluster_parameter_groups(self, parameter_group_name=None,
+ max_records=None, marker=None):
+ """
+ Returns a list of Amazon Redshift parameter groups, including
+ parameter groups you created and the default parameter group.
+ For each parameter group, the response includes the parameter
+ group name, description, and parameter group family name. You
+ can optionally specify a name to retrieve the description of a
+ specific parameter group.
+
+ For more information about managing parameter groups, go to
+ `Amazon Redshift Parameter Groups`_ in the Amazon Redshift
+ Management Guide .
+
+ :type parameter_group_name: string
+ :param parameter_group_name: The name of a specific parameter group for
+ which to return details. By default, details about all parameter
+ groups and the default parameter group are returned.
+
+ :type max_records: integer
+ :param max_records: The maximum number of parameter group records to
+ include in the response. If more records exist than the specified
+ `MaxRecords` value, the response includes a marker that you can use
+ in a subsequent DescribeClusterParameterGroups request to retrieve
+ the next set of records.
+ Default: `100`
+
+ Constraints: Value must be at least 20 and no more than 100.
+
+ :type marker: string
+ :param marker: An optional marker returned by a previous
+ DescribeClusterParameterGroups request to indicate the first
+ parameter group that the current request will return.
+
+ """
+ params = {}
+ if parameter_group_name is not None:
+ params['ParameterGroupName'] = parameter_group_name
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeClusterParameterGroups',
+ verb='POST',
+ path='/', params=params)
+
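A hedged pagination sketch; the wrapper keys used to dig the result and marker out of the parsed JSON reflect the usual Query-to-JSON response nesting and are an assumption, not something this diff defines::

    from boto.redshift import connect_to_region

    conn = connect_to_region('us-east-1')
    page = conn.describe_cluster_parameter_groups(max_records=20)
    # Assumed response nesting for the parsed JSON body.
    response = page['DescribeClusterParameterGroupsResponse']
    result = response['DescribeClusterParameterGroupsResult']
    if result.get('Marker'):
        conn.describe_cluster_parameter_groups(max_records=20,
                                               marker=result['Marker'])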
+ def describe_cluster_parameters(self, parameter_group_name, source=None,
+ max_records=None, marker=None):
+ """
+ Returns a detailed list of parameters contained within the
+ specified Amazon Redshift parameter group. For each parameter
+ the response includes information such as parameter name,
+ description, data type, value, whether the parameter value is
+ modifiable, and so on.
+
+        You can specify a source filter to retrieve parameters of only a
+        specific type. For example, to retrieve parameters that were
+        modified by a user action such as from
+        ModifyClusterParameterGroup, you can specify source equal to
+        user.
+
+ For more information about managing parameter groups, go to
+ `Amazon Redshift Parameter Groups`_ in the Amazon Redshift
+ Management Guide .
+
+ :type parameter_group_name: string
+ :param parameter_group_name: The name of a cluster parameter group for
+ which to return details.
+
+ :type source: string
+ :param source: The parameter types to return. Specify `user` to show
+ parameters that are different from the default. Similarly, specify
+ `engine-default` to show parameters that are the same as the
+ default parameter group.
+ Default: All parameter types returned.
+
+ Valid Values: `user` | `engine-default`
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, the response includes a marker that you can specify in your
+ subsequent request to retrieve the remaining results.
+ Default: `100`
+
+ Constraints: Value must be at least 20 and no more than 100.
+
+ :type marker: string
+ :param marker: An optional marker returned from a previous
+ **DescribeClusterParameters** request. If this parameter is
+ specified, the response includes only records beyond the specified
+ marker, up to the value specified by `MaxRecords`.
+
+ """
+ params = {'ParameterGroupName': parameter_group_name, }
+ if source is not None:
+ params['Source'] = source
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeClusterParameters',
+ verb='POST',
+ path='/', params=params)
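+
+ # Illustrative sketch: fetching only the parameters that were changed by
+ # a user, as described above. Assumes `conn` is an instance of this
+ # connection class and that a parameter group named 'my-parameter-group'
+ # exists (name chosen for illustration only).
+ #
+ #     result = conn.describe_cluster_parameters('my-parameter-group',
+ #                                               source='user')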
+
+ def describe_cluster_security_groups(self,
+ cluster_security_group_name=None,
+ max_records=None, marker=None):
+ """
+ Returns information about Amazon Redshift security groups. If
+ the name of a security group is specified, the response will
+ contain information about only that security group.
+
+ For information about managing security groups, go to `Amazon
+ Redshift Cluster Security Groups`_ in the Amazon Redshift
+ Management Guide .
+
+ :type cluster_security_group_name: string
+ :param cluster_security_group_name: The name of a cluster security
+ group for which you are requesting details. You can specify either
+ the **Marker** parameter or a **ClusterSecurityGroupName**
+ parameter, but not both.
+ Example: `securitygroup1`
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to be included in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a marker is included in the response, which you can use in a
+ subsequent DescribeClusterSecurityGroups request.
+ Default: `100`
+
+ Constraints: Value must be at least 20 and no more than 100.
+
+ :type marker: string
+ :param marker: An optional marker returned by a previous
+ DescribeClusterSecurityGroups request to indicate the first
+ security group that the current request will return. You can
+ specify either the **Marker** parameter or a
+ **ClusterSecurityGroupName** parameter, but not both.
+
+ """
+ params = {}
+ if cluster_security_group_name is not None:
+ params['ClusterSecurityGroupName'] = cluster_security_group_name
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeClusterSecurityGroups',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_cluster_snapshots(self, cluster_identifier=None,
+ snapshot_identifier=None,
+ snapshot_type=None, start_time=None,
+ end_time=None, max_records=None,
+ marker=None):
+ """
+ Returns one or more snapshot objects, which contain metadata
+ about your cluster snapshots. By default, this operation
+ returns information about all snapshots of all clusters that
+ are owned by the AWS account.
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The identifier of the cluster for which
+ information about snapshots is requested.
+
+ :type snapshot_identifier: string
+ :param snapshot_identifier: The snapshot identifier of the snapshot
+ about which to return information.
+
+ :type snapshot_type: string
+ :param snapshot_type: The type of snapshots for which you are
+ requesting information. By default, snapshots of all types are
+ returned.
+ Valid Values: `automated` | `manual`
+
+ :type start_time: timestamp
+ :param start_time: A value that requests only snapshots created at or
+ after the specified time. The time value is specified in ISO 8601
+ format. For more information about ISO 8601, go to the `ISO8601
+ Wikipedia page.`_
+ Example: `2012-07-16T18:00:00Z`
+
+ :type end_time: timestamp
+ :param end_time: A time value that requests only snapshots created at
+ or before the specified time. The time value is specified in ISO
+ 8601 format. For more information about ISO 8601, go to the
+ `ISO8601 Wikipedia page.`_
+ Example: `2012-07-16T18:00:00Z`
+
+ :type max_records: integer
+ :param max_records: The maximum number of snapshot records to include
+ in the response. If more records exist than the specified
+ `MaxRecords` value, the response returns a marker that you can use
+ in a subsequent DescribeClusterSnapshots request in order to
+ retrieve the next set of snapshot records.
+ Default: `100`
+
+ Constraints: Must be at least 20 and no more than 100.
+
+ :type marker: string
+ :param marker: An optional marker returned by a previous
+ DescribeClusterSnapshots request to indicate the first snapshot
+ that the request will return.
+
+ """
+ params = {}
+ if cluster_identifier is not None:
+ params['ClusterIdentifier'] = cluster_identifier
+ if snapshot_identifier is not None:
+ params['SnapshotIdentifier'] = snapshot_identifier
+ if snapshot_type is not None:
+ params['SnapshotType'] = snapshot_type
+ if start_time is not None:
+ params['StartTime'] = start_time
+ if end_time is not None:
+ params['EndTime'] = end_time
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeClusterSnapshots',
+ verb='POST',
+ path='/', params=params)
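+
+ # Illustrative sketch: listing the manual snapshots of one cluster that
+ # were created inside a time window. The cluster name and timestamps are
+ # placeholders; `conn` is assumed to be an instance of this connection
+ # class.
+ #
+ #     result = conn.describe_cluster_snapshots(
+ #         cluster_identifier='examplecluster',
+ #         snapshot_type='manual',
+ #         start_time='2012-07-16T18:00:00Z',
+ #         end_time='2012-07-17T18:00:00Z')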
+
+ def describe_cluster_subnet_groups(self, cluster_subnet_group_name=None,
+ max_records=None, marker=None):
+ """
+ Returns one or more cluster subnet group objects, which
+ contain metadata about your cluster subnet groups. By default,
+ this operation returns information about all cluster subnet
+ groups that are defined in your AWS account.
+
+ :type cluster_subnet_group_name: string
+ :param cluster_subnet_group_name: The name of the cluster subnet group
+ for which information is requested.
+
+ :type max_records: integer
+ :param max_records: The maximum number of cluster subnet group records
+ to include in the response. If more records exist than the
+ specified `MaxRecords` value, the response returns a marker that
+ you can use in a subsequent DescribeClusterSubnetGroups request in
+ order to retrieve the next set of cluster subnet group records.
+ Default: 100
+
+ Constraints: Must be at least 20 and no more than 100.
+
+ :type marker: string
+ :param marker: An optional marker returned by a previous
+ DescribeClusterSubnetGroups request to indicate the first cluster
+ subnet group that the current request will return.
+
+ """
+ params = {}
+ if cluster_subnet_group_name is not None:
+ params['ClusterSubnetGroupName'] = cluster_subnet_group_name
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeClusterSubnetGroups',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_cluster_versions(self, cluster_version=None,
+ cluster_parameter_group_family=None,
+ max_records=None, marker=None):
+ """
+ Returns descriptions of the available Amazon Redshift cluster
+ versions. You can call this operation even before creating any
+ clusters to learn more about the Amazon Redshift versions. For
+ more information about managing clusters, go to `Amazon
+ Redshift Clusters`_ in the Amazon Redshift Management Guide
+
+ :type cluster_version: string
+ :param cluster_version: The specific cluster version to return.
+ Example: `1.0`
+
+ :type cluster_parameter_group_family: string
+ :param cluster_parameter_group_family:
+ The name of a specific cluster parameter group family to return details
+ for.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records than the `MaxRecords` value are
+ available, a marker is included in the response so that the
+ remaining results can be retrieved.
+ Default: `100`
+
+ Constraints: Value must be at least 20 and no more than 100.
+
+ :type marker: string
+ :param marker: The marker returned from a previous request. If this
+ parameter is specified, the response includes records beyond the
+ marker only, up to `MaxRecords`.
+
+ """
+ params = {}
+ if cluster_version is not None:
+ params['ClusterVersion'] = cluster_version
+ if cluster_parameter_group_family is not None:
+ params['ClusterParameterGroupFamily'] = cluster_parameter_group_family
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeClusterVersions',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_clusters(self, cluster_identifier=None, max_records=None,
+ marker=None):
+ """
+ Returns properties of provisioned clusters including general
+ cluster properties, cluster database properties, maintenance
+ and backup properties, and security and access properties.
+ This operation supports pagination. For more information about
+ managing clusters, go to `Amazon Redshift Clusters`_ in the
+ Amazon Redshift Management Guide .
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The unique identifier of a cluster whose
+ properties you are requesting. This parameter isn't case sensitive.
+ The default is that all clusters defined for an account are returned.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records that the response can
+ include. If more records exist than the specified `MaxRecords`
+ value, a `marker` is included in the response that can be used in a
+ new **DescribeClusters** request to continue listing results.
+ Default: `100`
+
+ Constraints: Value must be at least 20 and no more than 100.
+
+ :type marker: string
+ :param marker: An optional marker returned by a previous
+ **DescribeClusters** request to indicate the first cluster that the
+ current **DescribeClusters** request will return.
+ You can specify either a **Marker** parameter or a
+ **ClusterIdentifier** parameter in a **DescribeClusters** request,
+ but not both.
+
+ """
+ params = {}
+ if cluster_identifier is not None:
+ params['ClusterIdentifier'] = cluster_identifier
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeClusters',
+ verb='POST',
+ path='/', params=params)
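+
+ # Illustrative sketch: describing a single cluster versus all clusters in
+ # the account. `conn` is assumed to be an instance of this connection
+ # class; 'examplecluster' is a placeholder identifier.
+ #
+ #     all_clusters = conn.describe_clusters()
+ #     one_cluster = conn.describe_clusters(
+ #         cluster_identifier='examplecluster')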
+
+ def describe_default_cluster_parameters(self, parameter_group_family,
+ max_records=None, marker=None):
+ """
+ Returns a list of parameter settings for the specified
+ parameter group family.
+
+ For more information about managing parameter groups, go to
+ `Amazon Redshift Parameter Groups`_ in the Amazon Redshift
+ Management Guide .
+
+ :type parameter_group_family: string
+ :param parameter_group_family: The name of the cluster parameter group
+ family.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a marker is included in the response so that the remaining
+ results may be retrieved.
+ Default: `100`
+
+ Constraints: Value must be at least 20 and no more than 100.
+
+ :type marker: string
+ :param marker: An optional marker returned from a previous
+ **DescribeDefaultClusterParameters** request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by `MaxRecords`.
+
+ """
+ params = {'ParameterGroupFamily': parameter_group_family, }
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeDefaultClusterParameters',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_events(self, source_identifier=None, source_type=None,
+ start_time=None, end_time=None, duration=None,
+ max_records=None, marker=None):
+ """
+ Returns events related to clusters, security groups,
+ snapshots, and parameter groups for the past 14 days. Events
+ specific to a particular cluster, security group, snapshot or
+ parameter group can be obtained by providing the name as a
+ parameter. By default, the past hour of events is returned.
+
+ :type source_identifier: string
+ :param source_identifier:
+ The identifier of the event source for which events will be returned.
+ If this parameter is not specified, then all sources are included
+ in the response.
+
+ Constraints:
+
+ If SourceIdentifier is supplied, SourceType must also be provided.
+
+
+ + Specify a cluster identifier when SourceType is `cluster`.
+ + Specify a cluster security group name when SourceType is `cluster-
+ security-group`.
+ + Specify a cluster parameter group name when SourceType is `cluster-
+ parameter-group`.
+ + Specify a cluster snapshot identifier when SourceType is `cluster-
+ snapshot`.
+
+ :type source_type: string
+ :param source_type:
+ The event source to retrieve events for. If no value is specified, all
+ events are returned.
+
+ Constraints:
+
+ If SourceType is supplied, SourceIdentifier must also be provided.
+
+
+ + Specify `cluster` when SourceIdentifier is a cluster identifier.
+ + Specify `cluster-security-group` when SourceIdentifier is a cluster
+ security group name.
+ + Specify `cluster-parameter-group` when SourceIdentifier is a cluster
+ parameter group name.
+ + Specify `cluster-snapshot` when SourceIdentifier is a cluster
+ snapshot identifier.
+
+ :type start_time: timestamp
+ :param start_time: The beginning of the time interval to retrieve
+ events for, specified in ISO 8601 format. For more information
+ about ISO 8601, go to the `ISO8601 Wikipedia page.`_
+ Example: `2009-07-08T18:00Z`
+
+ :type end_time: timestamp
+ :param end_time: The end of the time interval for which to retrieve
+ events, specified in ISO 8601 format. For more information about
+ ISO 8601, go to the `ISO8601 Wikipedia page.`_
+ Example: `2009-07-08T18:00Z`
+
+ :type duration: integer
+ :param duration: The number of minutes prior to the time of the request
+ for which to retrieve events. For example, if the request is sent
+ at 18:00 and you specify a duration of 60, then only events which
+ have occurred after 17:00 will be returned.
+ Default: `60`
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a marker is included in the response so that the remaining
+ results may be retrieved.
+ Default: `100`
+
+ Constraints: Value must be at least 20 and no more than 100.
+
+ :type marker: string
+ :param marker: An optional marker returned from a previous
+ **DescribeEvents** request. If this parameter is specified, the
+ response includes only records beyond the marker, up to the value
+ specified by `MaxRecords`.
+
+ """
+ params = {}
+ if source_identifier is not None:
+ params['SourceIdentifier'] = source_identifier
+ if source_type is not None:
+ params['SourceType'] = source_type
+ if start_time is not None:
+ params['StartTime'] = start_time
+ if end_time is not None:
+ params['EndTime'] = end_time
+ if duration is not None:
+ params['Duration'] = duration
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeEvents',
+ verb='POST',
+ path='/', params=params)
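+
+ # Illustrative sketch: SourceIdentifier and SourceType must be supplied
+ # together, as documented above. Here the last two hours of events for a
+ # single cluster are requested; `conn` and the cluster name are
+ # placeholders.
+ #
+ #     events = conn.describe_events(source_identifier='examplecluster',
+ #                                   source_type='cluster',
+ #                                   duration=120)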
+
+ def describe_orderable_cluster_options(self, cluster_version=None,
+ node_type=None, max_records=None,
+ marker=None):
+ """
+ Returns a list of orderable cluster options. Before you create
+ a new cluster you can use this operation to find what options
+ are available, such as the EC2 Availability Zones (AZ) in the
+ specific AWS region that you can specify, and the node types
+ you can request. The node types differ by available storage,
+ memory, CPU and price. Given the costs involved, you might want
+ to obtain a list of cluster options in the specific region and
+ specify values when creating a cluster. For more information
+ about managing clusters, go to `Amazon Redshift Clusters`_ in
+ the Amazon Redshift Management Guide
+
+ :type cluster_version: string
+ :param cluster_version: The version filter value. Specify this
+ parameter to show only the available offerings matching the
+ specified version.
+ Default: All versions.
+
+ Constraints: Must be one of the versions returned from
+ DescribeClusterVersions.
+
+ :type node_type: string
+ :param node_type: The node type filter value. Specify this parameter to
+ show only the available offerings matching the specified node type.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a marker is included in the response so that the remaining
+ results may be retrieved.
+ Default: `100`
+
+ Constraints: minimum 20, maximum 100.
+
+ :type marker: string
+ :param marker: An optional marker returned from a previous
+ **DescribeOrderableClusterOptions** request. If this parameter is
+ specified, the response includes only records beyond the marker, up
+ to the value specified by `MaxRecords`.
+
+ """
+ params = {}
+ if cluster_version is not None:
+ params['ClusterVersion'] = cluster_version
+ if node_type is not None:
+ params['NodeType'] = node_type
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeOrderableClusterOptions',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_reserved_node_offerings(self,
+ reserved_node_offering_id=None,
+ max_records=None, marker=None):
+ """
+ Returns a list of the available reserved node offerings by
+ Amazon Redshift with their descriptions including the node
+ type, the fixed and recurring costs of reserving the node, and the
+ duration the node will be reserved for you. These descriptions
+ help you determine which reserved node offering you want to
+ purchase. You then use the unique offering ID in your call to
+ PurchaseReservedNodeOffering to reserve one or more nodes for
+ your Amazon Redshift cluster.
+
+ For more information about reserved nodes, go to
+ `Purchasing Reserved Nodes`_ in the Amazon Redshift Management
+ Guide .
+
+ :type reserved_node_offering_id: string
+ :param reserved_node_offering_id: The unique identifier for the
+ offering.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a marker is included in the response so that the remaining
+ results may be retrieved.
+ Default: `100`
+
+ Constraints: minimum 20, maximum 100.
+
+ :type marker: string
+ :param marker: An optional marker returned by a previous
+ DescribeReservedNodeOfferings request to indicate the first
+ offering that the request will return.
+ You can specify either a **Marker** parameter or a
+ **ClusterIdentifier** parameter in a DescribeClusters request, but
+ not both.
+
+ """
+ params = {}
+ if reserved_node_offering_id is not None:
+ params['ReservedNodeOfferingId'] = reserved_node_offering_id
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeReservedNodeOfferings',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_reserved_nodes(self, reserved_node_id=None,
+ max_records=None, marker=None):
+ """
+ Returns the descriptions of the reserved nodes.
+
+ :type reserved_node_id: string
+ :param reserved_node_id: Identifier for the node reservation.
+
+ :type max_records: integer
+ :param max_records: The maximum number of records to include in the
+ response. If more records exist than the specified `MaxRecords`
+ value, a marker is included in the response so that the remaining
+ results may be retrieved.
+ Default: `100`
+
+ Constraints: minimum 20, maximum 100.
+
+ :type marker: string
+ :param marker: An optional marker returned by a previous
+ DescribeReservedNodes request to indicate the first reserved node
+ that the current request will return.
+
+ """
+ params = {}
+ if reserved_node_id is not None:
+ params['ReservedNodeId'] = reserved_node_id
+ if max_records is not None:
+ params['MaxRecords'] = max_records
+ if marker is not None:
+ params['Marker'] = marker
+ return self._make_request(
+ action='DescribeReservedNodes',
+ verb='POST',
+ path='/', params=params)
+
+ def describe_resize(self, cluster_identifier):
+ """
+ Returns information about the last resize operation for the
+ specified cluster. If no resize operation has ever been
+ initiated for the specified cluster, an `HTTP 404` error is
+ returned. If a resize operation was initiated and completed,
+ the status of the resize remains as `SUCCEEDED` until the next
+ resize.
+
+ A resize operation can be requested using ModifyCluster and
+ specifying a different number or type of nodes for the
+ cluster.
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The unique identifier of a cluster whose
+ resize progress you are requesting. This parameter isn't case-
+ sensitive.
+ By default, resize operations for all clusters defined for an AWS
+ account are returned.
+
+ """
+ params = {'ClusterIdentifier': cluster_identifier, }
+ return self._make_request(
+ action='DescribeResize',
+ verb='POST',
+ path='/', params=params)
+
+ def modify_cluster(self, cluster_identifier, cluster_type=None,
+ node_type=None, number_of_nodes=None,
+ cluster_security_groups=None,
+ vpc_security_group_ids=None,
+ master_user_password=None,
+ cluster_parameter_group_name=None,
+ automated_snapshot_retention_period=None,
+ preferred_maintenance_window=None,
+ cluster_version=None, allow_version_upgrade=None):
+ """
+ Modifies the settings for a cluster. For example, you can add
+ another security or parameter group, update the preferred
+ maintenance window, or change the master user password.
+ Resetting a cluster password or modifying the security groups
+ associated with a cluster does not require a reboot. However,
+ modifying a parameter group requires a reboot for the parameters to
+ take effect. For more information about managing clusters, go
+ to `Amazon Redshift Clusters`_ in the Amazon Redshift
+ Management Guide
+
+ You can also change node type and the number of nodes to scale
+ up or down the cluster. When resizing a cluster, you must
+ specify both the number of nodes and the node type even if one
+ of the parameters does not change. If you specify the same
+ number of nodes and node type that are already configured for
+ the cluster, an error is returned.
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The unique identifier of the cluster to be
+ modified.
+ Example: `examplecluster`
+
+ :type cluster_type: string
+ :param cluster_type: The new cluster type.
+ When you submit your cluster resize request, your existing cluster goes
+ into a read-only mode. After Amazon Redshift provisions a new
+ cluster based on your resize requirements, there will be a temporary
+ outage while the old cluster is deleted and your connection is
+ switched to the new cluster. You can use DescribeResize to track
+ the progress of the resize request.
+
+ Valid Values: `multi-node` | `single-node`
+
+ :type node_type: string
+ :param node_type: The new node type of the cluster. If you specify a
+ new node type, you must also specify the number of nodes
+ parameter.
+ When you submit your request to resize a cluster, Amazon Redshift sets
+ access permissions for the cluster to read-only. After Amazon
+ Redshift provisions a new cluster according to your resize
+ requirements, there will be a temporary outage while the old
+ cluster is deleted and your connection is switched to the new
+ cluster. When the new connection is complete, the original access
+ permissions for the cluster are restored. You can use
+ DescribeResize to track the progress of the resize request.
+
+ Valid Values: `dw.hs1.xlarge` | `dw.hs1.8xlarge`
+
+ :type number_of_nodes: integer
+ :param number_of_nodes: The new number of nodes of the cluster. If you
+ specify a new number of nodes, you must also specify the node type
+ parameter.
+ When you submit your request to resize a cluster, Amazon Redshift sets
+ access permissions for the cluster to read-only. After Amazon
+ Redshift provisions a new cluster according to your resize
+ requirements, there will be a temporary outage while the old
+ cluster is deleted and your connection is switched to the new
+ cluster. When the new connection is complete, the original access
+ permissions for the cluster are restored. You can use
+ DescribeResize to track the progress of the resize request.
+
+ Valid Values: Integer greater than `0`.
+
+ :type cluster_security_groups: list
+ :param cluster_security_groups:
+ A list of cluster security groups to be authorized on this cluster.
+ This change is asynchronously applied as soon as possible.
+
+ Security groups currently associated with the cluster that are not in
+ the list of groups to apply will be revoked from the cluster.
+
+ Constraints:
+
+
+ + Must be 1 to 255 alphanumeric characters or hyphens
+ + First character must be a letter
+ + Cannot end with a hyphen or contain two consecutive hyphens
+
+ :type vpc_security_group_ids: list
+ :param vpc_security_group_ids: A list of Virtual Private Cloud (VPC)
+ security groups to be associated with the cluster.
+
+ :type master_user_password: string
+ :param master_user_password:
+ The new password for the cluster master user. This change is
+ asynchronously applied as soon as possible. Between the time of the
+ request and the completion of the request, the `MasterUserPassword`
+ element exists in the `PendingModifiedValues` element of the
+ operation response.
+ Operations never return the password, so this operation provides a way
+ to regain access to the master user account for a cluster if the
+ password is lost.
+
+
+ Default: Uses existing setting.
+
+ Constraints:
+
+
+ + Must be between 8 and 64 characters in length.
+ + Must contain at least one uppercase letter.
+ + Must contain at least one lowercase letter.
+ + Must contain one number.
+
+ :type cluster_parameter_group_name: string
+ :param cluster_parameter_group_name: The name of the cluster parameter
+ group to apply to this cluster. This change is applied only after
+ the cluster is rebooted. To reboot a cluster use RebootCluster.
+ Default: Uses existing setting.
+
+ Constraints: The cluster parameter group must be in the same parameter
+ group family that matches the cluster version.
+
+ :type automated_snapshot_retention_period: integer
+ :param automated_snapshot_retention_period: The number of days that
+ automated snapshots are retained. If the value is 0, automated
+ snapshots are disabled. Even if automated snapshots are disabled,
+ you can still create manual snapshots when you want with
+ CreateClusterSnapshot.
+ If you decrease the automated snapshot retention period from its
+ current value, existing automated snapshots which fall outside of
+ the new retention period will be immediately deleted.
+
+ Default: Uses existing setting.
+
+ Constraints: Must be a value from 0 to 35.
+
+ :type preferred_maintenance_window: string
+ :param preferred_maintenance_window: The weekly time range (in UTC)
+ during which system maintenance can occur, if necessary. If system
+ maintenance is necessary during the window, it may result in an
+ outage.
+ This maintenance window change is made immediately. If the new
+ maintenance window indicates the current time, there must be at
+ least 120 minutes between the current time and end of the window in
+ order to ensure that pending changes are applied.
+
+ Default: Uses existing setting.
+
+ Format: ddd:hh24:mi-ddd:hh24:mi, for example `wed:07:30-wed:08:00`.
+
+ Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
+
+ Constraints: Must be at least 30 minutes.
+
+ :type cluster_version: string
+ :param cluster_version: The new version number of the Amazon Redshift
+ engine to upgrade to.
+ For major version upgrades, if a non-default cluster parameter group is
+ currently in use, a new cluster parameter group in the cluster
+ parameter group family for the new version must be specified. The
+ new cluster parameter group can be the default for that cluster
+ parameter group family. For more information about managing
+ parameter groups, go to `Amazon Redshift Parameter Groups`_ in the
+ Amazon Redshift Management Guide .
+
+ Example: `1.0`
+
+ :type allow_version_upgrade: boolean
+ :param allow_version_upgrade: If `True`, upgrades will be applied
+ automatically to the cluster during the maintenance window.
+ Default: `False`
+
+ """
+ params = {'ClusterIdentifier': cluster_identifier, }
+ if cluster_type is not None:
+ params['ClusterType'] = cluster_type
+ if node_type is not None:
+ params['NodeType'] = node_type
+ if number_of_nodes is not None:
+ params['NumberOfNodes'] = number_of_nodes
+ if cluster_security_groups is not None:
+ self.build_list_params(params,
+ cluster_security_groups,
+ 'ClusterSecurityGroups.member')
+ if vpc_security_group_ids is not None:
+ self.build_list_params(params,
+ vpc_security_group_ids,
+ 'VpcSecurityGroupIds.member')
+ if master_user_password is not None:
+ params['MasterUserPassword'] = master_user_password
+ if cluster_parameter_group_name is not None:
+ params['ClusterParameterGroupName'] = cluster_parameter_group_name
+ if automated_snapshot_retention_period is not None:
+ params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period
+ if preferred_maintenance_window is not None:
+ params['PreferredMaintenanceWindow'] = preferred_maintenance_window
+ if cluster_version is not None:
+ params['ClusterVersion'] = cluster_version
+ if allow_version_upgrade is not None:
+ params['AllowVersionUpgrade'] = str(
+ allow_version_upgrade).lower()
+ return self._make_request(
+ action='ModifyCluster',
+ verb='POST',
+ path='/', params=params)
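+
+ # Illustrative sketch: resizing a cluster. As noted above, both the node
+ # type and the number of nodes must be supplied even if only one of them
+ # changes. `conn` and the identifiers are placeholders.
+ #
+ #     conn.modify_cluster('examplecluster',
+ #                         cluster_type='multi-node',
+ #                         node_type='dw.hs1.xlarge',
+ #                         number_of_nodes=4)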
+
+ def modify_cluster_parameter_group(self, parameter_group_name,
+ parameters):
+ """
+ Modifies the parameters of a parameter group.
+
+ For more information about managing parameter groups, go to
+ `Amazon Redshift Parameter Groups`_ in the Amazon Redshift
+ Management Guide .
+
+ :type parameter_group_name: string
+ :param parameter_group_name: The name of the parameter group to be
+ modified.
+
+ :type parameters: list
+ :param parameters: An array of parameters to be modified. A maximum of
+ 20 parameters can be modified in a single request.
+ For each parameter to be modified, you must supply at least the
+ parameter name and parameter value; other name-value pairs of the
+ parameter are optional.
+
+ """
+ params = {'ParameterGroupName': parameter_group_name, }
+ self.build_complex_list_params(
+ params, parameters,
+ 'Parameters.member',
+ ('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion'))
+ return self._make_request(
+ action='ModifyClusterParameterGroup',
+ verb='POST',
+ path='/', params=params)
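+
+ # Illustrative sketch: each entry in `parameters` is serialized
+ # positionally against the names listed above, so a (name, value) pair
+ # per parameter is sufficient. The parameter name below is an example
+ # only; `conn` is assumed to be an instance of this connection class.
+ #
+ #     conn.modify_cluster_parameter_group(
+ #         'myclusterparametergroup',
+ #         parameters=[('statement_timeout', '20000')])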
+
+ def modify_cluster_subnet_group(self, cluster_subnet_group_name,
+ subnet_ids, description=None):
+ """
+ Modifies a cluster subnet group to include the specified list
+ of VPC subnets. The operation replaces the existing list of
+ subnets with the new list of subnets.
+
+ :type cluster_subnet_group_name: string
+ :param cluster_subnet_group_name: The name of the subnet group to be
+ modified.
+
+ :type description: string
+ :param description: A text description of the subnet group to be
+ modified.
+
+ :type subnet_ids: list
+ :param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets
+ can be modified in a single request.
+
+ """
+ params = {
+ 'ClusterSubnetGroupName': cluster_subnet_group_name,
+ }
+ self.build_list_params(params,
+ subnet_ids,
+ 'SubnetIds.member')
+ if description is not None:
+ params['Description'] = description
+ return self._make_request(
+ action='ModifyClusterSubnetGroup',
+ verb='POST',
+ path='/', params=params)
+
+ def purchase_reserved_node_offering(self, reserved_node_offering_id,
+ node_count=None):
+ """
+ Allows you to purchase reserved nodes. Amazon Redshift offers
+ a predefined set of reserved node offerings. You can purchase
+ one of the offerings. You can call the
+ DescribeReservedNodeOfferings API to obtain the available
+ reserved node offerings. You can call this API by providing a
+ specific reserved node offering and the number of nodes you
+ want to reserve.
+
+ For more information about reserved nodes, go to
+ `Purchasing Reserved Nodes`_ in the Amazon Redshift Management
+ Guide .
+
+ :type reserved_node_offering_id: string
+ :param reserved_node_offering_id: The unique identifier of the reserved
+ node offering you want to purchase.
+
+ :type node_count: integer
+ :param node_count: The number of reserved nodes you want to purchase.
+ Default: `1`
+
+ """
+ params = {
+ 'ReservedNodeOfferingId': reserved_node_offering_id,
+ }
+ if node_count is not None:
+ params['NodeCount'] = node_count
+ return self._make_request(
+ action='PurchaseReservedNodeOffering',
+ verb='POST',
+ path='/', params=params)
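+
+ # Illustrative sketch: the offering identifier comes from a prior
+ # DescribeReservedNodeOfferings call; how it is extracted from the JSON
+ # response is omitted here. `conn` is assumed to be an instance of this
+ # connection class.
+ #
+ #     offerings = conn.describe_reserved_node_offerings()
+ #     offering_id = ...  # pick a ReservedNodeOfferingId from `offerings`
+ #     conn.purchase_reserved_node_offering(offering_id, node_count=2)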
+
+ def reboot_cluster(self, cluster_identifier):
+ """
+ Reboots a cluster. This action is taken as soon as possible.
+ It results in a momentary outage to the cluster, during which
+ the cluster status is set to `rebooting`. A cluster event is
+ created when the reboot is completed. Any pending cluster
+ modifications (see ModifyCluster) are applied at this reboot.
+ For more information about managing clusters, go to `Amazon
+ Redshift Clusters`_ in the Amazon Redshift Management Guide
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The cluster identifier.
+
+ """
+ params = {'ClusterIdentifier': cluster_identifier, }
+ return self._make_request(
+ action='RebootCluster',
+ verb='POST',
+ path='/', params=params)
+
+ def reset_cluster_parameter_group(self, parameter_group_name,
+ reset_all_parameters=None,
+ parameters=None):
+ """
+ Sets one or more parameters of the specified parameter group
+ to their default values and sets the source values of the
+ parameters to "engine-default". To reset the entire parameter
+ group specify the ResetAllParameters parameter. For parameter
+ changes to take effect you must reboot any associated
+ clusters.
+
+ :type parameter_group_name: string
+ :param parameter_group_name: The name of the cluster parameter group to
+ be reset.
+
+ :type reset_all_parameters: boolean
+ :param reset_all_parameters: If `True`, all parameters in the specified
+ parameter group will be reset to their default values.
+ Default: `True`
+
+ :type parameters: list
+ :param parameters: An array of names of parameters to be reset. If
+ the ResetAllParameters option is not used, then at least one parameter
+ name must be supplied.
+ Constraints: A maximum of 20 parameters can be reset in a single
+ request.
+
+ """
+ params = {'ParameterGroupName': parameter_group_name, }
+ if reset_all_parameters is not None:
+ params['ResetAllParameters'] = str(
+ reset_all_parameters).lower()
+ if parameters is not None:
+ self.build_complex_list_params(
+ params, parameters,
+ 'Parameters.member',
+ ('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion'))
+ return self._make_request(
+ action='ResetClusterParameterGroup',
+ verb='POST',
+ path='/', params=params)
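+
+ # Illustrative sketch: resetting either the whole group or a named subset
+ # of parameters. Parameter entries are serialized the same way as in
+ # ModifyClusterParameterGroup, so a one-element tuple per name works.
+ # Names and `conn` are placeholders.
+ #
+ #     conn.reset_cluster_parameter_group('myclusterparametergroup',
+ #                                        reset_all_parameters=True)
+ #     conn.reset_cluster_parameter_group(
+ #         'myclusterparametergroup',
+ #         parameters=[('statement_timeout',)])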
+
+ def restore_from_cluster_snapshot(self, cluster_identifier,
+ snapshot_identifier, port=None,
+ availability_zone=None,
+ allow_version_upgrade=None,
+ cluster_subnet_group_name=None,
+ publicly_accessible=None):
+ """
+ Creates a new cluster from a snapshot. Amazon Redshift creates
+ the resulting cluster with the same configuration as the
+ original cluster from which the snapshot was created, except
+ that the new cluster is created with the default cluster
+ security and parameter group. After Amazon Redshift creates
+ the cluster you can use the ModifyCluster API to associate a
+ different security group and different parameter group with
+ the restored cluster.
+
+ If a snapshot was taken of a cluster in a VPC, you can restore it
+ only in a VPC. In this case, you must provide a cluster subnet
+ group where you want the cluster restored. If the snapshot was
+ taken of a cluster outside a VPC, then you can restore it only
+ outside a VPC.
+
+ For more information about working with snapshots, go to
+ `Amazon Redshift Snapshots`_ in the Amazon Redshift Management
+ Guide .
+
+ :type cluster_identifier: string
+ :param cluster_identifier: The identifier of the cluster that will be
+ created from restoring the snapshot.
+
+ Constraints:
+
+
+ + Must contain from 1 to 63 alphanumeric characters or hyphens.
+ + Alphabetic characters must be lowercase.
+ + First character must be a letter.
+ + Cannot end with a hyphen or contain two consecutive hyphens.
+ + Must be unique for all clusters within an AWS account.
+
+ :type snapshot_identifier: string
+ :param snapshot_identifier: The name of the snapshot from which to
+ create the new cluster. This parameter isn't case sensitive.
+ Example: `my-snapshot-id`
+
+ :type port: integer
+ :param port: The port number on which the cluster accepts connections.
+ Default: The same port as the original cluster.
+
+ Constraints: Must be between `1115` and `65535`.
+
+ :type availability_zone: string
+ :param availability_zone: The Amazon EC2 Availability Zone in which to
+ restore the cluster.
+ Default: A random, system-chosen Availability Zone.
+
+ Example: `us-east-1a`
+
+ :type allow_version_upgrade: boolean
+ :param allow_version_upgrade: If `True`, upgrades can be applied during
+ the maintenance window to the Amazon Redshift engine that is
+ running on the cluster.
+ Default: `True`
+
+ :type cluster_subnet_group_name: string
+ :param cluster_subnet_group_name: The name of the subnet group where
+ you want the cluster restored.
+ A snapshot of a cluster in a VPC can be restored only in a VPC.
+ Therefore, you must provide the subnet group name where you want
+ the cluster restored.
+
+ :type publicly_accessible: boolean
+ :param publicly_accessible: If `True`, the cluster can be accessed from
+ a public network.
+
+ """
+ params = {
+ 'ClusterIdentifier': cluster_identifier,
+ 'SnapshotIdentifier': snapshot_identifier,
+ }
+ if port is not None:
+ params['Port'] = port
+ if availability_zone is not None:
+ params['AvailabilityZone'] = availability_zone
+ if allow_version_upgrade is not None:
+ params['AllowVersionUpgrade'] = str(
+ allow_version_upgrade).lower()
+ if cluster_subnet_group_name is not None:
+ params['ClusterSubnetGroupName'] = cluster_subnet_group_name
+ if publicly_accessible is not None:
+ params['PubliclyAccessible'] = str(
+ publicly_accessible).lower()
+ return self._make_request(
+ action='RestoreFromClusterSnapshot',
+ verb='POST',
+ path='/', params=params)
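+
+ # Illustrative sketch: restoring a snapshot of a VPC cluster, which, as
+ # described above, requires a cluster subnet group. Identifiers are
+ # placeholders; `conn` is assumed to be an instance of this connection
+ # class.
+ #
+ #     conn.restore_from_cluster_snapshot(
+ #         'restored-cluster',
+ #         'my-snapshot-id',
+ #         cluster_subnet_group_name='my-subnet-group')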
+
+ def revoke_cluster_security_group_ingress(self,
+ cluster_security_group_name,
+ cidrip=None,
+ ec2_security_group_name=None,
+ ec2_security_group_owner_id=None):
+ """
+ Revokes an ingress rule in an Amazon Redshift security group
+ for a previously authorized IP range or Amazon EC2 security
+ group. To add an ingress rule, see
+ AuthorizeClusterSecurityGroupIngress. For information about
+ managing security groups, go to `Amazon Redshift Cluster
+ Security Groups`_ in the Amazon Redshift Management Guide .
+
+ :type cluster_security_group_name: string
+ :param cluster_security_group_name: The name of the security group from
+ which to revoke the ingress rule.
+
+ :type cidrip: string
+ :param cidrip: The IP range for which to revoke access. This range must
+ be a valid Classless Inter-Domain Routing (CIDR) block of IP
+ addresses. If `CIDRIP` is specified, `EC2SecurityGroupName` and
+ `EC2SecurityGroupOwnerId` cannot be provided.
+
+ :type ec2_security_group_name: string
+ :param ec2_security_group_name: The name of the EC2 Security Group
+ whose access is to be revoked. If `EC2SecurityGroupName` is
+ specified, `EC2SecurityGroupOwnerId` must also be provided and
+ `CIDRIP` cannot be provided.
+
+ :type ec2_security_group_owner_id: string
+ :param ec2_security_group_owner_id: The AWS account number of the owner
+ of the security group specified in the `EC2SecurityGroupName`
+ parameter. The AWS access key ID is not an acceptable value. If
+ `EC2SecurityGroupOwnerId` is specified, `EC2SecurityGroupName` must
+ also be provided and `CIDRIP` cannot be provided.
+ Example: `111122223333`
+
+ """
+ params = {
+ 'ClusterSecurityGroupName': cluster_security_group_name,
+ }
+ if cidrip is not None:
+ params['CIDRIP'] = cidrip
+ if ec2_security_group_name is not None:
+ params['EC2SecurityGroupName'] = ec2_security_group_name
+ if ec2_security_group_owner_id is not None:
+ params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
+ return self._make_request(
+ action='RevokeClusterSecurityGroupIngress',
+ verb='POST',
+ path='/', params=params)
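+
+ # Illustrative sketch: revoke either a CIDR range or an EC2 security
+ # group, never both, mirroring AuthorizeClusterSecurityGroupIngress.
+ # Values are placeholders; `conn` is assumed to be an instance of this
+ # connection class.
+ #
+ #     conn.revoke_cluster_security_group_ingress('securitygroup1',
+ #                                                cidrip='192.168.40.0/24')
+ #     conn.revoke_cluster_security_group_ingress(
+ #         'securitygroup1',
+ #         ec2_security_group_name='myec2group',
+ #         ec2_security_group_owner_id='111122223333')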
+
+ def _make_request(self, action, verb, path, params):
+ params['ContentType'] = 'JSON'
+ response = self.make_request(action=action, verb='POST',
+ path='/', params=params)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ json_body = json.loads(body)
+ fault_name = json_body.get('Error', {}).get('Code', None)
+ exception_class = self._faults.get(fault_name, self.ResponseError)
+ raise exception_class(response.status, response.reason,
+ body=json_body)
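+
+ # Illustrative sketch: errors come back as exceptions looked up in
+ # self._faults by the fault code, falling back to the connection's
+ # ResponseError. A minimal way to handle them (cluster name is a
+ # placeholder, `conn` an instance of this connection class):
+ #
+ #     try:
+ #         conn.describe_clusters(cluster_identifier='no-such-cluster')
+ #     except conn.ResponseError, e:
+ #         print e.status, e.reason, e.body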
diff --git a/boto/route53/connection.py b/boto/route53/connection.py
index 94d81a8c..221b29b2 100644
--- a/boto/route53/connection.py
+++ b/boto/route53/connection.py
@@ -143,7 +143,7 @@ class Route53Connection(AWSAuthConnection):
:type hosted_zone_name: str
:param hosted_zone_name: The fully qualified domain name for the Hosted
- Zone
+ Zone
"""
if hosted_zone_name[-1] != '.':
diff --git a/boto/s3/__init__.py b/boto/s3/__init__.py
index d42a6e74..30d610d2 100644
--- a/boto/s3/__init__.py
+++ b/boto/s3/__init__.py
@@ -71,6 +71,9 @@ def regions():
S3RegionInfo(name='eu-west-1',
endpoint='s3-eu-west-1.amazonaws.com',
connection_cls=S3Connection),
+ S3RegionInfo(name='sa-east-1',
+ endpoint='s3-sa-east-1.amazonaws.com',
+ connection_cls=S3Connection),
]
diff --git a/boto/s3/bucket.py b/boto/s3/bucket.py
index c36a4079..89f0fcb3 100644
--- a/boto/s3/bucket.py
+++ b/boto/s3/bucket.py
@@ -86,14 +86,6 @@ class Bucket(object):
<MfaDelete>%s</MfaDelete>
</VersioningConfiguration>"""
- WebsiteBody = """<?xml version="1.0" encoding="UTF-8"?>
- <WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
- <IndexDocument><Suffix>%s</Suffix></IndexDocument>
- %s
- </WebsiteConfiguration>"""
-
- WebsiteErrorFragment = """<ErrorDocument><Key>%s</Key></ErrorDocument>"""
-
VersionRE = '<Status>([A-Za-z]+)</Status>'
MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
@@ -209,6 +201,7 @@ class Bucket(object):
k.handle_version_headers(response)
k.handle_encryption_headers(response)
k.handle_restore_headers(response)
+ k.handle_addl_headers(response.getheaders())
return k, response
else:
if response.status == 404:
@@ -592,6 +585,8 @@ class Bucket(object):
created or removed and what version_id the delete created
or removed.
"""
+ if not key_name:
+ raise ValueError('Empty key names are not allowed')
return self._delete_key_internal(key_name, headers=headers,
version_id=version_id,
mfa_token=mfa_token,
@@ -620,6 +615,7 @@ class Bucket(object):
k = self.key_class(self)
k.name = key_name
k.handle_version_headers(response)
+ k.handle_addl_headers(response.getheaders())
return k
def copy_key(self, new_key_name, src_bucket_name,
@@ -713,6 +709,7 @@ class Bucket(object):
if hasattr(key, 'Error'):
raise provider.storage_copy_error(key.Code, key.Message, body)
key.handle_version_headers(response)
+ key.handle_addl_headers(response.getheaders())
if preserve_acl:
self.set_xml_acl(acl, new_key_name)
return key
@@ -1252,8 +1249,20 @@ class Bucket(object):
config = website.WebsiteConfiguration(
suffix, error_key, redirect_all_requests_to,
routing_rules)
- body = config.to_xml()
- response = self.connection.make_request('PUT', self.name, data=body,
+ return self.set_website_configuration(config, headers=headers)
+
+ def set_website_configuration(self, config, headers=None):
+ """
+ :type config: boto.s3.website.WebsiteConfiguration
+ :param config: Configuration data
+ """
+ return self.set_website_configuration_xml(config.to_xml(),
+ headers=headers)
+
+
+ def set_website_configuration_xml(self, xml, headers=None):
+ """Upload xml website configuration"""
+ response = self.connection.make_request('PUT', self.name, data=xml,
query_args='website',
headers=headers)
body = response.read()
@@ -1283,6 +1292,16 @@ class Bucket(object):
"""
return self.get_website_configuration_with_xml(headers)[0]
+ def get_website_configuration_obj(self, headers=None):
+ """Get the website configuration as a
+ :class:`boto.s3.website.WebsiteConfiguration` object.
+ """
+ config_xml = self.get_website_configuration_xml(headers=headers)
+ config = website.WebsiteConfiguration()
+ h = handler.XmlHandler(config, self)
+ xml.sax.parseString(config_xml, h)
+ return config
+
def get_website_configuration_with_xml(self, headers=None):
"""
Returns the current status of website configuration on the bucket as
@@ -1300,6 +1319,15 @@ class Bucket(object):
* Key : name of object to serve when an error occurs
2) unparsed XML describing the bucket's website configuration.
"""
+
+ body = self.get_website_configuration_xml(headers=headers)
+ e = boto.jsonresponse.Element()
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e, body
+
+ def get_website_configuration_xml(self, headers=None):
+ """Get raw website configuration xml"""
response = self.connection.make_request('GET', self.name,
query_args='website', headers=headers)
body = response.read()
@@ -1308,11 +1336,7 @@ class Bucket(object):
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
-
- e = boto.jsonresponse.Element()
- h = boto.jsonresponse.XmlHandler(e, None)
- h.parse(body)
- return e, body
+ return body
def delete_website_configuration(self, headers=None):
"""
diff --git a/boto/s3/connection.py b/boto/s3/connection.py
index fcb983b5..7808d530 100644
--- a/boto/s3/connection.py
+++ b/boto/s3/connection.py
@@ -148,17 +148,20 @@ class Location:
class S3Connection(AWSAuthConnection):
- DefaultHost = 's3.amazonaws.com'
+ DefaultHost = boto.config.get('s3', 'host', 's3.amazonaws.com')
+ DefaultCallingFormat = boto.config.get('s3', 'calling_format', 'boto.s3.connection.SubdomainCallingFormat')
QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=DefaultHost, debug=0, https_connection_factory=None,
- calling_format=SubdomainCallingFormat(), path='/',
+ calling_format=DefaultCallingFormat, path='/',
provider='aws', bucket_class=Bucket, security_token=None,
suppress_consec_slashes=True, anon=False,
validate_certs=None):
+ if isinstance(calling_format, str):
+ calling_format=boto.utils.find_class(calling_format)()
self.calling_format = calling_format
self.bucket_class = bucket_class
self.anon = anon
@@ -404,12 +407,49 @@ class S3Connection(AWSAuthConnection):
return rs.owner.id
def get_bucket(self, bucket_name, validate=True, headers=None):
+ """
+ Retrieves a bucket by name.
+
+ If the bucket does not exist, an ``S3ResponseError`` will be raised. If
+ you are unsure if the bucket exists or not, you can use the
+ ``S3Connection.lookup`` method, which will either return a valid bucket
+ or ``None``.
+
+ :type bucket_name: string
+ :param bucket_name: The name of the bucket
+
+ :type headers: dict
+ :param headers: Additional headers to pass along with the request to
+ AWS.
+
+ :type validate: boolean
+ :param validate: If ``True``, it will try to fetch all keys within the
+ given bucket. (Default: ``True``)
+ """
bucket = self.bucket_class(self, bucket_name)
if validate:
bucket.get_all_keys(headers, maxkeys=0)
return bucket
def lookup(self, bucket_name, validate=True, headers=None):
+ """
+ Attempts to get a bucket from S3.
+
+ Works identically to ``S3Connection.get_bucket``, save for that it
+ will return ``None`` if the bucket does not exist instead of throwing
+ an exception.
+
+ :type bucket_name: string
+ :param bucket_name: The name of the bucket
+
+ :type headers: dict
+ :param headers: Additional headers to pass along with the request to
+ AWS.
+
+ :type validate: boolean
+ :param validate: If ``True``, it will try to fetch all keys within the
+ given bucket. (Default: ``True``)
+ """
try:
bucket = self.get_bucket(bucket_name, validate, headers=headers)
except:
@@ -463,6 +503,19 @@ class S3Connection(AWSAuthConnection):
response.status, response.reason, body)
def delete_bucket(self, bucket, headers=None):
+ """
+ Removes an S3 bucket.
+
+ In order to remove the bucket, it must first be empty. If the bucket is
+ not empty, an ``S3ResponseError`` will be raised.
+
+ :type bucket_name: string
+ :param bucket_name: The name of the bucket
+
+ :type headers: dict
+ :param headers: Additional headers to pass along with the request to
+ AWS.
+ """
response = self.make_request('DELETE', bucket, headers=headers)
body = response.read()
if response.status != 204:
diff --git a/boto/s3/key.py b/boto/s3/key.py
index 71013a54..fa9bc61f 100644
--- a/boto/s3/key.py
+++ b/boto/s3/key.py
@@ -21,6 +21,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+import errno
import mimetypes
import os
import re
@@ -32,6 +33,7 @@ import math
import urllib
import boto.utils
from boto.exception import BotoClientError
+from boto.exception import StorageDataError
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
@@ -109,8 +111,6 @@ class Key(object):
self.last_modified = None
self.owner = None
self.storage_class = 'STANDARD'
- self.md5 = None
- self.base64md5 = None
self.path = None
self.resp = None
self.mode = None
@@ -126,6 +126,7 @@ class Key(object):
# restored object.
self.ongoing_restore = None
self.expiry_date = None
+ self.local_hashes = {}
def __repr__(self):
if self.bucket:
@@ -133,18 +134,6 @@ class Key(object):
else:
return '<Key: None,%s>' % self.name
- def __getattr__(self, name):
- if name == 'key':
- return self.name
- else:
- raise AttributeError
-
- def __setattr__(self, name, value):
- if name == 'key':
- self.__dict__['name'] = value
- else:
- self.__dict__[name] = value
-
def __iter__(self):
return self
@@ -155,6 +144,38 @@ class Key(object):
provider = self.bucket.connection.provider
return provider
+ @property
+ def key(self):
+ return self.name
+
+ @key.setter
+ def key(self, value):
+ self.name = value
+
+ @property
+ def md5(self):
+ if 'md5' in self.local_hashes and self.local_hashes['md5']:
+ return binascii.b2a_hex(self.local_hashes['md5'])
+
+ @md5.setter
+ def md5(self, value):
+ if value:
+ self.local_hashes['md5'] = binascii.a2b_hex(value)
+ elif 'md5' in self.local_hashes:
+ self.local_hashes.pop('md5', None)
+
+ @property
+ def base64md5(self):
+ if 'md5' in self.local_hashes and self.local_hashes['md5']:
+ return binascii.b2a_base64(self.local_hashes['md5']).rstrip('\n')
+
+ @base64md5.setter
+ def base64md5(self, value):
+ if value:
+ self.local_hashes['md5'] = binascii.a2b_base64(value)
+ elif 'md5' in self.local_hashes:
+ del self.local_hashes['md5']
+
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
@@ -169,7 +190,8 @@ class Key(object):
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
- self.encrypted = resp.getheader(provider.server_side_encryption_header, None)
+ self.encrypted = resp.getheader(
+ provider.server_side_encryption_header, None)
else:
self.encrypted = None
@@ -202,6 +224,13 @@ class Key(object):
elif key == 'expiry-date':
self.expiry_date = val
+ def handle_addl_headers(self, headers):
+ """
+ Used by Key subclasses to do additional, provider-specific
+ processing of response headers. No-op for this base class.
+ """
+ pass
+
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
@@ -265,6 +294,7 @@ class Key(object):
self.content_disposition = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
+ self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
@@ -295,8 +325,23 @@ class Key(object):
closed = False
- def close(self):
- if self.resp:
+ def close(self, fast=False):
+ """
+ Close this key.
+
+ :type fast: bool
+ :param fast: True if you want the connection to be closed without first
+ reading the content. This should only be used in cases where subsequent
+ calls don't need to return the content from the open HTTP connection.
+ Note: As explained at
+ http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
+ callers must read the whole response before sending a new request to the
+ server. Calling Key.close(fast=True) and making a subsequent request to
+ the server will work because boto will get an httplib exception and
+ close/reopen the connection.
+
+ """
+ if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
@@ -513,7 +558,7 @@ class Key(object):
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
- def set_redirect(self, redirect_location):
+ def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
@@ -524,7 +569,12 @@ class Key(object):
:param redirect_location: The location to redirect.
"""
- headers = {'x-amz-website-redirect-location': redirect_location}
+ if headers is None:
+ headers = {}
+ else:
+ headers = headers.copy()
+
+ headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
@@ -626,20 +676,12 @@ class Key(object):
point point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
- reading, the fp will point at the first byte. See the
+ reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
- :type cb: function
- :param cb: a callback function that will be called to report
- progress on the upload. The callback should accept two
- integer parameters, the first representing the number of
- bytes that have been successfully transmitted to S3 and
- the second representing the size of the to be transmitted
- object.
-
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
@@ -648,6 +690,13 @@ class Key(object):
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
+ :type query_args: string
+ :param query_args: (optional) Arguments to pass in the query string.
+
+ :type chunked_transfer: boolean
+ :param chunked_transfer: (optional) If true, we use chunked
+ Transfer-Encoding.
+
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
@@ -656,6 +705,13 @@ class Key(object):
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
"""
+ self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
+ query_args=query_args,
+ chunked_transfer=chunked_transfer, size=size)
+
+ def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
+ query_args=None, chunked_transfer=False, size=None,
+ hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
@@ -663,6 +719,12 @@ class Key(object):
spos = None
self.read_from_stream = False
+ # If hash_algs is unset and the MD5 hasn't already been computed,
+ # default to an MD5 hash_alg to hash the data on-the-fly.
+ if hash_algs is None and not self.md5:
+ hash_algs = {'md5': md5}
+ digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
+
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
@@ -681,19 +743,13 @@ class Key(object):
http_conn.putheader(key, headers[key])
http_conn.endheaders()
- # Calculate all MD5 checksums on the fly, if not already computed
- if not self.base64md5:
- m = md5()
- else:
- m = None
-
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
- # If the debuglevel < 3 we don't want to show connection
+ # If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
- if getattr(http_conn, 'debuglevel', 0) < 3:
+ if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
@@ -709,7 +765,8 @@ class Key(object):
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
- cb_count = int(math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
+ cb_count = int(
+ math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
@@ -734,8 +791,8 @@ class Key(object):
http_conn.send('\r\n')
else:
http_conn.send(chunk)
- if m:
- m.update(chunk)
+ for alg in digesters:
+ digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
@@ -752,10 +809,8 @@ class Key(object):
self.size = data_len
- if m:
- # Use the chunked trailer for the digest
- hd = m.hexdigest()
- self.md5, self.base64md5 = self.get_md5_from_hexdigest(hd)
+ for alg in digesters:
+ self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
@@ -765,10 +820,10 @@ class Key(object):
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
- response = http_conn.getresponse()
- body = response.read()
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
+ response = http_conn.getresponse()
+ body = response.read()
if ((response.status == 500 or response.status == 503 or
response.getheader('location')) and not chunked_transfer):
# we'll try again.
@@ -826,6 +881,7 @@ class Key(object):
sender=sender,
query_args=query_args)
self.handle_version_headers(resp, force=True)
+ self.handle_addl_headers(resp.getheaders())
def compute_md5(self, fp, size=None):
"""
@@ -838,14 +894,9 @@ class Key(object):
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
- inplace into different parts. Less bytes may be available.
-
- :rtype: tuple
- :return: A tuple containing the hex digest version of the MD5
- hash as the first element and the base64 encoded version
- of the plain digest as the second element.
+ in place into different parts. Less bytes may be available.
"""
- tup = compute_md5(fp, size=size)
+ hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
@@ -853,8 +904,8 @@ class Key(object):
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
- self.size = tup[2]
- return tup[0:2]
+ self.size = data_size
+ return (hex_digest, b64_digest)
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
@@ -1179,14 +1230,15 @@ class Key(object):
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
+
+ :rtype: int
+ :return: The number of bytes written to the key.
"""
- fp = open(filename, 'rb')
- try:
- self.set_contents_from_file(fp, headers, replace, cb, num_cb,
- policy, md5, reduced_redundancy,
- encrypt_key=encrypt_key)
- finally:
- fp.close()
+ with open(filename, 'rb') as fp:
+ return self.set_contents_from_file(fp, headers, replace, cb,
+ num_cb, policy, md5,
+ reduced_redundancy,
+ encrypt_key=encrypt_key)
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
@@ -1297,11 +1349,12 @@ class Key(object):
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
+ hash_algs=None,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
- response_headers=None, query_args=None):
+ response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
@@ -1311,9 +1364,11 @@ class Key(object):
query_args = query_args or []
if torrent:
query_args.append('torrent')
- m = None
- else:
- m = md5()
+
+ if hash_algs is None and not torrent:
+ hash_algs = {'md5': md5}
+ digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
+
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
@@ -1323,7 +1378,8 @@ class Key(object):
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
- query_args.append('%s=%s' % (key, urllib.quote(response_headers[key])))
+ query_args.append('%s=%s' % (
+ key, urllib.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
@@ -1346,22 +1402,27 @@ class Key(object):
cb_count = 0
i = 0
cb(data_len, cb_size)
- for bytes in self:
- fp.write(bytes)
- data_len += len(bytes)
- if m:
- m.update(bytes)
- if cb:
- if cb_size > 0 and data_len >= cb_size:
- break
- i += 1
- if i == cb_count or cb_count == -1:
- cb(data_len, cb_size)
- i = 0
+ try:
+ for bytes in self:
+ fp.write(bytes)
+ data_len += len(bytes)
+ for alg in digesters:
+ digesters[alg].update(bytes)
+ if cb:
+ if cb_size > 0 and data_len >= cb_size:
+ break
+ i += 1
+ if i == cb_count or cb_count == -1:
+ cb(data_len, cb_size)
+ i = 0
+ except IOError as e:
+ if e.errno == errno.ENOSPC:
+ raise StorageDataError('Out of space for destination file '
+ '%s' % fp.name)
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
- if m:
- self.md5 = m.hexdigest()
+ for alg in digesters:
+ self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
@@ -1670,7 +1731,8 @@ class Key(object):
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
- metadata=metadata, preserve_acl=preserve_acl)
+ metadata=metadata, preserve_acl=preserve_acl,
+ headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
diff --git a/boto/s3/keyfile.py b/boto/s3/keyfile.py
index 26c4ea98..4245413d 100644
--- a/boto/s3/keyfile.py
+++ b/boto/s3/keyfile.py
@@ -27,6 +27,7 @@ in a Key open for reading.
"""
import os
+from boto.exception import StorageResponseError
class KeyFile():
@@ -48,16 +49,15 @@ class KeyFile():
return self.location
def seek(self, pos, whence=os.SEEK_SET):
- # Note: This seek implementation is very inefficient if you have a Key
- # positioned to the start of a very large object and then call seek(0,
- # os.SEEK_END), because it will first read all the data from the open socket
- # before performing the range GET to position to the end of the file.
- self.key.close()
+ self.key.close(fast=True)
if whence == os.SEEK_END:
# We need special handling for this case because sending an HTTP range GET
# with EOF for the range start would cause an invalid range error. Instead
# we position to one before EOF (plus pos) and then read one byte to
# position at EOF.
+ if self.key.size == 0:
+ # Don't try to seek with an empty key.
+ return
pos = self.key.size + pos - 1
if pos < 0:
raise IOError("Invalid argument")
@@ -75,7 +75,7 @@ class KeyFile():
raise IOError('Invalid whence param (%d) passed to seek' % whence)
try:
self.key.open_read(headers={"Range": "bytes=%d-" % pos})
- except GSResponseError as e:
+ except StorageResponseError as e:
# 416 Invalid Range means that the given starting byte was past the end
# of file. We catch this because the Python file interface allows silently
# seeking past the end of the file.
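
The reworked seek() closes the key without draining the socket and re-opens it with an HTTP Range header; the SEEK_END branch cannot request a range starting at EOF, so it targets one byte before the end (and simply returns for empty objects). A rough sketch of how the range start could be derived, with the SEEK_CUR branch assumed from the standard file interface rather than shown in the hunk above:

import os

def compute_range_start(pos, whence, size, current_location):
    # Same whence handling as the seek() above, minus the actual HTTP calls.
    if whence == os.SEEK_END:
        if size == 0:
            return None  # nothing to seek to in an empty object
        start = size + pos - 1  # one byte before EOF keeps the range valid
    elif whence == os.SEEK_CUR:
        start = current_location + pos
    elif whence == os.SEEK_SET:
        start = pos
    else:
        raise IOError('Invalid whence param (%d) passed to seek' % whence)
    if start < 0:
        raise IOError('Invalid argument')
    return start

print compute_range_start(-1, os.SEEK_END, 1048576, 0)   # 1048574
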
diff --git a/boto/s3/resumable_download_handler.py b/boto/s3/resumable_download_handler.py
index 32f70416..06d179f0 100644
--- a/boto/s3/resumable_download_handler.py
+++ b/boto/s3/resumable_download_handler.py
@@ -90,7 +90,7 @@ class ResumableDownloadHandler(object):
Handler for resumable downloads.
"""
- ETAG_REGEX = '([a-z0-9]{32})\n'
+ MIN_ETAG_LEN = 5
RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
socket.gaierror)
@@ -127,11 +127,11 @@ class ResumableDownloadHandler(object):
f = None
try:
f = open(self.tracker_file_name, 'r')
- etag_line = f.readline()
- m = re.search(self.ETAG_REGEX, etag_line)
- if m:
- self.etag_value_for_current_download = m.group(1)
- else:
+ self.etag_value_for_current_download = f.readline().rstrip('\n')
+ # We used to match an MD5-based regex to ensure that the etag was
+ # read correctly. Since ETags need not be MD5s, we now do a simple
+ # length sanity check instead.
+ if len(self.etag_value_for_current_download) < self.MIN_ETAG_LEN:
print('Couldn\'t read etag in tracker file (%s). Restarting '
'download from scratch.' % self.tracker_file_name)
except IOError, e:
@@ -173,7 +173,7 @@ class ResumableDownloadHandler(object):
os.unlink(self.tracker_file_name)
def _attempt_resumable_download(self, key, fp, headers, cb, num_cb,
- torrent, version_id):
+ torrent, version_id, hash_algs):
"""
Attempts a resumable download.
@@ -213,11 +213,11 @@ class ResumableDownloadHandler(object):
# Disable AWSAuthConnection-level retry behavior, since that would
# cause downloads to restart from scratch.
key.get_file(fp, headers, cb, num_cb, torrent, version_id,
- override_num_retries=0)
+ override_num_retries=0, hash_algs=hash_algs)
fp.flush()
def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,
- version_id=None):
+ version_id=None, hash_algs=None):
"""
Retrieves a file from a Key
:type key: :class:`boto.s3.key.Key` or subclass
@@ -249,6 +249,11 @@ class ResumableDownloadHandler(object):
:type version_id: string
:param version_id: The version ID (optional)
+ :type hash_algs: dictionary
+ :param hash_algs: (optional) Dictionary of hash algorithms and
+ corresponding hashing class that implements update() and digest().
+ Defaults to {'md5': hashlib/md5.md5}.
+
Raises ResumableDownloadException if a problem occurs during
the transfer.
"""
@@ -267,7 +272,7 @@ class ResumableDownloadHandler(object):
had_file_bytes_before_attempt = get_cur_file_size(fp)
try:
self._attempt_resumable_download(key, fp, headers, cb, num_cb,
- torrent, version_id)
+ torrent, version_id, hash_algs)
# Download succeeded, so remove the tracker file (if we have one).
self._remove_tracker_file()
# Previously, check_final_md5() was called here to validate
@@ -286,7 +291,7 @@ class ResumableDownloadHandler(object):
# so we need to close and reopen the key before resuming
# the download.
key.get_file(fp, headers, cb, num_cb, torrent, version_id,
- override_num_retries=0)
+ override_num_retries=0, hash_algs=hash_algs)
except ResumableDownloadException, e:
if (e.disposition ==
ResumableTransferDisposition.ABORT_CUR_PROCESS):
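
Alongside the new hash_algs pass-through, the tracker-file check above is relaxed: the etag no longer has to look like a 32-character MD5 hex digest, only pass a length sanity check. A small sketch of the relaxed validation (read_tracker_etag is a hypothetical stand-alone helper):

def read_tracker_etag(path, min_etag_len=5):
    # Accept any etag of plausible length; ETags are no longer assumed to be
    # MD5 hex digests.
    with open(path, 'r') as f:
        etag = f.readline().rstrip('\n')
    if len(etag) < min_etag_len:
        return None  # caller should restart the download from scratch
    return etag
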
diff --git a/boto/s3/website.py b/boto/s3/website.py
index 75908e7b..c307f3e9 100644
--- a/boto/s3/website.py
+++ b/boto/s3/website.py
@@ -51,34 +51,70 @@ class WebsiteConfiguration(object):
and redirects that apply when the conditions are met.
"""
- WEBSITE_SKELETON = """<?xml version="1.0" encoding="UTF-8"?>
- <WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
- %(body)s
- </WebsiteConfiguration>"""
-
def __init__(self, suffix=None, error_key=None,
redirect_all_requests_to=None, routing_rules=None):
self.suffix = suffix
self.error_key = error_key
self.redirect_all_requests_to = redirect_all_requests_to
- self.routing_rules = routing_rules
+ if routing_rules is not None:
+ self.routing_rules = routing_rules
+ else:
+ self.routing_rules = RoutingRules()
+
+ def startElement(self, name, attrs, connection):
+ if name == 'RoutingRules':
+ self.routing_rules = RoutingRules()
+ return self.routing_rules
+ elif name == 'IndexDocument':
+ return _XMLKeyValue([('Suffix', 'suffix')], container=self)
+ elif name == 'ErrorDocument':
+ return _XMLKeyValue([('Key', 'error_key')], container=self)
+
+ def endElement(self, name, value, connection):
+ pass
def to_xml(self):
- body_parts = []
+ parts = ['<?xml version="1.0" encoding="UTF-8"?>',
+ '<WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">']
if self.suffix is not None:
- body_parts.append(tag('IndexDocument', tag('Suffix', self.suffix)))
+ parts.append(tag('IndexDocument', tag('Suffix', self.suffix)))
if self.error_key is not None:
- body_parts.append(tag('ErrorDocument', tag('Key', self.error_key)))
+ parts.append(tag('ErrorDocument', tag('Key', self.error_key)))
if self.redirect_all_requests_to is not None:
- body_parts.append(self.redirect_all_requests_to.to_xml())
- if self.routing_rules is not None:
- body_parts.append(self.routing_rules.to_xml())
- body = '\n'.join(body_parts)
- return self.WEBSITE_SKELETON % {'body': body}
+ parts.append(self.redirect_all_requests_to.to_xml())
+ if self.routing_rules:
+ parts.append(self.routing_rules.to_xml())
+ parts.append('</WebsiteConfiguration>')
+ return ''.join(parts)
+
+
+class _XMLKeyValue(object):
+ def __init__(self, translator, container=None):
+ self.translator = translator
+ if container:
+ self.container = container
+ else:
+ self.container = self
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ for xml_key, attr_name in self.translator:
+ if name == xml_key:
+ setattr(self.container, attr_name, value)
+
+ def to_xml(self):
+ parts = []
+ for xml_key, attr_name in self.translator:
+ content = getattr(self.container, attr_name)
+ if content is not None:
+ parts.append(tag(xml_key, content))
+ return ''.join(parts)
-class RedirectLocation(object):
+
+class RedirectLocation(_XMLKeyValue):
"""Specify redirect behavior for every request to a bucket's endpoint.
:ivar hostname: Name of the host where requests will be redirected.
@@ -87,23 +123,21 @@ class RedirectLocation(object):
The default is the protocol that is used in the original request.
"""
+ TRANSLATOR = [('HostName', 'hostname'),
+ ('Protocol', 'protocol'),
+ ]
- def __init__(self, hostname, protocol=None):
+ def __init__(self, hostname=None, protocol=None):
self.hostname = hostname
self.protocol = protocol
+ super(RedirectLocation, self).__init__(self.TRANSLATOR)
def to_xml(self):
- inner_text = []
- if self.hostname is not None:
- inner_text.append(tag('HostName', self.hostname))
- if self.protocol is not None:
- inner_text.append(tag('Protocol', self.protocol))
- return tag('RedirectAllRequestsTo', '\n'.join(inner_text))
+ return tag('RedirectAllRequestsTo',
+ super(RedirectLocation, self).to_xml())
-class RoutingRules(object):
- def __init__(self):
- self._rules = []
+class RoutingRules(list):
def add_rule(self, rule):
"""
@@ -115,12 +149,24 @@ class RoutingRules(object):
so that it can chain subsequent calls.
"""
- self._rules.append(rule)
+ self.append(rule)
return self
+ def startElement(self, name, attrs, connection):
+ if name == 'RoutingRule':
+ rule = RoutingRule(Condition(), Redirect())
+ self.add_rule(rule)
+ return rule
+
+ def endElement(self, name, value, connection):
+ pass
+
+ def __repr__(self):
+ return "RoutingRules(%s)" % super(RoutingRules, self).__repr__()
+
def to_xml(self):
inner_text = []
- for rule in self._rules:
+ for rule in self:
inner_text.append(rule.to_xml())
return tag('RoutingRules', '\n'.join(inner_text))
@@ -141,13 +187,26 @@ class RoutingRule(object):
of an error, you can specify a different error code to return.
"""
- def __init__(self, condition, redirect):
+ def __init__(self, condition=None, redirect=None):
self.condition = condition
self.redirect = redirect
+ def startElement(self, name, attrs, connection):
+ if name == 'Condition':
+ return self.condition
+ elif name == 'Redirect':
+ return self.redirect
+
+ def endElement(self, name, value, connection):
+ pass
+
def to_xml(self):
- return tag('RoutingRule',
- self.condition.to_xml() + self.redirect.to_xml())
+ parts = []
+ if self.condition:
+ parts.append(self.condition.to_xml())
+ if self.redirect:
+ parts.append(self.redirect.to_xml())
+ return tag('RoutingRule', '\n'.join(parts))
@classmethod
def when(cls, key_prefix=None, http_error_code=None):
@@ -164,9 +223,8 @@ class RoutingRule(object):
return self
-class Condition(object):
+class Condition(_XMLKeyValue):
"""
-
:ivar key_prefix: The object key name prefix when the redirect is applied.
For example, to redirect requests for ExamplePage.html, the key prefix
will be ExamplePage.html. To redirect request for all pages with the
@@ -178,24 +236,22 @@ class Condition(object):
specified redirect is applied.
"""
+ TRANSLATOR = [
+ ('KeyPrefixEquals', 'key_prefix'),
+ ('HttpErrorCodeReturnedEquals', 'http_error_code'),
+ ]
+
def __init__(self, key_prefix=None, http_error_code=None):
self.key_prefix = key_prefix
self.http_error_code = http_error_code
+ super(Condition, self).__init__(self.TRANSLATOR)
def to_xml(self):
- inner_text = []
- if self.key_prefix is not None:
- inner_text.append(tag('KeyPrefixEquals', self.key_prefix))
- if self.http_error_code is not None:
- inner_text.append(
- tag('HttpErrorCodeReturnedEquals',
- self.http_error_code))
- return tag('Condition', '\n'.join(inner_text))
+ return tag('Condition', super(Condition, self).to_xml())
-class Redirect(object):
+class Redirect(_XMLKeyValue):
"""
-
:ivar hostname: The host name to use in the redirect request.
:ivar protocol: The protocol to use in the redirect request. Can be either
@@ -213,6 +269,15 @@ class Redirect(object):
:ivar http_redirect_code: The HTTP redirect code to use on the response.
"""
+
+ TRANSLATOR = [
+ ('Protocol', 'protocol'),
+ ('HostName', 'hostname'),
+ ('ReplaceKeyWith', 'replace_key'),
+ ('ReplaceKeyPrefixWith', 'replace_key_prefix'),
+ ('HttpRedirectCode', 'http_redirect_code'),
+ ]
+
def __init__(self, hostname=None, protocol=None, replace_key=None,
replace_key_prefix=None, http_redirect_code=None):
self.hostname = hostname
@@ -220,18 +285,9 @@ class Redirect(object):
self.replace_key = replace_key
self.replace_key_prefix = replace_key_prefix
self.http_redirect_code = http_redirect_code
+ super(Redirect, self).__init__(self.TRANSLATOR)
def to_xml(self):
- inner_text = []
- if self.hostname is not None:
- inner_text.append(tag('HostName', self.hostname))
- if self.protocol is not None:
- inner_text.append(tag('Protocol', self.protocol))
- if self.replace_key is not None:
- inner_text.append(tag('ReplaceKey', self.replace_key))
- if self.replace_key_prefix is not None:
- inner_text.append(tag('ReplaceKeyPrefixWith',
- self.replace_key_prefix))
- if self.http_redirect_code is not None:
- inner_text.append(tag('HttpRedirectCode', self.http_redirect_code))
- return tag('Redirect', '\n'.join(inner_text))
+ return tag('Redirect', super(Redirect, self).to_xml())
+
+
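
Taken together, the reworked website classes can now both parse a website configuration (via the startElement/endElement handlers) and emit it again through to_xml(). A hedged construction example that uses only the constructors shown above (the hostname, suffix, and key names are placeholders):

from boto.s3.website import (WebsiteConfiguration, RoutingRules, RoutingRule,
                             Condition, Redirect)

rules = RoutingRules().add_rule(
    RoutingRule(condition=Condition(key_prefix='docs/'),
                redirect=Redirect(hostname='www.example.com',
                                  replace_key_prefix='documentation/')))
config = WebsiteConfiguration(suffix='index.html', error_key='error.html',
                              routing_rules=rules)
print config.to_xml()
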
diff --git a/boto/sns/connection.py b/boto/sns/connection.py
index 352f730f..1f29c195 100644
--- a/boto/sns/connection.py
+++ b/boto/sns/connection.py
@@ -21,6 +21,7 @@
# IN THE SOFTWARE.
import uuid
+import hashlib
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
@@ -309,6 +310,8 @@ class SNSConnection(AWSQueryConnection):
"""
t = queue.id.split('/')
q_arn = queue.arn
+ sid = hashlib.md5(topic + q_arn).hexdigest()
+ sid_exists = False
resp = self.subscribe(topic, 'sqs', q_arn)
attr = queue.get_attributes('Policy')
if 'Policy' in attr:
@@ -319,13 +322,18 @@ class SNSConnection(AWSQueryConnection):
policy['Version'] = '2008-10-17'
if 'Statement' not in policy:
policy['Statement'] = []
- statement = {'Action': 'SQS:SendMessage',
- 'Effect': 'Allow',
- 'Principal': {'AWS': '*'},
- 'Resource': q_arn,
- 'Sid': str(uuid.uuid4()),
- 'Condition': {'StringLike': {'aws:SourceArn': topic}}}
- policy['Statement'].append(statement)
+ # See if a Statement with the Sid exists already.
+ for s in policy['Statement']:
+ if s['Sid'] == sid:
+ sid_exists = True
+ if not sid_exists:
+ statement = {'Action': 'SQS:SendMessage',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': '*'},
+ 'Resource': q_arn,
+ 'Sid': sid,
+ 'Condition': {'StringLike': {'aws:SourceArn': topic}}}
+ policy['Statement'].append(statement)
queue.set_attribute('Policy', json.dumps(policy))
return resp
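
The subscribe-queue change derives the policy statement's Sid from the topic and queue ARNs instead of a random UUID, so repeated subscriptions no longer pile up duplicate statements. A minimal sketch of that idempotency pattern (the ARNs and account number are placeholders, and add_send_permission is hypothetical):

import hashlib
import json

def add_send_permission(policy, topic_arn, queue_arn):
    # Deterministic Sid: the same topic/queue pair always maps to the same id.
    sid = hashlib.md5(topic_arn + queue_arn).hexdigest()
    statements = policy.setdefault('Statement', [])
    if not any(s.get('Sid') == sid for s in statements):
        statements.append({'Action': 'SQS:SendMessage',
                           'Effect': 'Allow',
                           'Principal': {'AWS': '*'},
                           'Resource': queue_arn,
                           'Sid': sid,
                           'Condition': {'StringLike': {'aws:SourceArn': topic_arn}}})
    return policy

policy = {'Version': '2008-10-17'}
add_send_permission(policy, 'arn:aws:sns:us-east-1:123456789012:mytopic',
                    'arn:aws:sqs:us-east-1:123456789012:myqueue')
# Calling it a second time leaves the policy unchanged.
add_send_permission(policy, 'arn:aws:sns:us-east-1:123456789012:mytopic',
                    'arn:aws:sqs:us-east-1:123456789012:myqueue')
print json.dumps(policy)
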
diff --git a/boto/sqs/connection.py b/boto/sqs/connection.py
index d993064a..e076de12 100644
--- a/boto/sqs/connection.py
+++ b/boto/sqs/connection.py
@@ -100,12 +100,8 @@ class SQSConnection(AWSQueryConnection):
:param queue: The SQS queue to be deleted
:type force_deletion: Boolean
- :param force_deletion: Normally, SQS will not delete a queue
- that contains messages. However, if the force_deletion
- argument is True, the queue will be deleted regardless of
- whether there are messages in the queue or not. USE WITH
- CAUTION. This will delete all messages in the queue as
- well.
+ :param force_deletion: A deprecated parameter that is no longer used by
+ SQS's API.
:rtype: bool
:return: True if the command succeeded, False otherwise
diff --git a/boto/sqs/queue.py b/boto/sqs/queue.py
index e20b6898..603faaae 100644
--- a/boto/sqs/queue.py
+++ b/boto/sqs/queue.py
@@ -159,8 +159,8 @@ class Queue:
:type action_name: str or unicode
:param action_name: The action. Valid choices are:
- *|SendMessage|ReceiveMessage|DeleteMessage|
- ChangeMessageVisibility|GetQueueAttributes
+ SendMessage|ReceiveMessage|DeleteMessage|
+ ChangeMessageVisibility|GetQueueAttributes|*
:rtype: bool
:return: True if successful, False otherwise.
diff --git a/boto/storage_uri.py b/boto/storage_uri.py
index 80dcd1b3..dbccc13c 100755
--- a/boto/storage_uri.py
+++ b/boto/storage_uri.py
@@ -23,6 +23,7 @@
import boto
import os
import sys
+import textwrap
from boto.s3.deletemarker import DeleteMarker
from boto.exception import BotoClientError
from boto.exception import InvalidUriError
@@ -66,14 +67,11 @@ class StorageUri(object):
def check_response(self, resp, level, uri):
if resp is None:
- raise InvalidUriError('Attempt to get %s for "%s" failed.\nThis '
- 'can happen if the URI refers to a non-'
- 'existent object or if you meant to\noperate '
- 'on a directory (e.g., leaving off -R option '
- 'on gsutil cp, mv, or ls of a\nbucket). If a '
- 'version-ful object was specified, you may '
- 'have neglected to\nuse a -v flag.' %
- (level, uri))
+ raise InvalidUriError('\n'.join(textwrap.wrap(
+ 'Attempt to get %s for "%s" failed. This can happen if '
+ 'the URI refers to a non-existent object or if you meant to '
+ 'operate on a directory (e.g., leaving off -R option on gsutil '
+ 'cp, mv, or ls of a bucket)' % (level, uri), 80)))
def _check_bucket_uri(self, function_name):
if issubclass(type(self), BucketStorageUri) and not self.bucket_name:
@@ -146,19 +144,6 @@ class StorageUri(object):
and ((self.version_id is not None)
or (self.generation is not None)))
- def versioned_uri_str(self):
- """Returns a versionful URI string."""
- version_desc = ''
- if not issubclass(type(self), BucketStorageUri):
- pass
- elif self.version_id is not None:
- version_desc += '#' + self.version_id
- elif self.generation is not None:
- version_desc += '#' + str(self.generation)
- if self.meta_generation is not None:
- version_desc += '.' + str(self.meta_generation)
- return self.uri + version_desc
-
def delete_key(self, validate=False, headers=None, version_id=None,
mfa_token=None):
self._check_object_uri('delete_key')
@@ -210,12 +195,20 @@ class StorageUri(object):
def get_contents_to_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None,
- res_download_handler=None, response_headers=None):
+ res_download_handler=None, response_headers=None,
+ hash_algs=None):
self._check_object_uri('get_contents_to_file')
key = self.get_key(None, headers)
self.check_response(key, 'key', self.uri)
- key.get_contents_to_file(fp, headers, cb, num_cb, torrent, version_id,
- res_download_handler, response_headers)
+ if hash_algs:
+ key.get_contents_to_file(fp, headers, cb, num_cb, torrent,
+ version_id, res_download_handler,
+ response_headers,
+ hash_algs=hash_algs)
+ else:
+ key.get_contents_to_file(fp, headers, cb, num_cb, torrent,
+ version_id, res_download_handler,
+ response_headers)
def get_contents_as_string(self, validate=False, headers=None, cb=None,
num_cb=10, torrent=False, version_id=None):
@@ -249,8 +242,7 @@ class BucketStorageUri(StorageUri):
def __init__(self, scheme, bucket_name=None, object_name=None,
debug=0, connection_args=None, suppress_consec_slashes=True,
- version_id=None, generation=None, meta_generation=None,
- is_latest=False):
+ version_id=None, generation=None, is_latest=False):
"""Instantiate a BucketStorageUri from scheme,bucket,object tuple.
@type scheme: string
@@ -258,7 +250,7 @@ class BucketStorageUri(StorageUri):
@type bucket_name: string
@param bucket_name: bucket name
@type object_name: string
- @param object_name: object name
+ @param object_name: object name, excluding generation/version.
@type debug: int
@param debug: debug level to pass in to connection (range 0..2)
@type connection_args: map
@@ -269,38 +261,66 @@ class BucketStorageUri(StorageUri):
consecutive slashes will be suppressed in key paths.
@param version_id: Object version id (S3-specific).
@param generation: Object generation number (GCS-specific).
- @param meta_generation: Object meta-generation number (GCS-specific).
@param is_latest: boolean indicating that a versioned object is the
current version
After instantiation the components are available in the following
- fields: uri, scheme, bucket_name, object_name.
+ fields: scheme, bucket_name, object_name, version_id, generation,
+ is_latest, versionless_uri, version_specific_uri, uri.
+ Note: If instantiated without version info, the string representation
+ for a URI stays versionless; similarly, if instantiated with version
+ info, the string representation for a URI stays version-specific. If you
+ call one of the uri.set_contents_from_xyz() methods, a specific object
+ version will be created, and its version-specific URI string can be
+ retrieved from version_specific_uri even if the URI was instantiated
+ without version info.
"""
self.scheme = scheme
self.bucket_name = bucket_name
self.object_name = object_name
+ self.debug = debug
if connection_args:
self.connection_args = connection_args
self.suppress_consec_slashes = suppress_consec_slashes
- if self.bucket_name and self.object_name:
- self.uri = ('%s://%s/%s' % (self.scheme, self.bucket_name,
- self.object_name))
- elif self.bucket_name:
- self.uri = ('%s://%s/' % (self.scheme, self.bucket_name))
- else:
- self.uri = ('%s://' % self.scheme)
- self.debug = debug
-
self.version_id = version_id
self.generation = generation and int(generation)
- self.meta_generation = meta_generation and int(meta_generation)
self.is_latest = is_latest
+ self.is_version_specific = bool(self.generation) or bool(version_id)
+ self._build_uri_strings()
+
+ def _build_uri_strings(self):
+ if self.bucket_name and self.object_name:
+ self.versionless_uri = '%s://%s/%s' % (self.scheme, self.bucket_name,
+ self.object_name)
+ if self.generation:
+ self.version_specific_uri = '%s#%s' % (self.versionless_uri,
+ self.generation)
+ elif self.version_id:
+ self.version_specific_uri = '%s#%s' % (
+ self.versionless_uri, self.version_id)
+ if self.is_version_specific:
+ self.uri = self.version_specific_uri
+ else:
+ self.uri = self.versionless_uri
+ elif self.bucket_name:
+ self.uri = ('%s://%s/' % (self.scheme, self.bucket_name))
+ else:
+ self.uri = ('%s://' % self.scheme)
def _update_from_key(self, key):
- self.version_id = getattr(key, 'version_id', None)
- self.generation = getattr(key, 'generation', None)
- self.meta_generation = getattr(key, 'meta_generation', None)
+ self._update_from_values(
+ getattr(key, 'version_id', None),
+ getattr(key, 'generation', None),
+ getattr(key, 'is_latest', None),
+ getattr(key, 'md5', None))
+
+ def _update_from_values(self, version_id, generation, is_latest, md5):
+ self.version_id = version_id
+ self.generation = generation
+ self.is_latest = is_latest
+ self._build_uri_strings()
+ self.md5 = md5
def get_key(self, validate=False, headers=None, version_id=None):
self._check_object_uri('get_key')
@@ -340,8 +360,9 @@ class BucketStorageUri(StorageUri):
suppress_consec_slashes=self.suppress_consec_slashes)
def clone_replace_key(self, key):
- """Instantiate a BucketStorageUri from a Key object while maintaining
- debug and suppress_consec_slashes values.
+ """Instantiate a BucketStorageUri from the current BucketStorageUri, by
+ replacing the object name with the object name and other metadata found
+ in the given Key object (including generation).
@type key: Key
@param key: key for the new StorageUri to represent
@@ -349,14 +370,11 @@ class BucketStorageUri(StorageUri):
self._check_bucket_uri('clone_replace_key')
version_id = None
generation = None
- meta_generation = None
is_latest = False
if hasattr(key, 'version_id'):
version_id = key.version_id
if hasattr(key, 'generation'):
generation = key.generation
- if hasattr(key, 'meta_generation'):
- meta_generation = key.meta_generation
if hasattr(key, 'is_latest'):
is_latest = key.is_latest
@@ -368,7 +386,6 @@ class BucketStorageUri(StorageUri):
suppress_consec_slashes=self.suppress_consec_slashes,
version_id=version_id,
generation=generation,
- meta_generation=meta_generation,
is_latest=is_latest)
def get_acl(self, validate=False, headers=None, version_id=None):
@@ -509,7 +526,7 @@ class BucketStorageUri(StorageUri):
def names_bucket(self):
"""Returns True if this URI names a bucket."""
- return self.names_container()
+ return bool(self.bucket_name) and bool(not self.object_name)
def names_file(self):
"""Returns True if this URI names a file."""
@@ -551,27 +568,48 @@ class BucketStorageUri(StorageUri):
return provider
def set_acl(self, acl_or_str, key_name='', validate=False, headers=None,
- version_id=None):
- """sets or updates a bucket's acl"""
+ version_id=None, if_generation=None, if_metageneration=None):
+ """Sets or updates a bucket's ACL."""
self._check_bucket_uri('set_acl')
key_name = key_name or self.object_name or ''
bucket = self.get_bucket(validate, headers)
if self.generation:
bucket.set_acl(
- acl_or_str, key_name, headers, generation=self.generation)
+ acl_or_str, key_name, headers, generation=self.generation,
+ if_generation=if_generation, if_metageneration=if_metageneration)
else:
version_id = version_id or self.version_id
bucket.set_acl(acl_or_str, key_name, headers, version_id)
+ def set_xml_acl(self, xmlstring, key_name='', validate=False, headers=None,
+ version_id=None, if_generation=None, if_metageneration=None):
+ """Sets or updates a bucket's ACL with an XML string."""
+ self._check_bucket_uri('set_xml_acl')
+ key_name = key_name or self.object_name or ''
+ bucket = self.get_bucket(validate, headers)
+ if self.generation:
+ bucket.set_xml_acl(
+ xmlstring, key_name, headers, generation=self.generation,
+ if_generation=if_generation, if_metageneration=if_metageneration)
+ else:
+ version_id = version_id or self.version_id
+ bucket.set_xml_acl(xmlstring, key_name, headers,
+ version_id=version_id)
+
+ def set_def_xml_acl(self, xmlstring, validate=False, headers=None):
+ """Sets or updates a bucket's default object ACL with an XML string."""
+ self._check_bucket_uri('set_def_xml_acl')
+ self.get_bucket(validate, headers).set_def_xml_acl(xmlstring, headers)
+
def set_def_acl(self, acl_or_str, validate=False, headers=None,
version_id=None):
- """sets or updates a bucket's default object acl"""
+ """Sets or updates a bucket's default object ACL."""
self._check_bucket_uri('set_def_acl')
self.get_bucket(validate, headers).set_def_acl(acl_or_str, headers)
def set_canned_acl(self, acl_str, validate=False, headers=None,
version_id=None):
- """sets or updates a bucket's acl to a predefined (canned) value"""
+ """Sets or updates a bucket's acl to a predefined (canned) value."""
self._check_object_uri('set_canned_acl')
self._warn_about_args('set_canned_acl', version_id=version_id)
key = self.get_key(validate, headers)
@@ -580,8 +618,8 @@ class BucketStorageUri(StorageUri):
def set_def_canned_acl(self, acl_str, validate=False, headers=None,
version_id=None):
- """sets or updates a bucket's default object acl to a predefined
- (canned) value"""
+ """Sets or updates a bucket's default object acl to a predefined
+ (canned) value."""
self._check_bucket_uri('set_def_canned_acl ')
key = self.get_key(validate, headers)
self.check_response(key, 'key', self.uri)
@@ -622,6 +660,9 @@ class BucketStorageUri(StorageUri):
result = key.set_contents_from_file(
fp, headers, replace, cb, num_cb, policy, md5, size=size,
rewind=rewind, res_upload_handler=res_upload_handler)
+ if res_upload_handler:
+ self._update_from_values(None, res_upload_handler.generation,
+ None, md5)
else:
self._warn_about_args('set_contents_from_file',
res_upload_handler=res_upload_handler)
@@ -645,26 +686,22 @@ class BucketStorageUri(StorageUri):
src_version_id=None, storage_class='STANDARD',
preserve_acl=False, encrypt_key=False, headers=None,
query_args=None, src_generation=None):
+ """Returns newly created key."""
self._check_object_uri('copy_key')
dst_bucket = self.get_bucket(validate=False, headers=headers)
if src_generation:
- dst_bucket.copy_key(new_key_name=self.object_name,
- src_bucket_name=src_bucket_name,
- src_key_name=src_key_name, metadata=metadata,
- storage_class=storage_class,
- preserve_acl=preserve_acl,
- encrypt_key=encrypt_key,
- headers=headers, query_args=query_args,
- src_generation=src_generation)
+ return dst_bucket.copy_key(new_key_name=self.object_name,
+ src_bucket_name=src_bucket_name,
+ src_key_name=src_key_name, metadata=metadata,
+ storage_class=storage_class, preserve_acl=preserve_acl,
+ encrypt_key=encrypt_key, headers=headers, query_args=query_args,
+ src_generation=src_generation)
else:
- dst_bucket.copy_key(new_key_name=self.object_name,
- src_bucket_name=src_bucket_name,
- src_key_name=src_key_name, metadata=metadata,
- src_version_id=src_version_id,
- storage_class=storage_class,
- preserve_acl=preserve_acl,
- encrypt_key=encrypt_key,
- headers=headers, query_args=query_args)
+ return dst_bucket.copy_key(new_key_name=self.object_name,
+ src_bucket_name=src_bucket_name, src_key_name=src_key_name,
+ metadata=metadata, src_version_id=src_version_id,
+ storage_class=storage_class, preserve_acl=preserve_acl,
+ encrypt_key=encrypt_key, headers=headers, query_args=query_args)
def enable_logging(self, target_bucket, target_prefix=None, validate=False,
headers=None, version_id=None):
@@ -677,8 +714,14 @@ class BucketStorageUri(StorageUri):
bucket = self.get_bucket(validate, headers)
bucket.disable_logging(headers=headers)
+ def get_logging_config(self, validate=False, headers=None, version_id=None):
+ self._check_bucket_uri('get_logging_config')
+ bucket = self.get_bucket(validate, headers)
+ return bucket.get_logging_config(headers=headers)
+
def set_website_config(self, main_page_suffix=None, error_key=None,
validate=False, headers=None):
+ self._check_bucket_uri('set_website_config')
bucket = self.get_bucket(validate, headers)
if not (main_page_suffix or error_key):
bucket.delete_website_configuration(headers)
@@ -686,10 +729,12 @@ class BucketStorageUri(StorageUri):
bucket.configure_website(main_page_suffix, error_key, headers)
def get_website_config(self, validate=False, headers=None):
+ self._check_bucket_uri('get_website_config')
bucket = self.get_bucket(validate, headers)
- return bucket.get_website_configuration_with_xml(headers)
+ return bucket.get_website_configuration(headers)
def get_versioning_config(self, headers=None):
+ self._check_bucket_uri('get_versioning_config')
bucket = self.get_bucket(False, headers)
return bucket.get_versioning_status(headers)
@@ -705,6 +750,15 @@ class BucketStorageUri(StorageUri):
preserve_acl,
headers=headers)
+ def compose(self, components, content_type=None, headers=None):
+ self._check_object_uri('compose')
+ component_keys = []
+ for suri in components:
+ component_keys.append(suri.new_key())
+ component_keys[-1].generation = suri.generation
+ self.new_key().compose(
+ component_keys, content_type=content_type, headers=headers)
+
def exists(self, headers=None):
"""Returns True if the object exists or False if it doesn't"""
if not self.object_name:
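
The new _build_uri_strings() keeps both a versionless and a version-specific string for object URIs and picks self.uri based on whether version information was supplied. A small sketch of the naming scheme, using a hypothetical bucket, object, and generation number:

def build_uri_strings(scheme, bucket, obj, generation=None, version_id=None):
    # Mirrors the scheme described above: '#<generation>' for GCS objects,
    # '#<version_id>' for S3 objects, otherwise just the versionless form.
    versionless = '%s://%s/%s' % (scheme, bucket, obj)
    if generation is not None:
        return versionless, '%s#%s' % (versionless, generation)
    if version_id is not None:
        return versionless, '%s#%s' % (versionless, version_id)
    return versionless, None

print build_uri_strings('gs', 'my-bucket', 'path/obj.txt', generation=1360887697105000)
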
diff --git a/boto/swf/__init__.py b/boto/swf/__init__.py
index 34abc1dd..5eab6bc0 100644
--- a/boto/swf/__init__.py
+++ b/boto/swf/__init__.py
@@ -25,19 +25,28 @@
from boto.ec2.regioninfo import RegionInfo
import boto.swf.layer1
+REGION_ENDPOINTS = {
+ 'us-east-1': 'swf.us-east-1.amazonaws.com',
+ 'us-west-1': 'swf.us-west-1.amazonaws.com',
+ 'us-west-2': 'swf.us-west-2.amazonaws.com',
+ 'sa-east-1': 'swf.sa-east-1.amazonaws.com',
+ 'eu-west-1': 'swf.eu-west-1.amazonaws.com',
+ 'ap-northeast-1': 'swf.ap-northeast-1.amazonaws.com',
+ 'ap-southeast-1': 'swf.ap-southeast-1.amazonaws.com',
+ 'ap-southeast-2': 'swf.ap-southeast-2.amazonaws.com',
+}
-def regions():
+
+def regions(**kw_params):
"""
Get all available regions for the Amazon Simple Workflow service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
- import boto.dynamodb.layer2
- return [RegionInfo(name='us-east-1',
- endpoint='swf.us-east-1.amazonaws.com',
- connection_cls=boto.swf.layer1.Layer1),
- ]
+ return [RegionInfo(name=region_name, endpoint=REGION_ENDPOINTS[region_name],
+ connection_cls=boto.swf.layer1.Layer1)
+ for region_name in REGION_ENDPOINTS]
def connect_to_region(region_name, **kw_params):
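
regions() is now generated from the REGION_ENDPOINTS table, so adding a region only requires a new dictionary entry. A short usage sketch:

import boto.swf

# Every entry in REGION_ENDPOINTS becomes a RegionInfo with an SWF endpoint.
for region in boto.swf.regions():
    print region.name, region.endpoint
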
diff --git a/boto/swf/layer2.py b/boto/swf/layer2.py
index 10c34d55..cb3298e1 100644
--- a/boto/swf/layer2.py
+++ b/boto/swf/layer2.py
@@ -41,9 +41,9 @@ class SWFBase(object):
def __repr__(self):
"""Generate string representation."""
- rep_str = self.name
+ rep_str = str(self.name)
if hasattr(self, 'version'):
- rep_str += '-' + getattr(self, 'version')
+ rep_str += '-' + str(getattr(self, 'version'))
return '<%s %r at 0x%x>' % (self.__class__.__name__, rep_str, id(self))
class Domain(SWFBase):
diff --git a/boto/utils.py b/boto/utils.py
index d1f88b6c..e5c083f3 100644
--- a/boto/utils.py
+++ b/boto/utils.py
@@ -90,7 +90,9 @@ qsa_of_interest = ['acl', 'cors', 'defaultObjectAcl', 'location', 'logging',
# GET bucket?storageClass is not part of the S3 API.)
'storageClass',
# websiteConfig is a QSA for buckets in Google Cloud Storage.
- 'websiteConfig']
+ 'websiteConfig',
+ # compose is a QSA for objects in Google Cloud Storage.
+ 'compose']
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
@@ -208,8 +210,7 @@ def retry_url(url, retry_on_404=True, num_retries=10):
req = urllib2.Request(url)
r = opener.open(req)
result = r.read()
- resp = urllib2.urlopen(req)
- return resp.read()
+ return result
except urllib2.HTTPError, e:
# in 2.6 you use getcode(), in 2.5 and earlier you use code
if hasattr(e, 'getcode'):
@@ -218,12 +219,12 @@ def retry_url(url, retry_on_404=True, num_retries=10):
code = e.code
if code == 404 and not retry_on_404:
return ''
- except urllib2.URLError, e:
- raise e
except Exception, e:
pass
boto.log.exception('Caught exception reading instance data')
- time.sleep(2 ** i)
+ # If not on the last iteration of the loop then sleep.
+ if i + 1 != num_retries:
+ time.sleep(2 ** i)
boto.log.error('Unable to read instance data, giving up')
return ''
@@ -310,8 +311,23 @@ class LazyLoadMetadata(dict):
return super(LazyLoadMetadata, self).__repr__()
+def _build_instance_metadata_url(url, version, path):
+ """
+ Builds an EC2 metadata URL for fetching information about an instance.
+
+ Requires the following arguments: a URL, a version and a path.
+
+ Example:
+
+ >>> _build_instance_metadata_url('http://169.254.169.254', 'latest', 'meta-data')
+ 'http://169.254.169.254/latest/meta-data/'
+
+ """
+ return '%s/%s/%s/' % (url, version, path)
+
+
def get_instance_metadata(version='latest', url='http://169.254.169.254',
- timeout=None, num_retries=5):
+ data='meta-data', timeout=None, num_retries=5):
"""
Returns the instance metadata as a nested Python dictionary.
Simple values (e.g. local_hostname, hostname, etc.) will be
@@ -327,8 +343,8 @@ def get_instance_metadata(version='latest', url='http://169.254.169.254',
original = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
- return _get_instance_metadata('%s/%s/meta-data/' % (url, version),
- num_retries=num_retries)
+ metadata_url = _build_instance_metadata_url(url, version, data)
+ return _get_instance_metadata(metadata_url, num_retries=num_retries)
except urllib2.URLError, e:
return None
finally:
@@ -342,7 +358,7 @@ def get_instance_identity(version='latest', url='http://169.254.169.254',
Returns the instance identity as a nested Python dictionary.
"""
iid = {}
- base_url = 'http://169.254.169.254/latest/dynamic/instance-identity'
+ base_url = _build_instance_metadata_url(url, version, 'dynamic/instance-identity')
if timeout is not None:
original = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
@@ -365,7 +381,7 @@ def get_instance_identity(version='latest', url='http://169.254.169.254',
def get_instance_userdata(version='latest', sep=None,
url='http://169.254.169.254'):
- ud_url = '%s/%s/user-data' % (url, version)
+ ud_url = _build_instance_metadata_url(url, version, 'user-data')
user_data = retry_url(ud_url, retry_on_404=False)
if user_data:
if sep:
@@ -378,7 +394,7 @@ def get_instance_userdata(version='latest', sep=None,
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
-
+RFC1123 = '%a, %d %b %Y %H:%M:%S %Z'
def get_ts(ts=None):
if not ts:
@@ -392,9 +408,12 @@ def parse_ts(ts):
dt = datetime.datetime.strptime(ts, ISO8601)
return dt
except ValueError:
- dt = datetime.datetime.strptime(ts, ISO8601_MS)
- return dt
-
+ try:
+ dt = datetime.datetime.strptime(ts, ISO8601_MS)
+ return dt
+ except ValueError:
+ dt = datetime.datetime.strptime(ts, RFC1123)
+ return dt
def find_class(module_name, class_name=None):
if class_name:
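
parse_ts() now falls back through three timestamp formats instead of two. A short sketch of the same cascade, reusing the format strings added above (parse_timestamp is a hypothetical stand-in for boto.utils.parse_ts):

import datetime

ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
RFC1123 = '%a, %d %b %Y %H:%M:%S %Z'

def parse_timestamp(ts):
    # Try each known format in turn.
    for fmt in (ISO8601, ISO8601_MS, RFC1123):
        try:
            return datetime.datetime.strptime(ts, fmt)
        except ValueError:
            continue
    raise ValueError('Unrecognized timestamp: %r' % ts)

print parse_timestamp('2013-04-18T04:14:08Z')
print parse_timestamp('Thu, 18 Apr 2013 04:14:08 GMT')
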
diff --git a/boto/vpc/__init__.py b/boto/vpc/__init__.py
index 6aa1bbf5..e529b6f3 100644
--- a/boto/vpc/__init__.py
+++ b/boto/vpc/__init__.py
@@ -134,6 +134,39 @@ class VPCConnection(EC2Connection):
params = {'VpcId': vpc_id}
return self.get_status('DeleteVpc', params)
+ def modify_vpc_attribute(self, vpc_id,
+ enable_dns_support=None,
+ enable_dns_hostnames=None):
+ """
+ Modifies the specified attribute of the specified VPC.
+ You can only modify one attribute at a time.
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the VPC whose attribute will be modified.
+
+ :type enable_dns_support: bool
+ :param enable_dns_support: Specifies whether the DNS server
+ provided by Amazon is enabled for the VPC.
+
+ :type enable_dns_hostnames: bool
+ :param enable_dns_hostnames: Specifies whether DNS hostnames are
+ provided for the instances launched in this VPC. You can only
+ set this attribute to ``true`` if EnableDnsSupport
+ is also ``true``.
+ """
+ params = {'VpcId': vpc_id}
+ if enable_dns_support is not None:
+ if enable_dns_support:
+ params['EnableDnsSupport.Value'] = 'true'
+ else:
+ params['EnableDnsSupport.Value'] = 'false'
+ if enable_dns_hostnames is not None:
+ if enable_dns_hostnames:
+ params['EnableDnsHostnames.Value'] = 'true'
+ else:
+ params['EnableDnsHostnames.Value'] = 'false'
+ return self.get_status('ModifyVpcAttribute', params)
+
# Route Tables
def get_all_route_tables(self, route_table_ids=None, filters=None):
@@ -370,7 +403,7 @@ class VPCConnection(EC2Connection):
Attach an internet gateway to a specific VPC.
:type internet_gateway_id: str
- :param internet_gateway_id: The ID of the internet gateway to delete.
+ :param internet_gateway_id: The ID of the internet gateway to attach.
:type vpc_id: str
:param vpc_id: The ID of the VPC to attach to.
@@ -448,7 +481,7 @@ class VPCConnection(EC2Connection):
Create a new Customer Gateway
:type type: str
- :param type: Type of VPN Connection. Only valid valid currently is 'ipsec.1'
+ :param type: Type of VPN Connection. Only valid value currently is 'ipsec.1'
:type ip_address: str
:param ip_address: Internet-routable IP address for customer's gateway.
@@ -518,7 +551,7 @@ class VPCConnection(EC2Connection):
Create a new Vpn Gateway
:type type: str
- :param type: Type of VPN Connection. Only valid valid currently is 'ipsec.1'
+ :param type: Type of VPN Connection. Only valid value currently is 'ipsec.1'
:type availability_zone: str
:param availability_zone: The Availability Zone where you want the VPN gateway.
@@ -871,7 +904,7 @@ class VPCConnection(EC2Connection):
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR block associated with the local
- subnet of the customer data center.
+ subnet of the customer data center.
:type vpn_connection_id: str
:param vpn_connection_id: The ID of the VPN connection.
@@ -895,7 +928,7 @@ class VPCConnection(EC2Connection):
:type destination_cidr_block: str
:param destination_cidr_block: The CIDR block associated with the local
- subnet of the customer data center.
+ subnet of the customer data center.
:type vpn_connection_id: str
:param vpn_connection_id: The ID of the VPN connection.
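
The new modify_vpc_attribute() maps its two boolean keyword arguments onto the EnableDnsSupport.Value / EnableDnsHostnames.Value query parameters, one attribute per call. A hedged usage sketch (the region and VPC ID are placeholders, and real credentials are required):

import boto.vpc

conn = boto.vpc.connect_to_region('us-east-1')
# Enable DNS support first; EnableDnsHostnames requires it to be on.
conn.modify_vpc_attribute('vpc-12345678', enable_dns_support=True)
conn.modify_vpc_attribute('vpc-12345678', enable_dns_hostnames=True)
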
diff --git a/boto/vpc/vpc.py b/boto/vpc/vpc.py
index 0539acd8..8fdaa62f 100644
--- a/boto/vpc/vpc.py
+++ b/boto/vpc/vpc.py
@@ -14,7 +14,7 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
@@ -28,15 +28,28 @@ from boto.ec2.ec2object import TaggedEC2Object
class VPC(TaggedEC2Object):
def __init__(self, connection=None):
+ """
+ Represents a VPC.
+
+ :ivar id: The unique ID of the VPC.
+ :ivar dhcp_options_id: The ID of the set of DHCP options you've associated with the VPC
+ (or default if the default options are associated with the VPC).
+ :ivar state: The current state of the VPC.
+ :ivar cidr_block: The CIDR block for the VPC.
+ :ivar is_default: Indicates whether the VPC is the default VPC.
+ :ivar instance_tenancy: The allowed tenancy of instances launched into the VPC.
+ """
TaggedEC2Object.__init__(self, connection)
self.id = None
self.dhcp_options_id = None
self.state = None
self.cidr_block = None
+ self.is_default = None
+ self.instance_tenancy = None
def __repr__(self):
return 'VPC:%s' % self.id
-
+
def endElement(self, name, value, connection):
if name == 'vpcId':
self.id = value
@@ -46,9 +59,24 @@ class VPC(TaggedEC2Object):
self.state = value
elif name == 'cidrBlock':
self.cidr_block = value
+ elif name == 'isDefault':
+ self.is_default = True if value == 'true' else False
+ elif name == 'instanceTenancy':
+ self.instance_tenancy = value
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_vpc(self.id)
+ def _update(self, updated):
+ self.__dict__.update(updated.__dict__)
+
+ def update(self, validate=False):
+ vpc_list = self.connection.get_all_vpcs([self.id])
+ if len(vpc_list):
+ updated_vpc = vpc_list[0]
+ self._update(updated_vpc)
+ elif validate:
+ raise ValueError('%s is not a valid VPC ID' % (self.id,))
+ return self.state
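
update() refreshes the VPC object in place from get_all_vpcs() and returns the current state, which makes simple polling loops possible. A brief sketch, assuming vpc is a VPC instance obtained from an earlier VPCConnection call such as create_vpc(); the sleep interval is arbitrary:

import time

# vpc is assumed to come from a prior VPCConnection call such as create_vpc().
while vpc.update() != 'available':
    time.sleep(5)
print 'VPC %s is ready (default: %s, tenancy: %s)' % (
    vpc.id, vpc.is_default, vpc.instance_tenancy)
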
diff --git a/boto/vpc/vpnconnection.py b/boto/vpc/vpnconnection.py
index 39792381..aa49c36a 100644
--- a/boto/vpc/vpnconnection.py
+++ b/boto/vpc/vpnconnection.py
@@ -14,31 +14,173 @@
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+import boto
+from datetime import datetime
+from boto.resultset import ResultSet
"""
Represents a VPN Connection
"""
-from boto.ec2.ec2object import EC2Object
+from boto.ec2.ec2object import TaggedEC2Object
-class VpnConnection(EC2Object):
+class VpnConnectionOptions(object):
+ """
+ Represents VPN connection options
+ :ivar static_routes_only: Indicates whether the VPN connection uses static
+ routes only. Static routes must be used for devices that don't support
+ BGP.
+
+ """
+ def __init__(self, static_routes_only=None):
+ self.static_routes_only = static_routes_only
+
+ def __repr__(self):
+ return 'VpnConnectionOptions'
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'staticRoutesOnly':
+ self.static_routes_only = True if value == 'true' else False
+ else:
+ setattr(self, name, value)
+
+class VpnStaticRoute(object):
+ """
+ Represents a static route for a VPN connection.
+
+ :ivar destination_cidr_block: The CIDR block associated with the local
+ subnet of the customer data center.
+ :ivar source: Indicates how the routes were provided.
+ :ivar state: The current state of the static route.
+ """
+ def __init__(self, destination_cidr_block=None, source=None, state=None):
+ self.destination_cidr_block = destination_cidr_block
+ self.source = source
+ self.state = state
+
+ def __repr__(self):
+ return 'VpnStaticRoute: %s' % self.destination_cidr_block
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'destinationCidrBlock':
+ self.destination_cidr_block = value
+ elif name == 'source':
+ self.source = value
+ elif name == 'state':
+ self.state = value
+ else:
+ setattr(self, name, value)
+
+class VpnTunnel(object):
+ """
+ Represents telemetry for a VPN tunnel
+
+ :ivar outside_ip_address: The Internet-routable IP address of the
+ virtual private gateway's outside interface.
+ :ivar status: The status of the VPN tunnel. Valid values: UP | DOWN
+ :ivar last_status_change: The date and time of the last change in status.
+ :ivar status_message: If an error occurs, a description of the error.
+ :ivar accepted_route_count: The number of accepted routes.
+ """
+ def __init__(self, outside_ip_address=None, status=None, last_status_change=None,
+ status_message=None, accepted_route_count=None):
+ self.outside_ip_address = outside_ip_address
+ self.status = status
+ self.last_status_change = last_status_change
+ self.status_message = status_message
+ self.accepted_route_count = accepted_route_count
+
+ def __repr__(self):
+ return 'VpnTunnel: %s' % self.outside_ip_address
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'outsideIpAddress':
+ self.outside_ip_address = value
+ elif name == 'status':
+ self.status = value
+ elif name == 'lastStatusChange':
+ self.last_status_change = datetime.strptime(value,
+ '%Y-%m-%dT%H:%M:%S.%fZ')
+ elif name == 'statusMessage':
+ self.status_message = value
+ elif name == 'acceptedRouteCount':
+ try:
+ value = int(value)
+ except ValueError:
+ boto.log.warning('Error converting code (%s) to int' % value)
+ self.accepted_route_count = value
+ else:
+ setattr(self, name, value)
+
+class VpnConnection(TaggedEC2Object):
+ """
+ Represents a VPN Connection
+
+ :ivar id: The ID of the VPN connection.
+ :ivar state: The current state of the VPN connection.
+ Valid values: pending | available | deleting | deleted
+ :ivar customer_gateway_configuration: The configuration information for the
+ VPN connection's customer gateway (in the native XML format). This
+ element is always present in the
+ :class:`boto.vpc.VPCConnection.create_vpn_connection` response;
+ however, it's present in the
+ :class:`boto.vpc.VPCConnection.get_all_vpn_connections` response only
+ if the VPN connection is in the pending or available state.
+ :ivar type: The type of VPN connection (ipsec.1).
+ :ivar customer_gateway_id: The ID of the customer gateway at your end of
+ the VPN connection.
+ :ivar vpn_gateway_id: The ID of the virtual private gateway
+ at the AWS side of the VPN connection.
+ :ivar tunnels: A list of the vpn tunnels (always 2)
+ :ivar options: The option set describing the VPN connection.
+ :ivar static_routes: A list of static routes associated with a VPN
+ connection.
+
+ """
def __init__(self, connection=None):
- EC2Object.__init__(self, connection)
+ TaggedEC2Object.__init__(self, connection)
self.id = None
self.state = None
self.customer_gateway_configuration = None
self.type = None
self.customer_gateway_id = None
self.vpn_gateway_id = None
+ self.tunnels = []
+ self.options = None
+ self.static_routes = []
def __repr__(self):
return 'VpnConnection:%s' % self.id
-
+
+ def startElement(self, name, attrs, connection):
+ retval = super(VpnConnection, self).startElement(name, attrs, connection)
+ if retval is not None:
+ return retval
+ if name == 'vgwTelemetry':
+ self.tunnels = ResultSet([('item', VpnTunnel)])
+ return self.tunnels
+ elif name == 'routes':
+ self.static_routes = ResultSet([('item', VpnStaticRoute)])
+ return self.static_routes
+ elif name == 'options':
+ self.options = VpnConnectionOptions()
+ return self.options
+ return None
+
def endElement(self, name, value, connection):
if name == 'vpnConnectionId':
self.id = value
@@ -57,4 +199,3 @@ class VpnConnection(EC2Object):
def delete(self):
return self.connection.delete_vpn_connection(self.id)
-
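
The enriched VpnConnection object now exposes tunnel telemetry, static routes, and connection options after the DescribeVpnConnections response is parsed. A hedged inspection sketch, assuming vpn is an item returned by VPCConnection.get_all_vpn_connections():

# vpn is assumed to come from VPCConnection.get_all_vpn_connections().
for tunnel in vpn.tunnels:
    print tunnel.outside_ip_address, tunnel.status, tunnel.accepted_route_count
for route in vpn.static_routes:
    print route.destination_cidr_block, route.state
if vpn.options is not None:
    print 'static routes only:', vpn.options.static_routes_only
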
diff --git a/docs/source/autoscale_tut.rst b/docs/source/autoscale_tut.rst
index 1f03ec05..1c3a0a18 100644
--- a/docs/source/autoscale_tut.rst
+++ b/docs/source/autoscale_tut.rst
@@ -32,9 +32,6 @@ There are two ways to do this in boto. The first is:
>>> from boto.ec2.autoscale import AutoScaleConnection
>>> conn = AutoScaleConnection('<aws access key>', '<aws secret key>')
-Alternatively, you can use the shortcut:
-
->>> conn = boto.connect_autoscale()
A Note About Regions and Endpoints
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -43,7 +40,7 @@ default the US endpoint is used. To choose a specific region, instantiate the
AutoScaleConnection object with that region's endpoint.
>>> import boto.ec2.autoscale
->>> ec2 = boto.ec2.autoscale.connect_to_region('eu-west-1')
+>>> autoscale = boto.ec2.autoscale.connect_to_region('eu-west-1')
Alternatively, edit your boto.cfg with the default Autoscale endpoint to use::
@@ -163,7 +160,8 @@ will now be a property of our ScalingPolicy objects.
Next we'll create CloudWatch alarms that will define when to run the
Auto Scaling Policies.
->>> cloudwatch = boto.connect_cloudwatch()
+>>> import boto.ec2.cloudwatch
+>>> cloudwatch = boto.ec2.cloudwatch.connect_to_region('us-west-2')
It makes sense to measure the average CPU usage across the whole Auto Scaling
Group, rather than individual instances. We express that as CloudWatch
@@ -199,7 +197,8 @@ beyond the limits of the Scaling Group's 'max_size' and 'min_size' properties.
To retrieve the instances in your autoscale group:
->>> ec2 = boto.connect_ec2()
+>>> import boto.ec2
+>>> ec2 = boto.ec2.connect_to_region('us-west-2')
>>> conn.get_all_groups(names=['my_group'])[0]
>>> instance_ids = [i.instance_id for i in group.instances]
>>> reservations = ec2.get_all_instances(instance_ids)
diff --git a/docs/source/boto_config_tut.rst b/docs/source/boto_config_tut.rst
index c134397c..dc8000e7 100644
--- a/docs/source/boto_config_tut.rst
+++ b/docs/source/boto_config_tut.rst
@@ -11,8 +11,8 @@ There is a growing list of configuration options for the boto library. Many of
these options can be passed into the constructors for top-level objects such as
connections. Some options, such as credentials, can also be read from
environment variables (e.g. ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY``).
-But there is no central place to manage these options. So, the development
-version of boto has now introduced the notion of boto config files.
+It is also possible to manage these options in a central place through the use
+of boto config files.
Details
-------
@@ -33,6 +33,13 @@ methods of that object. In addition, the boto
:py:class:`Config <boto.pyami.config.Config>` class defines additional
methods that are described on the PyamiConfigMethods page.
+An example ``~/.boto`` file looks like this::
+
+ [Credentials]
+ aws_access_key_id = <your_access_key_here>
+ aws_secret_access_key = <your_secret_key_here>
+
+
Sections
--------
@@ -50,7 +57,7 @@ boto requests. The order of precedence for authentication credentials is:
* Credentials specified as options in the config file.
This section defines the following options: ``aws_access_key_id`` and
-``aws_secret_access_key``. The former being your aws key id and the latter
+``aws_secret_access_key``. The former being your AWS key id and the latter
being the secret key.
For example::
@@ -60,7 +67,7 @@ For example::
aws_secret_access_key = <your secret key>
Please notice that quote characters are not used to either side of the '='
-operator even when both your aws access key id and secret key are strings.
+operator even when both your AWS access key id and secret key are strings.
For greater security, the secret key can be stored in a keyring and
retrieved via the keyring package. To use a keyring, use ``keyring``,
@@ -76,11 +83,22 @@ Python path. To learn about setting up keyrings, see the `keyring
documentation
<http://pypi.python.org/pypi/keyring#installing-and-using-python-keyring-lib>`_
+Credentials can also be supplied for a Eucalyptus service::
+
+ [Credentials]
+ euca_access_key_id = <your access key>
+ euca_secret_access_key = <your secret key>
+
+Finally, this section is also used to provide credentials for the Internet Archive API::
+
+ [Credentials]
+ ia_access_key_id = <your access key>
+ ia_secret_access_key = <your secret key>
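+
+With those values in place, a connection to the Internet Archive can be opened
+without passing the keys explicitly (a brief sketch; it assumes
+``boto.connect_ia`` reads the ``ia_*`` options above)::
+
+    >>> import boto
+    >>> ia = boto.connect_ia()
+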
Boto
^^^^
-The Boto section is used to specify options that control the operaton of
+The Boto section is used to specify options that control the operation of
boto itself. This section defines the following options:
:debug: Controls the level of debug messages that will be printed by the boto library.
@@ -99,7 +117,7 @@ boto itself. This section defines the following options:
request. The default number of retries is 5 but you can change the default
with this option.
-As an example::
+For example::
[Boto]
debug = 0
@@ -110,6 +128,152 @@ As an example::
proxy_user = foo
proxy_pass = bar
+
+:connection_stale_duration: Amount of time to wait in seconds before a
+ connection will stop getting reused. AWS will disconnect connections which
+ have been idle for 180 seconds.
+:is_secure: Whether the connection uses SSL. This setting will override
+    values passed in.
+:https_validate_certificates: Validate HTTPS certificates. This is on by default.
+:ca_certificates_file: Location of CA certificates.
+:http_socket_timeout: Timeout used to override the system default socket
+    timeout for httplib.
+:send_crlf_after_proxy_auth_headers: Change line ending behaviour with proxies.
+ For more details see this `discussion <https://groups.google.com/forum/?fromgroups=#!topic/boto-dev/teenFvOq2Cc>`_
+
+These settings will default to::
+
+ [Boto]
+ connection_stale_duration = 180
+ is_secure = True
+ https_validate_certificates = True
+ ca_certificates_file = cacerts.txt
+ http_socket_timeout = 60
+ send_crlf_after_proxy_auth_headers = False
+
+You can control the timeouts and number of retries used when retrieving
+information from the Metadata Service (this is used for retrieving credentials
+for IAM roles on EC2 instances):
+
+:metadata_service_timeout: Number of seconds until requests to the metadata
+ service will timeout (float).
+:metadata_service_num_attempts: Number of times to attempt to retrieve
+ information from the metadata service before giving up (int).
+
+These settings will default to::
+
+ [Boto]
+ metadata_service_timeout = 1.0
+ metadata_service_num_attempts = 1
+
+
+This section is also used for specifying endpoints for non-AWS services such as
+Eucalyptus and Walrus.
+
+:eucalyptus_host: Select a default endpoint host for Eucalyptus
+:walrus_host: Select a default host for Walrus
+
+For example::
+
+ [Boto]
+ eucalyptus_host = somehost.example.com
+ walrus_host = somehost.example.com
+
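+With those host options set, the Eucalyptus and Walrus shortcut functions can
+be called without naming a host explicitly (a brief sketch; it assumes
+``boto.connect_euca`` and ``boto.connect_walrus`` pick up the hosts configured
+above)::
+
+    >>> import boto
+    >>> euca = boto.connect_euca()
+    >>> walrus = boto.connect_walrus()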
+
+Finally, the Boto section is used to set default versions, regions, and
+endpoints for many AWS services.
+
+AutoScale settings:
+
+:autoscale_version: Set the API version
+:autoscale_endpoint: Endpoint to use
+:autoscale_region_name: Default region to use
+
+For example::
+
+ [Boto]
+ autoscale_version = 2011-01-01
+ autoscale_endpoint = autoscaling.us-west-2.amazonaws.com
+ autoscale_region_name = us-west-2
+
+
+CloudFormation settings can also be defined:
+
+:cfn_version: CloudFormation API version
+:cfn_region_name: Default region name
+:cfn_region_endpoint: Default endpoint
+
+For example::
+
+ [Boto]
+ cfn_version = 2010-05-15
+ cfn_region_name = us-west-2
+ cfn_region_endpoint = cloudformation.us-west-2.amazonaws.com
+
+Cloudsearch settings:
+
+:cs_region_name: Default cloudsearch region
+:cs_region_endpoint: Default cloudsearch endpoint
+
+For example::
+
+ [Boto]
+ cs_region_name = us-west-2
+ cs_region_endpoint = cloudsearch.us-west-2.amazonaws.com
+
+Cloudwatch settings:
+
+:cloudwatch_version: Cloudwatch API version
+:cloudwatch_region_name: Default region name
+:cloudwatch_region_endpoint: Default endpoint
+
+For example::
+
+ [Boto]
+ cloudwatch_version = 2010-08-01
+ cloudwatch_region_name = us-west-2
+ cloudwatch_region_endpoint = monitoring.us-west-2.amazonaws.com
+
+EC2 settings:
+
+:ec2_version: EC2 API version
+:ec2_region_name: Default region name
+:ec2_region_endpoint: Default endpoint
+
+For example::
+
+ [Boto]
+ ec2_version = 2012-12-01
+ ec2_region_name = us-west-2
+ ec2_region_endpoint = ec2.us-west-2.amazonaws.com
+
+ELB settings:
+
+:elb_version: ELB API version
+:elb_region_name: Default region name
+:elb_region_endpoint: Default endpoint
+
+For example::
+
+ [Boto]
+ elb_version = 2012-06-01
+ elb_region_name = us-west-2
+ elb_region_endpoint = elasticloadbalancing.us-west-2.amazonaws.com
+
+EMR settings:
+
+:emr_version: EMR API version
+:emr_region_name: Default region name
+:emr_region_endpoint: Default endpoint
+
+For example::
+
+ [Boto]
+ emr_version = 2009-03-31
+ emr_region_name = us-west-2
+ emr_region_endpoint = elasticmapreduce.us-west-2.amazonaws.com
+
+
Precedence
----------
@@ -117,9 +281,119 @@ Even if you have your boto config setup, you can also have credentials and
options stored in environmental variables or you can explicitly pass them to
method calls i.e.::
- >>> boto.connect_ec2('<KEY_ID>','<SECRET_KEY>')
+ >>> boto.ec2.connect_to_region(
+ ... 'us-west-2',
+ ... aws_access_key_id='foo',
+ ... aws_secret_access_key='bar')
In these cases where these options can be found in more than one place boto
will first use the explicitly supplied arguments, if none found it will then
look for them amidst environment variables and if that fails it will use the
ones in boto config.
+
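+For example, with ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` exported
+in your environment, the same call can omit the credential arguments and boto
+will fall back to those variables, and then to the config file (a short
+sketch)::
+
+    >>> import boto.ec2
+    >>> conn = boto.ec2.connect_to_region('us-west-2')
+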
+Notification
+^^^^^^^^^^^^
+
+If you are using notifications for boto.pyami, you can specify the email
+details through the following variables.
+
+:smtp_from: Used as the sender in notification emails.
+:smtp_to: Destination to which emails should be sent.
+:smtp_host: Host to connect to when sending notification emails.
+:smtp_port: Port to connect to when connecting to ``smtp_host``.
+
+Default values are::
+
+ [notification]
+ smtp_from = boto
+ smtp_to = None
+ smtp_host = localhost
+ smtp_port = 25
+ smtp_tls = True
+ smtp_user = john
+ smtp_pass = hunter2
+
+SWF
+^^^
+
+The SWF section allows you to configure the default region to be used for the
+Amazon Simple Workflow service.
+
+:region: Set the default region
+
+Example::
+
+ [SWF]
+ region = us-west-2
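+
+With that option set, creating a connection without an explicit region should
+fall back to the configured one (a minimal sketch; it assumes the plain
+``Layer1`` constructor consults this section)::
+
+    >>> import boto.swf.layer1
+    >>> swf = boto.swf.layer1.Layer1()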
+
+Pyami
+^^^^^
+
+The Pyami section is used to configure the working directory for PyAMI.
+
+:working_dir: Working directory used by PyAMI
+
+Example::
+
+ [Pyami]
+ working_dir = /home/foo/
+
+DB
+^^
+The DB section is used to configure access to databases through the
+:func:`boto.sdb.db.manager.get_manager` function.
+
+:db_type: Type of the database. Current allowed values are `SimpleDB` and
+ `XML`.
+:db_user: AWS access key id.
+:db_passwd: AWS secret access key.
+:db_name: Database that will be connected to.
+:db_table: Table name. Note: this option does not appear to be used.
+:db_host: Host to connect to
+:db_port: Port to connect to
+:enable_ssl: Use SSL
+
+For example::
+
+ [DB]
+ db_type = SimpleDB
+ db_user = <aws access key id>
+ db_passwd = <aws secret access key>
+ db_name = my_domain
+ db_table = table
+ db_host = sdb.amazonaws.com
+ enable_ssl = True
+ debug = True
+
+ [DB_TestBasic]
+ db_type = SimpleDB
+ db_user = <another aws access key id>
+ db_passwd = <another aws secret access key>
+ db_name = basic_domain
+ db_port = 1111
+
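+A rough sketch of how this configuration is consumed by the SimpleDB object
+persistence layer (the model class and property below are purely
+illustrative)::
+
+    >>> from boto.sdb.db.model import Model
+    >>> from boto.sdb.db.property import StringProperty
+    >>>
+    >>> class TestBasic(Model):
+    ...     title = StringProperty()
+    ...
+    >>> obj = TestBasic()
+    >>> obj.title = 'hello'
+    >>> obj.put()    # stored via the manager configured in [DB_TestBasic]
+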
+SDB
+^^^
+
+This section is used to configure SimpleDB.
+
+:region: Set the region to which SDB should connect
+
+Example::
+
+ [SDB]
+ region = us-west-2
+
+DynamoDB
+^^^^^^^^
+
+This section is used to configure DynamoDB.
+
+:region: Choose the default region
+:validate_checksums: Check checksums returned by DynamoDB
+
+Example::
+
+ [DynamoDB]
+ region = us-west-2
+ validate_checksums = True
diff --git a/docs/source/cloudsearch_tut.rst b/docs/source/cloudsearch_tut.rst
index 7172a47d..f29bccad 100644
--- a/docs/source/cloudsearch_tut.rst
+++ b/docs/source/cloudsearch_tut.rst
@@ -16,10 +16,12 @@ The first step in accessing CloudSearch is to create a connection to the service
The recommended method of doing this is as follows::
>>> import boto.cloudsearch
- >>> conn = boto.cloudsearch.connect_to_region("us-east-1", aws_access_key_id= '<aws access key'>, aws_secret_access_key='<aws secret key>')
+ >>> conn = boto.cloudsearch.connect_to_region("us-west-2",
+ ... aws_access_key_id='<aws access key'>,
+ ... aws_secret_access_key='<aws secret key>')
At this point, the variable conn will point to a CloudSearch connection object
-in the us-east-1 region. Currently, this is the only region which has the
+in the us-west-2 region. Currently, this is the only region which has the
CloudSearch service. In this example, the AWS access key and AWS secret key are
passed in to the method explicitly. Alternatively, you can set the environment
variables:
@@ -30,7 +32,7 @@ variables:
and then simply call::
>>> import boto.cloudsearch
- >>> conn = boto.cloudsearch.connect_to_region("us-east-1")
+ >>> conn = boto.cloudsearch.connect_to_region("us-west-2")
In either case, conn will point to the Connection object which we will use
throughout the remainder of this tutorial.
@@ -40,7 +42,7 @@ Creating a Domain
Once you have a connection established with the CloudSearch service, you will
want to create a domain. A domain encapsulates the data that you wish to index,
-as well as indexes and metadata relating to it.
+as well as indexes and metadata relating to it::
>>> from boto.cloudsearch.domain import Domain
>>> domain = Domain(conn, conn.create_domain('demo'))
@@ -51,8 +53,9 @@ document service, which you will use to index and search.
Setting access policies
-----------------------
-Before you can connect to a document service, you need to set the correct access properties.
-For example, if you were connecting from 192.168.1.0, you could give yourself access as follows:
+Before you can connect to a document service, you need to set the correct
+access properties. For example, if you were connecting from 192.168.1.0, you
+could give yourself access as follows::
>>> our_ip = '192.168.1.0'
@@ -61,50 +64,57 @@ For example, if you were connecting from 192.168.1.0, you could give yourself ac
>>> policy.allow_search_ip(our_ip)
>>> policy.allow_doc_ip(our_ip)
-You can use the allow_search_ip() and allow_doc_ip() methods to give different
-CIDR blocks access to searching and the document service respectively.
+You can use the :py:meth:`allow_search_ip
+<boto.cloudsearch.optionstatus.ServicePoliciesStatus.allow_search_ip>` and
+:py:meth:`allow_doc_ip <boto.cloudsearch.optionstatus.ServicePoliciesStatus.allow_doc_ip>`
+methods to give different CIDR blocks access to searching and the document
+service respectively.
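+
+For example, to give a whole CIDR block access to searching while keeping the
+document service limited to the single address above (the addresses are purely
+illustrative)::
+
+    >>> policy.allow_search_ip('192.168.1.0/24')
+    >>> policy.allow_doc_ip(our_ip)
+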
Creating index fields
---------------------
Each domain can have up to twenty index fields which are indexed by the
CloudSearch service. For each index field, you will need to specify whether
-it's a text or integer field, as well as optionaly a default value.
+it's a text or integer field, as well as optionally a default value::
>>> # Create an 'text' index field called 'username'
>>> uname_field = domain.create_index_field('username', 'text')
>>> # Epoch time of when the user last did something
- >>> time_field = domain.create_index_field('last_activity', 'uint', default=0)
+ >>> time_field = domain.create_index_field('last_activity',
+ ... 'uint',
+ ... default=0)
It is also possible to mark an index field as a facet. Doing so allows a search
query to return categories into which results can be grouped, or to create
-drill-down categories
-
- >>> # But it would be neat to drill down into different countries
+drill-down categories::
+
+ >>> # But it would be neat to drill down into different countries
>>> loc_field = domain.create_index_field('location', 'text', facet=True)
Finally, you can also mark a snippet of text as being able to be returned
-directly in your search query by using the results option.
+directly in your search query by using the results option::
>>> # Directly insert user snippets in our results
>>> snippet_field = domain.create_index_field('snippet', 'text', result=True)
-You can add up to 20 index fields in this manner:
+You can add up to 20 index fields in this manner::
- >>> follower_field = domain.create_index_field('follower_count', 'uint', default=0)
+ >>> follower_field = domain.create_index_field('follower_count',
+ ... 'uint',
+ ... default=0)
Adding Documents to the Index
-----------------------------
Now, we can add some documents to our new search domain. First, you will need a
-document service object through which queries are sent:
+document service object through which queries are sent::
>>> doc_service = domain.get_document_service()
For this example, we will use a pre-populated list of sample content for our
import. You would normally pull such data from your database or another
-document store.
+document store::
>>> users = [
{
@@ -142,27 +152,30 @@ document store.
]
When adding documents to our document service, we will batch them together. You
-can schedule a document to be added by using the add() method. Whenever you are
-adding a document, you must provide a unique ID, a version ID, and the actual
-document to be indexed. In this case, we are using the user ID as our unique
-ID. The version ID is used to determine which is the latest version of an
-object to be indexed. If you wish to update a document, you must use a higher
-version ID. In this case, we are using the time of the user's last activity as
-a version number.
+can schedule a document to be added by using the :py:meth:`add
+<boto.cloudsearch.document.DocumentServiceConnection.add>` method. Whenever you are adding a
+document, you must provide a unique ID, a version ID, and the actual document
+to be indexed. In this case, we are using the user ID as our unique ID. The
+version ID is used to determine which is the latest version of an object to be
+indexed. If you wish to update a document, you must use a higher version ID. In
+this case, we are using the time of the user's last activity as a version
+number::
>>> for user in users:
>>> doc_service.add(user['id'], user['last_activity'], user)
When you are ready to send the batched request to the document service, you can
-do with the commit() method. Note that cloudsearch will charge per 1000 batch
-uploads. Each batch upload must be under 5MB.
+do so with the :py:meth:`commit
+<boto.cloudsearch.document.DocumentServiceConnection.commit>` method. Note that
+cloudsearch will charge per 1000 batch uploads. Each batch upload must be under
+5MB::
- >>> result = doc_service.commit()
+ >>> result = doc_service.commit()
-The result is an instance of `cloudsearch.CommitResponse` which will
-make the plain dictionary response a nice object (ie result.adds,
-result.deletes) and raise an exception for us if all of our documents
-weren't actually committed.
+The result is an instance of :py:class:`CommitResponse
+<boto.cloudsearch.document.CommitResponse>` which will make the plain
+dictionary response a nice object (i.e. result.adds, result.deletes) and raise an
+exception for us if all of our documents weren't actually committed.
After you have successfully committed some documents to cloudsearch, you must
use :py:meth:`clear_sdf
@@ -173,12 +186,13 @@ cleared.
Searching Documents
-------------------
-Now, let's try performing a search. First, we will need a SearchServiceConnection:
+Now, let's try performing a search. First, we will need a
+SearchServiceConnection::
>>> search_service = domain.get_search_service()
A standard search will return documents which contain the exact words being
-searched for.
+searched for::
>>> results = search_service.search(q="dan")
>>> results.hits
@@ -186,7 +200,7 @@ searched for.
>>> map(lambda x: x['id'], results)
[u'1', u'4']
-The standard search does not look at word order:
+The standard search does not look at word order::
>>> results = search_service.search(q="dinosaur dress")
>>> results.hits
@@ -196,7 +210,7 @@ The standard search does not look at word order:
It's also possible to do more complex queries using the bq argument (Boolean
Query). When you are using bq, your search terms must be enclosed in single
-quotes.
+quotes::
>>> results = search_service.search(bq="'dan'")
>>> results.hits
@@ -205,7 +219,7 @@ quotes.
[u'1', u'4']
When you are using boolean queries, it's also possible to use wildcards to
-extend your search to all words which start with your search terms:
+extend your search to all words which start with your search terms::
>>> results = search_service.search(bq="'dan*'")
>>> results.hits
@@ -215,7 +229,7 @@ extend your search to all words which start with your search terms:
The boolean query also allows you to create more complex queries. You can OR
term together using "|", AND terms together using "+" or a space, and you can
-remove words from the query using the "-" operator.
+remove words from the query using the "-" operator::
>>> results = search_service.search(bq="'watched|moved'")
>>> results.hits
@@ -224,7 +238,7 @@ remove words from the query using the "-" operator.
[u'3', u'4']
By default, the search will return 10 terms but it is possible to adjust this
-by using the size argument as follows:
+by using the size argument as follows::
>>> results = search_service.search(bq="'dan*'", size=2)
>>> results.hits
@@ -232,7 +246,8 @@ by using the size argument as follows:
>>> map(lambda x: x['id'], results)
[u'1', u'2']
-It is also possible to offset the start of the search by using the start argument as follows:
+It is also possible to offset the start of the search by using the start
+argument as follows::
>>> results = search_service.search(bq="'dan*'", start=2)
>>> results.hits
@@ -244,18 +259,20 @@ It is also possible to offset the start of the search by using the start argumen
Ordering search results and rank expressions
--------------------------------------------
-If your search query is going to return many results, it is good to be able to sort them
-You can order your search results by using the rank argument. You are able to
-sort on any fields which have the results option turned on.
+If your search query is going to return many results, it is good to be able to
+sort them. You can order your search results by using the rank argument. You are
+able to sort on any fields which have the results option turned on::
>>> results = search_service.search(bq=query, rank=['-follower_count'])
You can also create your own rank expressions to sort your results according to
-other criteria:
+other criteria, such as showing the most recently active users, or combining the
+recency score with the text_relevance::
+
+ >>> domain.create_rank_expression('recently_active', 'last_activity')
- >>> domain.create_rank_expression('recently_active', 'last_activity') # We'll want to be able to just show the most recently active users
-
- >>> domain.create_rank_expression('activish', 'text_relevance + ((follower_count/(time() - last_activity))*1000)') # Let's get trickier and combine text relevance with a really dynamic expression
+ >>> domain.create_rank_expression('activish',
+ ... 'text_relevance + ((follower_count/(time() - last_activity))*1000)')
>>> results = search_service.search(bq=query, rank=['-recently_active'])
@@ -273,7 +290,7 @@ you map the term running to the stem run and then search for running,
the request matches documents that contain run as well as running.
To get the current stemming dictionary defined for a domain, use the
-``get_stemming`` method of the Domain object.
+:py:meth:`get_stemming <boto.cloudsearch.domain.Domain.get_stemming>` method::
>>> stems = domain.get_stemming()
>>> stems
@@ -282,7 +299,7 @@ To get the current stemming dictionary defined for a domain, use the
This returns a dictionary object that can be manipulated directly to
add additional stems for your search domain by adding pairs of term:stem
-to the stems dictionary.
+to the stems dictionary::
>>> stems['stems']['running'] = 'run'
>>> stems['stems']['ran'] = 'run'
@@ -291,12 +308,12 @@ to the stems dictionary.
>>>
This has changed the value locally. To update the information in
-Amazon CloudSearch, you need to save the data.
+Amazon CloudSearch, you need to save the data::
>>> stems.save()
You can also access certain CloudSearch-specific attributes related to
-the stemming dictionary defined for your domain.
+the stemming dictionary defined for your domain::
>>> stems.status
u'RequiresIndexDocuments'
@@ -321,7 +338,7 @@ so common that including them would result in a massive number of
matches.
To view the stopwords currently defined for your domain, use the
-``get_stopwords`` method of the Domain object.
+:py:meth:`get_stopwords <boto.cloudsearch.domain.Domain.get_stopwords>` method::
>>> stopwords = domain.get_stopwords()
>>> stopwords
@@ -344,17 +361,18 @@ To view the stopwords currently defined for your domain, use the
u'the',
u'to',
u'was']}
- >>>
+ >>>
You can add additional stopwords by simply appending the values to the
-list.
+list::
>>> stopwords['stopwords'].append('foo')
>>> stopwords['stopwords'].append('bar')
>>> stopwords
Similarly, you could remove currently defined stopwords from the list.
-To save the changes, use the ``save`` method.
+To save the changes, use the :py:meth:`save
+<boto.cloudsearch.optionstatus.OptionStatus.save>` method::
>>> stopwords.save()
@@ -371,13 +389,13 @@ the indexed term, the results will include documents that contain the
indexed term.
If you want two terms to match the same documents, you must define
-them as synonyms of each other. For example:
+them as synonyms of each other. For example::
cat, feline
feline, cat
To view the synonyms currently defined for your domain, use the
-``get_synonyms`` method of the Domain object.
+:py:meth:`get_synonyms <boto.cloudsearch.domain.Domain.get_synonyms>` method::
>>> synonyms = domain.get_synonyms()
>>> synonyms
@@ -385,12 +403,13 @@ To view the synonyms currently defined for your domain, use the
>>>
You can define new synonyms by adding new term:synonyms entries to the
-synonyms dictionary object.
+synonyms dictionary object::
>>> synonyms['synonyms']['cat'] = ['feline', 'kitten']
>>> synonyms['synonyms']['dog'] = ['canine', 'puppy']
-To save the changes, use the ``save`` method.
+To save the changes, use the :py:meth:`save
+<boto.cloudsearch.optionstatus.OptionStatus.save>` method::
>>> synonyms.save()
@@ -400,12 +419,14 @@ that provide additional information about the stopwords in your domain.
Deleting Documents
------------------
+It is also possible to delete documents::
+
>>> import time
>>> from datetime import datetime
>>> doc_service = domain.get_document_service()
>>> # Again we'll cheat and use the current epoch time as our version number
-
+
>>> doc_service.delete(4, int(time.mktime(datetime.utcnow().timetuple())))
    >>> doc_service.commit()
diff --git a/docs/source/cloudwatch_tut.rst b/docs/source/cloudwatch_tut.rst
index 5639c043..c9302092 100644
--- a/docs/source/cloudwatch_tut.rst
+++ b/docs/source/cloudwatch_tut.rst
@@ -12,8 +12,8 @@ EC2Connection object or call the monitor method on the Instance object.
It takes a while for the monitoring data to start accumulating but once
it does, you can do this::
- >>> import boto
- >>> c = boto.connect_cloudwatch()
+ >>> import boto.ec2.cloudwatch
+ >>> c = boto.ec2.cloudwatch.connect_to_region('us-west-2')
>>> metrics = c.list_metrics()
>>> metrics
[Metric:NetworkIn,
@@ -113,4 +113,4 @@ about that particular data point.::
u'Timestamp': u'2009-05-21T19:55:00Z',
u'Unit': u'Percent'}
-My server obviously isn't very busy right now! \ No newline at end of file
+My server obviously isn't very busy right now!
diff --git a/docs/source/dynamodb_tut.rst b/docs/source/dynamodb_tut.rst
index 07f06083..0e6a81a1 100644
--- a/docs/source/dynamodb_tut.rst
+++ b/docs/source/dynamodb_tut.rst
@@ -1,339 +1,340 @@
-.. dynamodb_tut:
-
-============================================
-An Introduction to boto's DynamoDB interface
-============================================
-
-This tutorial focuses on the boto interface to AWS' DynamoDB_. This tutorial
-assumes that you have boto already downloaded and installed.
-
-.. _DynamoDB: http://aws.amazon.com/dynamodb/
-
-
-Creating a Connection
----------------------
-
-The first step in accessing DynamoDB is to create a connection to the service.
-To do so, the most straight forward way is the following::
-
- >>> import boto
- >>> conn = boto.connect_dynamodb(
- aws_access_key_id='<YOUR_AWS_KEY_ID>',
- aws_secret_access_key='<YOUR_AWS_SECRET_KEY>')
- >>> conn
- <boto.dynamodb.layer2.Layer2 object at 0x3fb3090>
-
-Bear in mind that if you have your credentials in boto config in your home
-directory, the two keyword arguments in the call above are not needed. More
-details on configuration can be found in :doc:`boto_config_tut`.
-
-The :py:func:`boto.connect_dynamodb` functions returns a
-:py:class:`boto.dynamodb.layer2.Layer2` instance, which is a high-level API
-for working with DynamoDB. Layer2 is a set of abstractions that sit atop
-the lower level :py:class:`boto.dynamodb.layer1.Layer1` API, which closely
-mirrors the Amazon DynamoDB API. For the purpose of this tutorial, we'll
-just be covering Layer2.
-
-
-Listing Tables
---------------
-
-Now that we have a DynamoDB connection object, we can then query for a list of
-existing tables in that region::
-
- >>> conn.list_tables()
- ['test-table', 'another-table']
-
-
-Creating Tables
----------------
-
-DynamoDB tables are created with the
-:py:meth:`Layer2.create_table <boto.dynamodb.layer2.Layer2.create_table>`
-method. While DynamoDB's items (a rough equivalent to a relational DB's row)
-don't have a fixed schema, you do need to create a schema for the table's
-hash key element, and the optional range key element. This is explained in
-greater detail in DynamoDB's `Data Model`_ documentation.
-
-We'll start by defining a schema that has a hash key and a range key that
-are both keys::
-
- >>> message_table_schema = conn.create_schema(
- hash_key_name='forum_name',
- hash_key_proto_value=str,
- range_key_name='subject',
- range_key_proto_value=str
- )
-
-The next few things to determine are table name and read/write throughput. We'll
-defer explaining throughput to the DynamoDB's `Provisioned Throughput`_ docs.
-
-We're now ready to create the table::
-
- >>> table = conn.create_table(
- name='messages',
- schema=message_table_schema,
- read_units=10,
- write_units=10
- )
- >>> table
- Table(messages)
-
-This returns a :py:class:`boto.dynamodb.table.Table` instance, which provides
-simple ways to create (put), update, and delete items.
-
-
-Getting a Table
----------------
-
-To retrieve an existing table, use
-:py:meth:`Layer2.get_table <boto.dynamodb.layer2.Layer2.get_table>`::
-
- >>> conn.list_tables()
- ['test-table', 'another-table', 'messages']
- >>> table = conn.get_table('messages')
- >>> table
- Table(messages)
-
-:py:meth:`Layer2.get_table <boto.dynamodb.layer2.Layer2.get_table>`, like
-:py:meth:`Layer2.create_table <boto.dynamodb.layer2.Layer2.create_table>`,
-returns a :py:class:`boto.dynamodb.table.Table` instance.
-
-Keep in mind that :py:meth:`Layer2.get_table <boto.dynamodb.layer2.Layer2.get_table>`
-will make an API call to retrieve various attributes of the table including the
-creation time, the read and write capacity, and the table schema. If you
-already know the schema, you can save an API call and create a
-:py:class:`boto.dynamodb.table.Table` object without making any calls to
-Amazon DynamoDB::
-
- >>> table = conn.table_from_schema(
- name='messages',
- schema=message_table_schema)
-
-If you do this, the following fields will have ``None`` values:
-
- * create_time
- * status
- * read_units
- * write_units
-
-In addition, the ``item_count`` and ``size_bytes`` will be 0.
-If you create a table object directly from a schema object and
-decide later that you need to retrieve any of these additional
-attributes, you can use the
-:py:meth:`Table.refresh <boto.dynamodb.table.Table.refresh>` method::
-
- >>> from boto.dynamodb.schema import Schema
- >>> table = conn.table_from_schema(
- name='messages',
- schema=Schema.create(hash_key=('forum_name', 'S'),
- range_key=('subject', 'S')))
- >>> print table.write_units
- None
- >>> # Now we decide we need to know the write_units:
- >>> table.refresh()
- >>> print table.write_units
- 10
-
-
-The recommended best practice is to retrieve a table object once and
-use that object for the duration of your application. So, for example,
-instead of this::
-
- class Application(object):
- def __init__(self, layer2):
- self._layer2 = layer2
-
- def retrieve_item(self, table_name, key):
- return self._layer2.get_table(table_name).get_item(key)
-
-You can do something like this instead::
-
- class Application(object):
- def __init__(self, layer2):
- self._layer2 = layer2
- self._tables_by_name = {}
-
- def retrieve_item(self, table_name, key):
- table = self._tables_by_name.get(table_name)
- if table is None:
- table = self._layer2.get_table(table_name)
- self._tables_by_name[table_name] = table
- return table.get_item(key)
-
-
-Describing Tables
------------------
-
-To get a complete description of a table, use
-:py:meth:`Layer2.describe_table <boto.dynamodb.layer2.Layer2.describe_table>`::
-
- >>> conn.list_tables()
- ['test-table', 'another-table', 'messages']
- >>> conn.describe_table('messages')
- {
- 'Table': {
- 'CreationDateTime': 1327117581.624,
- 'ItemCount': 0,
- 'KeySchema': {
- 'HashKeyElement': {
- 'AttributeName': 'forum_name',
- 'AttributeType': 'S'
- },
- 'RangeKeyElement': {
- 'AttributeName': 'subject',
- 'AttributeType': 'S'
- }
- },
- 'ProvisionedThroughput': {
- 'ReadCapacityUnits': 10,
- 'WriteCapacityUnits': 10
- },
- 'TableName': 'messages',
- 'TableSizeBytes': 0,
- 'TableStatus': 'ACTIVE'
- }
- }
-
-
-Adding Items
-------------
-
-Continuing on with our previously created ``messages`` table, adding an::
-
- >>> table = conn.get_table('messages')
- >>> item_data = {
- 'Body': 'http://url_to_lolcat.gif',
- 'SentBy': 'User A',
- 'ReceivedTime': '12/9/2011 11:36:03 PM',
- }
- >>> item = table.new_item(
- # Our hash key is 'forum'
- hash_key='LOLCat Forum',
- # Our range key is 'subject'
- range_key='Check this out!',
- # This has the
- attrs=item_data
- )
-
-The
-:py:meth:`Table.new_item <boto.dynamodb.table.Table.new_item>` method creates
-a new :py:class:`boto.dynamodb.item.Item` instance with your specified
-hash key, range key, and attributes already set.
-:py:class:`Item <boto.dynamodb.item.Item>` is a :py:class:`dict` sub-class,
-meaning you can edit your data as such::
-
- item['a_new_key'] = 'testing'
- del item['a_new_key']
-
-After you are happy with the contents of the item, use
-:py:meth:`Item.put <boto.dynamodb.item.Item.put>` to commit it to DynamoDB::
-
- >>> item.put()
-
-
-Retrieving Items
-----------------
-
-Now, let's check if it got added correctly. Since DynamoDB works under an
-'eventual consistency' mode, we need to specify that we wish a consistent read,
-as follows::
-
- >>> table = conn.get_table('messages')
- >>> item = table.get_item(
- # Your hash key was 'forum_name'
- hash_key='LOLCat Forum',
- # Your range key was 'subject'
- range_key='Check this out!'
- )
- >>> item
- {
- # Note that this was your hash key attribute (forum_name)
- 'forum_name': 'LOLCat Forum',
- # This is your range key attribute (subject)
- 'subject': 'Check this out!'
- 'Body': 'http://url_to_lolcat.gif',
- 'ReceivedTime': '12/9/2011 11:36:03 PM',
- 'SentBy': 'User A',
- }
-
-
-Updating Items
---------------
-
-To update an item's attributes, simply retrieve it, modify the value, then
-:py:meth:`Item.put <boto.dynamodb.item.Item.put>` it again::
-
- >>> table = conn.get_table('messages')
- >>> item = table.get_item(
- hash_key='LOLCat Forum',
- range_key='Check this out!'
- )
- >>> item['SentBy'] = 'User B'
- >>> item.put()
-
-Working with Decimals
----------------------
-
-To avoid the loss of precision, you can stipulate that the
-``decimal.Decimal`` type be used for numeric values::
-
- >>> import decimal
- >>> conn.use_decimals()
- >>> table = conn.get_table('messages')
- >>> item = table.new_item(
- hash_key='LOLCat Forum',
- range_key='Check this out!'
- )
- >>> item['decimal_type'] = decimal.Decimal('1.12345678912345')
- >>> item.put()
- >>> print table.get_item('LOLCat Forum', 'Check this out!')
- {u'forum_name': 'LOLCat Forum', u'decimal_type': Decimal('1.12345678912345'),
- u'subject': 'Check this out!'}
-
-You can enable the usage of ``decimal.Decimal`` by using either the ``use_decimals``
-method, or by passing in the
-:py:class:`Dynamizer <boto.dynamodb.types.Dynamizer>` class for
-the ``dynamizer`` param::
-
- >>> from boto.dynamodb.types import Dynamizer
- >>> conn = boto.connect_dynamodb(dynamizer=Dynamizer)
-
-This mechanism can also be used if you want to customize the encoding/decoding
-process of DynamoDB types.
-
-
-Deleting Items
---------------
-
-To delete items, use the
-:py:meth:`Item.delete <boto.dynamodb.item.Item.delete>` method::
-
- >>> table = conn.get_table('messages')
- >>> item = table.get_item(
- hash_key='LOLCat Forum',
- range_key='Check this out!'
- )
- >>> item.delete()
-
-
-Deleting Tables
----------------
-
-.. WARNING::
- Deleting a table will also **permanently** delete all of its contents without prompt. Use carefully.
-
-There are two easy ways to delete a table. Through your top-level
-:py:class:`Layer2 <boto.dynamodb.layer2.Layer2>` object::
-
- >>> conn.delete_table(table)
-
-Or by getting the table, then using
-:py:meth:`Table.delete <boto.dynamodb.table.Table.delete>`::
-
- >>> table = conn.get_table('messages')
- >>> table.delete()
-
-
-.. _Data Model: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/DataModel.html
-.. _Provisioned Throughput: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html
+.. _dynamodb_tut:
+
+============================================
+An Introduction to boto's DynamoDB interface
+============================================
+
+This tutorial focuses on the boto interface to AWS' DynamoDB_. This tutorial
+assumes that you have boto already downloaded and installed.
+
+.. _DynamoDB: http://aws.amazon.com/dynamodb/
+
+
+Creating a Connection
+---------------------
+
+The first step in accessing DynamoDB is to create a connection to the service.
+To do so, the most straightforward way is the following::
+
+ >>> import boto.dynamodb
+ >>> conn = boto.dynamodb.connect_to_region(
+ 'us-west-2',
+ aws_access_key_id='<YOUR_AWS_KEY_ID>',
+ aws_secret_access_key='<YOUR_AWS_SECRET_KEY>')
+ >>> conn
+ <boto.dynamodb.layer2.Layer2 object at 0x3fb3090>
+
+Bear in mind that if you have your credentials in boto config in your home
+directory, the two keyword arguments in the call above are not needed. More
+details on configuration can be found in :doc:`boto_config_tut`.
+
+The :py:func:`boto.dynamodb.connect_to_region` function returns a
+:py:class:`boto.dynamodb.layer2.Layer2` instance, which is a high-level API
+for working with DynamoDB. Layer2 is a set of abstractions that sit atop
+the lower level :py:class:`boto.dynamodb.layer1.Layer1` API, which closely
+mirrors the Amazon DynamoDB API. For the purpose of this tutorial, we'll
+just be covering Layer2.
+
+
+Listing Tables
+--------------
+
+Now that we have a DynamoDB connection object, we can then query for a list of
+existing tables in that region::
+
+ >>> conn.list_tables()
+ ['test-table', 'another-table']
+
+
+Creating Tables
+---------------
+
+DynamoDB tables are created with the
+:py:meth:`Layer2.create_table <boto.dynamodb.layer2.Layer2.create_table>`
+method. While DynamoDB's items (a rough equivalent to a relational DB's row)
+don't have a fixed schema, you do need to create a schema for the table's
+hash key element, and the optional range key element. This is explained in
+greater detail in DynamoDB's `Data Model`_ documentation.
+
+We'll start by defining a schema that has a hash key and a range key that
+are both strings::
+
+ >>> message_table_schema = conn.create_schema(
+ hash_key_name='forum_name',
+ hash_key_proto_value=str,
+ range_key_name='subject',
+ range_key_proto_value=str
+ )
+
+The next few things to determine are table name and read/write throughput. We'll
+defer explaining throughput to the DynamoDB's `Provisioned Throughput`_ docs.
+
+We're now ready to create the table::
+
+ >>> table = conn.create_table(
+ name='messages',
+ schema=message_table_schema,
+ read_units=10,
+ write_units=10
+ )
+ >>> table
+ Table(messages)
+
+This returns a :py:class:`boto.dynamodb.table.Table` instance, which provides
+simple ways to create (put), update, and delete items.
+
+
+Getting a Table
+---------------
+
+To retrieve an existing table, use
+:py:meth:`Layer2.get_table <boto.dynamodb.layer2.Layer2.get_table>`::
+
+ >>> conn.list_tables()
+ ['test-table', 'another-table', 'messages']
+ >>> table = conn.get_table('messages')
+ >>> table
+ Table(messages)
+
+:py:meth:`Layer2.get_table <boto.dynamodb.layer2.Layer2.get_table>`, like
+:py:meth:`Layer2.create_table <boto.dynamodb.layer2.Layer2.create_table>`,
+returns a :py:class:`boto.dynamodb.table.Table` instance.
+
+Keep in mind that :py:meth:`Layer2.get_table <boto.dynamodb.layer2.Layer2.get_table>`
+will make an API call to retrieve various attributes of the table including the
+creation time, the read and write capacity, and the table schema. If you
+already know the schema, you can save an API call and create a
+:py:class:`boto.dynamodb.table.Table` object without making any calls to
+Amazon DynamoDB::
+
+ >>> table = conn.table_from_schema(
+ name='messages',
+ schema=message_table_schema)
+
+If you do this, the following fields will have ``None`` values:
+
+ * create_time
+ * status
+ * read_units
+ * write_units
+
+In addition, the ``item_count`` and ``size_bytes`` will be 0.
+If you create a table object directly from a schema object and
+decide later that you need to retrieve any of these additional
+attributes, you can use the
+:py:meth:`Table.refresh <boto.dynamodb.table.Table.refresh>` method::
+
+ >>> from boto.dynamodb.schema import Schema
+ >>> table = conn.table_from_schema(
+ name='messages',
+ schema=Schema.create(hash_key=('forum_name', 'S'),
+ range_key=('subject', 'S')))
+ >>> print table.write_units
+ None
+ >>> # Now we decide we need to know the write_units:
+ >>> table.refresh()
+ >>> print table.write_units
+ 10
+
+
+The recommended best practice is to retrieve a table object once and
+use that object for the duration of your application. So, for example,
+instead of this::
+
+ class Application(object):
+ def __init__(self, layer2):
+ self._layer2 = layer2
+
+ def retrieve_item(self, table_name, key):
+ return self._layer2.get_table(table_name).get_item(key)
+
+You can do something like this instead::
+
+ class Application(object):
+ def __init__(self, layer2):
+ self._layer2 = layer2
+ self._tables_by_name = {}
+
+ def retrieve_item(self, table_name, key):
+ table = self._tables_by_name.get(table_name)
+ if table is None:
+ table = self._layer2.get_table(table_name)
+ self._tables_by_name[table_name] = table
+ return table.get_item(key)
+
+
+Describing Tables
+-----------------
+
+To get a complete description of a table, use
+:py:meth:`Layer2.describe_table <boto.dynamodb.layer2.Layer2.describe_table>`::
+
+ >>> conn.list_tables()
+ ['test-table', 'another-table', 'messages']
+ >>> conn.describe_table('messages')
+ {
+ 'Table': {
+ 'CreationDateTime': 1327117581.624,
+ 'ItemCount': 0,
+ 'KeySchema': {
+ 'HashKeyElement': {
+ 'AttributeName': 'forum_name',
+ 'AttributeType': 'S'
+ },
+ 'RangeKeyElement': {
+ 'AttributeName': 'subject',
+ 'AttributeType': 'S'
+ }
+ },
+ 'ProvisionedThroughput': {
+ 'ReadCapacityUnits': 10,
+ 'WriteCapacityUnits': 10
+ },
+ 'TableName': 'messages',
+ 'TableSizeBytes': 0,
+ 'TableStatus': 'ACTIVE'
+ }
+ }
+
+
+Adding Items
+------------
+
+Continuing on with our previously created ``messages`` table, we can add a new item to it::
+
+ >>> table = conn.get_table('messages')
+ >>> item_data = {
+ 'Body': 'http://url_to_lolcat.gif',
+ 'SentBy': 'User A',
+ 'ReceivedTime': '12/9/2011 11:36:03 PM',
+ }
+ >>> item = table.new_item(
+            # Our hash key is 'forum_name'
+ hash_key='LOLCat Forum',
+ # Our range key is 'subject'
+ range_key='Check this out!',
+            # This contains the rest of the item's attributes
+ attrs=item_data
+ )
+
+The
+:py:meth:`Table.new_item <boto.dynamodb.table.Table.new_item>` method creates
+a new :py:class:`boto.dynamodb.item.Item` instance with your specified
+hash key, range key, and attributes already set.
+:py:class:`Item <boto.dynamodb.item.Item>` is a :py:class:`dict` sub-class,
+meaning you can edit your data as such::
+
+ item['a_new_key'] = 'testing'
+ del item['a_new_key']
+
+After you are happy with the contents of the item, use
+:py:meth:`Item.put <boto.dynamodb.item.Item.put>` to commit it to DynamoDB::
+
+ >>> item.put()
+
+
+Retrieving Items
+----------------
+
+Now, let's check if it got added correctly. Since DynamoDB works under an
+'eventual consistency' mode, we need to specify that we wish a consistent read,
+as follows::
+
+ >>> table = conn.get_table('messages')
+ >>> item = table.get_item(
+ # Your hash key was 'forum_name'
+ hash_key='LOLCat Forum',
+ # Your range key was 'subject'
+            range_key='Check this out!',
+            # Ask for a strongly consistent read, as described above
+            consistent_read=True
+ )
+ >>> item
+ {
+ # Note that this was your hash key attribute (forum_name)
+ 'forum_name': 'LOLCat Forum',
+ # This is your range key attribute (subject)
+        'subject': 'Check this out!',
+ 'Body': 'http://url_to_lolcat.gif',
+ 'ReceivedTime': '12/9/2011 11:36:03 PM',
+ 'SentBy': 'User A',
+ }
+
+
+Updating Items
+--------------
+
+To update an item's attributes, simply retrieve it, modify the value, then
+:py:meth:`Item.put <boto.dynamodb.item.Item.put>` it again::
+
+ >>> table = conn.get_table('messages')
+ >>> item = table.get_item(
+ hash_key='LOLCat Forum',
+ range_key='Check this out!'
+ )
+ >>> item['SentBy'] = 'User B'
+ >>> item.put()
+
+Working with Decimals
+---------------------
+
+To avoid the loss of precision, you can stipulate that the
+``decimal.Decimal`` type be used for numeric values::
+
+ >>> import decimal
+ >>> conn.use_decimals()
+ >>> table = conn.get_table('messages')
+ >>> item = table.new_item(
+ hash_key='LOLCat Forum',
+ range_key='Check this out!'
+ )
+ >>> item['decimal_type'] = decimal.Decimal('1.12345678912345')
+ >>> item.put()
+ >>> print table.get_item('LOLCat Forum', 'Check this out!')
+ {u'forum_name': 'LOLCat Forum', u'decimal_type': Decimal('1.12345678912345'),
+ u'subject': 'Check this out!'}
+
+You can enable the usage of ``decimal.Decimal`` by using either the ``use_decimals``
+method, or by passing in the
+:py:class:`Dynamizer <boto.dynamodb.types.Dynamizer>` class for
+the ``dynamizer`` param::
+
+ >>> from boto.dynamodb.types import Dynamizer
+    >>> conn = boto.dynamodb.connect_to_region(
+    ...     'us-west-2', dynamizer=Dynamizer)
+
+This mechanism can also be used if you want to customize the encoding/decoding
+process of DynamoDB types.
+
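+For example, a minimal sketch of a custom dynamizer (the class name and the
+printing behaviour are purely illustrative)::
+
+    >>> from boto.dynamodb.types import Dynamizer
+    >>>
+    >>> class VerboseDynamizer(Dynamizer):
+    ...     """Print every value before handing it to the standard encoder."""
+    ...     def encode(self, attr):
+    ...         print 'encoding', attr
+    ...         return super(VerboseDynamizer, self).encode(attr)
+    ...
+    >>> conn = boto.dynamodb.connect_to_region(
+    ...     'us-west-2', dynamizer=VerboseDynamizer)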
+
+Deleting Items
+--------------
+
+To delete items, use the
+:py:meth:`Item.delete <boto.dynamodb.item.Item.delete>` method::
+
+ >>> table = conn.get_table('messages')
+ >>> item = table.get_item(
+ hash_key='LOLCat Forum',
+ range_key='Check this out!'
+ )
+ >>> item.delete()
+
+
+Deleting Tables
+---------------
+
+.. WARNING::
+ Deleting a table will also **permanently** delete all of its contents without prompt. Use carefully.
+
+There are two easy ways to delete a table. Through your top-level
+:py:class:`Layer2 <boto.dynamodb.layer2.Layer2>` object::
+
+ >>> conn.delete_table(table)
+
+Or by getting the table, then using
+:py:meth:`Table.delete <boto.dynamodb.table.Table.delete>`::
+
+ >>> table = conn.get_table('messages')
+ >>> table.delete()
+
+
+.. _Data Model: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/DataModel.html
+.. _Provisioned Throughput: http://docs.amazonwebservices.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html
diff --git a/docs/source/ec2_tut.rst b/docs/source/ec2_tut.rst
index f8614dbe..d9ffe38c 100644
--- a/docs/source/ec2_tut.rst
+++ b/docs/source/ec2_tut.rst
@@ -12,23 +12,19 @@ Creating a Connection
---------------------
The first step in accessing EC2 is to create a connection to the service.
-There are two ways to do this in boto. The first is::
+The recommended way of doing this in boto is::
- >>> from boto.ec2.connection import EC2Connection
- >>> conn = EC2Connection('<AWS_ACCESS_KEY_ID>', '<AWS_SECRET_ACCESS_KEY>')
+ >>> import boto.ec2
+ >>> conn = boto.ec2.connect_to_region("us-west-2",
+ ... aws_access_key_id='<aws access key>',
+ ... aws_secret_access_key='<aws secret key>')
-At this point the variable conn will point to an EC2Connection object. In
-this example, the AWS access key and AWS secret key are passed in to the
-method explicitely. Alternatively, you can set the boto config environment variables
-and then call the constructor without any arguments, like this::
+At this point the variable ``conn`` will point to an EC2Connection object. In
+this example, the AWS access key and AWS secret key are passed in to the method
+explicitly. Alternatively, you can set the boto config environment variables
+and then simply specify which region you want as follows::
- >>> conn = EC2Connection()
-
-There is also a shortcut function in the boto package, called connect_ec2
-that may provide a slightly easier means of creating a connection::
-
- >>> import boto
- >>> conn = boto.connect_ec2()
+ >>> conn = boto.ec2.connect_to_region("us-west-2")
In either case, conn will point to an EC2Connection object which we will
use throughout the remainder of this tutorial.
@@ -41,7 +37,7 @@ stop and terminate instances. In its most primitive form, you can launch an
instance as follows::
>>> conn.run_instances('<ami-image-id>')
-
+
This will launch an instance in the specified region with the default parameters.
You will not be able to SSH into this machine, as it doesn't have a security
group set. See :doc:`security_groups` for details on creating one.
@@ -88,3 +84,95 @@ you can request instance termination. To do so you can use the call bellow::
Please use with care since once you request termination for an instance there
is no turning back.
+Checking What Instances Are Running
+-----------------------------------
+You can also get information on your currently running instances::
+
+ >>> reservations = conn.get_all_instances()
+ >>> reservations
+ [Reservation:r-00000000]
+
+A reservation corresponds to a command to start instances. You can see what
+instances are associated with a reservation::
+
+ >>> instances = reservations[0].instances
+ >>> instances
+ [Instance:i-00000000]
+
+An instance object allows you to get more metadata about the instance::
+
+ >>> inst = instances[0]
+ >>> inst.instance_type
+ u'c1.xlarge'
+ >>> inst.placement
+ u'us-west-2'
+
+In this case, we can see that our instance is a c1.xlarge instance in the
+`us-west-2` availability zone.
+
+=================================
+Using Elastic Block Storage (EBS)
+=================================
+
+
+EBS Basics
+----------
+
+EBS can be used by EC2 instances for permanent storage. Note that EBS volumes
+must be in the same availability zone as the EC2 instance you wish to attach
+them to.
+
+To actually create a volume you will need to specify a few details. The
+following example will create a 50GB EBS in one of the `us-west-2` availability
+zones::
+
+ >>> vol = conn.create_volume(50, "us-west-2")
+ >>> vol
+ Volume:vol-00000000
+
+You can check that the volume is now ready and available::
+
+ >>> curr_vol = conn.get_all_volumes([vol.id])[0]
+ >>> curr_vol.status
+ u'available'
+ >>> curr_vol.zone
+ u'us-west-2'
+
+We can now attach this volume to the EC2 instance we created earlier, making it
+available as a new device::
+
+    >>> conn.attach_volume(vol.id, inst.id, "/dev/sdx")
+ u'attaching'
+
+You will now have a new volume attached to your instance. Note that with some
+Linux kernels, `/dev/sdx` may get translated to `/dev/xvdx`. This device can
+now be used as a normal block device within Linux.
+
+Working With Snapshots
+----------------------
+
+Snapshots allow you to make point-in-time snapshots of an EBS volume for future
+recovery. They allow you to create incremental backups, and can also be
+used to instantiate multiple new volumes. Snapshots can also be used to move
+EBS volumes across availability zones or to make backups to S3.
+
+Creating a snapshot is easy::
+
+ >>> snapshot = conn.create_snapshot(vol.id, 'My snapshot')
+ >>> snapshot
+ Snapshot:snap-00000000
+
+Once you have a snapshot, you can create a new volume from it. Volumes are
+created lazily from snapshots, which means you can start using such a volume
+straight away::
+
+ >>> new_vol = snapshot.create_volume('us-west-2')
+    >>> conn.attach_volume(new_vol.id, inst.id, "/dev/sdy")
+ u'attaching'
+
+If you no longer need a snapshot, you can also easily delete it::
+
+ >>> conn.delete_snapshot(snapshot.id)
+ True
+
+
diff --git a/docs/source/elb_tut.rst b/docs/source/elb_tut.rst
index 10d3ca29..4d5661c4 100644
--- a/docs/source/elb_tut.rst
+++ b/docs/source/elb_tut.rst
@@ -43,48 +43,27 @@ Creating a Connection
The first step in accessing ELB is to create a connection to the service.
->>> import boto
->>> conn = boto.connect_elb(
- aws_access_key_id='YOUR-KEY-ID-HERE',
- aws_secret_access_key='YOUR-SECRET-HERE'
- )
-
-
-A Note About Regions and Endpoints
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Like EC2, the ELB service has a different endpoint for each region. By default
-the US East endpoint is used. To choose a specific region, instantiate the
-ELBConnection object with that region's information.
-
->>> from boto.regioninfo import RegionInfo
->>> reg = RegionInfo(
- name='eu-west-1',
- endpoint='elasticloadbalancing.eu-west-1.amazonaws.com'
- )
->>> conn = boto.connect_elb(
- aws_access_key_id='YOUR-KEY-ID-HERE',
- aws_secret_access_key='YOUR-SECRET-HERE',
- region=reg
- )
-
-Another way to connect to an alternative region is like this:
+the US East endpoint is used. To choose a specific region, use the
+``connect_to_region`` function::
->>> import boto.ec2.elb
->>> elb = boto.ec2.elb.connect_to_region('eu-west-1')
+ >>> import boto.ec2.elb
+ >>> elb = boto.ec2.elb.connect_to_region('us-west-2')
Here's yet another way to discover what regions are available and then
-connect to one:
-
->>> import boto.ec2.elb
->>> regions = boto.ec2.elb.regions()
->>> regions
-[RegionInfo:us-east-1,
- RegionInfo:ap-northeast-1,
- RegionInfo:us-west-1,
- RegionInfo:ap-southeast-1,
- RegionInfo:eu-west-1]
->>> elb = regions[-1].connect()
+connect to one::
+
+ >>> import boto.ec2.elb
+ >>> regions = boto.ec2.elb.regions()
+ >>> regions
+ [RegionInfo:us-east-1,
+ RegionInfo:ap-northeast-1,
+ RegionInfo:us-west-1,
+ RegionInfo:us-west-2,
+ RegionInfo:ap-southeast-1,
+ RegionInfo:eu-west-1]
+ >>> elb = regions[-1].connect()
Alternatively, edit your boto.cfg with the default ELB endpoint to use::
@@ -194,9 +173,9 @@ Finally, let's create a load balancer in the US region that listens on ports
and TCP. We want the load balancer to span the availability zones
*us-east-1a* and *us-east-1b*:
->>> regions = ['us-east-1a', 'us-east-1b']
+>>> zones = ['us-east-1a', 'us-east-1b']
>>> ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
->>> lb = conn.create_load_balancer('my-lb', regions, ports)
+>>> lb = conn.create_load_balancer('my-lb', zones, ports)
>>> # This is from the previous section.
>>> lb.configure_health_check(hc)
diff --git a/docs/source/emr_tut.rst b/docs/source/emr_tut.rst
index 996781ee..c42d188f 100644
--- a/docs/source/emr_tut.rst
+++ b/docs/source/emr_tut.rst
@@ -27,18 +27,18 @@ and then call the constructor without any arguments, like this:
>>> conn = EmrConnection()
-There is also a shortcut function in the boto package called connect_emr
-that may provide a slightly easier means of creating a connection:
+There is also a shortcut function in boto
+that makes it easy to create EMR connections:
->>> import boto
->>> conn = boto.connect_emr()
+>>> import boto.emr
+>>> conn = boto.emr.connect_to_region('us-west-2')
In either case, conn points to an EmrConnection object which we will use
throughout the remainder of this tutorial.
Creating Streaming JobFlow Steps
--------------------------------
-Upon creating a connection to Elastic Mapreduce you will next
+Upon creating a connection to Elastic MapReduce, you will next
want to create one or more jobflow steps. There are two types of steps, streaming
and custom jar, both of which have a class in the boto Elastic Mapreduce implementation.
@@ -76,8 +76,8 @@ Creating JobFlows
-----------------
Once you have created one or more jobflow steps, you will next want to create and run a jobflow. Creating a jobflow that executes either of the steps we created above can be accomplished by:
->>> import boto
->>> conn = boto.connect_emr()
+>>> import boto.emr
+>>> conn = boto.emr.connect_to_region('us-west-2')
>>> jobid = conn.run_jobflow(name='My jobflow',
... log_uri='s3://<my log uri>/jobflow_logs',
... steps=[step])
@@ -102,7 +102,6 @@ Terminating JobFlows
--------------------
By default when all the steps of a jobflow have finished or failed the jobflow terminates. However, if you set the keep_alive parameter to True or just want to halt the execution of a jobflow early you can terminate a jobflow by:
->>> import boto
->>> conn = boto.connect_emr()
+>>> import boto.emr
+>>> conn = boto.emr.connect_to_region('us-west-2')
>>> conn.terminate_jobflow('<jobflow id>')
-
diff --git a/docs/source/getting_started.rst b/docs/source/getting_started.rst
new file mode 100644
index 00000000..ab8e306f
--- /dev/null
+++ b/docs/source/getting_started.rst
@@ -0,0 +1,177 @@
+.. _getting-started:
+
+=========================
+Getting Started with Boto
+=========================
+
+This tutorial will walk you through installing and configuring ``boto``, as
+well as how to use it to make API calls.
+
+This tutorial assumes you are familiar with Python and that you have registered
+for an `Amazon Web Services`_ account. You'll need to retrieve your
+``Access Key ID`` and ``Secret Access Key`` from the web-based console.
+
+.. _`Amazon Web Services`: https://aws.amazon.com/
+
+
+Installing Boto
+---------------
+
+You can use ``pip`` to install the latest released version of ``boto``::
+
+ pip install boto
+
+If you want to install ``boto`` from source::
+
+ git clone git://github.com/boto/boto.git
+ cd boto
+ python setup.py install
+
+
+Using Virtual Environments
+--------------------------
+
+Another common way to install ``boto`` is to use a ``virtualenv``, which
+provides isolated environments. First, install the ``virtualenv`` Python
+package::
+
+ pip install virtualenv
+
+Next, create a virtual environment by using the ``virtualenv`` command and
+specifying where you want the virtualenv to be created (you can specify
+any directory you like, though this example allows for compatibility with
+``virtualenvwrapper``)::
+
+ mkdir ~/.virtualenvs
+ virtualenv ~/.virtualenvs/boto
+
+You can now activate the virtual environment::
+
+ source ~/.virtualenvs/boto/bin/activate
+
+Now, any usage of ``python`` or ``pip`` (within the current shell) will default
+to the new, isolated version within your virtualenv.
+
+You can now install ``boto`` into this virtual environment::
+
+ pip install boto
+
+When you are done using ``boto``, you can deactivate your virtual environment::
+
+ deactivate
+
+If you are creating a lot of virtual environments, `virtualenvwrapper`_
+is an excellent tool that lets you easily manage your virtual environments.
+
+.. _`virtualenvwrapper`: http://virtualenvwrapper.readthedocs.org/en/latest/
+
+
+Configuring Boto Credentials
+----------------------------
+
+You have a few options for configuring ``boto`` (see :doc:`boto_config_tut`).
+For this tutorial, we'll be using a configuration file. First, create a
+``~/.boto`` file with these contents::
+
+ [Credentials]
+ aws_access_key_id = YOURACCESSKEY
+ aws_secret_access_key = YOURSECRETKEY
+
+``boto`` supports a number of configuration values. For more information,
+see :doc:`boto_config_tut`. The above file, however, is all we need for now.
+You're now ready to use ``boto``.
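+
+If you want to tweak more than credentials, the same ``~/.boto`` file can hold
+additional sections. As a quick sketch (the values below are only examples;
+``debug`` and ``num_retries`` are two of the supported ``[Boto]`` options)::
+
+ [Credentials]
+ aws_access_key_id = YOURACCESSKEY
+ aws_secret_access_key = YOURSECRETKEY
+
+ [Boto]
+ debug = 2
+ num_retries = 3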
+
+
+Making Connections
+------------------
+
+``boto`` provides a number of convenience functions to simplify connecting to a
+service. For example, to work with S3, you can run::
+
+ >>> import boto
+ >>> s3 = boto.connect_s3()
+
+If you want to connect to a different region, you can import the service module
+and use the ``connect_to_region`` function. For example, to create an EC2
+client in the 'us-west-2' region, you'd run the following::
+
+ >>> import boto.ec2
+ >>> ec2 = boto.ec2.connect_to_region('us-west-2')
+
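+If you're not sure which region names are valid for a service, you can ask
+``boto`` for its known regions. A small illustration (the exact list depends
+on your installed version of ``boto``)::
+
+ >>> import boto.ec2
+ >>> [r.name for r in boto.ec2.regions()]
+ ['us-east-1', 'us-west-2', ...]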
+
+Troubleshooting Connections
+---------------------------
+
+When calling the various ``connect_*`` functions, you might run into an error
+like this::
+
+ >>> import boto
+ >>> s3 = boto.connect_s3()
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ File "boto/__init__.py", line 121, in connect_s3
+ return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+ File "boto/s3/connection.py", line 171, in __init__
+ validate_certs=validate_certs)
+ File "boto/connection.py", line 548, in __init__
+ host, config, self.provider, self._required_auth_capability())
+ File "boto/auth.py", line 668, in get_auth_handler
+ 'Check your credentials' % (len(names), str(names)))
+ boto.exception.NoAuthHandlerFound: No handler was ready to authenticate. 1 handlers were checked. ['HmacAuthV1Handler'] Check your credentials
+
+This is because ``boto`` cannot find credentials to use. Verify that you have
+created a ``~/.boto`` file as shown above. You can also turn on debug logging
+to verify where your credentials are coming from::
+
+ >>> import boto
+ >>> boto.set_stream_logger('boto')
+ >>> s3 = boto.connect_s3()
+ 2012-12-10 17:15:03,799 boto [DEBUG]:Using access key found in config file.
+ 2012-12-10 17:15:03,799 boto [DEBUG]:Using secret key found in config file.
+
+
+Interacting with AWS Services
+-----------------------------
+
+Once you have a client for the specific service you want, there are methods on
+that object that will invoke API operations for that service. The following
+code demonstrates how to create a bucket and put an object in that bucket::
+
+ >>> import boto
+ >>> import time
+ >>> s3 = boto.connect_s3()
+
+ # Create a new bucket. Buckets must have a globally unique name (not just
+ # unique to your account).
+ >>> bucket = s3.create_bucket('boto-demo-%s' % int(time.time()))
+
+ # Create a new key/value pair.
+ >>> key = bucket.new_key('mykey')
+ >>> key.set_contents_from_string("Hello World!")
+
+ # Sleep to ensure the data is eventually there.
+ >>> time.sleep(2)
+
+ # Retrieve the contents of ``mykey``.
+ >>> print key.get_contents_as_string()
+ 'Hello World!'
+
+ # Delete the key.
+ >>> key.delete()
+ # Delete the bucket.
+ >>> bucket.delete()
+
+Each service supports a different set of commands. You'll want to refer to the
+other guides & API references in this documentation, as well as to
+the `official AWS API`_ documentation.
+
+.. _`official AWS API`: https://aws.amazon.com/documentation/
+
+Next Steps
+----------
+
+For many of the services that ``boto`` supports, there are tutorials as
+well as detailed API documentation. If you are interested in a specific
+service, the tutorial for the service is a good starting point. For instance,
+if you'd like more information on S3, check out the :ref:`S3 Tutorial <s3_tut>`
+and the :doc:`S3 API reference <ref/s3>`.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 17777244..090de3b6 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -9,6 +9,13 @@ offered by `Amazon Web Services`_.
.. _Amazon Web Services: http://aws.amazon.com/
+Getting Started
+---------------
+
+If you've never used ``boto`` before, you should read the
+:doc:`Getting Started with Boto <getting_started>` guide to get familiar
+with ``boto`` & its usage.
+
Currently Supported Services
----------------------------
@@ -28,8 +35,10 @@ Currently Supported Services
* :doc:`SimpleDB <simpledb_tut>` -- (:doc:`API Reference <ref/sdb>`)
* :doc:`DynamoDB <dynamodb_tut>` -- (:doc:`API Reference <ref/dynamodb>`)
- * Relational Data Services (RDS) -- (:doc:`API Reference <ref/rds>`)
+ * DynamoDB2 -- (:doc:`API Reference <ref/dynamodb2>`)
+ * :doc:`Relational Data Services (RDS) <rds_tut>` -- (:doc:`API Reference <ref/rds>`)
* ElastiCache -- (:doc:`API Reference <ref/elasticache>`)
+ * Redshift -- (:doc:`API Reference <ref/redshift>`)
* **Deployment and Management**
@@ -97,6 +106,7 @@ Additional Resources
.. toctree::
:hidden:
+ getting_started
ec2_tut
security_groups
ref/ec2
@@ -111,6 +121,7 @@ Additional Resources
ref/sdb_db
dynamodb_tut
ref/dynamodb
+ rds_tut
ref/rds
ref/cloudformation
ref/iam
@@ -136,6 +147,12 @@ Additional Resources
boto_config_tut
ref/index
documentation
+ contributing
+ ref/datapipeline
+ ref/elasticache
+ ref/elastictranscoder
+ ref/redshift
+ ref/dynamodb2
Indices and tables
diff --git a/docs/source/rds_tut.rst b/docs/source/rds_tut.rst
new file mode 100644
index 00000000..6955cbe3
--- /dev/null
+++ b/docs/source/rds_tut.rst
@@ -0,0 +1,108 @@
+.. _rds_tut:
+
+=======================================
+An Introduction to boto's RDS interface
+=======================================
+
+This tutorial focuses on the boto interface to the Relational Database Service
+from Amazon Web Services. This tutorial assumes that you have boto already
+downloaded and installed, and that you wish to set up a MySQL instance in RDS.
+
+Creating a Connection
+---------------------
+The first step in accessing RDS is to create a connection to the service.
+The recommended method of doing this is as follows::
+
+ >>> import boto.rds
+ >>> conn = boto.rds.connect_to_region(
+ ... "us-west-2",
+ ... aws_access_key_id='<aws access key>',
+ ... aws_secret_access_key='<aws secret key>')
+
+At this point the variable conn will point to an RDSConnection object in the
+US-WEST-2 region. Bear in mind that, just as with any other AWS service, RDS is
+region-specific. In this example, the AWS access key and AWS secret key are
+passed in to the method explicitly. Alternatively, you can set the environment
+variables:
+
+* ``AWS_ACCESS_KEY_ID`` - Your AWS Access Key ID
+* ``AWS_SECRET_ACCESS_KEY`` - Your AWS Secret Access Key
+
+and then simply call::
+
+ >>> import boto.rds
+ >>> conn = boto.rds.connect_to_region("us-west-2")
+
+In either case, conn will point to an RDSConnection object which we will
+use throughout the remainder of this tutorial.
+
+Starting an RDS Instance
+------------------------
+
+Creating a DB instance is easy. You can do so as follows::
+
+ >>> db = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
+
+This example would create a DB identified as ``db-master-1`` with 10GB of
+storage. The instance would run on the ``db.m1.small`` instance type, with the login
+name ``root`` and the password ``hunter2``.
+
+To check on the status of your RDS instance, you will have to query the RDS connection again::
+
+ >>> instances = conn.get_all_dbinstances("db-master-1")
+ >>> instances
+ [DBInstance:db-master-1]
+ >>> db = instances[0]
+ >>> db.status
+ u'available'
+ >>> db.endpoint
+ (u'db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)
+
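+A freshly created instance can take several minutes to become available, so you
+may want to poll until it is ready. This is just a sketch; the 30 second sleep
+interval is arbitrary::
+
+ >>> import time
+ >>> db = conn.get_all_dbinstances("db-master-1")[0]
+ >>> while db.status != 'available':
+ ...     time.sleep(30)
+ ...     db = conn.get_all_dbinstances("db-master-1")[0]
+ ...
+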
+Creating a Security Group
+-------------------------
+
+Before you can actually connect to this RDS service, you must first
+create a security group. You can add a CIDR range or an :py:class:`EC2 security
+group <boto.ec2.securitygroup.SecurityGroup>` to your :py:class:`DB security
+group <boto.rds.dbsecuritygroup.DBSecurityGroup>` ::
+
+ >>> sg = conn.create_dbsecurity_group('web_servers', 'Web front-ends')
+ >>> sg.authorize(cidr_ip='10.3.2.45/32')
+ True
+
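+If you would rather grant access to an EC2 security group than to a CIDR range,
+``authorize`` also accepts an EC2 group. A sketch, assuming ``ec2_sg`` is a
+:py:class:`boto.ec2.securitygroup.SecurityGroup` you have already fetched::
+
+ >>> sg.authorize(ec2_group=ec2_sg)
+ True
+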
+You can then associate this security group with your RDS instance::
+
+ >>> db.modify(security_groups=[sg])
+
+
+Connecting to your New Database
+-------------------------------
+
+Once you have reached this step, you can connect to your RDS instance as you
+would with any other MySQL instance::
+
+ >>> db.endpoint
+ (u'db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)
+
+ % mysql -h db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com -u root -phunter2
+ mysql>
+
+
+Making a backup
+---------------
+
+You can also create snapshots of your database very easily::
+
+ >>> db.snapshot('db-master-1-2013-02-05')
+ DBSnapshot:db-master-1-2013-02-05
+
+
+Once this snapshot is complete, you can create a new database instance from
+it::
+
+ >>> db2 = conn.restore_dbinstance_from_dbsnapshot(
+ ... 'db-master-1-2013-02-05',
+ ... 'db-restored-1',
+ ... 'db.m1.small',
+ ... 'us-west-2')
+
diff --git a/docs/source/ref/cloudsearch.rst b/docs/source/ref/cloudsearch.rst
index 14671ee5..1610200a 100644
--- a/docs/source/ref/cloudsearch.rst
+++ b/docs/source/ref/cloudsearch.rst
@@ -7,7 +7,7 @@ Cloudsearch
boto.cloudsearch
----------------
-.. automodule:: boto.swf
+.. automodule:: boto.cloudsearch
:members:
:undoc-members:
diff --git a/docs/source/ref/dynamodb2.rst b/docs/source/ref/dynamodb2.rst
new file mode 100644
index 00000000..cfd1b6a1
--- /dev/null
+++ b/docs/source/ref/dynamodb2.rst
@@ -0,0 +1,26 @@
+.. _ref-dynamodb2:
+
+=========
+DynamoDB2
+=========
+
+boto.dynamodb2
+--------------
+
+.. automodule:: boto.dynamodb2
+ :members:
+ :undoc-members:
+
+boto.dynamodb2.layer1
+---------------------
+
+.. automodule:: boto.dynamodb2.layer1
+ :members:
+ :undoc-members:
+
+boto.dynamodb2.exceptions
+-------------------------
+
+.. automodule:: boto.dynamodb2.exceptions
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/index.rst b/docs/source/ref/index.rst
index d01b0909..3def15d7 100644
--- a/docs/source/ref/index.rst
+++ b/docs/source/ref/index.rst
@@ -27,6 +27,7 @@ API Reference
mws
pyami
rds
+ redshift
route53
s3
sdb
diff --git a/docs/source/ref/redshift.rst b/docs/source/ref/redshift.rst
new file mode 100644
index 00000000..b3d84636
--- /dev/null
+++ b/docs/source/ref/redshift.rst
@@ -0,0 +1,26 @@
+.. _ref-redshift:
+
+========
+Redshift
+========
+
+boto.redshift
+-------------
+
+.. automodule:: boto.redshift
+ :members:
+ :undoc-members:
+
+boto.redshift.layer1
+--------------------
+
+.. automodule:: boto.redshift.layer1
+ :members:
+ :undoc-members:
+
+boto.redshift.exceptions
+------------------------
+
+.. automodule:: boto.redshift.exceptions
+ :members:
+ :undoc-members:
diff --git a/docs/source/s3_tut.rst b/docs/source/s3_tut.rst
index 47841256..fc75e108 100644
--- a/docs/source/s3_tut.rst
+++ b/docs/source/s3_tut.rst
@@ -28,10 +28,10 @@ and then call the constructor without any arguments, like this:
>>> conn = S3Connection()
There is also a shortcut function in the boto package, called connect_s3
-that may provide a slightly easier means of creating a connection:
+that may provide a slightly easier means of creating a connection::
->>> import boto
->>> conn = boto.connect_s3()
+ >>> import boto
+ >>> conn = boto.connect_s3()
In either case, conn will point to an S3Connection object which we will
use throughout the remainder of this tutorial.
@@ -44,14 +44,14 @@ create a bucket. A bucket is a container used to store key/value pairs
in S3. A bucket can hold an unlimited amount of data so you could potentially
have just one bucket in S3 for all of your information. Or, you could create
separate buckets for different types of data. You can figure all of that out
-later, first let's just create a bucket. That can be accomplished like this:
+later; first, let's just create a bucket. That can be accomplished like this::
->>> bucket = conn.create_bucket('mybucket')
-Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- File "boto/connection.py", line 285, in create_bucket
- raise S3CreateError(response.status, response.reason)
-boto.exception.S3CreateError: S3Error[409]: Conflict
+ >>> bucket = conn.create_bucket('mybucket')
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ File "boto/connection.py", line 285, in create_bucket
+ raise S3CreateError(response.status, response.reason)
+ boto.exception.S3CreateError: S3Error[409]: Conflict
Whoa. What happened there? Well, the thing you have to know about
buckets is that they are kind of like domain names. It's one flat name
@@ -72,21 +72,26 @@ Creating a Bucket In Another Location
The example above assumes that you want to create a bucket in the
standard US region. However, it is possible to create buckets in
other locations. To do so, first import the Location object from the
-boto.s3.connection module, like this:
-
->>> from boto.s3.connection import Location
->>> dir(Location)
-['DEFAULT', 'EU', 'USWest', 'APSoutheast', '__doc__', '__module__']
->>>
-
-As you can see, the Location object defines three possible locations;
-DEFAULT, EU, USWest, and APSoutheast. By default, the location is the
-empty string which is interpreted as the US Classic Region, the
-original S3 region. However, by specifying another location at the
-time the bucket is created, you can instruct S3 to create the bucket
-in that location. For example:
-
->>> conn.create_bucket('mybucket', location=Location.EU)
+boto.s3.connection module, like this::
+
+ >>> from boto.s3.connection import Location
+ >>> print '\n'.join(i for i in dir(Location) if i[0].isupper())
+ APNortheast
+ APSoutheast
+ APSoutheast2
+ DEFAULT
+ EU
+ SAEast
+ USWest
+ USWest2
+
+As you can see, the Location object defines a number of possible locations. By
+default, the location is the empty string, which is interpreted as the US
+Classic Region, the original S3 region. However, by specifying another
+location at the time the bucket is created, you can instruct S3 to create the
+bucket in that location. For example::
+
+ >>> conn.create_bucket('mybucket', location=Location.EU)
will create the bucket in the EU region (assuming the name is available).
@@ -99,34 +104,36 @@ or what format you use to store it. All you need is a key that is unique
within your bucket.
The Key object is used in boto to keep track of data stored in S3. To store
-new data in S3, start by creating a new Key object:
+new data in S3, start by creating a new Key object::
->>> from boto.s3.key import Key
->>> k = Key(bucket)
->>> k.key = 'foobar'
->>> k.set_contents_from_string('This is a test of S3')
+ >>> from boto.s3.key import Key
+ >>> k = Key(bucket)
+ >>> k.key = 'foobar'
+ >>> k.set_contents_from_string('This is a test of S3')
The net effect of these statements is to create a new object in S3 with a
key of "foobar" and a value of "This is a test of S3". To validate that
-this worked, quit out of the interpreter and start it up again. Then:
+this worked, quit out of the interpreter and start it up again. Then::
->>> import boto
->>> c = boto.connect_s3()
->>> b = c.create_bucket('mybucket') # substitute your bucket name here
->>> from boto.s3.key import Key
->>> k = Key(b)
->>> k.key = 'foobar'
->>> k.get_contents_as_string()
-'This is a test of S3'
+ >>> import boto
+ >>> c = boto.connect_s3()
+ >>> b = c.create_bucket('mybucket') # substitute your bucket name here
+ >>> from boto.s3.key import Key
+ >>> k = Key(b)
+ >>> k.key = 'foobar'
+ >>> k.get_contents_as_string()
+ 'This is a test of S3'
So, we can definitely store and retrieve strings. A more interesting
example may be to store the contents of a local file in S3 and then retrieve
the contents to another local file.
->>> k = Key(b)
->>> k.key = 'myfile'
->>> k.set_contents_from_filename('foo.jpg')
->>> k.get_contents_to_filename('bar.jpg')
+::
+
+ >>> k = Key(b)
+ >>> k.key = 'myfile'
+ >>> k.set_contents_from_filename('foo.jpg')
+ >>> k.get_contents_to_filename('bar.jpg')
There are a couple of things to note about this. When you send data to
S3 from a file or filename, boto will attempt to determine the correct
@@ -136,24 +143,77 @@ guessing. The other thing to note is that boto does stream the content
to and from S3 so you should be able to send and receive large files without
any problem.
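+
+If you need to override the guessed content type, the ``set_contents_from_*``
+methods accept a ``headers`` argument. A short sketch (the file name and
+content type here are only examples)::
+
+ >>> k = Key(b)
+ >>> k.key = 'myfile'
+ >>> k.set_contents_from_filename('foo.jpg',
+ ...     headers={'Content-Type': 'image/jpeg'})
+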
+Accessing A Bucket
+------------------
+
+Once a bucket exists, you can access it by getting the bucket. For example::
+
+ >>> mybucket = conn.get_bucket('mybucket') # Substitute in your bucket name
+ >>> mybucket.list()
+ <listing of keys in the bucket>
+
+By default, this method tries to validate the bucket's existence. You can
+override this behavior by passing ``validate=False``::
+
+ >>> nonexistent = conn.get_bucket('i-dont-exist-at-all', validate=False)
+
+If the bucket does not exist, an ``S3ResponseError`` will commonly be thrown. If
+you'd rather not deal with any exceptions, you can use the ``lookup`` method::
+
+ >>> nonexistent = conn.lookup('i-dont-exist-at-all')
+ >>> if nonexistent is None:
+ ... print "No such bucket!"
+ ...
+ No such bucket!
+
+Deleting A Bucket
+-----------------
+
+Removing a bucket can be done using the ``delete_bucket`` method. For example::
+
+ >>> conn.delete_bucket('mybucket') # Substitute in your bucket name
+
+The bucket must be empty of keys or this call will fail & an exception will be
+raised. You can remove a non-empty bucket by doing something like::
+
+ >>> full_bucket = conn.get_bucket('bucket-to-delete')
+ # It's full of keys. Delete them all.
+ >>> for key in full_bucket.list():
+ ... key.delete()
+ ...
+ # The bucket is empty now. Delete it.
+ >>> conn.delete_bucket('bucket-to-delete')
+
+.. warning::
+
+ This method can cause data loss! Be very careful when using it.
+
+ Additionally, be aware that using the above method for removing all keys
+ and deleting the bucket involves a request for each key. As such, it's not
+ particularly fast & is very chatty.
+
Listing All Available Buckets
-----------------------------
In addition to accessing specific buckets via the create_bucket method
you can also get a list of all available buckets that you have created.
->>> rs = conn.get_all_buckets()
+::
+
+ >>> rs = conn.get_all_buckets()
This returns a ResultSet object (see the SQS Tutorial for more info on
ResultSet objects). The ResultSet can be used as a sequence or list type
object to retrieve Bucket objects.
->>> len(rs)
-11
->>> for b in rs:
-... print b.name
-...
-<listing of available buckets>
->>> b = rs[0]
+::
+
+ >>> len(rs)
+ 11
+ >>> for b in rs:
+ ... print b.name
+ ...
+ <listing of available buckets>
+ >>> b = rs[0]
Setting / Getting the Access Control List for Buckets and Keys
--------------------------------------------------------------
@@ -195,17 +255,19 @@ You can also retrieve the current ACL for a Bucket or Key object using the
get_acl method, which parses the AccessControlPolicy response sent
by S3 and creates a set of Python objects that represent the ACL.
->>> acp = b.get_acl()
->>> acp
-<boto.acl.Policy instance at 0x2e6940>
->>> acp.acl
-<boto.acl.ACL instance at 0x2e69e0>
->>> acp.acl.grants
-[<boto.acl.Grant instance at 0x2e6a08>]
->>> for grant in acp.acl.grants:
-... print grant.permission, grant.display_name, grant.email_address, grant.id
-...
-FULL_CONTROL <boto.user.User instance at 0x2e6a30>
+::
+
+ >>> acp = b.get_acl()
+ >>> acp
+ <boto.acl.Policy instance at 0x2e6940>
+ >>> acp.acl
+ <boto.acl.ACL instance at 0x2e69e0>
+ >>> acp.acl.grants
+ [<boto.acl.Grant instance at 0x2e6a08>]
+ >>> for grant in acp.acl.grants:
+ ... print grant.permission, grant.display_name, grant.email_address, grant.id
+ ...
+ FULL_CONTROL <boto.user.User instance at 0x2e6a30>
The Python objects representing the ACL can be found in the acl.py module
of boto.
@@ -213,10 +275,10 @@ of boto.
Both the Bucket object and the Key object also provide shortcut
methods to simplify the process of granting individuals specific
access. For example, if you want to grant an individual user READ
-access to a particular object in S3 you could do the following:
+access to a particular object in S3 you could do the following::
->>> key = b.lookup('mykeytoshare')
->>> key.add_email_grant('READ', 'foo@bar.com')
+ >>> key = b.lookup('mykeytoshare')
+ >>> key.add_email_grant('READ', 'foo@bar.com')
The email address provided should be the one associated with the user's
AWS account. There is a similar method called add_user_grant that accepts the
@@ -227,23 +289,23 @@ Setting/Getting Metadata Values on Key Objects
S3 allows arbitrary user metadata to be assigned to objects within a bucket.
To take advantage of this S3 feature, you should use the set_metadata and
get_metadata methods of the Key object to set and retrieve metadata associated
-with an S3 object. For example:
+with an S3 object. For example::
->>> k = Key(b)
->>> k.key = 'has_metadata'
->>> k.set_metadata('meta1', 'This is the first metadata value')
->>> k.set_metadata('meta2', 'This is the second metadata value')
->>> k.set_contents_from_filename('foo.txt')
+ >>> k = Key(b)
+ >>> k.key = 'has_metadata'
+ >>> k.set_metadata('meta1', 'This is the first metadata value')
+ >>> k.set_metadata('meta2', 'This is the second metadata value')
+ >>> k.set_contents_from_filename('foo.txt')
This code associates two metadata key/value pairs with the Key k. To retrieve
-those values later:
+those values later::
->>> k = b.get_key('has_metadata')
->>> k.get_metadata('meta1')
-'This is the first metadata value'
->>> k.get_metadata('meta2')
-'This is the second metadata value'
->>>
+ >>> k = b.get_key('has_metadata')
+ >>> k.get_metadata('meta1')
+ 'This is the first metadata value'
+ >>> k.get_metadata('meta2')
+ 'This is the second metadata value'
+ >>>
Setting/Getting/Deleting CORS Configuration on a Bucket
-------------------------------------------------------
@@ -254,12 +316,12 @@ in a different domain. With CORS support in Amazon S3, you can build
rich client-side web applications with Amazon S3 and selectively allow
cross-origin access to your Amazon S3 resources.
-To create a CORS configuration and associate it with a bucket:
+To create a CORS configuration and associate it with a bucket::
->>> from boto.s3.cors import CORSConfiguration
->>> cors_cfg = CORSConfiguration()
->>> cors_cfg.add_rule(['PUT', 'POST', 'DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000, expose_header='x-amz-server-side-encryption')
->>> cors_cfg.add_rule('GET', '*')
+ >>> from boto.s3.cors import CORSConfiguration
+ >>> cors_cfg = CORSConfiguration()
+ >>> cors_cfg.add_rule(['PUT', 'POST', 'DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000, expose_header='x-amz-server-side-encryption')
+ >>> cors_cfg.add_rule('GET', '*')
The above code creates a CORS configuration object with two rules.
@@ -270,20 +332,20 @@ The above code creates a CORS configuration object with two rules.
return any requested headers.
* The second rule allows cross-origin GET requests from all origins.
-To associate this configuration with a bucket:
+To associate this configuration with a bucket::
->>> import boto
->>> c = boto.connect_s3()
->>> bucket = c.lookup('mybucket')
->>> bucket.set_cors(cors_cfg)
+ >>> import boto
+ >>> c = boto.connect_s3()
+ >>> bucket = c.lookup('mybucket')
+ >>> bucket.set_cors(cors_cfg)
-To retrieve the CORS configuration associated with a bucket:
+To retrieve the CORS configuration associated with a bucket::
->>> cors_cfg = bucket.get_cors()
+ >>> cors_cfg = bucket.get_cors()
-And, finally, to delete all CORS configurations from a bucket:
+And, finally, to delete all CORS configurations from a bucket::
->>> bucket.delete_cors()
+ >>> bucket.delete_cors()
Transitioning Objects to Glacier
--------------------------------
@@ -298,48 +360,50 @@ configurations are assigned to buckets and require these parameters:
* The date (or time period) when you want S3 to perform these actions.
For example, given a bucket ``s3-glacier-boto-demo``, we can first retrieve the
-bucket:
+bucket::
->>> import boto
->>> c = boto.connect_s3()
->>> bucket = c.get_bucket('s3-glacier-boto-demo')
+ >>> import boto
+ >>> c = boto.connect_s3()
+ >>> bucket = c.get_bucket('s3-glacier-boto-demo')
Then we can create a lifecycle object. In our example, we want all objects
under ``logs/*`` to transition to Glacier 30 days after the object is created.
->>> from boto.s3.lifecycle import Lifecycle, Transition, Rule
->>> to_glacier = Transition(days=30, storage_class='GLACIER')
->>> rule = Rule('ruleid', 'logs/', 'Enabled', transition=to_glacier)
->>> lifecycle = Lifecycle()
->>> lifecycle.append(rule)
+::
+
+ >>> from boto.s3.lifecycle import Lifecycle, Transition, Rule
+ >>> to_glacier = Transition(days=30, storage_class='GLACIER')
+ >>> rule = Rule('ruleid', 'logs/', 'Enabled', transition=to_glacier)
+ >>> lifecycle = Lifecycle()
+ >>> lifecycle.append(rule)
.. note::
For API docs for the lifecycle objects, see :py:mod:`boto.s3.lifecycle`
-We can now configure the bucket with this lifecycle policy:
+We can now configure the bucket with this lifecycle policy::
->>> bucket.configure_lifecycle(lifecycle)
+ >>> bucket.configure_lifecycle(lifecycle)
True
-You can also retrieve the current lifecycle policy for the bucket:
+You can also retrieve the current lifecycle policy for the bucket::
->>> current = bucket.get_lifecycle_config()
->>> print current[0].transition
-<Transition: in: 30 days, GLACIER>
+ >>> current = bucket.get_lifecycle_config()
+ >>> print current[0].transition
+ <Transition: in: 30 days, GLACIER>
When an object transitions to Glacier, the storage class will be
-updated. This can be seen when you **list** the objects in a bucket:
+updated. This can be seen when you **list** the objects in a bucket::
->>> for key in bucket.list():
-... print key, key.storage_class
-...
-<Key: s3-glacier-boto-demo,logs/testlog1.log> GLACIER
+ >>> for key in bucket.list():
+ ... print key, key.storage_class
+ ...
+ <Key: s3-glacier-boto-demo,logs/testlog1.log> GLACIER
-You can also use the prefix argument to the ``bucket.list`` method:
+You can also use the prefix argument to the ``bucket.list`` method::
->>> print list(b.list(prefix='logs/testlog1.log'))[0].storage_class
-u'GLACIER'
+ >>> print list(b.list(prefix='logs/testlog1.log'))[0].storage_class
+ u'GLACIER'
Restoring Objects from Glacier
@@ -351,34 +415,36 @@ method of the key object.
The ``restore`` method takes an integer that specifies the number of days
to keep the object in S3.
->>> import boto
->>> c = boto.connect_s3()
->>> bucket = c.get_bucket('s3-glacier-boto-demo')
->>> key = bucket.get_key('logs/testlog1.log')
->>> key.restore(days=5)
+::
+
+ >>> import boto
+ >>> c = boto.connect_s3()
+ >>> bucket = c.get_bucket('s3-glacier-boto-demo')
+ >>> key = bucket.get_key('logs/testlog1.log')
+ >>> key.restore(days=5)
It takes about 4 hours for a restore operation to make a copy of the archive
available for you to access. While the object is being restored, the
-``ongoing_restore`` attribute will be set to ``True``:
+``ongoing_restore`` attribute will be set to ``True``::
->>> key = bucket.get_key('logs/testlog1.log')
->>> print key.ongoing_restore
-True
+ >>> key = bucket.get_key('logs/testlog1.log')
+ >>> print key.ongoing_restore
+ True
When the restore is finished, this value will be ``False`` and the expiry
-date of the object will be non ``None``:
+date of the object will be non-``None``::
->>> key = bucket.get_key('logs/testlog1.log')
->>> print key.ongoing_restore
-False
->>> print key.expiry_date
-"Fri, 21 Dec 2012 00:00:00 GMT"
+ >>> key = bucket.get_key('logs/testlog1.log')
+ >>> print key.ongoing_restore
+ False
+ >>> print key.expiry_date
+ "Fri, 21 Dec 2012 00:00:00 GMT"
.. note:: If there is no restore operation either in progress or completed,
the ``ongoing_restore`` attribute will be ``None``.
-Once the object is restored you can then download the contents:
+Once the object is restored you can then download the contents::
->>> key.get_contents_to_filename('testlog1.log')
+ >>> key.get_contents_to_filename('testlog1.log')
diff --git a/docs/source/ses_tut.rst b/docs/source/ses_tut.rst
index c71e8868..d19a4e36 100644
--- a/docs/source/ses_tut.rst
+++ b/docs/source/ses_tut.rst
@@ -15,18 +15,19 @@ Creating a Connection
The first step in accessing SES is to create a connection to the service.
To do so, the most straight forward way is the following::
- >>> import boto
- >>> conn = boto.connect_ses(
+ >>> import boto.ses
+ >>> conn = boto.ses.connect_to_region(
+ 'us-west-2',
aws_access_key_id='<YOUR_AWS_KEY_ID>',
aws_secret_access_key='<YOUR_AWS_SECRET_KEY>')
>>> conn
- SESConnection:email.us-east-1.amazonaws.com
+ SESConnection:email.us-west-2.amazonaws.com
Bear in mind that if you have your credentials in boto config in your home
directory, the two keyword arguments in the call above are not needed. More
details on configuration can be found in :doc:`boto_config_tut`.
-The :py:func:`boto.connect_ses` functions returns a
+The :py:func:`boto.ses.connect_to_region` function returns a
:py:class:`boto.ses.connection.SESConnection` instance, which is the boto API
for working with SES.
@@ -168,4 +169,4 @@ where we'll just show a short excerpt here::
]
}
}
- } \ No newline at end of file
+ }
diff --git a/docs/source/simpledb_tut.rst b/docs/source/simpledb_tut.rst
index 39607260..98cabfe0 100644
--- a/docs/source/simpledb_tut.rst
+++ b/docs/source/simpledb_tut.rst
@@ -13,8 +13,11 @@ Creating a Connection
The first step in accessing SimpleDB is to create a connection to the service.
To do so, the most straight forward way is the following::
- >>> import boto
- >>> conn = boto.connect_sdb(aws_access_key_id='<YOUR_AWS_KEY_ID>',aws_secret_access_key='<YOUR_AWS_SECRET_KEY>')
+ >>> import boto.sdb
+ >>> conn = boto.sdb.connect_to_region(
+ ... 'us-west-2',
+ ... aws_access_key_id='<YOUR_AWS_KEY_ID>',
+ ... aws_secret_access_key='<YOUR_AWS_SECRET_KEY>')
>>> conn
SDBConnection:sdb.amazonaws.com
>>>
diff --git a/docs/source/sqs_tut.rst b/docs/source/sqs_tut.rst
index 9445de26..d4d69c98 100644
--- a/docs/source/sqs_tut.rst
+++ b/docs/source/sqs_tut.rst
@@ -15,12 +15,12 @@ The recommended method of doing this is as follows::
>>> import boto.sqs
>>> conn = boto.sqs.connect_to_region(
- ... "us-east-1",
+ ... "us-west-2",
... aws_access_key_id='<aws access key>',
... aws_secret_access_key='<aws secret key>')
At this point the variable conn will point to an SQSConnection object in the
-US-EAST-1 region. Bear in mind that just as any other AWS service, SQS is
+US-WEST-2 region. Bear in mind that, just as with any other AWS service, SQS is
region-specific. In this example, the AWS access key and AWS secret key are
passed in to the method explicitly. Alternatively, you can set the environment
variables:
@@ -31,7 +31,7 @@ variables:
and then simply call::
>>> import boto.sqs
- >>> conn = boto.sqs.connect_to_region("us-east-1")
+ >>> conn = boto.sqs.connect_to_region("us-west-2")
In either case, conn will point to an SQSConnection object which we will
use throughout the remainder of this tutorial.
@@ -217,7 +217,7 @@ If I want to delete the entire queue, I would use:
>>> conn.delete_queue(q)
-However, and this is a good safe guard, this won't succeed unless the queue is empty.
+This will delete the queue, even if there are still messages within the queue.
Additional Information
----------------------
diff --git a/docs/source/vpc_tut.rst b/docs/source/vpc_tut.rst
index ce26ead0..1244c4e1 100644
--- a/docs/source/vpc_tut.rst
+++ b/docs/source/vpc_tut.rst
@@ -97,4 +97,13 @@ Releasing an Elastic IP Attached to a VPC Instance
--------------------------------------------------
>>> ec2.connection.release_address(None, 'eipalloc-35cf685d')
->>> \ No newline at end of file
+>>>
+
+To Get All VPN Connections
+--------------------------
+>>> vpns = c.get_all_vpn_connections()
+>>> vpns[0].id
+u'vpn-12ef67bv'
+>>> tunnels = vpns[0].tunnels
+>>> tunnels
+[VpnTunnel: 177.12.34.56, VpnTunnel: 177.12.34.57]
diff --git a/requirements.txt b/requirements.txt
index d96dae7c..4d6572c0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,12 @@
mock==1.0.1
-nose==1.1.2
-requests==0.13.1
+nose==1.2.1
+# If you upgrade to ``requests>=1.2.1``, please update
+# ``boto/cloudsearch/document.py``.
+requests>=1.1.0
rsa==3.1.1
tox==1.4
Sphinx==1.1.3
simplejson==2.5.2
argparse==1.2.1
unittest2==0.5.1
+httpretty==0.5.5
diff --git a/setup.py b/setup.py
index 96bc48d2..c4b13600 100644
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,8 @@ setup(name = "boto",
"bin/list_instances", "bin/taskadmin", "bin/kill_instance",
"bin/bundle_image", "bin/pyami_sendmail", "bin/lss3",
"bin/cq", "bin/route53", "bin/cwutil", "bin/instance_events",
- "bin/asadmin", "bin/glacier", "bin/mturk"],
+ "bin/asadmin", "bin/glacier", "bin/mturk",
+ "bin/dynamodb_dump", "bin/dynamodb_load"],
url = "https://github.com/boto/boto/",
packages = ["boto", "boto.sqs", "boto.s3", "boto.gs", "boto.file",
"boto.ec2", "boto.ec2.cloudwatch", "boto.ec2.autoscale",
@@ -72,7 +73,8 @@ setup(name = "boto",
"boto.cloudformation", "boto.sts", "boto.dynamodb",
"boto.swf", "boto.mws", "boto.cloudsearch", "boto.glacier",
"boto.beanstalk", "boto.datapipeline", "boto.elasticache",
- "boto.elastictranscoder"],
+ "boto.elastictranscoder", "boto.opsworks", "boto.redshift",
+ "boto.dynamodb2"],
package_data = {"boto.cacerts": ["cacerts.txt"]},
license = "MIT",
platforms = "Posix; MacOS X; Windows",
diff --git a/tests/integration/dynamodb2/__init__.py b/tests/integration/dynamodb2/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integration/dynamodb2/__init__.py
diff --git a/tests/integration/dynamodb2/test_cert_verification.py b/tests/integration/dynamodb2/test_cert_verification.py
new file mode 100644
index 00000000..3901c57e
--- /dev/null
+++ b/tests/integration/dynamodb2/test_cert_verification.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Check that all of the certs on all service endpoints validate.
+"""
+
+import unittest
+import boto.dynamodb2
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ dynamodb2 = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.dynamodb2.regions():
+ c = region.connect()
+ c.list_tables()
diff --git a/tests/integration/dynamodb2/test_layer1.py b/tests/integration/dynamodb2/test_layer1.py
new file mode 100644
index 00000000..d85d8a87
--- /dev/null
+++ b/tests/integration/dynamodb2/test_layer1.py
@@ -0,0 +1,244 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Tests for Layer1 of DynamoDB v2
+"""
+import time
+
+from tests.unit import unittest
+from boto.dynamodb2 import exceptions
+from boto.dynamodb2.layer1 import DynamoDBConnection
+
+
+class DynamoDBv2Layer1Test(unittest.TestCase):
+ dynamodb = True
+
+ def setUp(self):
+ self.dynamodb = DynamoDBConnection()
+ self.table_name = 'test-%d' % int(time.time())
+ self.hash_key_name = 'username'
+ self.hash_key_type = 'S'
+ self.range_key_name = 'date_joined'
+ self.range_key_type = 'N'
+ self.read_units = 5
+ self.write_units = 5
+ self.attributes = [
+ {
+ 'AttributeName': self.hash_key_name,
+ 'AttributeType': self.hash_key_type,
+ },
+ {
+ 'AttributeName': self.range_key_name,
+ 'AttributeType': self.range_key_type,
+ }
+ ]
+ self.schema = [
+ {
+ 'AttributeName': self.hash_key_name,
+ 'KeyType': 'HASH',
+ },
+ {
+ 'AttributeName': self.range_key_name,
+ 'KeyType': 'RANGE',
+ },
+ ]
+ self.provisioned_throughput = {
+ 'ReadCapacityUnits': self.read_units,
+ 'WriteCapacityUnits': self.write_units,
+ }
+ self.lsi = [
+ {
+ 'IndexName': 'MostRecentIndex',
+ 'KeySchema': [
+ {
+ 'AttributeName': self.hash_key_name,
+ 'KeyType': 'HASH',
+ },
+ {
+ 'AttributeName': self.range_key_name,
+ 'KeyType': 'RANGE',
+ },
+ ],
+ 'Projection': {
+ 'ProjectionType': 'KEYS_ONLY',
+ }
+ }
+ ]
+
+ def create_table(self, table_name, attributes, schema,
+ provisioned_throughput, lsi=None, wait=True):
+ # Note: This is a slightly different ordering that makes less sense.
+ result = self.dynamodb.create_table(
+ attributes,
+ table_name,
+ schema,
+ provisioned_throughput,
+ local_secondary_indexes=lsi
+ )
+ self.addCleanup(self.dynamodb.delete_table, table_name)
+ if wait:
+ while True:
+ description = self.dynamodb.describe_table(table_name)
+ if description['Table']['TableStatus'].lower() == 'active':
+ return result
+ else:
+ time.sleep(5)
+ else:
+ return result
+
+ def test_integrated(self):
+ result = self.create_table(
+ self.table_name,
+ self.attributes,
+ self.schema,
+ self.provisioned_throughput,
+ self.lsi
+ )
+ self.assertEqual(result['TableDescription']['TableName'], self.table_name)
+
+ description = self.dynamodb.describe_table(self.table_name)
+ self.assertEqual(description['Table']['ItemCount'], 0)
+
+ # Create some records.
+ record_1_data = {
+ 'username': {'S': 'johndoe'},
+ 'first_name': {'S': 'John'},
+ 'last_name': {'S': 'Doe'},
+ 'date_joined': {'N': '1366056668'},
+ 'friend_count': {'N': '3'},
+ 'friends': {'SS': ['alice', 'bob', 'jane']},
+ }
+ r1_result = self.dynamodb.put_item(self.table_name, record_1_data)
+
+ # Get the data.
+ record_1 = self.dynamodb.get_item(self.table_name, key={
+ 'username': {'S': 'johndoe'},
+ 'date_joined': {'N': '1366056668'},
+ }, consistent_read=True)
+ self.assertEqual(record_1['Item']['username']['S'], 'johndoe')
+ self.assertEqual(record_1['Item']['first_name']['S'], 'John')
+ self.assertEqual(record_1['Item']['friends']['SS'], ['alice', 'bob', 'jane'])
+
+ # Now in a batch.
+ self.dynamodb.batch_write_item({
+ self.table_name: [
+ {
+ 'PutRequest': {
+ 'Item': {
+ 'username': {'S': 'jane'},
+ 'first_name': {'S': 'Jane'},
+ 'last_name': {'S': 'Doe'},
+ 'date_joined': {'N': '1366056789'},
+ 'friend_count': {'N': '1'},
+ 'friends': {'SS': ['johndoe']},
+ },
+ },
+ },
+ ]
+ })
+
+ # Now a query.
+ lsi_results = self.dynamodb.query(
+ self.table_name,
+ index_name='MostRecentIndex',
+ key_conditions={
+ 'username': {
+ 'AttributeValueList': [
+ {'S': 'johndoe'},
+ ],
+ 'ComparisonOperator': 'EQ',
+ },
+ },
+ consistent_read=True
+ )
+ self.assertEqual(lsi_results['Count'], 1)
+
+ results = self.dynamodb.query(self.table_name, key_conditions={
+ 'username': {
+ 'AttributeValueList': [
+ {'S': 'jane'},
+ ],
+ 'ComparisonOperator': 'EQ',
+ },
+ 'date_joined': {
+ 'AttributeValueList': [
+ {'N': '1366050000'}
+ ],
+ 'ComparisonOperator': 'GT',
+ }
+ }, consistent_read=True)
+ self.assertEqual(results['Count'], 1)
+
+ # Now a scan.
+ results = self.dynamodb.scan(self.table_name)
+ self.assertEqual(results['Count'], 2)
+ self.assertEqual(sorted([res['username']['S'] for res in results['Items']]), ['jane', 'johndoe'])
+
+ self.dynamodb.delete_item(self.table_name, key={
+ 'username': {'S': 'johndoe'},
+ 'date_joined': {'N': '1366056668'},
+ })
+
+ results = self.dynamodb.scan(self.table_name)
+ self.assertEqual(results['Count'], 1)
+
+ def test_without_range_key(self):
+ result = self.create_table(
+ self.table_name,
+ [
+ {
+ 'AttributeName': self.hash_key_name,
+ 'AttributeType': self.hash_key_type,
+ },
+ ],
+ [
+ {
+ 'AttributeName': self.hash_key_name,
+ 'KeyType': 'HASH',
+ },
+ ],
+ self.provisioned_throughput
+ )
+ self.assertEqual(result['TableDescription']['TableName'], self.table_name)
+
+ description = self.dynamodb.describe_table(self.table_name)
+ self.assertEqual(description['Table']['ItemCount'], 0)
+
+ # Create some records.
+ record_1_data = {
+ 'username': {'S': 'johndoe'},
+ 'first_name': {'S': 'John'},
+ 'last_name': {'S': 'Doe'},
+ 'date_joined': {'N': '1366056668'},
+ 'friend_count': {'N': '3'},
+ 'friends': {'SS': ['alice', 'bob', 'jane']},
+ }
+ r1_result = self.dynamodb.put_item(self.table_name, record_1_data)
+
+ # Now try a range-less get.
+ johndoe = self.dynamodb.get_item(self.table_name, key={
+ 'username': {'S': 'johndoe'},
+ }, consistent_read=True)
+ self.assertEqual(johndoe['Item']['username']['S'], 'johndoe')
+ self.assertEqual(johndoe['Item']['first_name']['S'], 'John')
+ self.assertEqual(johndoe['Item']['friends']['SS'], ['alice', 'bob', 'jane'])
diff --git a/tests/integration/ec2/elb/test_connection.py b/tests/integration/ec2/elb/test_connection.py
index 2d574d98..618d0ce9 100644
--- a/tests/integration/ec2/elb/test_connection.py
+++ b/tests/integration/ec2/elb/test_connection.py
@@ -30,15 +30,23 @@ from boto.ec2.elb import ELBConnection
class ELBConnectionTest(unittest.TestCase):
ec2 = True
+ def setUp(self):
+ """Creates a named load balancer that can be safely
+ deleted at the end of each test"""
+ self.conn = ELBConnection()
+ self.name = 'elb-boto-unit-test'
+ self.availability_zones = ['us-east-1a']
+ self.listeners = [(80, 8000, 'HTTP')]
+ self.balancer = self.conn.create_load_balancer(self.name, self.availability_zones, self.listeners)
+
def tearDown(self):
- """ Deletes all load balancers after every test. """
- for lb in ELBConnection().get_all_load_balancers():
- lb.delete()
+ """ Deletes the test load balancer after every test.
+ It does not delete EVERY load balancer in your account"""
+ self.balancer.delete()
def test_build_list_params(self):
- c = ELBConnection()
params = {}
- c.build_list_params(
+ self.conn.build_list_params(
params, ['thing1', 'thing2', 'thing3'], 'ThingName%d')
expected_params = {
'ThingName1': 'thing1',
@@ -52,76 +60,60 @@ class ELBConnectionTest(unittest.TestCase):
# balancer.dns_name, along the lines of the existing EC2 unit tests.
def test_create_load_balancer(self):
- c = ELBConnection()
- name = 'elb-boto-unit-test'
- availability_zones = ['us-east-1a']
- listeners = [(80, 8000, 'HTTP')]
- balancer = c.create_load_balancer(name, availability_zones, listeners)
- self.assertEqual(balancer.name, name)
- self.assertEqual(balancer.availability_zones, availability_zones)
- self.assertEqual(balancer.listeners, listeners)
-
- balancers = c.get_all_load_balancers()
- self.assertEqual([lb.name for lb in balancers], [name])
+ self.assertEqual(self.balancer.name, self.name)
+ self.assertEqual(self.balancer.availability_zones,\
+ self.availability_zones)
+ self.assertEqual(self.balancer.listeners, self.listeners)
- def test_create_load_balancer_listeners(self):
- c = ELBConnection()
- name = 'elb-boto-unit-test'
- availability_zones = ['us-east-1a']
- listeners = [(80, 8000, 'HTTP')]
- balancer = c.create_load_balancer(name, availability_zones, listeners)
+ balancers = self.conn.get_all_load_balancers()
+ self.assertEqual([lb.name for lb in balancers], [self.name])
+ def test_create_load_balancer_listeners(self):
more_listeners = [(443, 8001, 'HTTP')]
- c.create_load_balancer_listeners(name, more_listeners)
- balancers = c.get_all_load_balancers()
- self.assertEqual([lb.name for lb in balancers], [name])
+ self.conn.create_load_balancer_listeners(self.name, more_listeners)
+ balancers = self.conn.get_all_load_balancers()
+ self.assertEqual([lb.name for lb in balancers], [self.name])
self.assertEqual(
sorted(l.get_tuple() for l in balancers[0].listeners),
- sorted(listeners + more_listeners)
+ sorted(self.listeners + more_listeners)
)
def test_delete_load_balancer_listeners(self):
- c = ELBConnection()
- name = 'elb-boto-unit-test'
- availability_zones = ['us-east-1a']
- listeners = [(80, 8000, 'HTTP'), (443, 8001, 'HTTP')]
- balancer = c.create_load_balancer(name, availability_zones, listeners)
-
- balancers = c.get_all_load_balancers()
- self.assertEqual([lb.name for lb in balancers], [name])
+ mod_listeners = [(80, 8000, 'HTTP'), (443, 8001, 'HTTP')]
+ mod_name = self.name + "_mod"
+ self.mod_balancer = self.conn.create_load_balancer(mod_name,\
+ self.availability_zones, mod_listeners)
+
+ mod_balancers = self.conn.get_all_load_balancers(load_balancer_names=[mod_name])
+ self.assertEqual([lb.name for lb in mod_balancers], [mod_name])
self.assertEqual(
- sorted([l.get_tuple() for l in balancers[0].listeners]),
- sorted(listeners))
+ sorted([l.get_tuple() for l in mod_balancers[0].listeners]),
+ sorted(mod_listeners))
- c.delete_load_balancer_listeners(name, [443])
- balancers = c.get_all_load_balancers()
- self.assertEqual([lb.name for lb in balancers], [name])
- self.assertEqual([l.get_tuple() for l in balancers[0].listeners],
- listeners[:1])
+ self.conn.delete_load_balancer_listeners(self.mod_balancer.name, [443])
+ mod_balancers = self.conn.get_all_load_balancers(load_balancer_names=[mod_name])
+ self.assertEqual([lb.name for lb in mod_balancers], [mod_name])
+ self.assertEqual([l.get_tuple() for l in mod_balancers[0].listeners],
+ mod_listeners[:1])
+ self.mod_balancer.delete()
def test_create_load_balancer_listeners_with_policies(self):
- c = ELBConnection()
- name = 'elb-boto-unit-test-policy'
- availability_zones = ['us-east-1a']
- listeners = [(80, 8000, 'HTTP')]
- balancer = c.create_load_balancer(name, availability_zones, listeners)
-
more_listeners = [(443, 8001, 'HTTP')]
- c.create_load_balancer_listeners(name, more_listeners)
+ self.conn.create_load_balancer_listeners(self.name, more_listeners)
lb_policy_name = 'lb-policy'
- c.create_lb_cookie_stickiness_policy(1000, name, lb_policy_name)
- c.set_lb_policies_of_listener(name, listeners[0][0], lb_policy_name)
+ self.conn.create_lb_cookie_stickiness_policy(1000, self.name, lb_policy_name)
+ self.conn.set_lb_policies_of_listener(self.name, self.listeners[0][0], lb_policy_name)
app_policy_name = 'app-policy'
- c.create_app_cookie_stickiness_policy('appcookie', name, app_policy_name)
- c.set_lb_policies_of_listener(name, more_listeners[0][0], app_policy_name)
+ self.conn.create_app_cookie_stickiness_policy('appcookie', self.name, app_policy_name)
+ self.conn.set_lb_policies_of_listener(self.name, more_listeners[0][0], app_policy_name)
- balancers = c.get_all_load_balancers()
- self.assertEqual([lb.name for lb in balancers], [name])
+ balancers = self.conn.get_all_load_balancers(load_balancer_names=[self.name])
+ self.assertEqual([lb.name for lb in balancers], [self.name])
self.assertEqual(
sorted(l.get_tuple() for l in balancers[0].listeners),
- sorted(listeners + more_listeners)
+ sorted(self.listeners + more_listeners)
)
# Policy names should be checked here once they are supported
# in the Listener object.
diff --git a/tests/integration/elastictranscoder/test_layer1.py b/tests/integration/elastictranscoder/test_layer1.py
index ac82297c..fa2f840c 100644
--- a/tests/integration/elastictranscoder/test_layer1.py
+++ b/tests/integration/elastictranscoder/test_layer1.py
@@ -105,6 +105,11 @@ class TestETSLayer1PipelineManagement(unittest.TestCase):
self.assertEqual(response['Pipeline']['Notifications']['Error'],
topic_arn)
+ def test_list_jobs_by_pipeline(self):
+ pipeline_id = self.create_pipeline()
+ response = self.api.list_jobs_by_pipeline(pipeline_id)
+ self.assertEqual(response['Jobs'], [])
+
def test_proper_error_when_pipeline_does_not_exist(self):
with self.assertRaises(ValidationException):
self.api.read_pipeline('badpipelineid')
diff --git a/tests/integration/gs/test_basic.py b/tests/integration/gs/test_basic.py
index d425f306..9ac60b91 100644
--- a/tests/integration/gs/test_basic.py
+++ b/tests/integration/gs/test_basic.py
@@ -25,7 +25,7 @@
# IN THE SOFTWARE.
"""
-Some unit tests for the GSConnection
+Some integration tests for the GSConnection
"""
import os
diff --git a/tests/integration/gs/test_generation_conditionals.py b/tests/integration/gs/test_generation_conditionals.py
index 052186c2..a35c466c 100644
--- a/tests/integration/gs/test_generation_conditionals.py
+++ b/tests/integration/gs/test_generation_conditionals.py
@@ -21,7 +21,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-"""Unit tests for GS versioning support."""
+"""Integration tests for GS versioning support."""
import StringIO
import os
@@ -116,13 +116,13 @@ class GSGenerationConditionalsTest(GSTestCase):
k.set_contents_from_string(s1)
g1 = k.generation
- mg1 = k.meta_generation
+ mg1 = k.metageneration
self.assertEqual(str(mg1), "1")
b.set_acl("public-read", key_name="foo")
k = b.get_key("foo")
g2 = k.generation
- mg2 = k.meta_generation
+ mg2 = k.metageneration
self.assertEqual(g2, g1)
self.assertGreater(mg2, mg1)
@@ -145,7 +145,7 @@ class GSGenerationConditionalsTest(GSTestCase):
k = b.get_key("foo")
g3 = k.generation
- mg3 = k.meta_generation
+ mg3 = k.metageneration
self.assertEqual(g3, g2)
self.assertGreater(mg3, mg2)
@@ -181,13 +181,13 @@ class GSGenerationConditionalsTest(GSTestCase):
k.set_contents_from_string(s1)
g1 = k.generation
- mg1 = k.meta_generation
+ mg1 = k.metageneration
self.assertEqual(str(mg1), "1")
b.set_canned_acl("public-read", key_name="foo")
k = b.get_key("foo")
g2 = k.generation
- mg2 = k.meta_generation
+ mg2 = k.metageneration
self.assertEqual(g2, g1)
self.assertGreater(mg2, mg1)
@@ -211,7 +211,7 @@ class GSGenerationConditionalsTest(GSTestCase):
k = b.get_key("foo")
g3 = k.generation
- mg3 = k.meta_generation
+ mg3 = k.metageneration
self.assertEqual(g3, g2)
self.assertGreater(mg3, mg2)
@@ -225,7 +225,7 @@ class GSGenerationConditionalsTest(GSTestCase):
k.set_contents_from_string(s1)
g1 = k.generation
- mg1 = k.meta_generation
+ mg1 = k.metageneration
self.assertEqual(str(mg1), "1")
acl_xml = (
@@ -241,7 +241,7 @@ class GSGenerationConditionalsTest(GSTestCase):
k = b.get_key("foo")
g2 = k.generation
- mg2 = k.meta_generation
+ mg2 = k.metageneration
self.assertEqual(g2, g1)
self.assertGreater(mg2, mg1)
@@ -262,7 +262,7 @@ class GSGenerationConditionalsTest(GSTestCase):
k = b.get_key("foo")
g3 = k.generation
- mg3 = k.meta_generation
+ mg3 = k.metageneration
self.assertEqual(g3, g2)
self.assertGreater(mg3, mg2)
@@ -275,13 +275,13 @@ class GSGenerationConditionalsTest(GSTestCase):
k.set_contents_from_string("test1")
g1 = k.generation
- mg1 = k.meta_generation
+ mg1 = k.metageneration
self.assertEqual(str(mg1), "1")
k.set_acl("public-read")
k = b.get_key("foo")
g2 = k.generation
- mg2 = k.meta_generation
+ mg2 = k.metageneration
self.assertEqual(g2, g1)
self.assertGreater(mg2, mg1)
@@ -302,7 +302,7 @@ class GSGenerationConditionalsTest(GSTestCase):
k = b.get_key("foo")
g3 = k.generation
- mg3 = k.meta_generation
+ mg3 = k.metageneration
self.assertEqual(g3, g2)
self.assertGreater(mg3, mg2)
@@ -314,13 +314,13 @@ class GSGenerationConditionalsTest(GSTestCase):
k.set_contents_from_string("test1")
g1 = k.generation
- mg1 = k.meta_generation
+ mg1 = k.metageneration
self.assertEqual(str(mg1), "1")
k.set_canned_acl("public-read")
k = b.get_key("foo")
g2 = k.generation
- mg2 = k.meta_generation
+ mg2 = k.metageneration
self.assertEqual(g2, g1)
self.assertGreater(mg2, mg1)
@@ -343,7 +343,7 @@ class GSGenerationConditionalsTest(GSTestCase):
k = b.get_key("foo")
g3 = k.generation
- mg3 = k.meta_generation
+ mg3 = k.metageneration
self.assertEqual(g3, g2)
self.assertGreater(mg3, mg2)
@@ -356,7 +356,7 @@ class GSGenerationConditionalsTest(GSTestCase):
k.set_contents_from_string(s1)
g1 = k.generation
- mg1 = k.meta_generation
+ mg1 = k.metageneration
self.assertEqual(str(mg1), "1")
acl_xml = (
@@ -372,7 +372,7 @@ class GSGenerationConditionalsTest(GSTestCase):
k = b.get_key("foo")
g2 = k.generation
- mg2 = k.meta_generation
+ mg2 = k.metageneration
self.assertEqual(g2, g1)
self.assertGreater(mg2, mg1)
@@ -392,7 +392,7 @@ class GSGenerationConditionalsTest(GSTestCase):
k = b.get_key("foo")
g3 = k.generation
- mg3 = k.meta_generation
+ mg3 = k.metageneration
self.assertEqual(g3, g2)
self.assertGreater(mg3, mg2)
diff --git a/tests/integration/gs/test_resumable_downloads.py b/tests/integration/gs/test_resumable_downloads.py
index e16433e3..ba5d9830 100644
--- a/tests/integration/gs/test_resumable_downloads.py
+++ b/tests/integration/gs/test_resumable_downloads.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
@@ -113,9 +111,7 @@ class ResumableDownloadTests(GSTestCase):
self.assertTrue(os.path.exists(tracker_file_name))
f = open(tracker_file_name)
etag_line = f.readline()
- m = re.search(ResumableDownloadHandler.ETAG_REGEX, etag_line)
- f.close()
- self.assertTrue(m)
+ self.assertEquals(etag_line.rstrip('\n'), small_src_key.etag.strip('"\''))
def test_retryable_exception_recovery(self):
"""
diff --git a/tests/integration/gs/test_resumable_uploads.py b/tests/integration/gs/test_resumable_uploads.py
index cc40229e..7c601451 100644
--- a/tests/integration/gs/test_resumable_uploads.py
+++ b/tests/integration/gs/test_resumable_uploads.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/tests/integration/gs/test_storage_uri.py b/tests/integration/gs/test_storage_uri.py
index 3258eae8..a8ed3b62 100644
--- a/tests/integration/gs/test_storage_uri.py
+++ b/tests/integration/gs/test_storage_uri.py
@@ -21,13 +21,18 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-"""Unit tests for StorageUri interface."""
+"""Integration tests for StorageUri interface."""
+import binascii
+import re
import StringIO
from boto import storage_uri
+from boto.exception import BotoClientError
+from boto.gs.acl import SupportedPermissions as perms
from tests.integration.gs.testcase import GSTestCase
+
class GSStorageUriTest(GSTestCase):
def testHasVersion(self):
@@ -40,10 +45,6 @@ class GSStorageUriTest(GSTestCase):
# Generation triggers versioning.
uri.generation = 12345
self.assertTrue(uri.has_version())
- # Meta-generation is permitted.
- uri.meta_generation = 1
- self.assertTrue(uri.has_version())
- # Meta-generation is insufficient for versioning.
uri.generation = None
self.assertFalse(uri.has_version())
@@ -52,25 +53,6 @@ class GSStorageUriTest(GSTestCase):
uri.generation = 0
self.assertTrue(uri.has_version())
- def testVersionUriStr(self):
- uri_str = "gs://bucket/obj"
- uri = storage_uri(uri_str)
- uri.version_id = "versionid"
- self.assertEquals(uri_str + "#versionid", uri.versioned_uri_str())
-
- uri = storage_uri(uri_str)
- self.assertEquals(uri_str, uri.versioned_uri_str())
-
- uri.generation = 12345
- self.assertEquals(uri_str + "#12345", uri.versioned_uri_str())
- uri.generation = 0
- self.assertEquals(uri_str + "#0", uri.versioned_uri_str())
-
- uri.meta_generation = 1
- self.assertEquals(uri_str + "#0.1", uri.versioned_uri_str())
- uri.meta_generation = 0
- self.assertEquals(uri_str + "#0.0", uri.versioned_uri_str())
-
def testCloneReplaceKey(self):
b = self._MakeBucket()
k = b.new_key("obj")
@@ -80,38 +62,100 @@ class GSStorageUriTest(GSTestCase):
uri = orig_uri.clone_replace_key(k)
self.assertTrue(uri.has_version())
- self.assertRegexpMatches(str(uri.generation), r'[0-9]+')
- self.assertEquals(uri.meta_generation, 1)
+ self.assertRegexpMatches(str(uri.generation), r"[0-9]+")
+
+ def testSetAclXml(self):
+ """Ensures that calls to the set_xml_acl functions succeed."""
+ b = self._MakeBucket()
+ k = b.new_key("obj")
+ k.set_contents_from_string("stringdata")
+ bucket_uri = storage_uri("gs://%s/" % b.name)
+
+ # Get a valid ACL for an object.
+ bucket_uri.object_name = "obj"
+ bucket_acl = bucket_uri.get_acl()
+ bucket_uri.object_name = None
+
+ # Add a permission to the ACL.
+ all_users_read_permission = ("<Entry><Scope type='AllUsers'/>"
+ "<Permission>READ</Permission></Entry>")
+ acl_string = re.sub(r"</Entries>",
+ all_users_read_permission + "</Entries>",
+ bucket_acl.to_xml())
+
+ # Test-generated owner IDs are not currently valid for buckets
+ acl_no_owner_string = re.sub(r"<Owner>.*</Owner>", "", acl_string)
+
+ # Set ACL on an object.
+ bucket_uri.set_xml_acl(acl_string, "obj")
+ # Set ACL on a bucket.
+ bucket_uri.set_xml_acl(acl_no_owner_string)
+ # Set the default ACL for a bucket.
+ bucket_uri.set_def_xml_acl(acl_no_owner_string)
+
+ # Verify all the ACLs were successfully applied.
+ new_obj_acl_string = k.get_acl().to_xml()
+ new_bucket_acl_string = bucket_uri.get_acl().to_xml()
+ new_bucket_def_acl_string = bucket_uri.get_def_acl().to_xml()
+ self.assertRegexpMatches(new_obj_acl_string, r"AllUsers")
+ self.assertRegexpMatches(new_bucket_acl_string, r"AllUsers")
+ self.assertRegexpMatches(new_bucket_def_acl_string, r"AllUsers")
def testPropertiesUpdated(self):
- b = self._MakeBucket()
- bucket_uri = storage_uri("gs://%s" % b.name)
- key_uri = bucket_uri.clone_replace_name("obj")
- key_uri.set_contents_from_string("data1")
-
- self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
- self.assertEquals(int(key_uri.meta_generation), 1)
- k = b.get_key("obj")
- self.assertEqual(k.generation, key_uri.generation)
- self.assertEqual(k.meta_generation, key_uri.meta_generation)
- self.assertEquals(k.get_contents_as_string(), "data1")
-
- key_uri.set_contents_from_stream(StringIO.StringIO("data2"))
- self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
- self.assertGreater(key_uri.generation, k.generation)
- self.assertEqual(int(key_uri.meta_generation), 1)
- k = b.get_key("obj")
- self.assertEqual(k.generation, key_uri.generation)
- self.assertEqual(k.meta_generation, key_uri.meta_generation)
- self.assertEquals(int(key_uri.meta_generation), 1)
- self.assertEquals(k.get_contents_as_string(), "data2")
-
- key_uri.set_contents_from_file(StringIO.StringIO("data3"))
- self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
- self.assertGreater(key_uri.generation, k.generation)
- self.assertEqual(int(key_uri.meta_generation), 1)
- k = b.get_key("obj")
- self.assertEqual(k.generation, key_uri.generation)
- self.assertEqual(k.meta_generation, key_uri.meta_generation)
- self.assertEquals(int(key_uri.meta_generation), 1)
- self.assertEquals(k.get_contents_as_string(), "data3")
+ b = self._MakeBucket()
+ bucket_uri = storage_uri("gs://%s" % b.name)
+ key_uri = bucket_uri.clone_replace_name("obj")
+ key_uri.set_contents_from_string("data1")
+
+ self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
+ k = b.get_key("obj")
+ self.assertEqual(k.generation, key_uri.generation)
+ self.assertEquals(k.get_contents_as_string(), "data1")
+
+ key_uri.set_contents_from_stream(StringIO.StringIO("data2"))
+ self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
+ self.assertGreater(key_uri.generation, k.generation)
+ k = b.get_key("obj")
+ self.assertEqual(k.generation, key_uri.generation)
+ self.assertEquals(k.get_contents_as_string(), "data2")
+
+ key_uri.set_contents_from_file(StringIO.StringIO("data3"))
+ self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
+ self.assertGreater(key_uri.generation, k.generation)
+ k = b.get_key("obj")
+ self.assertEqual(k.generation, key_uri.generation)
+ self.assertEquals(k.get_contents_as_string(), "data3")
+
+ def testCompose(self):
+ data1 = 'hello '
+ data2 = 'world!'
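+ # Presumably the CRC32C checksum of the composed payload 'hello world!'.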
+ expected_crc = 1238062967
+
+ b = self._MakeBucket()
+ bucket_uri = storage_uri("gs://%s" % b.name)
+ key_uri1 = bucket_uri.clone_replace_name("component1")
+ key_uri1.set_contents_from_string(data1)
+ key_uri2 = bucket_uri.clone_replace_name("component2")
+ key_uri2.set_contents_from_string(data2)
+
+ # Simple compose.
+ key_uri_composite = bucket_uri.clone_replace_name("composite")
+ components = [key_uri1, key_uri2]
+ key_uri_composite.compose(components, content_type='text/plain')
+ self.assertEquals(key_uri_composite.get_contents_as_string(),
+ data1 + data2)
+ composite_key = key_uri_composite.get_key()
+ cloud_crc32c = binascii.hexlify(
+ composite_key.cloud_hashes['crc32c'])
+ self.assertEquals(cloud_crc32c, hex(expected_crc)[2:])
+ self.assertEquals(composite_key.content_type, 'text/plain')
+
+ # Compose disallowed between buckets.
+ key_uri1.bucket_name += '2'
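+ # Re-pointing one component at a different (nonexistent) bucket, so the
+ # compose call below should be rejected as inter-bucket composing.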
+ try:
+ key_uri_composite.compose(components)
+ self.fail('Composing between buckets didn\'t fail as expected.')
+ except BotoClientError as err:
+ self.assertEquals(
+ err.reason, 'GCS does not support inter-bucket composing')
+
diff --git a/tests/integration/gs/test_versioning.py b/tests/integration/gs/test_versioning.py
index 5f31e3d2..6d1aedde 100644
--- a/tests/integration/gs/test_versioning.py
+++ b/tests/integration/gs/test_versioning.py
@@ -21,7 +21,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-"""Unit tests for GS versioning support."""
+"""Integration tests for GS versioning support."""
from xml import sax
@@ -64,8 +64,7 @@ class GSVersioningTest(GSTestCase):
# Delete "current" version and make sure that version is no longer
# visible from a basic GET call.
- k = b.get_key("foo")
- k.delete()
+ b.delete_key("foo", generation=None)
self.assertIsNone(b.get_key("foo"))
# Both old versions should still be there when listed using the versions
@@ -259,10 +258,10 @@ class GSVersioningTest(GSTestCase):
k.set_contents_from_string("test1")
g1 = k.generation
self.assertRegexpMatches(g1, r'[0-9]+')
- self.assertEqual(k.meta_generation, '1')
+ self.assertEqual(k.metageneration, '1')
k.set_contents_from_string("test2")
g2 = k.generation
self.assertNotEqual(g1, g2)
self.assertRegexpMatches(g2, r'[0-9]+')
self.assertGreater(int(g2), int(g1))
- self.assertEqual(k.meta_generation, '1')
+ self.assertEqual(k.metageneration, '1')
diff --git a/tests/integration/gs/util.py b/tests/integration/gs/util.py
index 314230ab..5c99ac08 100644
--- a/tests/integration/gs/util.py
+++ b/tests/integration/gs/util.py
@@ -1,3 +1,25 @@
+# Copyright (c) 2012, Google, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
import time
from boto.provider import Provider
diff --git a/tests/integration/opsworks/__init__.py b/tests/integration/opsworks/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integration/opsworks/__init__.py
diff --git a/tests/integration/opsworks/test_layer1.py b/tests/integration/opsworks/test_layer1.py
new file mode 100644
index 00000000..a9887cde
--- /dev/null
+++ b/tests/integration/opsworks/test_layer1.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import unittest
+import time
+
+from boto.opsworks.layer1 import OpsWorksConnection
+from boto.opsworks.exceptions import ValidationException
+
+
+class TestOpsWorksConnection(unittest.TestCase):
+ def setUp(self):
+ self.api = OpsWorksConnection()
+
+ def test_describe_stacks(self):
+ response = self.api.describe_stacks()
+ self.assertIn('Stacks', response)
+
+ def test_validation_errors(self):
+ with self.assertRaises(ValidationException):
+ self.api.create_stack('testbotostack', 'us-east-1',
+ 'badarn', 'badarn2')
diff --git a/tests/integration/redshift/__init__.py b/tests/integration/redshift/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/integration/redshift/__init__.py
diff --git a/tests/integration/redshift/test_cert_verification.py b/tests/integration/redshift/test_cert_verification.py
new file mode 100644
index 00000000..27fd16da
--- /dev/null
+++ b/tests/integration/redshift/test_cert_verification.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from tests.unit import unittest
+import boto.redshift
+
+
+class CertVerificationTest(unittest.TestCase):
+
+ redshift = True
+ ssl = True
+
+ def test_certs(self):
+ for region in boto.redshift.regions():
+ c = region.connect()
+ c.describe_cluster_versions()
diff --git a/tests/integration/redshift/test_layer1.py b/tests/integration/redshift/test_layer1.py
new file mode 100644
index 00000000..490618e1
--- /dev/null
+++ b/tests/integration/redshift/test_layer1.py
@@ -0,0 +1,134 @@
+# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import unittest
+import time
+
+from nose.plugins.attrib import attr
+
+from boto.redshift.layer1 import RedshiftConnection
+from boto.redshift.exceptions import ClusterNotFoundFault
+from boto.redshift.exceptions import ResizeNotFoundFault
+
+
+class TestRedshiftLayer1Management(unittest.TestCase):
+ redshift = True
+
+ def setUp(self):
+ self.api = RedshiftConnection()
+ self.cluster_prefix = 'boto-redshift-cluster-%s'
+ self.node_type = 'dw.hs1.xlarge'
+ self.master_username = 'mrtest'
+ self.master_password = 'P4ssword'
+ self.db_name = 'simon'
+ # Redshift was taking ~20 minutes to bring clusters up in testing.
+ self.wait_time = 60 * 20
+
+ def cluster_id(self):
+ # This needs to be unique per test method.
+ return self.cluster_prefix % str(int(time.time()))
+
+ def create_cluster(self):
+ cluster_id = self.cluster_id()
+ self.api.create_cluster(
+ cluster_id, self.node_type,
+ self.master_username, self.master_password,
+ db_name=self.db_name, number_of_nodes=3
+ )
+
+ # Wait for it to come up.
+ time.sleep(self.wait_time)
+
+ self.addCleanup(self.delete_cluster_the_slow_way, cluster_id)
+ return cluster_id
+
+ def delete_cluster_the_slow_way(self, cluster_id):
+ # Because there might be other operations in progress. :(
+ time.sleep(self.wait_time)
+
+ self.api.delete_cluster(cluster_id, skip_final_cluster_snapshot=True)
+
+ @attr('notdefault')
+ def test_create_delete_cluster(self):
+ cluster_id = self.cluster_id()
+ self.api.create_cluster(
+ cluster_id, self.node_type,
+ self.master_username, self.master_password,
+ db_name=self.db_name, number_of_nodes=3
+ )
+
+ # Wait for it to come up.
+ time.sleep(self.wait_time)
+
+ self.api.delete_cluster(cluster_id, skip_final_cluster_snapshot=True)
+
+ @attr('notdefault')
+ def test_as_much_as_possible_before_teardown(self):
+ # Per @garnaat, for the sake of suite time, we'll test as much as we
+ # can before we tear down.
+
+ # Test a non-existent cluster ID.
+ with self.assertRaises(ClusterNotFoundFault):
+ self.api.describe_clusters('badpipelineid')
+
+ # Now create the cluster & move on.
+ cluster_id = self.create_cluster()
+
+ # Test never resized.
+ with self.assertRaises(ResizeNotFoundFault):
+ self.api.describe_resize(cluster_id)
+
+ # The cluster shows up in describe_clusters
+ clusters = self.api.describe_clusters()['DescribeClustersResponse']\
+ ['DescribeClustersResult']\
+ ['Clusters']
+ cluster_ids = [c['ClusterIdentifier'] for c in clusters]
+ self.assertIn(cluster_id, cluster_ids)
+
+ # The cluster shows up in describe_clusters w/ id
+ response = self.api.describe_clusters(cluster_id)
+ self.assertEqual(response['DescribeClustersResponse']\
+ ['DescribeClustersResult']['Clusters'][0]\
+ ['ClusterIdentifier'], cluster_id)
+
+ snapshot_id = "snap-%s" % cluster_id
+
+ # Test creating a snapshot.
+ response = self.api.create_cluster_snapshot(snapshot_id, cluster_id)
+ self.assertEqual(response['CreateClusterSnapshotResponse']\
+ ['CreateClusterSnapshotResult']['Snapshot']\
+ ['SnapshotIdentifier'], snapshot_id)
+ self.assertEqual(response['CreateClusterSnapshotResponse']\
+ ['CreateClusterSnapshotResult']['Snapshot']\
+ ['Status'], 'creating')
+ self.addCleanup(self.api.delete_cluster_snapshot, snapshot_id)
+
+ # More waiting. :(
+ time.sleep(self.wait_time)
+
+ # Describe the snapshots.
+ response = self.api.describe_cluster_snapshots(
+ cluster_identifier=cluster_id
+ )
+ snap = response['DescribeClusterSnapshotsResponse']\
+ ['DescribeClusterSnapshotsResult']['Snapshots'][-1]
+ self.assertEqual(snap['SnapshotType'], 'manual')
+ self.assertEqual(snap['DBName'], self.db_name)
diff --git a/tests/integration/s3/mock_storage_service.py b/tests/integration/s3/mock_storage_service.py
index 3eee5a4b..4b95e505 100644
--- a/tests/integration/s3/mock_storage_service.py
+++ b/tests/integration/s3/mock_storage_service.py
@@ -118,7 +118,7 @@ class MockKey(object):
if match:
self.read_pos = int(match.group(1))
- def close(self):
+ def close(self, fast=NOT_IMPL):
self.closed = True
def read(self, size=0):
@@ -250,6 +250,9 @@ class MockBucket(object):
def enable_logging(self, target_bucket_prefix):
self.logging = True
+ def get_logging_config(self):
+ return {"Logging": {}}
+
def get_acl(self, key_name='', headers=NOT_IMPL, version_id=NOT_IMPL):
if key_name:
# Return ACL for the key.
@@ -399,8 +402,7 @@ class MockBucketStorageUri(object):
def __init__(self, scheme, bucket_name=None, object_name=None,
debug=NOT_IMPL, suppress_consec_slashes=NOT_IMPL,
- version_id=None, generation=None, meta_generation=None,
- is_latest=False):
+ version_id=None, generation=None, is_latest=False):
self.scheme = scheme
self.bucket_name = bucket_name
self.object_name = object_name
@@ -415,8 +417,12 @@ class MockBucketStorageUri(object):
self.version_id = version_id
self.generation = generation and int(generation)
- self.meta_generation = meta_generation and int(meta_generation)
+ self.is_version_specific = (bool(self.generation)
+ or bool(self.version_id))
self.is_latest = is_latest
+ if bucket_name and object_name:
+ self.versionless_uri = '%s://%s/%s' % (scheme, bucket_name,
+ object_name)
def __repr__(self):
"""Returns string representation of URI."""
@@ -439,7 +445,6 @@ class MockBucketStorageUri(object):
suppress_consec_slashes=self.suppress_consec_slashes,
version_id=getattr(key, 'version_id', None),
generation=getattr(key, 'generation', None),
- meta_generation=getattr(key, 'meta_generation', None),
is_latest=getattr(key, 'is_latest', None))
def connect(self, access_key_id=NOT_IMPL, secret_access_key=NOT_IMPL):
@@ -457,18 +462,6 @@ class MockBucketStorageUri(object):
and ((self.version_id is not None)
or (self.generation is not None)))
- def versioned_uri_str(self):
- version_desc = ''
- if not issubclass(type(self), MockBucketStorageUri):
- pass
- elif self.version_id is not None:
- version_desc += '#' + self.version_id
- elif self.generation is not None:
- version_desc += '#' + str(self.generation)
- if self.meta_generation is not None:
- version_desc += '.' + str(self.meta_generation)
- return self.uri + version_desc
-
def delete_key(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL, mfa_token=NOT_IMPL):
self.get_bucket().delete_key(self.object_name)
@@ -481,6 +474,10 @@ class MockBucketStorageUri(object):
headers=NOT_IMPL, version_id=NOT_IMPL):
self.get_bucket().enable_logging(target_bucket)
+ def get_logging_config(self, validate=NOT_IMPL, headers=NOT_IMPL,
+ version_id=NOT_IMPL):
+ return self.get_bucket().get_logging_config()
+
def equals(self, uri):
return self.uri == uri.uri
diff --git a/tests/integration/s3/test_connection.py b/tests/integration/s3/test_connection.py
index b6733036..5d7473ee 100644
--- a/tests/integration/s3/test_connection.py
+++ b/tests/integration/s3/test_connection.py
@@ -99,7 +99,8 @@ class S3ConnectionTest (unittest.TestCase):
k.name = 'foo/bar'
k.set_contents_from_string(s1, headers)
k.name = 'foo/bas'
- k.set_contents_from_filename('foobar')
+ size = k.set_contents_from_filename('foobar')
+ assert size == 42
k.name = 'foo/bat'
k.set_contents_from_string(s1)
k.name = 'fie/bar'
diff --git a/tests/integration/sns/test_sns_sqs_subscription.py b/tests/integration/sns/test_sns_sqs_subscription.py
new file mode 100644
index 00000000..0cb8b360
--- /dev/null
+++ b/tests/integration/sns/test_sns_sqs_subscription.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Integration tests for subscribing SQS queues to SNS topics.
+"""
+
+import hashlib
+import time
+import json
+
+from tests.unit import unittest
+
+from boto.sqs.connection import SQSConnection
+from boto.sns.connection import SNSConnection
+
+class SNSSubcribeSQSTest(unittest.TestCase):
+
+ sqs = True
+ sns = True
+
+ def setUp(self):
+ self.sqsc = SQSConnection()
+ self.snsc = SNSConnection()
+
+ def get_policy_statements(self, queue):
+ attrs = queue.get_attributes('Policy')
+ policy = json.loads(attrs.get('Policy', "{}"))
+ return policy.get('Statement', {})
+
+ def test_correct_sid(self):
+ now = time.time()
+ topic_name = queue_name = "test_correct_sid%d" % (now)
+
+ timeout = 60
+ queue = self.sqsc.create_queue(queue_name, timeout)
+ self.addCleanup(self.sqsc.delete_queue, queue, True)
+ queue_arn = queue.arn
+
+ topic = self.snsc.create_topic(topic_name)
+ topic_arn = topic['CreateTopicResponse']['CreateTopicResult']\
+ ['TopicArn']
+ self.addCleanup(self.snsc.delete_topic, topic_arn)
+
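+ # subscribe_sqs_queue is expected to install a policy statement whose Sid
+ # is the MD5 hex digest of the topic ARN concatenated with the queue ARN.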
+ expected_sid = hashlib.md5(topic_arn + queue_arn).hexdigest()
+ resp = self.snsc.subscribe_sqs_queue(topic_arn, queue)
+
+ found_expected_sid = False
+ statements = self.get_policy_statements(queue)
+ for statement in statements:
+ if statement['Sid'] == expected_sid:
+ found_expected_sid = True
+ break
+ self.assertTrue(found_expected_sid)
+
+ def test_idempotent_subscribe(self):
+ now = time.time()
+ topic_name = queue_name = "test_idempotent_subscribe%d" % (now)
+
+ timeout = 60
+ queue = self.sqsc.create_queue(queue_name, timeout)
+ self.addCleanup(self.sqsc.delete_queue, queue, True)
+ initial_statements = self.get_policy_statements(queue)
+ queue_arn = queue.arn
+
+ topic = self.snsc.create_topic(topic_name)
+ topic_arn = topic['CreateTopicResponse']['CreateTopicResult']\
+ ['TopicArn']
+ self.addCleanup(self.snsc.delete_topic, topic_arn)
+
+ resp = self.snsc.subscribe_sqs_queue(topic_arn, queue)
+ time.sleep(3)
+ first_subscribe_statements = self.get_policy_statements(queue)
+ self.assertEqual(len(first_subscribe_statements),
+ len(initial_statements) + 1)
+
+ resp2 = self.snsc.subscribe_sqs_queue(topic_arn, queue)
+ time.sleep(3)
+ second_subscribe_statements = self.get_policy_statements(queue)
+ self.assertEqual(len(second_subscribe_statements),
+ len(first_subscribe_statements))
diff --git a/tests/integration/sqs/test_connection.py b/tests/integration/sqs/test_connection.py
index 611d5219..9b2ab59a 100644
--- a/tests/integration/sqs/test_connection.py
+++ b/tests/integration/sqs/test_connection.py
@@ -30,6 +30,7 @@ from threading import Timer
from tests.unit import unittest
from boto.sqs.connection import SQSConnection
+from boto.sqs.message import Message
from boto.sqs.message import MHMessage
from boto.exception import SQSError
@@ -56,17 +57,18 @@ class SQSConnectionTest(unittest.TestCase):
# now create one that should work and should be unique (i.e. a new one)
queue_name = 'test%d' % int(time.time())
timeout = 60
- queue = c.create_queue(queue_name, timeout)
+ queue_1 = c.create_queue(queue_name, timeout)
+ self.addCleanup(c.delete_queue, queue_1, True)
time.sleep(60)
rs = c.get_all_queues()
i = 0
for q in rs:
i += 1
assert i == num_queues + 1
- assert queue.count_slow() == 0
+ assert queue_1.count_slow() == 0
# check the visibility timeout
- t = queue.get_timeout()
+ t = queue_1.get_timeout()
assert t == timeout, '%d != %d' % (t, timeout)
# now try to get queue attributes
@@ -82,75 +84,73 @@ class SQSConnectionTest(unittest.TestCase):
# now change the visibility timeout
timeout = 45
- queue.set_timeout(timeout)
+ queue_1.set_timeout(timeout)
time.sleep(60)
- t = queue.get_timeout()
+ t = queue_1.get_timeout()
assert t == timeout, '%d != %d' % (t, timeout)
# now add a message
message_body = 'This is a test\n'
- message = queue.new_message(message_body)
- queue.write(message)
+ message = queue_1.new_message(message_body)
+ queue_1.write(message)
time.sleep(60)
- assert queue.count_slow() == 1
+ assert queue_1.count_slow() == 1
time.sleep(90)
# now read the message from the queue with a 10 second timeout
- message = queue.read(visibility_timeout=10)
+ message = queue_1.read(visibility_timeout=10)
assert message
assert message.get_body() == message_body
# now immediately try another read, shouldn't find anything
- message = queue.read()
+ message = queue_1.read()
assert message == None
# now wait 30 seconds and try again
time.sleep(30)
- message = queue.read()
+ message = queue_1.read()
assert message
# now delete the message
- queue.delete_message(message)
+ queue_1.delete_message(message)
time.sleep(30)
- assert queue.count_slow() == 0
+ assert queue_1.count_slow() == 0
# try a batch write
num_msgs = 10
msgs = [(i, 'This is message %d' % i, 0) for i in range(num_msgs)]
- queue.write_batch(msgs)
+ queue_1.write_batch(msgs)
# try to delete all of the messages using batch delete
deleted = 0
while deleted < num_msgs:
time.sleep(5)
- msgs = queue.get_messages(num_msgs)
+ msgs = queue_1.get_messages(num_msgs)
if msgs:
- br = queue.delete_message_batch(msgs)
+ br = queue_1.delete_message_batch(msgs)
deleted += len(br.results)
# create another queue so we can test force deletion
# we will also test MHMessage with this queue
queue_name = 'test%d' % int(time.time())
timeout = 60
- queue = c.create_queue(queue_name, timeout)
- queue.set_message_class(MHMessage)
+ queue_2 = c.create_queue(queue_name, timeout)
+ self.addCleanup(c.delete_queue, queue_2, True)
+ queue_2.set_message_class(MHMessage)
time.sleep(30)
# now add a couple of messages
- message = queue.new_message()
+ message = queue_2.new_message()
message['foo'] = 'bar'
- queue.write(message)
+ queue_2.write(message)
message_body = {'fie': 'baz', 'foo': 'bar'}
- message = queue.new_message(body=message_body)
- queue.write(message)
+ message = queue_2.new_message(body=message_body)
+ queue_2.write(message)
time.sleep(30)
- m = queue.read()
+ m = queue_2.read()
assert m['foo'] == 'bar'
- # now delete that queue and messages
- c.delete_queue(queue, True)
-
print '--- tests completed ---'
def test_sqs_timeout(self):
@@ -215,3 +215,27 @@ class SQSConnectionTest(unittest.TestCase):
# we're giving +- .5 seconds for the total time the queue
# was blocked on the read call.
self.assertTrue(4.5 <= (end - start) <= 5.5)
+
+ def test_queue_deletion_affects_full_queues(self):
+ conn = SQSConnection()
+ initial_count = len(conn.get_all_queues())
+
+ empty = conn.create_queue('empty%d' % int(time.time()))
+ full = conn.create_queue('full%d' % int(time.time()))
+ time.sleep(60)
+ # Make sure they're both around.
+ self.assertEqual(len(conn.get_all_queues()), initial_count + 2)
+
+ # Put a message in the full queue.
+ m1 = Message()
+ m1.set_body('This is a test message.')
+ full.write(m1)
+ self.assertEqual(full.count(), 1)
+
+ self.assertTrue(conn.delete_queue(empty))
+ # This is the behavior the docs describe: SQS will delete a queue with
+ # messages in it; no ``force_deletion`` needed.
+ self.assertTrue(conn.delete_queue(full))
+ # Wait long enough for SQS to finally remove the queues.
+ time.sleep(90)
+ self.assertEqual(len(conn.get_all_queues()), initial_count)
diff --git a/tests/test.py b/tests/test.py
index 68e7af25..d9781ecd 100755
--- a/tests/test.py
+++ b/tests/test.py
@@ -29,7 +29,10 @@ import argparse
def main():
- parser = argparse.ArgumentParser()
+ description = ("Runs boto unit and/or integration tests. "
+ "Arguments will be passed on to nosetests. "
+ "See nosetests --help for more information.")
+ parser = argparse.ArgumentParser(description=description)
parser.add_argument('-t', '--service-tests', action="append", default=[],
help="Run tests for a given service. This will "
"run any test tagged with the specified value, "
diff --git a/tests/unit/beanstalk/test_layer1.py b/tests/unit/beanstalk/test_layer1.py
index 6df75374..2ecec0d2 100644
--- a/tests/unit/beanstalk/test_layer1.py
+++ b/tests/unit/beanstalk/test_layer1.py
@@ -44,11 +44,8 @@ class TestListAvailableSolutionStacks(AWSMockServiceTestCase):
self.assert_request_parameters({
'Action': 'ListAvailableSolutionStacks',
'ContentType': 'JSON',
- 'SignatureMethod': 'HmacSHA256',
- 'SignatureVersion': 2,
'Version': '2010-12-01',
- 'AWSAccessKeyId': 'aws_access_key_id',
- }, ignore_params_values=['Timestamp'])
+ })
class TestCreateApplicationVersion(AWSMockServiceTestCase):
@@ -78,16 +75,13 @@ class TestCreateApplicationVersion(AWSMockServiceTestCase):
self.assert_request_parameters({
'Action': 'CreateApplicationVersion',
'ContentType': 'JSON',
- 'SignatureMethod': 'HmacSHA256',
- 'SignatureVersion': 2,
'Version': '2010-12-01',
'ApplicationName': 'application1',
'AutoCreateApplication': 'true',
'SourceBundle.S3Bucket': 'mybucket',
'SourceBundle.S3Key': 'mykey',
'VersionLabel': 'version1',
- 'AWSAccessKeyId': 'aws_access_key_id',
- }, ignore_params_values=['Timestamp'])
+ })
self.assertEqual(app_version['ApplicationName'], 'application1')
self.assertEqual(app_version['VersionLabel'], 'version1')
@@ -114,15 +108,12 @@ class TestCreateEnvironment(AWSMockServiceTestCase):
'EnvironmentName': 'environment1',
'TemplateName': '32bit Amazon Linux running Tomcat 7',
'ContentType': 'JSON',
- 'SignatureMethod': 'HmacSHA256',
- 'SignatureVersion': 2,
'Version': '2010-12-01',
'VersionLabel': 'version1',
- 'AWSAccessKeyId': 'aws_access_key_id',
'OptionSettings.member.1.Namespace': 'aws:autoscaling:launchconfiguration',
'OptionSettings.member.1.OptionName': 'Ec2KeyName',
'OptionSettings.member.1.Value': 'mykeypair',
'OptionSettings.member.2.Namespace': 'aws:elasticbeanstalk:application:environment',
'OptionSettings.member.2.OptionName': 'ENVVAR',
'OptionSettings.member.2.Value': 'VALUE1',
- }, ignore_params_values=['Timestamp'])
+ })
diff --git a/tests/unit/cloudsearch/__init__.py b/tests/unit/cloudsearch/__init__.py
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/tests/unit/cloudsearch/__init__.py
@@ -0,0 +1 @@
+
diff --git a/tests/unit/cloudsearch/test_connection.py b/tests/unit/cloudsearch/test_connection.py
new file mode 100644
index 00000000..d2f67526
--- /dev/null
+++ b/tests/unit/cloudsearch/test_connection.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+
+from tests.unit import AWSMockServiceTestCase
+
+from boto.cloudsearch.domain import Domain
+from boto.cloudsearch.layer1 import Layer1
+
+import json
+
+class TestCloudSearchCreateDomain(AWSMockServiceTestCase):
+ connection_class = Layer1
+
+ def default_body(self):
+ return """
+<CreateDomainResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01">
+ <CreateDomainResult>
+ <DomainStatus>
+ <SearchPartitionCount>0</SearchPartitionCount>
+ <SearchService>
+ <Arn>arn:aws:cs:us-east-1:1234567890:search/demo</Arn>
+ <Endpoint>search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
+ </SearchService>
+ <NumSearchableDocs>0</NumSearchableDocs>
+ <Created>true</Created>
+ <DomainId>1234567890/demo</DomainId>
+ <Processing>false</Processing>
+ <SearchInstanceCount>0</SearchInstanceCount>
+ <DomainName>demo</DomainName>
+ <RequiresIndexDocuments>false</RequiresIndexDocuments>
+ <Deleted>false</Deleted>
+ <DocService>
+ <Arn>arn:aws:cs:us-east-1:1234567890:doc/demo</Arn>
+ <Endpoint>doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
+ </DocService>
+ </DomainStatus>
+ </CreateDomainResult>
+ <ResponseMetadata>
+ <RequestId>00000000-0000-0000-0000-000000000000</RequestId>
+ </ResponseMetadata>
+</CreateDomainResponse>
+"""
+
+ def test_create_domain(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.create_domain('demo')
+
+ self.assert_request_parameters({
+ 'Action': 'CreateDomain',
+ 'DomainName': 'demo',
+ 'AWSAccessKeyId': 'aws_access_key_id',
+ 'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': 2,
+ 'Version': '2011-02-01',
+ }, ignore_params_values=['Timestamp'])
+
+ def test_cloudsearch_connect_result_endpoints(self):
+ """Check that endpoints & ARNs are correctly returned from AWS"""
+
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.create_domain('demo')
+ domain = Domain(self, api_response)
+
+ self.assertEqual(domain.doc_service_arn,
+ "arn:aws:cs:us-east-1:1234567890:doc/demo")
+ self.assertEqual(
+ domain.doc_service_endpoint,
+ "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ self.assertEqual(domain.search_service_arn,
+ "arn:aws:cs:us-east-1:1234567890:search/demo")
+ self.assertEqual(
+ domain.search_service_endpoint,
+ "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+
+ def test_cloudsearch_connect_result_statuses(self):
+ """Check that domain statuses are correctly returned from AWS"""
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.create_domain('demo')
+ domain = Domain(self, api_response)
+
+ self.assertEqual(domain.created, True)
+ self.assertEqual(domain.processing, False)
+ self.assertEqual(domain.requires_index_documents, False)
+ self.assertEqual(domain.deleted, False)
+
+ def test_cloudsearch_connect_result_details(self):
+ """Check that the domain information is correctly returned from AWS"""
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.create_domain('demo')
+ domain = Domain(self, api_response)
+
+ self.assertEqual(domain.id, "1234567890/demo")
+ self.assertEqual(domain.name, "demo")
+
+ def test_cloudsearch_documentservice_creation(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.create_domain('demo')
+ domain = Domain(self, api_response)
+
+ document = domain.get_document_service()
+
+ self.assertEqual(
+ document.endpoint,
+ "doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+
+ def test_cloudsearch_searchservice_creation(self):
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.create_domain('demo')
+ domain = Domain(self, api_response)
+
+ search = domain.get_search_service()
+
+ self.assertEqual(
+ search.endpoint,
+ "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+
+
+class CloudSearchConnectionDeletionTest(AWSMockServiceTestCase):
+ connection_class = Layer1
+
+ def default_body(self):
+ return """
+<DeleteDomainResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01">
+ <DeleteDomainResult>
+ <DomainStatus>
+ <SearchPartitionCount>0</SearchPartitionCount>
+ <SearchService>
+ <Arn>arn:aws:cs:us-east-1:1234567890:search/demo</Arn>
+ <Endpoint>search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
+ </SearchService>
+ <NumSearchableDocs>0</NumSearchableDocs>
+ <Created>true</Created>
+ <DomainId>1234567890/demo</DomainId>
+ <Processing>false</Processing>
+ <SearchInstanceCount>0</SearchInstanceCount>
+ <DomainName>demo</DomainName>
+ <RequiresIndexDocuments>false</RequiresIndexDocuments>
+ <Deleted>false</Deleted>
+ <DocService>
+ <Arn>arn:aws:cs:us-east-1:1234567890:doc/demo</Arn>
+ <Endpoint>doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com</Endpoint>
+ </DocService>
+ </DomainStatus>
+ </DeleteDomainResult>
+ <ResponseMetadata>
+ <RequestId>00000000-0000-0000-0000-000000000000</RequestId>
+ </ResponseMetadata>
+</DeleteDomainResponse>
+"""
+
+ def test_cloudsearch_deletion(self):
+ """
+ Check that the correct arguments are sent to AWS when deleting a
+ cloudsearch domain.
+ """
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.delete_domain('demo')
+
+ self.assert_request_parameters({
+ 'Action': 'DeleteDomain',
+ 'DomainName': 'demo',
+ 'AWSAccessKeyId': 'aws_access_key_id',
+ 'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': 2,
+ 'Version': '2011-02-01',
+ }, ignore_params_values=['Timestamp'])
+
+
+class CloudSearchConnectionIndexDocumentTest(AWSMockServiceTestCase):
+ connection_class = Layer1
+
+ def default_body(self):
+ return """
+<IndexDocumentsResponse xmlns="http://cloudsearch.amazonaws.com/doc/2011-02-01">
+ <IndexDocumentsResult>
+ <FieldNames>
+ <member>average_score</member>
+ <member>brand_id</member>
+ <member>colors</member>
+ <member>context</member>
+ <member>context_owner</member>
+ <member>created_at</member>
+ <member>creator_id</member>
+ <member>description</member>
+ <member>file_size</member>
+ <member>format</member>
+ <member>has_logo</member>
+ <member>has_messaging</member>
+ <member>height</member>
+ <member>image_id</member>
+ <member>ingested_from</member>
+ <member>is_advertising</member>
+ <member>is_photo</member>
+ <member>is_reviewed</member>
+ <member>modified_at</member>
+ <member>subject_date</member>
+ <member>tags</member>
+ <member>title</member>
+ <member>width</member>
+ </FieldNames>
+ </IndexDocumentsResult>
+ <ResponseMetadata>
+ <RequestId>eb2b2390-6bbd-11e2-ab66-93f3a90dcf2a</RequestId>
+ </ResponseMetadata>
+</IndexDocumentsResponse>
+"""
+
+ def test_cloudsearch_index_documents(self):
+ """
+ Check that the correct arguments are sent to AWS when indexing a
+ domain.
+ """
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.index_documents('demo')
+
+ self.assert_request_parameters({
+ 'Action': 'IndexDocuments',
+ 'DomainName': 'demo',
+ 'AWSAccessKeyId': 'aws_access_key_id',
+ 'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': 2,
+ 'Version': '2011-02-01',
+ }, ignore_params_values=['Timestamp'])
+
+ def test_cloudsearch_index_documents_resp(self):
+ """
+ Check that the AWS response is being parsed correctly when indexing a
+ domain.
+ """
+ self.set_http_response(status_code=200)
+ api_response = self.service_connection.index_documents('demo')
+
+ self.assertEqual(api_response, ['average_score', 'brand_id', 'colors',
+ 'context', 'context_owner',
+ 'created_at', 'creator_id',
+ 'description', 'file_size', 'format',
+ 'has_logo', 'has_messaging', 'height',
+ 'image_id', 'ingested_from',
+ 'is_advertising', 'is_photo',
+ 'is_reviewed', 'modified_at',
+ 'subject_date', 'tags', 'title',
+ 'width'])
diff --git a/tests/unit/cloudsearch/test_document.py b/tests/unit/cloudsearch/test_document.py
new file mode 100644
index 00000000..dc2cc246
--- /dev/null
+++ b/tests/unit/cloudsearch/test_document.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python
+
+from tests.unit import unittest
+from httpretty import HTTPretty
+from mock import MagicMock
+
+import urlparse
+import json
+
+from boto.cloudsearch.document import DocumentServiceConnection
+from boto.cloudsearch.document import CommitMismatchError, EncodingError, \
+ ContentTooLongError
+
+import boto
+
+class CloudSearchDocumentTest(unittest.TestCase):
+ def setUp(self):
+ HTTPretty.enable()
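+ # Each test case class supplies its own canned 'response' dict, which
+ # HTTPretty returns for the documents batch endpoint registered below.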
+ HTTPretty.register_uri(
+ HTTPretty.POST,
+ ("http://doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com/"
+ "2011-02-01/documents/batch"),
+ body=json.dumps(self.response),
+ content_type="application/json")
+
+ def tearDown(self):
+ HTTPretty.disable()
+
+class CloudSearchDocumentSingleTest(CloudSearchDocumentTest):
+
+ response = {
+ 'status': 'success',
+ 'adds': 1,
+ 'deletes': 0,
+ }
+
+ def test_cloudsearch_add_basics(self):
+ """
+ Check that a simple add document actually sends an add document request
+ to AWS.
+ """
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ document.add("1234", 10, {"id": "1234", "title": "Title 1",
+ "category": ["cat_a", "cat_b", "cat_c"]})
+ document.commit()
+
+ args = json.loads(HTTPretty.last_request.body)[0]
+
+ self.assertEqual(args['lang'], 'en')
+ self.assertEqual(args['type'], 'add')
+
+ def test_cloudsearch_add_single_basic(self):
+ """
+ Check that a simple add document sends correct document metadata to
+ AWS.
+ """
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ document.add("1234", 10, {"id": "1234", "title": "Title 1",
+ "category": ["cat_a", "cat_b", "cat_c"]})
+ document.commit()
+
+ args = json.loads(HTTPretty.last_request.body)[0]
+
+ self.assertEqual(args['id'], '1234')
+ self.assertEqual(args['version'], 10)
+ self.assertEqual(args['type'], 'add')
+
+ def test_cloudsearch_add_single_fields(self):
+ """
+ Check that a simple add document sends the actual document to AWS.
+ """
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ document.add("1234", 10, {"id": "1234", "title": "Title 1",
+ "category": ["cat_a", "cat_b", "cat_c"]})
+ document.commit()
+
+ args = json.loads(HTTPretty.last_request.body)[0]
+
+ self.assertEqual(args['fields']['category'], ['cat_a', 'cat_b',
+ 'cat_c'])
+ self.assertEqual(args['fields']['id'], '1234')
+ self.assertEqual(args['fields']['title'], 'Title 1')
+
+ def test_cloudsearch_add_single_result(self):
+ """
+ Check that the reply from adding a single document is correctly parsed.
+ """
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ document.add("1234", 10, {"id": "1234", "title": "Title 1",
+ "category": ["cat_a", "cat_b", "cat_c"]})
+ doc = document.commit()
+
+ self.assertEqual(doc.status, 'success')
+ self.assertEqual(doc.adds, 1)
+ self.assertEqual(doc.deletes, 0)
+
+ self.assertEqual(doc.doc_service, document)
+
+
+class CloudSearchDocumentMultipleAddTest(CloudSearchDocumentTest):
+
+ response = {
+ 'status': 'success',
+ 'adds': 3,
+ 'deletes': 0,
+ }
+
+ objs = {
+ '1234': {
+ 'version': 10, 'fields': {"id": "1234", "title": "Title 1",
+ "category": ["cat_a", "cat_b",
+ "cat_c"]}},
+ '1235': {
+ 'version': 11, 'fields': {"id": "1235", "title": "Title 2",
+ "category": ["cat_b", "cat_c",
+ "cat_d"]}},
+ '1236': {
+ 'version': 12, 'fields': {"id": "1236", "title": "Title 3",
+ "category": ["cat_e", "cat_f",
+ "cat_g"]}},
+ }
+
+
+ def test_cloudsearch_add_basics(self):
+ """Check that multiple documents are added correctly to AWS"""
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ for (key, obj) in self.objs.items():
+ document.add(key, obj['version'], obj['fields'])
+ document.commit()
+
+ args = json.loads(HTTPretty.last_request.body)
+
+ for arg in args:
+ self.assertTrue(arg['id'] in self.objs)
+ self.assertEqual(arg['version'], self.objs[arg['id']]['version'])
+ self.assertEqual(arg['fields']['id'],
+ self.objs[arg['id']]['fields']['id'])
+ self.assertEqual(arg['fields']['title'],
+ self.objs[arg['id']]['fields']['title'])
+ self.assertEqual(arg['fields']['category'],
+ self.objs[arg['id']]['fields']['category'])
+
+ def test_cloudsearch_add_results(self):
+ """
+ Check that the result from adding multiple documents is parsed
+ correctly.
+ """
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ for (key, obj) in self.objs.items():
+ document.add(key, obj['version'], obj['fields'])
+ doc = document.commit()
+
+ self.assertEqual(doc.status, 'success')
+ self.assertEqual(doc.adds, len(self.objs))
+ self.assertEqual(doc.deletes, 0)
+
+
+class CloudSearchDocumentDelete(CloudSearchDocumentTest):
+
+ response = {
+ 'status': 'success',
+ 'adds': 0,
+ 'deletes': 1,
+ }
+
+ def test_cloudsearch_delete(self):
+ """
+ Test that the request for a single document deletion is done properly.
+ """
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ document.delete("5", "10")
+ document.commit()
+ args = json.loads(HTTPretty.last_request.body)[0]
+
+ self.assertEqual(args['version'], '10')
+ self.assertEqual(args['type'], 'delete')
+ self.assertEqual(args['id'], '5')
+
+ def test_cloudsearch_delete_results(self):
+ """
+ Check that the result of a single document deletion is parsed properly.
+ """
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ document.delete("5", "10")
+ doc = document.commit()
+
+ self.assertEqual(doc.status, 'success')
+ self.assertEqual(doc.adds, 0)
+ self.assertEqual(doc.deletes, 1)
+
+
+class CloudSearchDocumentDeleteMultiple(CloudSearchDocumentTest):
+ response = {
+ 'status': 'success',
+ 'adds': 0,
+ 'deletes': 2,
+ }
+
+ def test_cloudsearch_delete_multiples(self):
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ document.delete("5", "10")
+ document.delete("6", "11")
+ document.commit()
+ args = json.loads(HTTPretty.last_request.body)
+
+ self.assertEqual(len(args), 2)
+ for arg in args:
+ self.assertEqual(arg['type'], 'delete')
+
+ if arg['id'] == '5':
+ self.assertEqual(arg['version'], '10')
+ elif arg['id'] == '6':
+ self.assertEqual(arg['version'], '11')
+ else: # Unknown result out of AWS that shouldn't be there
+ self.assertTrue(False)
+
+
+class CloudSearchSDFManipulation(CloudSearchDocumentTest):
+ response = {
+ 'status': 'success',
+ 'adds': 1,
+ 'deletes': 0,
+ }
+
+
+ def test_cloudsearch_initial_sdf_is_blank(self):
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+
+ self.assertEqual(document.get_sdf(), '[]')
+
+ def test_cloudsearch_single_document_sdf(self):
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+
+ document.add("1234", 10, {"id": "1234", "title": "Title 1",
+ "category": ["cat_a", "cat_b", "cat_c"]})
+
+ self.assertNotEqual(document.get_sdf(), '[]')
+
+ document.clear_sdf()
+
+ self.assertEqual(document.get_sdf(), '[]')
+
+class CloudSearchBadSDFTesting(CloudSearchDocumentTest):
+ response = {
+ 'status': 'success',
+ 'adds': 1,
+ 'deletes': 0,
+ }
+
+ def test_cloudsearch_erroneous_sdf(self):
+ original = boto.log.error
+ boto.log.error = MagicMock()
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+
+ document.add("1234", 10, {"id": "1234", "title": None,
+ "category": ["cat_a", "cat_b", "cat_c"]})
+
+ document.commit()
+ self.assertNotEqual(len(boto.log.error.call_args_list), 1)
+
+ boto.log.error = original
+
+
+class CloudSearchDocumentErrorBadUnicode(CloudSearchDocumentTest):
+ response = {
+ 'status': 'error',
+ 'adds': 0,
+ 'deletes': 0,
+ 'errors': [{'message': 'Illegal Unicode character in document'}]
+ }
+
+ def test_fake_bad_unicode(self):
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ document.add("1234", 10, {"id": "1234", "title": "Title 1",
+ "category": ["cat_a", "cat_b", "cat_c"]})
+ self.assertRaises(EncodingError, document.commit)
+
+
+class CloudSearchDocumentErrorDocsTooBig(CloudSearchDocumentTest):
+ response = {
+ 'status': 'error',
+ 'adds': 0,
+ 'deletes': 0,
+ 'errors': [{'message': 'The Content-Length is too long'}]
+ }
+
+ def test_fake_docs_too_big(self):
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+ document.add("1234", 10, {"id": "1234", "title": "Title 1",
+ "category": ["cat_a", "cat_b", "cat_c"]})
+
+ self.assertRaises(ContentTooLongError, document.commit)
+
+
+class CloudSearchDocumentErrorMismatch(CloudSearchDocumentTest):
+ response = {
+ 'status': 'error',
+ 'adds': 0,
+ 'deletes': 0,
+ 'errors': [{'message': 'Something went wrong'}]
+ }
+
+ def test_fake_failure(self):
+ document = DocumentServiceConnection(
+ endpoint="doc-demo-userdomain.us-east-1.cloudsearch.amazonaws.com")
+
+ document.add("1234", 10, {"id": "1234", "title": "Title 1",
+ "category": ["cat_a", "cat_b", "cat_c"]})
+
+ self.assertRaises(CommitMismatchError, document.commit)
diff --git a/tests/unit/cloudsearch/test_search.py b/tests/unit/cloudsearch/test_search.py
new file mode 100644
index 00000000..b6c23dd4
--- /dev/null
+++ b/tests/unit/cloudsearch/test_search.py
@@ -0,0 +1,325 @@
+#!/usr/bin/env python
+
+from tests.unit import unittest
+from httpretty import HTTPretty
+
+import urlparse
+import json
+
+from boto.cloudsearch.search import SearchConnection
+
+HOSTNAME = "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com"
+FULL_URL = 'http://%s/2011-02-01/search' % HOSTNAME
+
+
+class CloudSearchSearchTest(unittest.TestCase):
+
+ hits = [
+ {
+ 'id': '12341',
+ 'title': 'Document 1',
+ },
+ {
+ 'id': '12342',
+ 'title': 'Document 2',
+ },
+ {
+ 'id': '12343',
+ 'title': 'Document 3',
+ },
+ {
+ 'id': '12344',
+ 'title': 'Document 4',
+ },
+ {
+ 'id': '12345',
+ 'title': 'Document 5',
+ },
+ {
+ 'id': '12346',
+ 'title': 'Document 6',
+ },
+ {
+ 'id': '12347',
+ 'title': 'Document 7',
+ },
+ ]
+
+ response = {
+ 'rank': '-text_relevance',
+ 'match-expr':"Test",
+ 'hits': {
+ 'found': 30,
+ 'start': 0,
+ 'hit':hits
+ },
+ 'info': {
+ 'rid':'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
+ 'time-ms': 2,
+ 'cpu-time-ms': 0
+ }
+
+ }
+
+ def get_args(self, requestline):
+ (_, request, _) = requestline.split(" ")
+ (_, request) = request.split("?", 1)
+ args = urlparse.parse_qs(request)
+ return args
+
+ def setUp(self):
+ HTTPretty.enable()
+ HTTPretty.register_uri(HTTPretty.GET, FULL_URL,
+ body=json.dumps(self.response),
+ content_type="text/xml")
+
+ def tearDown(self):
+ HTTPretty.disable()
+
+ def test_cloudsearch_qsearch(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test')
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['q'], ["Test"])
+ self.assertEqual(args['start'], ["0"])
+ self.assertEqual(args['size'], ["10"])
+
+ def test_cloudsearch_bqsearch(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(bq="'Test'")
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['bq'], ["'Test'"])
+
+ def test_cloudsearch_search_details(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', size=50, start=20)
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['q'], ["Test"])
+ self.assertEqual(args['size'], ["50"])
+ self.assertEqual(args['start'], ["20"])
+
+ def test_cloudsearch_facet_single(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', facet=["Author"])
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['facet'], ["Author"])
+
+ def test_cloudsearch_facet_multiple(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', facet=["author", "cat"])
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['facet'], ["author,cat"])
+
+ def test_cloudsearch_facet_constraint_single(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(
+ q='Test',
+ facet_constraints={'author': "'John Smith','Mark Smith'"})
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['facet-author-constraints'],
+ ["'John Smith','Mark Smith'"])
+
+ def test_cloudsearch_facet_constraint_multiple(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(
+ q='Test',
+ facet_constraints={'author': "'John Smith','Mark Smith'",
+ 'category': "'News','Reviews'"})
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['facet-author-constraints'],
+ ["'John Smith','Mark Smith'"])
+ self.assertEqual(args['facet-category-constraints'],
+ ["'News','Reviews'"])
+
+ def test_cloudsearch_facet_sort_single(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', facet_sort={'author': 'alpha'})
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['facet-author-sort'], ['alpha'])
+
+ def test_cloudsearch_facet_sort_multiple(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', facet_sort={'author': 'alpha',
+ 'cat': 'count'})
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['facet-author-sort'], ['alpha'])
+ self.assertEqual(args['facet-cat-sort'], ['count'])
+
+ def test_cloudsearch_top_n_single(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', facet_top_n={'author': 5})
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['facet-author-top-n'], ['5'])
+
+ def test_cloudsearch_top_n_multiple(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', facet_top_n={'author': 5, 'cat': 10})
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['facet-author-top-n'], ['5'])
+ self.assertEqual(args['facet-cat-top-n'], ['10'])
+
+ def test_cloudsearch_rank_single(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', rank=["date"])
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['rank'], ['date'])
+
+ def test_cloudsearch_rank_multiple(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', rank=["date", "score"])
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['rank'], ['date,score'])
+
+ def test_cloudsearch_result_fields_single(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', return_fields=['author'])
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['return-fields'], ['author'])
+
+ def test_cloudsearch_result_fields_multiple(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', return_fields=['author', 'title'])
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['return-fields'], ['author,title'])
+
+
+ def test_cloudsearch_t_field_single(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', t={'year':'2001..2007'})
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['t-year'], ['2001..2007'])
+
+ def test_cloudsearch_t_field_multiple(self):
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ search.search(q='Test', t={'year':'2001..2007', 'score':'10..50'})
+
+ args = self.get_args(HTTPretty.last_request.raw_requestline)
+
+ self.assertEqual(args['t-year'], ['2001..2007'])
+ self.assertEqual(args['t-score'], ['10..50'])
+
+
+ def test_cloudsearch_results_meta(self):
+ """Check returned metadata is parsed correctly"""
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ results = search.search(q='Test')
+
+ # These rely on the default response which is fed into HTTPretty
+ self.assertEqual(results.rank, "-text_relevance")
+ self.assertEqual(results.match_expression, "Test")
+
+ def test_cloudsearch_results_info(self):
+ """Check num_pages_needed is calculated correctly"""
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ results = search.search(q='Test')
+
+ # This relies on the default response which is fed into HTTPretty
+ self.assertEqual(results.num_pages_needed, 3.0)
+
+ def test_cloudsearch_results_matched(self):
+ """
+ Check that information objects are passed back through the API
+ correctly.
+ """
+ search = SearchConnection(endpoint=HOSTNAME)
+ query = search.build_query(q='Test')
+
+ results = search(query)
+
+ self.assertEqual(results.search_service, search)
+ self.assertEqual(results.query, query)
+
+ def test_cloudsearch_results_hits(self):
+ """Check that documents are parsed properly from AWS"""
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ results = search.search(q='Test')
+
+ hits = map(lambda x: x['id'], results.docs)
+
+ # This relies on the default response which is fed into HTTPretty
+ self.assertEqual(
+ hits, ["12341", "12342", "12343", "12344",
+ "12345", "12346", "12347"])
+
+ def test_cloudsearch_results_iterator(self):
+ """Check the results iterator"""
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ results = search.search(q='Test')
+ results_correct = iter(["12341", "12342", "12343", "12344",
+ "12345", "12346", "12347"])
+ for x in results:
+ self.assertEqual(x['id'], results_correct.next())
+
+
+    def test_cloudsearch_results_internal_consistency(self):
+        """Check that the number of results matches the number of docs"""
+ search = SearchConnection(endpoint=HOSTNAME)
+
+ results = search.search(q='Test')
+
+ self.assertEqual(len(results), len(results.docs))
+
+ def test_cloudsearch_search_nextpage(self):
+ """Check next page query is correct"""
+ search = SearchConnection(endpoint=HOSTNAME)
+ query1 = search.build_query(q='Test')
+ query2 = search.build_query(q='Test')
+
+ results = search(query2)
+
+ self.assertEqual(results.next_page().query.start,
+ query1.start + query1.size)
+ self.assertEqual(query1.q, query2.q)
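
The cloudsearch assertions above pin down how search keyword arguments are flattened into query parameters: list-valued options are comma-joined, while per-field dicts such as facet_constraints become facet-FIELD-constraints keys. A minimal standalone sketch of that mapping, using names assumed from the tests rather than the actual boto.cloudsearch.search internals:

# Illustrative only: reproduces the parameter shapes asserted above; it is not
# the real SearchConnection/Query serialization code.
def build_search_params(q, facet=None, facet_constraints=None, facet_sort=None,
                        facet_top_n=None, rank=None, return_fields=None, t=None):
    params = {'q': q}
    if facet:
        params['facet'] = ','.join(facet)              # ["author", "cat"] -> "author,cat"
    if rank:
        params['rank'] = ','.join(rank)                # ["date", "score"] -> "date,score"
    if return_fields:
        params['return-fields'] = ','.join(return_fields)
    for field, value in (facet_constraints or {}).items():
        params['facet-%s-constraints' % field] = value
    for field, value in (facet_sort or {}).items():
        params['facet-%s-sort' % field] = value
    for field, value in (facet_top_n or {}).items():
        params['facet-%s-top-n' % field] = str(value)  # 5 -> '5'
    for field, value in (t or {}).items():
        params['t-%s' % field] = value                 # {'year': '2001..2007'} -> t-year
    return params

# build_search_params('Test', facet_top_n={'author': 5})
# -> {'q': 'Test', 'facet-author-top-n': '5'}
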
diff --git a/tests/unit/ec2/test_connection.py b/tests/unit/ec2/test_connection.py
index 5f28510f..d06288dc 100644
--- a/tests/unit/ec2/test_connection.py
+++ b/tests/unit/ec2/test_connection.py
@@ -476,5 +476,91 @@ class TestCopySnapshot(TestEC2ConnectionBase):
'Version'])
+class TestAccountAttributes(TestEC2ConnectionBase):
+ def default_body(self):
+ return """
+ <DescribeAccountAttributesResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
+ <requestId>6d042e8a-4bc3-43e8-8265-3cbc54753f14</requestId>
+ <accountAttributeSet>
+ <item>
+ <attributeName>vpc-max-security-groups-per-interface</attributeName>
+ <attributeValueSet>
+ <item>
+ <attributeValue>5</attributeValue>
+ </item>
+ </attributeValueSet>
+ </item>
+ <item>
+ <attributeName>max-instances</attributeName>
+ <attributeValueSet>
+ <item>
+ <attributeValue>50</attributeValue>
+ </item>
+ </attributeValueSet>
+ </item>
+ <item>
+ <attributeName>supported-platforms</attributeName>
+ <attributeValueSet>
+ <item>
+ <attributeValue>EC2</attributeValue>
+ </item>
+ <item>
+ <attributeValue>VPC</attributeValue>
+ </item>
+ </attributeValueSet>
+ </item>
+ <item>
+ <attributeName>default-vpc</attributeName>
+ <attributeValueSet>
+ <item>
+ <attributeValue>none</attributeValue>
+ </item>
+ </attributeValueSet>
+ </item>
+ </accountAttributeSet>
+ </DescribeAccountAttributesResponse>
+ """
+
+ def test_describe_account_attributes(self):
+ self.set_http_response(status_code=200)
+ parsed = self.ec2.describe_account_attributes()
+ self.assertEqual(len(parsed), 4)
+ self.assertEqual(parsed[0].attribute_name,
+ 'vpc-max-security-groups-per-interface')
+ self.assertEqual(parsed[0].attribute_values,
+ ['5'])
+ self.assertEqual(parsed[-1].attribute_name,
+ 'default-vpc')
+ self.assertEqual(parsed[-1].attribute_values,
+ ['none'])
+
+
+class TestDescribeVPCAttribute(TestEC2ConnectionBase):
+ def default_body(self):
+ return """
+ <DescribeVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
+ <requestId>request_id</requestId>
+ <vpcId>vpc-id</vpcId>
+ <enableDnsHostnames>
+ <value>false</value>
+ </enableDnsHostnames>
+ </DescribeVpcAttributeResponse>
+ """
+
+ def test_describe_vpc_attribute(self):
+ self.set_http_response(status_code=200)
+ parsed = self.ec2.describe_vpc_attribute('vpc-id',
+ 'enableDnsHostnames')
+ self.assertEqual(parsed.vpc_id, 'vpc-id')
+ self.assertFalse(parsed.enable_dns_hostnames)
+ self.assert_request_parameters({
+ 'Action': 'DescribeVpcAttribute',
+ 'VpcId': 'vpc-id',
+ 'Attribute': 'enableDnsHostnames',},
+ ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
+ 'SignatureVersion', 'Timestamp',
+ 'Version'])
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/glacier/test_concurrent.py b/tests/unit/glacier/test_concurrent.py
index 87a46a7b..b9f984e1 100644
--- a/tests/unit/glacier/test_concurrent.py
+++ b/tests/unit/glacier/test_concurrent.py
@@ -20,6 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
+import tempfile
from Queue import Queue
import mock
@@ -27,6 +28,8 @@ from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.glacier.concurrent import ConcurrentUploader, ConcurrentDownloader
+from boto.glacier.concurrent import UploadWorkerThread
+from boto.glacier.concurrent import _END_SENTINEL
class FakeThreadedConcurrentUploader(ConcurrentUploader):
@@ -40,6 +43,7 @@ class FakeThreadedConcurrentUploader(ConcurrentUploader):
for i in xrange(total_parts):
hash_chunks[i] = 'foo'
+
class FakeThreadedConcurrentDownloader(ConcurrentDownloader):
def _start_download_threads(self, results_queue, worker_queue):
self.results_queue = results_queue
@@ -116,5 +120,57 @@ class TestConcurrentUploader(unittest.TestCase):
self.assertEqual(len(items), 12)
+class TestUploaderThread(unittest.TestCase):
+ def setUp(self):
+ self.fileobj = tempfile.NamedTemporaryFile()
+ self.filename = self.fileobj.name
+
+ def test_fileobj_closed_when_thread_shuts_down(self):
+ thread = UploadWorkerThread(mock.Mock(), 'vault_name',
+ self.filename, 'upload_id',
+ Queue(), Queue())
+ fileobj = thread._fileobj
+ self.assertFalse(fileobj.closed)
+        # By setting should_continue to False, the thread should exit
+        # immediately, and we can still verify cleanup behavior.
+ thread.should_continue = False
+ thread.run()
+ self.assertTrue(fileobj.closed)
+
+ def test_upload_errors_have_exception_messages(self):
+ api = mock.Mock()
+ job_queue = Queue()
+ result_queue = Queue()
+ upload_thread = UploadWorkerThread(
+ api, 'vault_name', self.filename,
+ 'upload_id', job_queue, result_queue, num_retries=1,
+ time_between_retries=0)
+ api.upload_part.side_effect = Exception("exception message")
+ job_queue.put((0, 1024))
+ job_queue.put(_END_SENTINEL)
+
+ upload_thread.run()
+ result = result_queue.get(timeout=1)
+ self.assertIn("exception message", str(result))
+
+ def test_num_retries_is_obeyed(self):
+ # total attempts is 1 + num_retries so if I have num_retries of 2,
+ # I'll attempt the upload once, and if that fails I'll retry up to
+ # 2 more times for a total of 3 attempts.
+ api = mock.Mock()
+ job_queue = Queue()
+ result_queue = Queue()
+ upload_thread = UploadWorkerThread(
+ api, 'vault_name', self.filename,
+ 'upload_id', job_queue, result_queue, num_retries=2,
+ time_between_retries=0)
+ api.upload_part.side_effect = Exception()
+ job_queue.put((0, 1024))
+ job_queue.put(_END_SENTINEL)
+
+ upload_thread.run()
+ self.assertEqual(api.upload_part.call_count, 3)
+
+
if __name__ == '__main__':
unittest.main()
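
test_num_retries_is_obeyed above relies on the total attempt count being 1 + num_retries. A minimal sketch of that retry loop, with upload_part and time_between_retries as stand-ins rather than the actual UploadWorkerThread internals:

import time

# Illustrative only: one initial attempt plus num_retries retries, re-raising
# the last error once the budget is exhausted.
def upload_with_retries(upload_part, work, num_retries=2, time_between_retries=0):
    last_error = None
    for _ in range(1 + num_retries):
        try:
            return upload_part(work)
        except Exception as e:
            last_error = e
            time.sleep(time_between_retries)
    raise last_error

# With num_retries=2 a persistently failing upload_part is called 3 times,
# matching assertEqual(api.upload_part.call_count, 3) above.
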
diff --git a/tests/unit/glacier/test_vault.py b/tests/unit/glacier/test_vault.py
index bb4605a8..f61d5874 100644
--- a/tests/unit/glacier/test_vault.py
+++ b/tests/unit/glacier/test_vault.py
@@ -95,6 +95,16 @@ class TestConcurrentUploads(unittest.TestCase):
'my description')
self.assertEqual(archive_id, 'archive_id')
+ def test_concurrent_upload_forwards_kwargs(self):
+ v = vault.Vault(None, None)
+ with mock.patch('boto.glacier.vault.ConcurrentUploader') as c:
+ c.return_value.upload.return_value = 'archive_id'
+ archive_id = v.concurrent_create_archive_from_file(
+ 'filename', 'my description', num_threads=10,
+ part_size=1024 * 1024 * 1024 * 8)
+ c.assert_called_with(None, None, num_threads=10,
+ part_size=1024 * 1024 * 1024 * 8)
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/provider/test_provider.py b/tests/unit/provider/test_provider.py
index edece979..cbeea4a9 100644
--- a/tests/unit/provider/test_provider.py
+++ b/tests/unit/provider/test_provider.py
@@ -7,6 +7,19 @@ import mock
from boto import provider
+INSTANCE_CONFIG = {
+ 'allowall': {
+ u'AccessKeyId': u'iam_access_key',
+ u'Code': u'Success',
+ u'Expiration': u'2012-09-01T03:57:34Z',
+ u'LastUpdated': u'2012-08-31T21:43:40Z',
+ u'SecretAccessKey': u'iam_secret_key',
+ u'Token': u'iam_token',
+ u'Type': u'AWS-HMAC'
+ }
+}
+
+
class TestProvider(unittest.TestCase):
def setUp(self):
self.environ = {}
@@ -112,24 +125,14 @@ class TestProvider(unittest.TestCase):
self.assertIsNone(p.security_token)
def test_metadata_server_credentials(self):
- instance_config = {
- 'iam': {
- 'security-credentials': {
- 'allowall': {u'AccessKeyId': u'iam_access_key',
- u'Code': u'Success',
- u'Expiration': u'2012-09-01T03:57:34Z',
- u'LastUpdated': u'2012-08-31T21:43:40Z',
- u'SecretAccessKey': u'iam_secret_key',
- u'Token': u'iam_token',
- u'Type': u'AWS-HMAC'}
- }
- }
- }
- self.get_instance_metadata.return_value = instance_config
+ self.get_instance_metadata.return_value = INSTANCE_CONFIG
p = provider.Provider('aws')
self.assertEqual(p.access_key, 'iam_access_key')
self.assertEqual(p.secret_key, 'iam_secret_key')
self.assertEqual(p.security_token, 'iam_token')
+ self.assertEqual(
+ self.get_instance_metadata.call_args[1]['data'],
+ 'meta-data/iam/security-credentials')
def test_refresh_credentials(self):
now = datetime.now()
@@ -144,13 +147,7 @@ class TestProvider(unittest.TestCase):
u'Token': u'first_token',
u'Type': u'AWS-HMAC'
}
- instance_config = {
- 'iam': {
- 'security-credentials': {
- 'allowall': credentials
- }
- }
- }
+ instance_config = {'allowall': credentials}
self.get_instance_metadata.return_value = instance_config
p = provider.Provider('aws')
self.assertEqual(p.access_key, 'first_access_key')
@@ -171,6 +168,20 @@ class TestProvider(unittest.TestCase):
self.assertEqual(p.secret_key, 'second_secret_key')
self.assertEqual(p.security_token, 'second_token')
+ @mock.patch('boto.provider.config.getint')
+ @mock.patch('boto.provider.config.getfloat')
+ def test_metadata_config_params(self, config_float, config_int):
+ config_int.return_value = 10
+ config_float.return_value = 4.0
+ self.get_instance_metadata.return_value = INSTANCE_CONFIG
+ p = provider.Provider('aws')
+ self.assertEqual(p.access_key, 'iam_access_key')
+ self.assertEqual(p.secret_key, 'iam_secret_key')
+ self.assertEqual(p.security_token, 'iam_token')
+ self.get_instance_metadata.assert_called_with(
+ timeout=4.0, num_retries=10,
+ data='meta-data/iam/security-credentials')
+
if __name__ == '__main__':
unittest.main()
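
test_metadata_config_params above expects an int retry count and a float timeout read from boto's config to be forwarded to get_instance_metadata along with the security-credentials path. A minimal sketch of that wiring; the config option names here are placeholders assumed for illustration, not necessarily the real keys:

# Illustrative only: config key names are assumptions; only the types and the
# forwarded keyword arguments are fixed by the test above.
def fetch_iam_credentials(config, get_instance_metadata):
    timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0)
    num_retries = config.getint('Boto', 'metadata_service_num_attempts', 1)
    return get_instance_metadata(timeout=timeout, num_retries=num_retries,
                                 data='meta-data/iam/security-credentials')
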
diff --git a/tests/unit/s3/test_bucket.py b/tests/unit/s3/test_bucket.py
new file mode 100644
index 00000000..de7e27cc
--- /dev/null
+++ b/tests/unit/s3/test_bucket.py
@@ -0,0 +1,48 @@
+from tests.unit import unittest
+from tests.unit import AWSMockServiceTestCase
+
+from boto.s3.connection import S3Connection
+from boto.s3.bucket import Bucket
+
+class TestS3Bucket(AWSMockServiceTestCase):
+ connection_class = S3Connection
+
+ def setUp(self):
+ super(TestS3Bucket, self).setUp()
+
+ def test_bucket_create_bucket(self):
+ self.set_http_response(status_code=200)
+ bucket = self.service_connection.create_bucket('mybucket_create')
+ self.assertEqual(bucket.name, 'mybucket_create')
+
+ def test_bucket_constructor(self):
+ self.set_http_response(status_code=200)
+ bucket = Bucket(self.service_connection, 'mybucket_constructor')
+ self.assertEqual(bucket.name, 'mybucket_constructor')
+
+ def test_bucket_basics(self):
+ self.set_http_response(status_code=200)
+ bucket = self.service_connection.create_bucket('mybucket')
+        self.assertEqual(repr(bucket), '<Bucket: mybucket>')
+
+ def test_bucket_new_key(self):
+ self.set_http_response(status_code=200)
+ bucket = self.service_connection.create_bucket('mybucket')
+ key = bucket.new_key('mykey')
+
+ self.assertEqual(key.bucket, bucket)
+ self.assertEqual(key.key, 'mykey')
+
+ def test_bucket_new_key_missing_name(self):
+ self.set_http_response(status_code=200)
+ bucket = self.service_connection.create_bucket('mybucket')
+
+ with self.assertRaises(ValueError):
+ key = bucket.new_key('')
+
+ def test_bucket_delete_key_missing_name(self):
+ self.set_http_response(status_code=200)
+ bucket = self.service_connection.create_bucket('mybucket')
+
+ with self.assertRaises(ValueError):
+ key = bucket.delete_key('')
diff --git a/tests/unit/s3/test_keyfile.py b/tests/unit/s3/test_keyfile.py
index fcb9871d..bf90664a 100644
--- a/tests/unit/s3/test_keyfile.py
+++ b/tests/unit/s3/test_keyfile.py
@@ -57,7 +57,7 @@ class KeyfileTest(unittest.TestCase):
try:
self.keyfile.tell()
except ValueError, e:
- self.assertEqual(e.message, 'I/O operation on closed file')
+ self.assertEqual(str(e), 'I/O operation on closed file')
def testSeek(self):
self.assertEqual(self.keyfile.read(4), self.contents[:4])
@@ -70,7 +70,7 @@ class KeyfileTest(unittest.TestCase):
try:
self.keyfile.seek(-5)
except IOError, e:
- self.assertEqual(e.message, 'Invalid argument')
+ self.assertEqual(str(e), 'Invalid argument')
# Reading past end of file is supposed to return empty string.
self.keyfile.read(10)
@@ -92,7 +92,7 @@ class KeyfileTest(unittest.TestCase):
try:
self.keyfile.seek(-100, os.SEEK_END)
except IOError, e:
- self.assertEqual(e.message, 'Invalid argument')
+ self.assertEqual(str(e), 'Invalid argument')
def testSeekCur(self):
self.assertEqual(self.keyfile.read(1), self.contents[0])
diff --git a/tests/unit/s3/test_uri.py b/tests/unit/s3/test_uri.py
new file mode 100644
index 00000000..ab682191
--- /dev/null
+++ b/tests/unit/s3/test_uri.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 Google, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import boto
+import tempfile
+import urllib
+from boto.exception import InvalidUriError
+from boto import storage_uri
+from boto.s3.keyfile import KeyFile
+from tests.integration.s3.mock_storage_service import MockBucket
+from tests.integration.s3.mock_storage_service import MockBucketStorageUri
+from tests.integration.s3.mock_storage_service import MockConnection
+from tests.unit import unittest
+
+"""Unit tests for StorageUri interface."""
+
+class UriTest(unittest.TestCase):
+
+ def test_provider_uri(self):
+ for prov in ('gs', 's3'):
+ uri_str = '%s://' % prov
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual(prov, uri.scheme)
+ self.assertEqual(uri_str, uri.uri)
+ self.assertFalse(hasattr(uri, 'versionless_uri'))
+ self.assertEqual('', uri.bucket_name)
+ self.assertEqual('', uri.object_name)
+ self.assertEqual(None, uri.version_id)
+ self.assertEqual(None, uri.generation)
+ self.assertEqual(uri.names_provider(), True)
+ self.assertEqual(uri.names_container(), True)
+ self.assertEqual(uri.names_bucket(), False)
+ self.assertEqual(uri.names_object(), False)
+ self.assertEqual(uri.names_directory(), False)
+ self.assertEqual(uri.names_file(), False)
+ self.assertEqual(uri.is_stream(), False)
+ self.assertEqual(uri.is_version_specific, False)
+
+ def test_bucket_uri_no_trailing_slash(self):
+ for prov in ('gs', 's3'):
+ uri_str = '%s://bucket' % prov
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual(prov, uri.scheme)
+ self.assertEqual('%s/' % uri_str, uri.uri)
+ self.assertFalse(hasattr(uri, 'versionless_uri'))
+ self.assertEqual('bucket', uri.bucket_name)
+ self.assertEqual('', uri.object_name)
+ self.assertEqual(None, uri.version_id)
+ self.assertEqual(None, uri.generation)
+ self.assertEqual(uri.names_provider(), False)
+ self.assertEqual(uri.names_container(), True)
+ self.assertEqual(uri.names_bucket(), True)
+ self.assertEqual(uri.names_object(), False)
+ self.assertEqual(uri.names_directory(), False)
+ self.assertEqual(uri.names_file(), False)
+ self.assertEqual(uri.is_stream(), False)
+ self.assertEqual(uri.is_version_specific, False)
+
+ def test_bucket_uri_with_trailing_slash(self):
+ for prov in ('gs', 's3'):
+ uri_str = '%s://bucket/' % prov
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual(prov, uri.scheme)
+ self.assertEqual(uri_str, uri.uri)
+ self.assertFalse(hasattr(uri, 'versionless_uri'))
+ self.assertEqual('bucket', uri.bucket_name)
+ self.assertEqual('', uri.object_name)
+ self.assertEqual(None, uri.version_id)
+ self.assertEqual(None, uri.generation)
+ self.assertEqual(uri.names_provider(), False)
+ self.assertEqual(uri.names_container(), True)
+ self.assertEqual(uri.names_bucket(), True)
+ self.assertEqual(uri.names_object(), False)
+ self.assertEqual(uri.names_directory(), False)
+ self.assertEqual(uri.names_file(), False)
+ self.assertEqual(uri.is_stream(), False)
+ self.assertEqual(uri.is_version_specific, False)
+
+ def test_non_versioned_object_uri(self):
+ for prov in ('gs', 's3'):
+ uri_str = '%s://bucket/obj/a/b' % prov
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual(prov, uri.scheme)
+ self.assertEqual(uri_str, uri.uri)
+ self.assertEqual(uri_str, uri.versionless_uri)
+ self.assertEqual('bucket', uri.bucket_name)
+ self.assertEqual('obj/a/b', uri.object_name)
+ self.assertEqual(None, uri.version_id)
+ self.assertEqual(None, uri.generation)
+ self.assertEqual(uri.names_provider(), False)
+ self.assertEqual(uri.names_container(), False)
+ self.assertEqual(uri.names_bucket(), False)
+ self.assertEqual(uri.names_object(), True)
+ self.assertEqual(uri.names_directory(), False)
+ self.assertEqual(uri.names_file(), False)
+ self.assertEqual(uri.is_stream(), False)
+ self.assertEqual(uri.is_version_specific, False)
+
+ def test_versioned_gs_object_uri(self):
+ uri_str = 'gs://bucket/obj/a/b#1359908801674000'
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual('gs', uri.scheme)
+ self.assertEqual(uri_str, uri.uri)
+ self.assertEqual('gs://bucket/obj/a/b', uri.versionless_uri)
+ self.assertEqual('bucket', uri.bucket_name)
+ self.assertEqual('obj/a/b', uri.object_name)
+ self.assertEqual(None, uri.version_id)
+ self.assertEqual(1359908801674000, uri.generation)
+ self.assertEqual(uri.names_provider(), False)
+ self.assertEqual(uri.names_container(), False)
+ self.assertEqual(uri.names_bucket(), False)
+ self.assertEqual(uri.names_object(), True)
+ self.assertEqual(uri.names_directory(), False)
+ self.assertEqual(uri.names_file(), False)
+ self.assertEqual(uri.is_stream(), False)
+ self.assertEqual(uri.is_version_specific, True)
+
+ def test_versioned_gs_object_uri_with_legacy_generation_value(self):
+ uri_str = 'gs://bucket/obj/a/b#1'
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual('gs', uri.scheme)
+ self.assertEqual(uri_str, uri.uri)
+ self.assertEqual('gs://bucket/obj/a/b', uri.versionless_uri)
+ self.assertEqual('bucket', uri.bucket_name)
+ self.assertEqual('obj/a/b', uri.object_name)
+ self.assertEqual(None, uri.version_id)
+ self.assertEqual(1, uri.generation)
+ self.assertEqual(uri.names_provider(), False)
+ self.assertEqual(uri.names_container(), False)
+ self.assertEqual(uri.names_bucket(), False)
+ self.assertEqual(uri.names_object(), True)
+ self.assertEqual(uri.names_directory(), False)
+ self.assertEqual(uri.names_file(), False)
+ self.assertEqual(uri.is_stream(), False)
+ self.assertEqual(uri.is_version_specific, True)
+
+ def test_roundtrip_versioned_gs_object_uri_parsed(self):
+ uri_str = 'gs://bucket/obj#1359908801674000'
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ roundtrip_uri = boto.storage_uri(uri.uri, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual(uri.uri, roundtrip_uri.uri)
+ self.assertEqual(uri.is_version_specific, True)
+
+ def test_versioned_s3_object_uri(self):
+ uri_str = 's3://bucket/obj/a/b#eMuM0J15HkJ9QHlktfNP5MfA.oYR2q6S'
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual('s3', uri.scheme)
+ self.assertEqual(uri_str, uri.uri)
+ self.assertEqual('s3://bucket/obj/a/b', uri.versionless_uri)
+ self.assertEqual('bucket', uri.bucket_name)
+ self.assertEqual('obj/a/b', uri.object_name)
+ self.assertEqual('eMuM0J15HkJ9QHlktfNP5MfA.oYR2q6S', uri.version_id)
+ self.assertEqual(None, uri.generation)
+ self.assertEqual(uri.names_provider(), False)
+ self.assertEqual(uri.names_container(), False)
+ self.assertEqual(uri.names_bucket(), False)
+ self.assertEqual(uri.names_object(), True)
+ self.assertEqual(uri.names_directory(), False)
+ self.assertEqual(uri.names_file(), False)
+ self.assertEqual(uri.is_stream(), False)
+ self.assertEqual(uri.is_version_specific, True)
+
+ def test_explicit_file_uri(self):
+ tmp_dir = tempfile.tempdir
+ uri_str = 'file://%s' % urllib.pathname2url(tmp_dir)
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual('file', uri.scheme)
+ self.assertEqual(uri_str, uri.uri)
+ self.assertFalse(hasattr(uri, 'versionless_uri'))
+ self.assertEqual('', uri.bucket_name)
+ self.assertEqual(tmp_dir, uri.object_name)
+ self.assertFalse(hasattr(uri, 'version_id'))
+ self.assertFalse(hasattr(uri, 'generation'))
+ self.assertFalse(hasattr(uri, 'is_version_specific'))
+ self.assertEqual(uri.names_provider(), False)
+ self.assertEqual(uri.names_bucket(), False)
+ # Don't check uri.names_container(), uri.names_directory(),
+ # uri.names_file(), or uri.names_object(), because for file URIs these
+ # functions look at the file system and apparently unit tests run
+ # chroot'd.
+ self.assertEqual(uri.is_stream(), False)
+
+ def test_implicit_file_uri(self):
+ tmp_dir = tempfile.tempdir
+ uri_str = '%s' % urllib.pathname2url(tmp_dir)
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual('file', uri.scheme)
+ self.assertEqual('file://%s' % tmp_dir, uri.uri)
+ self.assertFalse(hasattr(uri, 'versionless_uri'))
+ self.assertEqual('', uri.bucket_name)
+ self.assertEqual(tmp_dir, uri.object_name)
+ self.assertFalse(hasattr(uri, 'version_id'))
+ self.assertFalse(hasattr(uri, 'generation'))
+ self.assertFalse(hasattr(uri, 'is_version_specific'))
+ self.assertEqual(uri.names_provider(), False)
+ self.assertEqual(uri.names_bucket(), False)
+ # Don't check uri.names_container(), uri.names_directory(),
+ # uri.names_file(), or uri.names_object(), because for file URIs these
+ # functions look at the file system and apparently unit tests run
+ # chroot'd.
+ self.assertEqual(uri.is_stream(), False)
+
+ def test_gs_object_uri_contains_sharp_not_matching_version_syntax(self):
+ uri_str = 'gs://bucket/obj#13a990880167400'
+ uri = boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ self.assertEqual('gs', uri.scheme)
+ self.assertEqual(uri_str, uri.uri)
+ self.assertEqual('gs://bucket/obj#13a990880167400',
+ uri.versionless_uri)
+ self.assertEqual('bucket', uri.bucket_name)
+ self.assertEqual('obj#13a990880167400', uri.object_name)
+ self.assertEqual(None, uri.version_id)
+ self.assertEqual(None, uri.generation)
+ self.assertEqual(uri.names_provider(), False)
+ self.assertEqual(uri.names_container(), False)
+ self.assertEqual(uri.names_bucket(), False)
+ self.assertEqual(uri.names_object(), True)
+ self.assertEqual(uri.names_directory(), False)
+ self.assertEqual(uri.names_file(), False)
+ self.assertEqual(uri.is_stream(), False)
+ self.assertEqual(uri.is_version_specific, False)
+
+ def test_invalid_scheme(self):
+ uri_str = 'mars://bucket/object'
+ try:
+ boto.storage_uri(uri_str, validate=False,
+ suppress_consec_slashes=False)
+ except InvalidUriError as e:
+ self.assertIn('Unrecognized scheme', e.message)
+
+
+if __name__ == '__main__':
+ unittest.main()
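
The gs URI tests above treat 'obj/a/b#1359908801674000' and 'obj/a/b#1' as version-specific, but leave 'obj#13a990880167400' as a plain object name, which implies that only an all-digit suffix after '#' is parsed as a generation. A minimal sketch of that rule, as an assumption drawn from the tests rather than the actual storage_uri parser:

# Illustrative only: split a trailing '#<digits>' generation off a gs object
# name; anything else stays part of the name.
def split_gs_generation(object_name):
    if '#' in object_name:
        name, _, suffix = object_name.rpartition('#')
        if suffix.isdigit():
            return name, int(suffix)
    return object_name, None

# split_gs_generation('obj/a/b#1359908801674000') -> ('obj/a/b', 1359908801674000)
# split_gs_generation('obj#13a990880167400')      -> ('obj#13a990880167400', None)
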
diff --git a/tests/unit/s3/test_website.py b/tests/unit/s3/test_website.py
index 22c66825..74c25858 100644
--- a/tests/unit/s3/test_website.py
+++ b/tests/unit/s3/test_website.py
@@ -22,6 +22,7 @@
from tests.unit import unittest
import xml.dom.minidom
+import xml.sax
from boto.s3.website import WebsiteConfiguration
from boto.s3.website import RedirectLocation
@@ -30,6 +31,7 @@ from boto.s3.website import Condition
from boto.s3.website import RoutingRules
from boto.s3.website import RoutingRule
from boto.s3.website import Redirect
+from boto import handler
def pretty_print_xml(text):
@@ -74,7 +76,7 @@ class TestS3WebsiteConfiguration(unittest.TestCase):
xml = config.to_xml()
self.assertIn(
('<RedirectAllRequestsTo><HostName>'
- 'example.com</HostName>\n<Protocol>https</Protocol>'
+ 'example.com</HostName><Protocol>https</Protocol>'
'</RedirectAllRequestsTo>'), xml)
def test_routing_rules_key_prefix(self):
@@ -141,6 +143,34 @@ class TestS3WebsiteConfiguration(unittest.TestCase):
"""
self.assertEqual(x(expected_xml), x(xml))
+ def test_key_prefix(self):
+ x = pretty_print_xml
+ rules = RoutingRules()
+ condition = Condition(key_prefix="images/")
+ redirect = Redirect(replace_key='folderdeleted.html')
+ rules.add_rule(RoutingRule(condition, redirect))
+ config = WebsiteConfiguration(suffix='index.html', routing_rules=rules)
+ xml = config.to_xml()
+
+ expected_xml = """<?xml version="1.0" encoding="UTF-8"?>
+ <WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
+ <IndexDocument>
+ <Suffix>index.html</Suffix>
+ </IndexDocument>
+ <RoutingRules>
+ <RoutingRule>
+ <Condition>
+ <KeyPrefixEquals>images/</KeyPrefixEquals>
+ </Condition>
+ <Redirect>
+ <ReplaceKeyWith>folderdeleted.html</ReplaceKeyWith>
+ </Redirect>
+ </RoutingRule>
+ </RoutingRules>
+ </WebsiteConfiguration>
+ """
+ self.assertEqual(x(expected_xml), x(xml))
+
def test_builders(self):
x = pretty_print_xml
# This is a more declarative way to create rules.
@@ -158,3 +188,43 @@ class TestS3WebsiteConfiguration(unittest.TestCase):
hostname='example.com', replace_key_prefix='report-404/'))
xml2 = rules2.to_xml()
self.assertEqual(x(xml), x(xml2))
+
+ def test_parse_xml(self):
+ x = pretty_print_xml
+ xml_in = """<?xml version="1.0" encoding="UTF-8"?>
+ <WebsiteConfiguration xmlns='http://s3.amazonaws.com/doc/2006-03-01/'>
+ <IndexDocument>
+ <Suffix>index.html</Suffix>
+ </IndexDocument>
+ <ErrorDocument>
+ <Key>error.html</Key>
+ </ErrorDocument>
+ <RoutingRules>
+ <RoutingRule>
+ <Condition>
+ <KeyPrefixEquals>docs/</KeyPrefixEquals>
+ </Condition>
+ <Redirect>
+ <Protocol>https</Protocol>
+ <HostName>www.example.com</HostName>
+ <ReplaceKeyWith>documents/</ReplaceKeyWith>
+ <HttpRedirectCode>302</HttpRedirectCode>
+ </Redirect>
+ </RoutingRule>
+ <RoutingRule>
+ <Condition>
+ <HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals>
+ </Condition>
+ <Redirect>
+ <HostName>example.com</HostName>
+ <ReplaceKeyPrefixWith>report-404/</ReplaceKeyPrefixWith>
+ </Redirect>
+ </RoutingRule>
+ </RoutingRules>
+ </WebsiteConfiguration>
+ """
+ webconfig = WebsiteConfiguration()
+ h = handler.XmlHandler(webconfig, None)
+ xml.sax.parseString(xml_in, h)
+ xml_out = webconfig.to_xml()
+ self.assertEqual(x(xml_in), x(xml_out))
diff --git a/tests/unit/test_connection.py b/tests/unit/test_connection.py
index 213f5db9..d2c3e2aa 100644
--- a/tests/unit/test_connection.py
+++ b/tests/unit/test_connection.py
@@ -19,8 +19,14 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
+import urlparse
from tests.unit import unittest
+from httpretty import HTTPretty
+
from boto.connection import AWSQueryConnection
+from boto.exception import BotoServerError
+from boto.regioninfo import RegionInfo
+from boto.compat import json
class TestListParamsSerialization(unittest.TestCase):
@@ -56,5 +62,205 @@ class TestListParamsSerialization(unittest.TestCase):
}, params)
+class MockAWSService(AWSQueryConnection):
+ """
+ Fake AWS Service
+
+    This is used to test that the AWSQueryConnection object behaves properly.
+ """
+
+ APIVersion = '2012-01-01'
+ def _required_auth_capability(self):
+ return ['sign-v2']
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, host=None, port=None,
+ proxy=None, proxy_port=None,
+ proxy_user=None, proxy_pass=None, debug=0,
+ https_connection_factory=None, region=None, path='/',
+ api_version=None, security_token=None,
+ validate_certs=True):
+ self.region = region
+ AWSQueryConnection.__init__(self, aws_access_key_id,
+ aws_secret_access_key,
+ is_secure, port, proxy, proxy_port,
+ proxy_user, proxy_pass,
+ self.region.endpoint, debug,
+ https_connection_factory, path,
+ security_token,
+ validate_certs=validate_certs)
+
+class TestAWSQueryConnection(unittest.TestCase):
+ def setUp(self):
+ self.region = RegionInfo(name='cc-zone-1',
+ endpoint='mockservice.cc-zone-1.amazonaws.com',
+ connection_cls=MockAWSService)
+
+ HTTPretty.enable()
+
+ def tearDown(self):
+ HTTPretty.disable()
+
+class TestAWSQueryConnectionSimple(TestAWSQueryConnection):
+    def test_query_connection_basics(self):
+ HTTPretty.register_uri(HTTPretty.POST,
+ 'https://%s/' % self.region.endpoint,
+ json.dumps({'test': 'secure'}),
+ content_type='application/json')
+
+ conn = self.region.connect(aws_access_key_id='access_key',
+ aws_secret_access_key='secret')
+
+ self.assertEqual(conn.host, 'mockservice.cc-zone-1.amazonaws.com')
+
+ def test_single_command(self):
+ HTTPretty.register_uri(HTTPretty.POST,
+ 'https://%s/' % self.region.endpoint,
+ json.dumps({'test': 'secure'}),
+ content_type='application/json')
+
+ conn = self.region.connect(aws_access_key_id='access_key',
+ aws_secret_access_key='secret')
+ resp = conn.make_request('myCmd',
+ {'par1': 'foo', 'par2': 'baz'},
+ "/",
+ "POST")
+
+ args = urlparse.parse_qs(HTTPretty.last_request.body)
+ self.assertEqual(args['AWSAccessKeyId'], ['access_key'])
+ self.assertEqual(args['SignatureMethod'], ['HmacSHA256'])
+ self.assertEqual(args['Version'], [conn.APIVersion])
+ self.assertEqual(args['par1'], ['foo'])
+ self.assertEqual(args['par2'], ['baz'])
+
+ self.assertEqual(resp.read(), '{"test": "secure"}')
+
+ def test_multi_commands(self):
+ """Check connection re-use"""
+ HTTPretty.register_uri(HTTPretty.POST,
+ 'https://%s/' % self.region.endpoint,
+ json.dumps({'test': 'secure'}),
+ content_type='application/json')
+
+ conn = self.region.connect(aws_access_key_id='access_key',
+ aws_secret_access_key='secret')
+
+ resp1 = conn.make_request('myCmd1',
+ {'par1': 'foo', 'par2': 'baz'},
+ "/",
+ "POST")
+ body1 = urlparse.parse_qs(HTTPretty.last_request.body)
+
+ resp2 = conn.make_request('myCmd2',
+ {'par3': 'bar', 'par4': 'narf'},
+ "/",
+ "POST")
+ body2 = urlparse.parse_qs(HTTPretty.last_request.body)
+
+ self.assertEqual(body1['par1'], ['foo'])
+ self.assertEqual(body1['par2'], ['baz'])
+ with self.assertRaises(KeyError):
+ body1['par3']
+
+ self.assertEqual(body2['par3'], ['bar'])
+ self.assertEqual(body2['par4'], ['narf'])
+ with self.assertRaises(KeyError):
+ body2['par1']
+
+ self.assertEqual(resp1.read(), '{"test": "secure"}')
+ self.assertEqual(resp2.read(), '{"test": "secure"}')
+
+ def test_non_secure(self):
+ HTTPretty.register_uri(HTTPretty.POST,
+ 'http://%s/' % self.region.endpoint,
+ json.dumps({'test': 'normal'}),
+ content_type='application/json')
+
+ conn = self.region.connect(aws_access_key_id='access_key',
+ aws_secret_access_key='secret',
+ is_secure=False)
+ resp = conn.make_request('myCmd1',
+ {'par1': 'foo', 'par2': 'baz'},
+ "/",
+ "POST")
+
+ self.assertEqual(resp.read(), '{"test": "normal"}')
+
+ def test_alternate_port(self):
+ HTTPretty.register_uri(HTTPretty.POST,
+ 'http://%s:8080/' % self.region.endpoint,
+ json.dumps({'test': 'alternate'}),
+ content_type='application/json')
+
+ conn = self.region.connect(aws_access_key_id='access_key',
+ aws_secret_access_key='secret',
+ port=8080,
+ is_secure=False)
+ resp = conn.make_request('myCmd1',
+ {'par1': 'foo', 'par2': 'baz'},
+ "/",
+ "POST")
+
+ self.assertEqual(resp.read(), '{"test": "alternate"}')
+
+ def test_temp_failure(self):
+ responses = [HTTPretty.Response(body="{'test': 'fail'}", status=500),
+ HTTPretty.Response(body="{'test': 'success'}", status=200)]
+
+ HTTPretty.register_uri(HTTPretty.POST,
+ 'https://%s/temp_fail/' % self.region.endpoint,
+ responses=responses)
+
+ conn = self.region.connect(aws_access_key_id='access_key',
+ aws_secret_access_key='secret')
+ resp = conn.make_request('myCmd1',
+ {'par1': 'foo', 'par2': 'baz'},
+ '/temp_fail/',
+ 'POST')
+ self.assertEqual(resp.read(), "{'test': 'success'}")
+
+class TestAWSQueryStatus(TestAWSQueryConnection):
+
+ def test_get_status(self):
+ HTTPretty.register_uri(HTTPretty.GET,
+ 'https://%s/status' % self.region.endpoint,
+ '<status>ok</status>',
+ content_type='text/xml')
+
+ conn = self.region.connect(aws_access_key_id='access_key',
+ aws_secret_access_key='secret')
+ resp = conn.get_status('getStatus',
+ {'par1': 'foo', 'par2': 'baz'},
+ 'status')
+
+ self.assertEqual(resp, "ok")
+
+ def test_get_status_blank_error(self):
+ HTTPretty.register_uri(HTTPretty.GET,
+ 'https://%s/status' % self.region.endpoint,
+ '',
+ content_type='text/xml')
+
+ conn = self.region.connect(aws_access_key_id='access_key',
+ aws_secret_access_key='secret')
+ with self.assertRaises(BotoServerError):
+ resp = conn.get_status('getStatus',
+ {'par1': 'foo', 'par2': 'baz'},
+ 'status')
+
+ def test_get_status_error(self):
+ HTTPretty.register_uri(HTTPretty.GET,
+ 'https://%s/status' % self.region.endpoint,
+ '<status>error</status>',
+ content_type='text/xml',
+ status=400)
+
+ conn = self.region.connect(aws_access_key_id='access_key',
+ aws_secret_access_key='secret')
+ with self.assertRaises(BotoServerError):
+ resp = conn.get_status('getStatus',
+ {'par1': 'foo', 'par2': 'baz'},
+ 'status')
+
if __name__ == '__main__':
unittest.main()
diff --git a/tests/unit/test_exception.py b/tests/unit/test_exception.py
new file mode 100644
index 00000000..684ca0ce
--- /dev/null
+++ b/tests/unit/test_exception.py
@@ -0,0 +1,78 @@
+from tests.unit import unittest
+
+from boto.exception import BotoServerError
+
+from httpretty import HTTPretty, httprettified
+
+class TestBotoServerError(unittest.TestCase):
+
+ def test_botoservererror_basics(self):
+ bse = BotoServerError('400', 'Bad Request')
+ self.assertEqual(bse.status, '400')
+ self.assertEqual(bse.reason, 'Bad Request')
+
+ def test_message_elb_xml(self):
+ # This test XML response comes from #509
+ xml = """
+<ErrorResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2011-11-15/">
+ <Error>
+ <Type>Sender</Type>
+ <Code>LoadBalancerNotFound</Code>
+ <Message>Cannot find Load Balancer webapp-balancer2</Message>
+ </Error>
+ <RequestId>093f80d0-4473-11e1-9234-edce8ec08e2d</RequestId>
+</ErrorResponse>"""
+ bse = BotoServerError('400', 'Bad Request', body=xml)
+
+ self.assertEqual(bse.error_message, 'Cannot find Load Balancer webapp-balancer2')
+ self.assertEqual(bse.request_id, '093f80d0-4473-11e1-9234-edce8ec08e2d')
+ self.assertEqual(bse.error_code, 'LoadBalancerNotFound')
+ self.assertEqual(bse.status, '400')
+ self.assertEqual(bse.reason, 'Bad Request')
+
+ def test_message_sd_xml(self):
+ # Sample XML response from: https://forums.aws.amazon.com/thread.jspa?threadID=87393
+ xml = """
+<Response>
+ <Errors>
+ <Error>
+ <Code>AuthorizationFailure</Code>
+ <Message>Session does not have permission to perform (sdb:CreateDomain) on resource (arn:aws:sdb:us-east-1:xxxxxxx:domain/test_domain). Contact account owner.</Message>
+ <BoxUsage>0.0055590278</BoxUsage>
+ </Error>
+ </Errors>
+ <RequestID>e73bb2bb-63e3-9cdc-f220-6332de66dbbe</RequestID>
+</Response>"""
+ bse = BotoServerError('403', 'Forbidden', body=xml)
+ self.assertEqual(bse.error_message,
+ 'Session does not have permission to perform (sdb:CreateDomain) on '
+ 'resource (arn:aws:sdb:us-east-1:xxxxxxx:domain/test_domain). '
+ 'Contact account owner.')
+ self.assertEqual(bse.box_usage, '0.0055590278')
+ self.assertEqual(bse.error_code, 'AuthorizationFailure')
+ self.assertEqual(bse.status, '403')
+ self.assertEqual(bse.reason, 'Forbidden')
+
+ @httprettified
+ def test_xmlns_not_loaded(self):
+ xml = '<ErrorResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2011-11-15/">'
+ bse = BotoServerError('403', 'Forbidden', body=xml)
+ self.assertEqual([], HTTPretty.latest_requests)
+
+ @httprettified
+ def test_xml_entity_not_loaded(self):
+ xml = '<!DOCTYPE Message [<!ENTITY xxe SYSTEM "http://aws.amazon.com/">]><Message>error:&xxe;</Message>'
+ bse = BotoServerError('403', 'Forbidden', body=xml)
+ self.assertEqual([], HTTPretty.latest_requests)
+
+ def test_message_not_xml(self):
+ body = 'This is not XML'
+
+ bse = BotoServerError('400', 'Bad Request', body=body)
+ self.assertEqual(bse.error_message, 'This is not XML')
+
+ def test_getters(self):
+ body = "This is the body"
+
+ bse = BotoServerError('400', 'Bad Request', body=body)
+ self.assertEqual(bse.code, bse.error_code)
diff --git a/tests/unit/utils/__init__.py b/tests/unit/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/utils/__init__.py
diff --git a/tests/unit/utils/test_utils.py b/tests/unit/utils/test_utils.py
index 205d3d87..abb85353 100644
--- a/tests/unit/utils/test_utils.py
+++ b/tests/unit/utils/test_utils.py
@@ -23,8 +23,12 @@ import unittest
import hashlib
import hmac
+import mock
+
from boto.utils import Password
from boto.utils import pythonize_name
+from boto.utils import _build_instance_metadata_url
+from boto.utils import retry_url
class TestPassword(unittest.TestCase):
@@ -105,5 +109,82 @@ class TestPythonizeName(unittest.TestCase):
self.assertEqual(pythonize_name('HTTPStatus200Ok'), 'http_status_200_ok')
+class TestBuildInstanceMetadataURL(unittest.TestCase):
+ def test_normal(self):
+ # This is the all-defaults case.
+ self.assertEqual(_build_instance_metadata_url(
+ 'http://169.254.169.254',
+ 'latest',
+ 'meta-data'
+ ),
+ 'http://169.254.169.254/latest/meta-data/'
+ )
+
+ def test_custom_path(self):
+ self.assertEqual(_build_instance_metadata_url(
+ 'http://169.254.169.254',
+ 'latest',
+ 'dynamic'
+ ),
+ 'http://169.254.169.254/latest/dynamic/'
+ )
+
+ def test_custom_version(self):
+ self.assertEqual(_build_instance_metadata_url(
+ 'http://169.254.169.254',
+ '1.0',
+ 'meta-data'
+ ),
+ 'http://169.254.169.254/1.0/meta-data/'
+ )
+
+ def test_custom_url(self):
+ self.assertEqual(_build_instance_metadata_url(
+ 'http://10.0.1.5',
+ 'latest',
+ 'meta-data'
+ ),
+ 'http://10.0.1.5/latest/meta-data/'
+ )
+
+ def test_all_custom(self):
+ self.assertEqual(_build_instance_metadata_url(
+ 'http://10.0.1.5',
+ '2013-03-22',
+ 'user-data'
+ ),
+ 'http://10.0.1.5/2013-03-22/user-data/'
+ )
+
+
+class TestRetryURL(unittest.TestCase):
+ def setUp(self):
+ self.urlopen_patch = mock.patch('urllib2.urlopen')
+ self.opener_patch = mock.patch('urllib2.build_opener')
+ self.urlopen = self.urlopen_patch.start()
+ self.opener = self.opener_patch.start()
+
+ def tearDown(self):
+ self.urlopen_patch.stop()
+ self.opener_patch.stop()
+
+ def set_normal_response(self, response):
+ fake_response = mock.Mock()
+ fake_response.read.return_value = response
+ self.urlopen.return_value = fake_response
+
+ def set_no_proxy_allowed_response(self, response):
+ fake_response = mock.Mock()
+ fake_response.read.return_value = response
+ self.opener.return_value.open.return_value = fake_response
+
+ def test_retry_url_uses_proxy(self):
+ self.set_normal_response('normal response')
+ self.set_no_proxy_allowed_response('no proxy response')
+
+ response = retry_url('http://10.10.10.10/foo', num_retries=1)
+ self.assertEqual(response, 'no proxy response')
+
+
if __name__ == '__main__':
unittest.main()
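
The TestBuildInstanceMetadataURL cases above fully determine the output shape: base URL, version, and path joined with single slashes and a trailing slash. A minimal sketch that reproduces those five cases, assuming no normalization beyond what the assertions require:

# Illustrative only: joins the three pieces with single slashes and keeps the
# trailing slash, matching the expected URLs in the tests above.
def build_instance_metadata_url(url, version, path):
    return '%s/%s/%s/' % (url.rstrip('/'), version.strip('/'), path.strip('/'))

# build_instance_metadata_url('http://169.254.169.254', 'latest', 'meta-data')
# -> 'http://169.254.169.254/latest/meta-data/'
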
diff --git a/tests/unit/vpc/__init__.py b/tests/unit/vpc/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/unit/vpc/__init__.py
diff --git a/tests/unit/vpc/test_vpc.py b/tests/unit/vpc/test_vpc.py
new file mode 100644
index 00000000..499d1582
--- /dev/null
+++ b/tests/unit/vpc/test_vpc.py
@@ -0,0 +1,40 @@
+# -*- coding: UTF-8 -*-
+from tests.unit import unittest
+from tests.unit import AWSMockServiceTestCase
+
+from boto.vpc import VPCConnection
+
+DESCRIBE_VPCS = r'''<?xml version="1.0" encoding="UTF-8"?>
+<DescribeVpcsResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
+ <requestId>623040d1-b51c-40bc-8080-93486f38d03d</requestId>
+ <vpcSet>
+ <item>
+ <vpcId>vpc-12345678</vpcId>
+ <state>available</state>
+ <cidrBlock>172.16.0.0/16</cidrBlock>
+ <dhcpOptionsId>dopt-12345678</dhcpOptionsId>
+ <instanceTenancy>default</instanceTenancy>
+ <isDefault>false</isDefault>
+ </item>
+ </vpcSet>
+</DescribeVpcsResponse>'''
+
+class TestDescribeVPCs(AWSMockServiceTestCase):
+
+ connection_class = VPCConnection
+
+ def default_body(self):
+ return DESCRIBE_VPCS
+
+ def test_get_vpcs(self):
+ self.set_http_response(status_code=200)
+
+ api_response = self.service_connection.get_all_vpcs()
+ self.assertEqual(len(api_response), 1)
+
+ vpc = api_response[0]
+ self.assertFalse(vpc.is_default)
+ self.assertEqual(vpc.instance_tenancy,'default')
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/tests/unit/vpc/test_vpnconnection.py b/tests/unit/vpc/test_vpnconnection.py
new file mode 100644
index 00000000..dfce90ff
--- /dev/null
+++ b/tests/unit/vpc/test_vpnconnection.py
@@ -0,0 +1,123 @@
+# -*- coding: UTF-8 -*-
+from tests.unit import unittest
+from tests.unit import AWSMockServiceTestCase
+
+from boto.vpc import VPCConnection
+
+DESCRIBE_VPNCONNECTIONS = r'''<?xml version="1.0" encoding="UTF-8"?>
+<DescribeVpnConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
+ <requestId>12345678-asdf-ghjk-zxcv-0987654321nb</requestId>
+ <vpnConnectionSet>
+ <item>
+ <vpnConnectionId>vpn-12qw34er56ty</vpnConnectionId>
+ <state>available</state>
+ <customerGatewayConfiguration>
+ &lt;?xml version="1.0" encoding="UTF-8"?&gt;
+ </customerGatewayConfiguration>
+ <type>ipsec.1</type>
+ <customerGatewayId>cgw-1234qwe9</customerGatewayId>
+ <vpnGatewayId>vgw-lkjh1234</vpnGatewayId>
+ <tagSet>
+ <item>
+ <key>Name</key>
+ <value>VPN 1</value>
+ </item>
+ </tagSet>
+ <vgwTelemetry>
+ <item>
+ <outsideIpAddress>123.45.67.89</outsideIpAddress>
+ <status>DOWN</status>
+ <lastStatusChange>2013-03-19T19:20:34.000Z</lastStatusChange>
+ <statusMessage/>
+ <acceptedRouteCount>0</acceptedRouteCount>
+ </item>
+ <item>
+ <outsideIpAddress>123.45.67.90</outsideIpAddress>
+ <status>UP</status>
+ <lastStatusChange>2013-03-20T08:00:14.000Z</lastStatusChange>
+ <statusMessage/>
+ <acceptedRouteCount>0</acceptedRouteCount>
+ </item>
+ </vgwTelemetry>
+ <options>
+ <staticRoutesOnly>true</staticRoutesOnly>
+ </options>
+ <routes>
+ <item>
+ <destinationCidrBlock>192.168.0.0/24</destinationCidrBlock>
+ <source>static</source>
+ <state>available</state>
+ </item>
+ </routes>
+ </item>
+ <item>
+ <vpnConnectionId>vpn-qwerty12</vpnConnectionId>
+ <state>pending</state>
+ <customerGatewayConfiguration>
+ &lt;?xml version="1.0" encoding="UTF-8"?&gt;
+ </customerGatewayConfiguration>
+ <type>ipsec.1</type>
+ <customerGatewayId>cgw-01234567</customerGatewayId>
+ <vpnGatewayId>vgw-asdfghjk</vpnGatewayId>
+ <vgwTelemetry>
+ <item>
+ <outsideIpAddress>134.56.78.78</outsideIpAddress>
+ <status>UP</status>
+ <lastStatusChange>2013-03-20T01:46:30.000Z</lastStatusChange>
+ <statusMessage/>
+ <acceptedRouteCount>0</acceptedRouteCount>
+ </item>
+ <item>
+ <outsideIpAddress>134.56.78.79</outsideIpAddress>
+ <status>UP</status>
+ <lastStatusChange>2013-03-19T19:23:59.000Z</lastStatusChange>
+ <statusMessage/>
+ <acceptedRouteCount>0</acceptedRouteCount>
+ </item>
+ </vgwTelemetry>
+ <options>
+ <staticRoutesOnly>true</staticRoutesOnly>
+ </options>
+ <routes>
+ <item>
+ <destinationCidrBlock>10.0.0.0/16</destinationCidrBlock>
+ <source>static</source>
+ <state>pending</state>
+ </item>
+ </routes>
+ </item>
+ </vpnConnectionSet>
+</DescribeVpnConnectionsResponse>'''
+
+class TestDescribeVPNConnections(AWSMockServiceTestCase):
+
+ connection_class = VPCConnection
+
+ def default_body(self):
+ return DESCRIBE_VPNCONNECTIONS
+
+    def test_get_vpn_connections(self):
+ self.set_http_response(status_code=200)
+
+ api_response = self.service_connection.get_all_vpn_connections()
+ self.assertEqual(len(api_response), 2)
+
+ vpn0 = api_response[0]
+ self.assertEqual(vpn0.type,'ipsec.1')
+ self.assertEqual(vpn0.customer_gateway_id,'cgw-1234qwe9')
+ self.assertEqual(vpn0.vpn_gateway_id,'vgw-lkjh1234')
+ self.assertEqual(len(vpn0.tunnels),2)
+ self.assertDictEqual(vpn0.tags,{'Name':'VPN 1'})
+
+ vpn1 = api_response[1]
+ self.assertEqual(vpn1.state,'pending')
+ self.assertEqual(len(vpn1.static_routes),1)
+ self.assertTrue(vpn1.options.static_routes_only)
+ self.assertEqual(vpn1.tunnels[0].status,'UP')
+ self.assertEqual(vpn1.tunnels[1].status,'UP')
+ self.assertDictEqual(vpn1.tags,{})
+ self.assertEqual(vpn1.static_routes[0].source,'static')
+ self.assertEqual(vpn1.static_routes[0].state,'pending')
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file