author    Ansible Core Team <info@ansible.com>  2020-03-09 09:40:31 +0000
committer Matt Martz <matt@sivel.net>           2020-03-23 11:14:21 -0500
commit    6d910034a0a12f93b323e88b809c66afde6e713a
tree      99aac704166cb859c6e366a9521b644fb5b6247c
parent    8f7e9f73ce69a413cd8d650b50635f55674492d1
Migrated to ansible.amazon
-rw-r--r--  lib/ansible/module_utils/aws/__init__.py | 0
-rw-r--r--  lib/ansible/module_utils/aws/acm.py | 212
-rw-r--r--  lib/ansible/module_utils/aws/batch.py | 103
-rw-r--r--  lib/ansible/module_utils/aws/cloudfront_facts.py | 235
-rw-r--r--  lib/ansible/module_utils/aws/core.py | 335
-rw-r--r--  lib/ansible/module_utils/aws/direct_connect.py | 87
-rw-r--r--  lib/ansible/module_utils/aws/elb_utils.py | 112
-rw-r--r--  lib/ansible/module_utils/aws/elbv2.py | 891
-rw-r--r--  lib/ansible/module_utils/aws/iam.py | 49
-rw-r--r--  lib/ansible/module_utils/aws/rds.py | 232
-rw-r--r--  lib/ansible/module_utils/aws/s3.py | 50
-rw-r--r--  lib/ansible/module_utils/aws/urls.py | 210
-rw-r--r--  lib/ansible/module_utils/aws/waf.py | 222
-rw-r--r--  lib/ansible/module_utils/aws/waiters.py | 405
-rw-r--r--  lib/ansible/module_utils/ec2.py | 758
l---------  lib/ansible/modules/cloud/amazon/_aws_az_facts.py | 1
l---------  lib/ansible/modules/cloud/amazon/_aws_caller_facts.py | 1
l---------  lib/ansible/modules/cloud/amazon/_cloudformation_facts.py | 1
l---------  lib/ansible/modules/cloud/amazon/_ec2_ami_facts.py | 1
l---------  lib/ansible/modules/cloud/amazon/_ec2_eni_facts.py | 1
l---------  lib/ansible/modules/cloud/amazon/_ec2_group_facts.py | 1
l---------  lib/ansible/modules/cloud/amazon/_ec2_snapshot_facts.py | 1
l---------  lib/ansible/modules/cloud/amazon/_ec2_vol_facts.py | 1
l---------  lib/ansible/modules/cloud/amazon/_ec2_vpc_dhcp_option_facts.py | 1
l---------  lib/ansible/modules/cloud/amazon/_ec2_vpc_net_facts.py | 1
l---------  lib/ansible/modules/cloud/amazon/_ec2_vpc_subnet_facts.py | 1
-rw-r--r--  lib/ansible/modules/cloud/amazon/aws_az_info.py | 110
-rw-r--r--  lib/ansible/modules/cloud/amazon/aws_caller_info.py | 114
-rw-r--r--  lib/ansible/modules/cloud/amazon/aws_s3.py | 925
-rw-r--r--  lib/ansible/modules/cloud/amazon/cloudformation.py | 837
-rw-r--r--  lib/ansible/modules/cloud/amazon/cloudformation_info.py | 354
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2.py | 1766
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_ami.py | 738
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_ami_info.py | 281
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_elb_lb.py | 1365
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_eni.py | 633
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_eni_info.py | 275
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_group.py | 1345
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_group_info.py | 143
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_key.py | 271
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_metadata_facts.py | 564
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_snapshot.py | 336
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_snapshot_info.py | 258
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_tag.py | 201
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_tag_info.py | 92
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vol.py | 632
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vol_info.py | 141
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py | 414
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option_info.py | 157
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_net.py | 524
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_net_info.py | 306
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py | 604
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_info.py | 250
-rw-r--r--  lib/ansible/modules/cloud/amazon/s3_bucket.py | 767
-rw-r--r--  lib/ansible/plugins/action/aws_s3.py | 71
-rw-r--r--  lib/ansible/plugins/callback/aws_resource_actions.py | 72
-rw-r--r--  lib/ansible/plugins/doc_fragments/aws.py | 75
-rw-r--r--  lib/ansible/plugins/doc_fragments/aws_credentials.py | 42
-rw-r--r--  lib/ansible/plugins/doc_fragments/aws_region.py | 18
-rw-r--r--  lib/ansible/plugins/doc_fragments/ec2.py | 18
-rw-r--r--  lib/ansible/plugins/inventory/aws_ec2.py | 659
-rw-r--r--  lib/ansible/plugins/inventory/aws_rds.py | 326
-rw-r--r--  lib/ansible/plugins/lookup/aws_account_attribute.py | 131
-rw-r--r--  lib/ansible/plugins/lookup/aws_secret.py | 140
-rw-r--r--  lib/ansible/plugins/lookup/aws_service_ip_ranges.py | 79
-rw-r--r--  lib/ansible/plugins/lookup/aws_ssm.py | 233
-rw-r--r--  test/integration/targets/aws_caller_info/aliases | 2
-rw-r--r--  test/integration/targets/aws_caller_info/tasks/main.yaml | 15
-rw-r--r--  test/integration/targets/aws_s3/aliases | 2
-rw-r--r--  test/integration/targets/aws_s3/defaults/main.yml | 3
-rw-r--r--  test/integration/targets/aws_s3/files/hello.txt | 1
-rw-r--r--  test/integration/targets/aws_s3/meta/main.yml | 0
-rw-r--r--  test/integration/targets/aws_s3/tasks/main.yml | 590
-rw-r--r--  test/integration/targets/cloudformation/aliases | 3
-rw-r--r--  test/integration/targets/cloudformation/defaults/main.yml | 8
-rw-r--r--  test/integration/targets/cloudformation/files/cf_template.json | 37
-rw-r--r--  test/integration/targets/cloudformation/tasks/main.yml | 463
-rw-r--r--  test/integration/targets/ec2_ami/aliases | 4
-rw-r--r--  test/integration/targets/ec2_ami/defaults/main.yml | 8
-rw-r--r--  test/integration/targets/ec2_ami/meta/main.yml | 3
-rw-r--r--  test/integration/targets/ec2_ami/tasks/main.yml | 462
-rw-r--r--  test/integration/targets/ec2_ami/vars/main.yml | 20
-rw-r--r--  test/integration/targets/ec2_elb_lb/aliases | 2
-rw-r--r--  test/integration/targets/ec2_elb_lb/defaults/main.yml | 3
-rw-r--r--  test/integration/targets/ec2_elb_lb/meta/main.yml | 3
-rw-r--r--  test/integration/targets/ec2_elb_lb/tasks/main.yml | 425
-rw-r--r--  test/integration/targets/ec2_elb_lb/vars/main.yml | 2
-rw-r--r--  test/integration/targets/ec2_group/aliases | 3
-rw-r--r--  test/integration/targets/ec2_group/defaults/main.yml | 4
-rw-r--r--  test/integration/targets/ec2_group/meta/main.yml | 3
-rw-r--r--  test/integration/targets/ec2_group/tasks/credential_tests.yml | 161
-rw-r--r--  test/integration/targets/ec2_group/tasks/data_validation.yml | 44
-rw-r--r--  test/integration/targets/ec2_group/tasks/diff_mode.yml | 184
-rw-r--r--  test/integration/targets/ec2_group/tasks/ec2_classic.yml | 88
-rw-r--r--  test/integration/targets/ec2_group/tasks/egress_tests.yml | 198
-rw-r--r--  test/integration/targets/ec2_group/tasks/ipv6_default_tests.yml | 103
-rw-r--r--  test/integration/targets/ec2_group/tasks/main.yml | 1536
-rw-r--r--  test/integration/targets/ec2_group/tasks/multi_account.yml | 124
-rw-r--r--  test/integration/targets/ec2_group/tasks/multi_nested_target.yml | 230
-rw-r--r--  test/integration/targets/ec2_group/tasks/numeric_protos.yml | 71
-rw-r--r--  test/integration/targets/ec2_group/tasks/rule_group_create.yml | 132
-rw-r--r--  test/integration/targets/ec2_key/aliases | 2
-rw-r--r--  test/integration/targets/ec2_key/defaults/main.yml | 3
-rw-r--r--  test/integration/targets/ec2_key/meta/main.yml | 4
-rw-r--r--  test/integration/targets/ec2_key/tasks/main.yml | 164
-rw-r--r--  test/integration/targets/ec2_metadata_facts/aliases | 3
-rw-r--r--  test/integration/targets/ec2_metadata_facts/defaults/main.yml | 2
-rw-r--r--  test/integration/targets/ec2_metadata_facts/meta/main.yml | 3
-rw-r--r--  test/integration/targets/ec2_metadata_facts/tasks/main.yml | 2
-rw-r--r--  test/integration/targets/ec2_metadata_facts/vars/main.yml | 2
-rw-r--r--  test/integration/targets/ec2_snapshot/aliases | 3
-rw-r--r--  test/integration/targets/ec2_snapshot/defaults/main.yml | 2
-rw-r--r--  test/integration/targets/ec2_snapshot/tasks/main.yml | 256
-rw-r--r--  test/integration/targets/ec2_tag/aliases | 3
-rw-r--r--  test/integration/targets/ec2_tag/defaults/main.yml | 2
-rw-r--r--  test/integration/targets/ec2_tag/meta/main.yml | 3
-rw-r--r--  test/integration/targets/ec2_tag/tasks/main.yml | 144
-rw-r--r--  test/integration/targets/ec2_tag/vars/main.yml | 2
-rw-r--r--  test/integration/targets/ec2_vol/aliases | 2
-rw-r--r--  test/integration/targets/ec2_vol/defaults/main.yml | 5
-rw-r--r--  test/integration/targets/ec2_vol/tasks/main.yml | 373
-rw-r--r--  test/integration/targets/ec2_vol_info/aliases | 2
-rw-r--r--  test/integration/targets/ec2_vol_info/meta/main.yml | 3
-rw-r--r--  test/integration/targets/ec2_vol_info/tasks/main.yml | 123
-rw-r--r--  test/integration/targets/ec2_vpc_net/aliases | 3
-rw-r--r--  test/integration/targets/ec2_vpc_net/defaults/main.yml | 5
-rw-r--r--  test/integration/targets/ec2_vpc_net/meta/main.yml | 3
-rw-r--r--  test/integration/targets/ec2_vpc_net/tasks/main.yml | 1306
-rw-r--r--  test/integration/targets/ec2_vpc_subnet/aliases | 3
-rw-r--r--  test/integration/targets/ec2_vpc_subnet/defaults/main.yml | 4
-rw-r--r--  test/integration/targets/ec2_vpc_subnet/meta/main.yml | 3
-rw-r--r--  test/integration/targets/ec2_vpc_subnet/tasks/main.yml | 618
-rw-r--r--  test/integration/targets/inventory_aws_ec2/aliases | 2
-rw-r--r--  test/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml | 11
-rw-r--r--  test/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml | 9
-rw-r--r--  test/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml | 64
-rw-r--r--  test/integration/targets/inventory_aws_ec2/playbooks/setup.yml | 62
-rw-r--r--  test/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml | 39
-rw-r--r--  test/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml | 9
-rw-r--r--  test/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml | 18
-rw-r--r--  test/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml | 91
-rw-r--r--  test/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml | 79
-rw-r--r--  test/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml | 74
-rwxr-xr-x  test/integration/targets/inventory_aws_ec2/runme.sh | 35
-rw-r--r--  test/integration/targets/inventory_aws_ec2/templates/inventory.yml | 12
-rw-r--r--  test/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml | 12
-rw-r--r--  test/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml | 20
-rw-r--r--  test/integration/targets/inventory_aws_ec2/test.aws_ec2.yml | 0
-rw-r--r--  test/integration/targets/inventory_aws_rds/aliases | 2
-rw-r--r--  test/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml | 11
-rw-r--r--  test/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml | 9
-rw-r--r--  test/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml | 54
-rw-r--r--  test/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml | 9
-rw-r--r--  test/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml | 18
-rw-r--r--  test/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml | 74
-rw-r--r--  test/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml | 62
-rw-r--r--  test/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml | 64
-rwxr-xr-x  test/integration/targets/inventory_aws_rds/runme.sh | 35
-rw-r--r--  test/integration/targets/inventory_aws_rds/templates/inventory.j2 | 10
-rw-r--r--  test/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 | 13
-rw-r--r--  test/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 | 17
-rw-r--r--  test/integration/targets/inventory_aws_rds/test.aws_rds.yml | 0
-rw-r--r--  test/integration/targets/s3_bucket/aliases | 2
-rw-r--r--  test/integration/targets/s3_bucket/inventory | 12
-rw-r--r--  test/integration/targets/s3_bucket/main.yml | 12
-rw-r--r--  test/integration/targets/s3_bucket/meta/main.yml | 4
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml | 2
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml | 4
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml | 146
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml | 54
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml | 88
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml | 88
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml | 20
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml | 26
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml | 64
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml | 256
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json | 12
-rw-r--r--  test/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json | 12
-rwxr-xr-x  test/integration/targets/s3_bucket/runme.sh | 12
-rw-r--r--  test/sanity/ignore.txt | 38
-rw-r--r--  test/units/module_utils/aws/test_aws_module.py | 139
-rw-r--r--  test/units/module_utils/ec2/test_aws.py | 101
-rw-r--r--  test/units/module_utils/test_ec2.py | 234
-rw-r--r--  test/units/modules/cloud/amazon/test_aws_s3.py | 38
-rw-r--r--  test/units/modules/cloud/amazon/test_cloudformation.py | 205
-rw-r--r--  test/units/modules/cloud/amazon/test_ec2_group.py | 83
-rw-r--r--  test/units/plugins/inventory/test_aws_ec2.py | 183
-rw-r--r--  test/units/plugins/lookup/fixtures/avi.json | 104
-rw-r--r--  test/units/plugins/lookup/test_aws_secret.py | 90
-rw-r--r--  test/units/plugins/lookup/test_aws_ssm.py | 166
190 files changed, 0 insertions(+), 31279 deletions(-)
diff --git a/lib/ansible/module_utils/aws/__init__.py b/lib/ansible/module_utils/aws/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/aws/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/aws/acm.py b/lib/ansible/module_utils/aws/acm.py
deleted file mode 100644
index a2ac4505bf..0000000000
--- a/lib/ansible/module_utils/aws/acm.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2019 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-#
-# Author:
-# - Matthew Davis <Matthew.Davis.2@team.telstra.com>
-# on behalf of Telstra Corporation Limited
-#
-# Common functionality to be used by the modules:
-# - acm
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-"""
-Common Amazon Certificate Manager facts shared between modules
-"""
-import traceback
-from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, HAS_BOTO3, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
-from ansible.module_utils._text import to_bytes
-
-
-try:
- import botocore
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by imported HAS_BOTO3
-
-
-class ACMServiceManager(object):
- """Handles ACM Facts Services"""
-
- def __init__(self, module):
- self.module = module
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- self.client = module.client('acm')
-
- @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['RequestInProgressException'])
- def delete_certificate_with_backoff(self, client, arn):
- client.delete_certificate(CertificateArn=arn)
-
- def delete_certificate(self, client, module, arn):
- module.debug("Attempting to delete certificate %s" % arn)
- try:
- self.delete_certificate_with_backoff(client, arn)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't delete certificate %s" % arn)
- module.debug("Successfully deleted certificate %s" % arn)
-
- @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['RequestInProgressException'])
- def list_certificates_with_backoff(self, client, statuses=None):
- paginator = client.get_paginator('list_certificates')
- kwargs = dict()
- if statuses:
- kwargs['CertificateStatuses'] = statuses
- return paginator.paginate(**kwargs).build_full_result()['CertificateSummaryList']
-
- @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
- def get_certificate_with_backoff(self, client, certificate_arn):
- response = client.get_certificate(CertificateArn=certificate_arn)
- # strip out response metadata
- return {'Certificate': response['Certificate'],
- 'CertificateChain': response['CertificateChain']}
-
- @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
- def describe_certificate_with_backoff(self, client, certificate_arn):
- return client.describe_certificate(CertificateArn=certificate_arn)['Certificate']
-
- @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
- def list_certificate_tags_with_backoff(self, client, certificate_arn):
- return client.list_tags_for_certificate(CertificateArn=certificate_arn)['Tags']
-
- # Returns a list of certificates
- # if domain_name is specified, returns only certificates with that domain
- # if an ARN is specified, returns only that certificate
- # only_tags is a dict, e.g. {'key':'value'}. If specified this function will return
- # only certificates which contain all those tags (key exists, value matches).
- def get_certificates(self, client, module, domain_name=None, statuses=None, arn=None, only_tags=None):
- try:
- all_certificates = self.list_certificates_with_backoff(client=client, statuses=statuses)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain certificates")
- if domain_name:
- certificates = [cert for cert in all_certificates
- if cert['DomainName'] == domain_name]
- else:
- certificates = all_certificates
-
- if arn:
- # still return a list, not just one item
- certificates = [c for c in certificates if c['CertificateArn'] == arn]
-
- results = []
- for certificate in certificates:
- try:
- cert_data = self.describe_certificate_with_backoff(client, certificate['CertificateArn'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain certificate metadata for domain %s" % certificate['DomainName'])
-
- # in some states, ACM resources do not have a corresponding cert
- if cert_data['Status'] not in ['PENDING_VALIDATION', 'VALIDATION_TIMED_OUT', 'FAILED']:
- try:
- cert_data.update(self.get_certificate_with_backoff(client, certificate['CertificateArn']))
- except (BotoCoreError, ClientError, KeyError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain certificate data for domain %s" % certificate['DomainName'])
- cert_data = camel_dict_to_snake_dict(cert_data)
- try:
- tags = self.list_certificate_tags_with_backoff(client, certificate['CertificateArn'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain tags for domain %s" % certificate['DomainName'])
-
- cert_data['tags'] = boto3_tag_list_to_ansible_dict(tags)
- results.append(cert_data)
-
- if only_tags:
- for tag_key in only_tags:
- try:
- results = [c for c in results if ('tags' in c) and (tag_key in c['tags']) and (c['tags'][tag_key] == only_tags[tag_key])]
- except (TypeError, AttributeError) as e:
- for c in results:
- if 'tags' not in c:
- module.debug("cert is %s" % str(c))
-                    module.fail_json(msg="ACM tag filtering error", exception=traceback.format_exc())
-
- return results
-
- # returns the domain name of a certificate (encoded in the public cert)
- # for a given ARN
- # A cert with that ARN must already exist
- def get_domain_of_cert(self, client, module, arn):
- if arn is None:
-            module.fail_json(msg="Internal error with ACM domain fetching, no certificate ARN specified")
- try:
- cert_data = self.describe_certificate_with_backoff(client=client, certificate_arn=arn)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain certificate data for arn %s" % arn)
- return cert_data['DomainName']
-
- @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
- def import_certificate_with_backoff(self, client, certificate, private_key, certificate_chain, arn):
- if certificate_chain:
- if arn:
- ret = client.import_certificate(Certificate=to_bytes(certificate),
- PrivateKey=to_bytes(private_key),
- CertificateChain=to_bytes(certificate_chain),
- CertificateArn=arn)
- else:
- ret = client.import_certificate(Certificate=to_bytes(certificate),
- PrivateKey=to_bytes(private_key),
- CertificateChain=to_bytes(certificate_chain))
- else:
- if arn:
- ret = client.import_certificate(Certificate=to_bytes(certificate),
- PrivateKey=to_bytes(private_key),
- CertificateArn=arn)
- else:
- ret = client.import_certificate(Certificate=to_bytes(certificate),
- PrivateKey=to_bytes(private_key))
- return ret['CertificateArn']
-
- # Tags are a normal Ansible style dict
- # {'Key':'Value'}
- @AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
- def tag_certificate_with_backoff(self, client, arn, tags):
- aws_tags = ansible_dict_to_boto3_tag_list(tags)
- client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags)
-
- def import_certificate(self, client, module, certificate, private_key, arn=None, certificate_chain=None, tags=None):
-
- original_arn = arn
-
- # upload cert
- try:
- arn = self.import_certificate_with_backoff(client, certificate, private_key, certificate_chain, arn)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't upload new certificate")
-
- if original_arn and (arn != original_arn):
-        # I'm not sure whether the API guarantees that the ARN will not change
- # I'm failing just in case.
- # If I'm wrong, I'll catch it in the integration tests.
- module.fail_json(msg="ARN changed with ACM update, from %s to %s" % (original_arn, arn))
-
- # tag that cert
- try:
- self.tag_certificate_with_backoff(client, arn, tags)
- except (BotoCoreError, ClientError) as e:
- module.debug("Attempting to delete the cert we just created, arn=%s" % arn)
- try:
- self.delete_certificate_with_backoff(client, arn)
- except Exception as f:
-                module.warn("Certificate %s exists but is not tagged, so Ansible will not see it on the next run." % arn)
- module.fail_json_aws(e, msg="Couldn't tag certificate %s, couldn't delete it either" % arn)
- module.fail_json_aws(e, msg="Couldn't tag certificate %s" % arn)
-
- return arn
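
For context on how the removed helper was consumed, here is a minimal, hedged sketch of a module driving ACMServiceManager against the pre-migration tree. The module's argument_spec and the ISSUED status filter are illustrative assumptions, not taken from any real module in this commit:

# Hypothetical usage sketch -- not part of this commit.
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.acm import ACMServiceManager


def main():
    module = AnsibleAWSModule(argument_spec=dict(
        domain_name=dict(type='str'),  # illustrative parameter
    ))
    acm = ACMServiceManager(module)
    client = module.client('acm')
    # get_certificates() filters by domain and status, then enriches each
    # result with certificate data and tags (see the methods above).
    certs = acm.get_certificates(client, module,
                                 domain_name=module.params.get('domain_name'),
                                 statuses=['ISSUED'])
    module.exit_json(changed=False, certificates=certs)


if __name__ == '__main__':
    main()
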
diff --git a/lib/ansible/module_utils/aws/batch.py b/lib/ansible/module_utils/aws/batch.py
deleted file mode 100644
index 3c92798ddb..0000000000
--- a/lib/ansible/module_utils/aws/batch.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright (c) 2017 Ansible Project
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-"""
-This module adds shared support for Batch modules.
-"""
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, snake_dict_to_camel_dict
-
-try:
- from botocore.exceptions import ClientError
-except ImportError:
- pass # Handled by HAS_BOTO3
-
-
-class AWSConnection(object):
- """
- Create the connection object and client objects as required.
- """
-
- def __init__(self, ansible_obj, resources, boto3=True):
-
-        ansible_obj.deprecate("The 'ansible.module_utils.aws.batch.AWSConnection' class is deprecated, please use 'AnsibleAWSModule.client()'",
-                              version='2.14')
-
- self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)
-
- self.resource_client = dict()
- if not resources:
- resources = ['batch']
-
- resources.append('iam')
-
- for resource in resources:
- aws_connect_kwargs.update(dict(region=self.region,
- endpoint=self.endpoint,
- conn_type='client',
- resource=resource
- ))
- self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
-
- # if region is not provided, then get default profile/session region
- if not self.region:
- self.region = self.resource_client['batch'].meta.region_name
-
- # set account ID
- try:
- self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
- except (ClientError, ValueError, KeyError, IndexError):
- self.account_id = ''
-
- def client(self, resource='batch'):
- return self.resource_client[resource]
-
-
-def cc(key):
- """
- Changes python key into Camel case equivalent. For example, 'compute_environment_name' becomes
- 'computeEnvironmentName'.
-
- :param key:
- :return:
- """
- components = key.split('_')
- return components[0] + "".join([token.capitalize() for token in components[1:]])
-
-
-def set_api_params(module, module_params):
- """
- Sets module parameters to those expected by the boto3 API.
- :param module:
- :param module_params:
- :return:
- """
- api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
- return snake_dict_to_camel_dict(api_params)
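
The cc() helper above is pure Python, so its behaviour is easy to verify in isolation. A tiny sketch, runnable against the pre-migration tree where this import path still exists:

# Hypothetical check -- not part of this commit.
from ansible.module_utils.aws.batch import cc

# snake_case module parameters become the camelCase keys the Batch API expects.
assert cc('compute_environment_name') == 'computeEnvironmentName'
assert cc('state') == 'state'  # single-component keys pass through unchanged
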
diff --git a/lib/ansible/module_utils/aws/cloudfront_facts.py b/lib/ansible/module_utils/aws/cloudfront_facts.py
deleted file mode 100644
index 780f4026a1..0000000000
--- a/lib/ansible/module_utils/aws/cloudfront_facts.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2017 Willem van Ketwich
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-#
-# Author:
-# - Willem van Ketwich <willem@vanketwich.com.au>
-#
-# Common functionality to be used by the modules:
-# - cloudfront_distribution
-# - cloudfront_invalidation
-# - cloudfront_origin_access_identity
-"""
-Common cloudfront facts shared between modules
-"""
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
-
-try:
- import botocore
-except ImportError:
- pass
-
-
-class CloudFrontFactsServiceManager(object):
- """Handles CloudFront Facts Services"""
-
- def __init__(self, module):
- self.module = module
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- self.client = boto3_conn(module, conn_type='client',
- resource='cloudfront', region=region,
- endpoint=ec2_url, **aws_connect_kwargs)
-
- def get_distribution(self, distribution_id):
- try:
- return self.client.get_distribution(Id=distribution_id)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing distribution")
-
- def get_distribution_config(self, distribution_id):
- try:
- return self.client.get_distribution_config(Id=distribution_id)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing distribution configuration")
-
- def get_origin_access_identity(self, origin_access_identity_id):
- try:
- return self.client.get_cloud_front_origin_access_identity(Id=origin_access_identity_id)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing origin access identity")
-
- def get_origin_access_identity_config(self, origin_access_identity_id):
- try:
- return self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing origin access identity configuration")
-
- def get_invalidation(self, distribution_id, invalidation_id):
- try:
- return self.client.get_invalidation(DistributionId=distribution_id, Id=invalidation_id)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing invalidation")
-
- def get_streaming_distribution(self, distribution_id):
- try:
- return self.client.get_streaming_distribution(Id=distribution_id)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing streaming distribution")
-
- def get_streaming_distribution_config(self, distribution_id):
- try:
- return self.client.get_streaming_distribution_config(Id=distribution_id)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error describing streaming distribution")
-
- def list_origin_access_identities(self):
- try:
- paginator = self.client.get_paginator('list_cloud_front_origin_access_identities')
- result = paginator.paginate().build_full_result().get('CloudFrontOriginAccessIdentityList', {})
- return result.get('Items', [])
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities")
-
- def list_distributions(self, keyed=True):
- try:
- paginator = self.client.get_paginator('list_distributions')
- result = paginator.paginate().build_full_result().get('DistributionList', {})
- distribution_list = result.get('Items', [])
- if not keyed:
- return distribution_list
- return self.keyed_list_helper(distribution_list)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error listing distributions")
-
- def list_distributions_by_web_acl_id(self, web_acl_id):
- try:
-            result = self.client.list_distributions_by_web_acl_id(WebACLId=web_acl_id)
- distribution_list = result.get('DistributionList', {}).get('Items', [])
- return self.keyed_list_helper(distribution_list)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error listing distributions by web acl id")
-
- def list_invalidations(self, distribution_id):
- try:
- paginator = self.client.get_paginator('list_invalidations')
- result = paginator.paginate(DistributionId=distribution_id).build_full_result()
- return result.get('InvalidationList', {}).get('Items', [])
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error listing invalidations")
-
- def list_streaming_distributions(self, keyed=True):
- try:
- paginator = self.client.get_paginator('list_streaming_distributions')
- result = paginator.paginate().build_full_result()
- streaming_distribution_list = result.get('StreamingDistributionList', {}).get('Items', [])
- if not keyed:
- return streaming_distribution_list
- return self.keyed_list_helper(streaming_distribution_list)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error listing streaming distributions")
-
- def summary(self):
- summary_dict = {}
- summary_dict.update(self.summary_get_distribution_list(False))
- summary_dict.update(self.summary_get_distribution_list(True))
- summary_dict.update(self.summary_get_origin_access_identity_list())
- return summary_dict
-
- def summary_get_origin_access_identity_list(self):
- try:
- origin_access_identity_list = {'origin_access_identities': []}
- origin_access_identities = self.list_origin_access_identities()
- for origin_access_identity in origin_access_identities:
- oai_id = origin_access_identity['Id']
- oai_full_response = self.get_origin_access_identity(oai_id)
- oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
- origin_access_identity_list['origin_access_identities'].append(oai_summary)
- return origin_access_identity_list
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error generating summary of origin access identities")
-
- def summary_get_distribution_list(self, streaming=False):
- try:
- list_name = 'streaming_distributions' if streaming else 'distributions'
- key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
- distribution_list = {list_name: []}
- distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
- for dist in distributions:
- temp_distribution = {}
- for key_name in key_list:
- temp_distribution[key_name] = dist[key_name]
- temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])]
- temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
- if not streaming:
- temp_distribution['WebACLId'] = dist['WebACLId']
- invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
- if invalidation_ids:
- temp_distribution['Invalidations'] = invalidation_ids
- resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'])
- temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
- distribution_list[list_name].append(temp_distribution)
- return distribution_list
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error generating summary of distributions")
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error generating summary of distributions")
-
- def get_etag_from_distribution_id(self, distribution_id, streaming):
- distribution = {}
- if not streaming:
- distribution = self.get_distribution(distribution_id)
- else:
- distribution = self.get_streaming_distribution(distribution_id)
- return distribution['ETag']
-
- def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
- try:
- invalidation_ids = []
- invalidations = self.list_invalidations(distribution_id)
- for invalidation in invalidations:
- invalidation_ids.append(invalidation['Id'])
- return invalidation_ids
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error getting list of invalidation ids")
-
- def get_distribution_id_from_domain_name(self, domain_name):
- try:
- distribution_id = ""
- distributions = self.list_distributions(False)
- distributions += self.list_streaming_distributions(False)
- for dist in distributions:
- if 'Items' in dist['Aliases']:
- for alias in dist['Aliases']['Items']:
- if str(alias).lower() == domain_name.lower():
- distribution_id = dist['Id']
- break
- return distribution_id
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error getting distribution id from domain name")
-
- def get_aliases_from_distribution_id(self, distribution_id):
- try:
- distribution = self.get_distribution(distribution_id)
- return distribution['DistributionConfig']['Aliases'].get('Items', [])
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id")
-
- def keyed_list_helper(self, list_to_key):
- keyed_list = dict()
- for item in list_to_key:
- distribution_id = item['Id']
- if 'Items' in item['Aliases']:
- aliases = item['Aliases']['Items']
- for alias in aliases:
- keyed_list.update({alias: item})
- keyed_list.update({distribution_id: item})
- return keyed_list
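
A hedged sketch of how a facts module might wire up CloudFrontFactsServiceManager and return its combined summary; the empty argument_spec is a placeholder, not the real cloudfront_facts module:

# Hypothetical usage sketch -- not part of this commit.
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager


def main():
    module = AnsibleAWSModule(argument_spec=dict())
    facts = CloudFrontFactsServiceManager(module)
    # summary() merges distributions, streaming distributions, and
    # origin access identities, as defined above.
    module.exit_json(changed=False, cloudfront=facts.summary())


if __name__ == '__main__':
    main()
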
diff --git a/lib/ansible/module_utils/aws/core.py b/lib/ansible/module_utils/aws/core.py
deleted file mode 100644
index c4527b6deb..0000000000
--- a/lib/ansible/module_utils/aws/core.py
+++ /dev/null
@@ -1,335 +0,0 @@
-#
-# Copyright 2017 Michael De La Rue | Ansible
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-"""This module adds shared support for generic Amazon AWS modules
-
-**This code is not yet ready for use in user modules. As of 2017**
-**and through to 2018, the interface is likely to change**
-**aggressively as the exact correct interface for ansible AWS modules**
-**is identified. In particular, until this notice goes away or is**
-**changed, methods may disappear from the interface. Please don't**
-**publish modules using this except directly to the main Ansible**
-**development repository.**
-
-In order to use this module, include it as part of a custom
-module as shown below.
-
-    from ansible.module_utils.aws.core import AnsibleAWSModule
-    module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean,
-                              mutually_exclusive=list1, required_together=list2)
-
-The 'AnsibleAWSModule' module provides similar, but more restricted,
-interfaces to the normal Ansible module. It also includes the
-additional methods for connecting to AWS using the standard module arguments
-
- m.resource('lambda') # - get an AWS connection as a boto3 resource.
-
-or
-
- m.client('sts') # - get an AWS connection as a boto3 client.
-
-To make use of AWSRetry easier, it can now be wrapped around any call from a
-module-created client. To add retries to a client, create a client:
-
- m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
-
-Any calls from that client can be made to use the decorator passed at call-time
-using the `aws_retry` argument. By default, no retries are used.
-
- ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
- ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
-
-The call will be retried the specified number of times, so the calling functions
-don't need to be wrapped in the backoff decorator.
-"""
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import re
-import logging
-import traceback
-from functools import wraps
-from distutils.version import LooseVersion
-
-try:
- from cStringIO import StringIO
-except ImportError:
- # Python 3
- from io import StringIO
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils._text import to_native
-from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn
-from ansible.module_utils.ec2 import get_aws_connection_info, get_aws_region
-
-# We will also export HAS_BOTO3 so end user modules can use it.
-__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code')
-
-
-class AnsibleAWSModule(object):
- """An ansible module class for AWS modules
-
-    AnsibleAWSModule provides a base class for building modules which
- connect to Amazon Web Services. The interface is currently more
- restricted than the basic module class with the aim that later the
- basic module class can be reduced. If you find that any key
- feature is missing please contact the author/Ansible AWS team
- (available on #ansible-aws on IRC) to request the additional
- features needed.
- """
- default_settings = {
- "default_args": True,
- "check_boto3": True,
- "auto_retry": True,
- "module_class": AnsibleModule
- }
-
- def __init__(self, **kwargs):
- local_settings = {}
- for key in AnsibleAWSModule.default_settings:
- try:
- local_settings[key] = kwargs.pop(key)
- except KeyError:
- local_settings[key] = AnsibleAWSModule.default_settings[key]
- self.settings = local_settings
-
- if local_settings["default_args"]:
-            # ec2_argument_spec contains the region, so we use that; there's a patch coming
-            # that will add it to aws_argument_spec, and if that's accepted we should
-            # switch over later
- argument_spec_full = ec2_argument_spec()
- try:
- argument_spec_full.update(kwargs["argument_spec"])
- except (TypeError, NameError):
- pass
- kwargs["argument_spec"] = argument_spec_full
-
- self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)
-
- if local_settings["check_boto3"] and not HAS_BOTO3:
- self._module.fail_json(
- msg=missing_required_lib('botocore or boto3'))
-
- self.check_mode = self._module.check_mode
- self._diff = self._module._diff
- self._name = self._module._name
-
- self._botocore_endpoint_log_stream = StringIO()
- self.logger = None
- if self.params.get('debug_botocore_endpoint_logs'):
- self.logger = logging.getLogger('botocore.endpoint')
- self.logger.setLevel(logging.DEBUG)
- self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))
-
- @property
- def params(self):
- return self._module.params
-
- def _get_resource_action_list(self):
- actions = []
- for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
- ln = ln.strip()
- if not ln:
- continue
- found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln)
- if found_operational_request:
- operation_request = found_operational_request.group(0)[20:-1]
- resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
- actions.append("{0}:{1}".format(resource, operation_request))
- return list(set(actions))
-
- def exit_json(self, *args, **kwargs):
- if self.params.get('debug_botocore_endpoint_logs'):
- kwargs['resource_actions'] = self._get_resource_action_list()
- return self._module.exit_json(*args, **kwargs)
-
- def fail_json(self, *args, **kwargs):
- if self.params.get('debug_botocore_endpoint_logs'):
- kwargs['resource_actions'] = self._get_resource_action_list()
- return self._module.fail_json(*args, **kwargs)
-
- def debug(self, *args, **kwargs):
- return self._module.debug(*args, **kwargs)
-
- def warn(self, *args, **kwargs):
- return self._module.warn(*args, **kwargs)
-
- def deprecate(self, *args, **kwargs):
- return self._module.deprecate(*args, **kwargs)
-
- def boolean(self, *args, **kwargs):
- return self._module.boolean(*args, **kwargs)
-
- def md5(self, *args, **kwargs):
- return self._module.md5(*args, **kwargs)
-
- def client(self, service, retry_decorator=None):
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
- conn = boto3_conn(self, conn_type='client', resource=service,
- region=region, endpoint=ec2_url, **aws_connect_kwargs)
- return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator)
-
- def resource(self, service):
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
- return boto3_conn(self, conn_type='resource', resource=service,
- region=region, endpoint=ec2_url, **aws_connect_kwargs)
-
- @property
- def region(self, boto3=True):
- return get_aws_region(self, boto3)
-
- def fail_json_aws(self, exception, msg=None):
- """call fail_json with processed exception
-
- function for converting exceptions thrown by AWS SDK modules,
- botocore, boto3 and boto, into nice error messages.
- """
- last_traceback = traceback.format_exc()
-
- # to_native is trusted to handle exceptions that str() could
- # convert to text.
- try:
- except_msg = to_native(exception.message)
- except AttributeError:
- except_msg = to_native(exception)
-
- if msg is not None:
- message = '{0}: {1}'.format(msg, except_msg)
- else:
- message = except_msg
-
- try:
- response = exception.response
- except AttributeError:
- response = None
-
- failure = dict(
- msg=message,
- exception=last_traceback,
- **self._gather_versions()
- )
-
- if response is not None:
- failure.update(**camel_dict_to_snake_dict(response))
-
- self.fail_json(**failure)
-
- def _gather_versions(self):
- """Gather AWS SDK (boto3 and botocore) dependency versions
-
- Returns {'boto3_version': str, 'botocore_version': str}
- Returns {} if neither are installed
- """
- if not HAS_BOTO3:
- return {}
- import boto3
- import botocore
- return dict(boto3_version=boto3.__version__,
- botocore_version=botocore.__version__)
-
- def boto3_at_least(self, desired):
- """Check if the available boto3 version is greater than or equal to a desired version.
-
- Usage:
- if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
- # conditionally fail on old boto3 versions if a specific feature is not supported
- module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
- """
- existing = self._gather_versions()
- return LooseVersion(existing['boto3_version']) >= LooseVersion(desired)
-
- def botocore_at_least(self, desired):
- """Check if the available botocore version is greater than or equal to a desired version.
-
- Usage:
- if not module.botocore_at_least('1.2.3'):
- module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
- if not module.botocore_at_least('1.5.3'):
- module.warn('Botocore did not include waiters for Service X before 1.5.3. '
- 'To wait until Service X resources are fully available, update botocore.')
- """
- existing = self._gather_versions()
- return LooseVersion(existing['botocore_version']) >= LooseVersion(desired)
-
-
-class _RetryingBotoClientWrapper(object):
- __never_wait = (
- 'get_paginator', 'can_paginate',
- 'get_waiter', 'generate_presigned_url',
- )
-
- def __init__(self, client, retry):
- self.client = client
- self.retry = retry
-
- def _create_optional_retry_wrapper_function(self, unwrapped):
- retrying_wrapper = self.retry(unwrapped)
-
- @wraps(unwrapped)
- def deciding_wrapper(aws_retry=False, *args, **kwargs):
- if aws_retry:
- return retrying_wrapper(*args, **kwargs)
- else:
- return unwrapped(*args, **kwargs)
- return deciding_wrapper
-
- def __getattr__(self, name):
- unwrapped = getattr(self.client, name)
- if name in self.__never_wait:
- return unwrapped
- elif callable(unwrapped):
- wrapped = self._create_optional_retry_wrapper_function(unwrapped)
- setattr(self, name, wrapped)
- return wrapped
- else:
- return unwrapped
-
-
-def is_boto3_error_code(code, e=None):
- """Check if the botocore exception is raised by a specific error code.
-
-    Returns ClientError if the error code matches; otherwise (no error code, or a different code) returns a dummy exception type that is never raised
-
- Example:
- try:
- ec2.describe_instances(InstanceIds=['potato'])
- except is_boto3_error_code('InvalidInstanceID.Malformed'):
- # handle the error for that code case
- except botocore.exceptions.ClientError as e:
- # handle the generic error case for all other codes
- """
- from botocore.exceptions import ClientError
- if e is None:
- import sys
- dummy, e, dummy = sys.exc_info()
- if isinstance(e, ClientError) and e.response['Error']['Code'] == code:
- return ClientError
- return type('NeverEverRaisedException', (Exception,), {})
-
-
-def get_boto3_client_method_parameters(client, method_name, required=False):
- op = client.meta.method_to_api_mapping.get(method_name)
- input_shape = client._service_model.operation_model(op).input_shape
- if not input_shape:
- parameters = []
- elif required:
- parameters = list(input_shape.required_members)
- else:
- parameters = list(input_shape.members.keys())
- return parameters
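
To make the docstring's retry behaviour concrete, here is a minimal sketch combining a retry-decorated client with is_boto3_error_code(), assuming the pre-migration import paths; the instance_id parameter and the "treat NotFound as absent" policy are illustrative choices:

# Hypothetical usage sketch -- not part of this commit.
import botocore.exceptions

from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import AWSRetry


def main():
    module = AnsibleAWSModule(argument_spec=dict(
        instance_id=dict(type='str', required=True),  # illustrative parameter
    ))
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
    try:
        # aws_retry=True opts this call into the decorator passed above.
        result = ec2.describe_instances(InstanceIds=[module.params['instance_id']],
                                        aws_retry=True)
    except is_boto3_error_code('InvalidInstanceID.NotFound'):
        # Matching code: report "no instances" instead of failing.
        module.exit_json(changed=False, instances=[])
    except botocore.exceptions.ClientError as e:
        # Any other AWS error is fatal.
        module.fail_json_aws(e, msg="Couldn't describe instance")
    module.exit_json(changed=False, instances=result['Reservations'])


if __name__ == '__main__':
    main()
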
diff --git a/lib/ansible/module_utils/aws/direct_connect.py b/lib/ansible/module_utils/aws/direct_connect.py
deleted file mode 100644
index 6419739095..0000000000
--- a/lib/ansible/module_utils/aws/direct_connect.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright (c) 2017 Ansible Project
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-"""
-This module adds shared support for Direct Connect modules.
-"""
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import traceback
-try:
- import botocore
-except ImportError:
- pass
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-
-class DirectConnectError(Exception):
- def __init__(self, msg, last_traceback=None, exception=None):
- self.msg = msg
- self.last_traceback = last_traceback
- self.exception = exception
-
-
-def delete_connection(client, connection_id):
- try:
- client.delete_connection(connectionId=connection_id)
- except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to delete DirectConnection {0}.".format(connection_id),
- last_traceback=traceback.format_exc(),
- exception=e)
-
-
-def associate_connection_and_lag(client, connection_id, lag_id):
- try:
- client.associate_connection_with_lag(connectionId=connection_id,
- lagId=lag_id)
- except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to associate Direct Connect connection {0}"
- " with link aggregation group {1}.".format(connection_id, lag_id),
- last_traceback=traceback.format_exc(),
- exception=e)
-
-
-def disassociate_connection_and_lag(client, connection_id, lag_id):
- try:
- client.disassociate_connection_from_lag(connectionId=connection_id,
- lagId=lag_id)
- except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to disassociate Direct Connect connection {0}"
- " from link aggregation group {1}.".format(connection_id, lag_id),
- last_traceback=traceback.format_exc(),
- exception=e)
-
-
-def delete_virtual_interface(client, virtual_interface):
- try:
- client.delete_virtual_interface(virtualInterfaceId=virtual_interface)
- except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Could not delete virtual interface {0}".format(virtual_interface),
- last_traceback=traceback.format_exc(),
- exception=e)
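
A short sketch of the intended error-handling contract: callers catch DirectConnectError and turn it back into a module failure. The connection_id parameter is illustrative:

# Hypothetical usage sketch -- not part of this commit.
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.direct_connect import DirectConnectError, delete_connection


def main():
    module = AnsibleAWSModule(argument_spec=dict(
        connection_id=dict(type='str', required=True),  # illustrative parameter
    ))
    client = module.client('directconnect')
    try:
        delete_connection(client, module.params['connection_id'])
    except DirectConnectError as e:
        # The helper packs the original botocore exception and traceback.
        module.fail_json_aws(e.exception, msg=e.msg)
    module.exit_json(changed=True)


if __name__ == '__main__':
    main()
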
diff --git a/lib/ansible/module_utils/aws/elb_utils.py b/lib/ansible/module_utils/aws/elb_utils.py
deleted file mode 100644
index 4027d3309a..0000000000
--- a/lib/ansible/module_utils/aws/elb_utils.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.module_utils.ec2 import AWSRetry
-
-# Non-ansible imports
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass
-
-
-def get_elb(connection, module, elb_name):
- """
- Get an ELB based on name. If not found, return None.
-
- :param connection: AWS boto3 elbv2 connection
- :param module: Ansible module
- :param elb_name: Name of load balancer to get
- :return: boto3 ELB dict or None if not found
- """
- try:
- return _get_elb(connection, module, elb_name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
-
-
-@AWSRetry.jittered_backoff()
-def _get_elb(connection, module, elb_name):
- """
- Get an ELB based on name using AWSRetry. If not found, return None.
-
- :param connection: AWS boto3 elbv2 connection
- :param module: Ansible module
- :param elb_name: Name of load balancer to get
- :return: boto3 ELB dict or None if not found
- """
-
- try:
- load_balancer_paginator = connection.get_paginator('describe_load_balancers')
- return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())['LoadBalancers'][0]
- except (BotoCoreError, ClientError) as e:
- if e.response['Error']['Code'] == 'LoadBalancerNotFound':
- return None
- else:
- raise e
-
-
-def get_elb_listener(connection, module, elb_arn, listener_port):
- """
- Get an ELB listener based on the port provided. If not found, return None.
-
- :param connection: AWS boto3 elbv2 connection
- :param module: Ansible module
- :param elb_arn: ARN of the ELB to look at
- :param listener_port: Port of the listener to look for
- :return: boto3 ELB listener dict or None if not found
- """
-
- try:
- listener_paginator = connection.get_paginator('describe_listeners')
- listeners = (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result())['Listeners']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
-
-    matching_listener = None
-
-    for listener in listeners:
-        if listener['Port'] == listener_port:
-            matching_listener = listener
-            break
-
-    return matching_listener
-
-
-def get_elb_listener_rules(connection, module, listener_arn):
- """
- Get rules for a particular ELB listener using the listener ARN.
-
- :param connection: AWS boto3 elbv2 connection
- :param module: Ansible module
- :param listener_arn: ARN of the ELB listener
- :return: boto3 ELB rules list
- """
-
- try:
- return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)['Rules']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
-
-
-def convert_tg_name_to_arn(connection, module, tg_name):
- """
- Get ARN of a target group using the target group's name
-
- :param connection: AWS boto3 elbv2 connection
- :param module: Ansible module
- :param tg_name: Name of the target group
- :return: target group ARN string
- """
-
- try:
- response = AWSRetry.jittered_backoff()(connection.describe_target_groups)(Names=[tg_name])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
-
- tg_arn = response['TargetGroups'][0]['TargetGroupArn']
-
- return tg_arn
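
A sketch of how an ELB module might combine these lookups; the name and port parameters are assumptions for illustration:

# Hypothetical usage sketch -- not part of this commit.
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.elb_utils import get_elb, get_elb_listener


def main():
    module = AnsibleAWSModule(argument_spec=dict(
        name=dict(type='str', required=True),  # illustrative parameter
        port=dict(type='int', default=443),    # illustrative parameter
    ))
    connection = module.client('elbv2')
    elb = get_elb(connection, module, module.params['name'])
    if elb is None:
        module.fail_json(msg="Load balancer %s not found" % module.params['name'])
    # None here simply means no listener is bound to the requested port.
    listener = get_elb_listener(connection, module, elb['LoadBalancerArn'],
                                module.params['port'])
    module.exit_json(changed=False, listener=listener)


if __name__ == '__main__':
    main()
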
diff --git a/lib/ansible/module_utils/aws/elbv2.py b/lib/ansible/module_utils/aws/elbv2.py
deleted file mode 100644
index 0b68bde3a4..0000000000
--- a/lib/ansible/module_utils/aws/elbv2.py
+++ /dev/null
@@ -1,891 +0,0 @@
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-# Ansible imports
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names, \
- ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, compare_policies as compare_dicts, \
- AWSRetry
-from ansible.module_utils.aws.elb_utils import get_elb, get_elb_listener, convert_tg_name_to_arn
-
-# Non-ansible imports
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass
-import traceback
-from copy import deepcopy
-
-
-class ElasticLoadBalancerV2(object):
-
- def __init__(self, connection, module):
-
- self.connection = connection
- self.module = module
- self.changed = False
- self.new_load_balancer = False
- self.scheme = module.params.get("scheme")
- self.name = module.params.get("name")
- self.subnet_mappings = module.params.get("subnet_mappings")
- self.subnets = module.params.get("subnets")
- self.deletion_protection = module.params.get("deletion_protection")
- self.wait = module.params.get("wait")
-
- if module.params.get("tags") is not None:
- self.tags = ansible_dict_to_boto3_tag_list(module.params.get("tags"))
- else:
- self.tags = None
- self.purge_tags = module.params.get("purge_tags")
-
- self.elb = get_elb(connection, module, self.name)
- if self.elb is not None:
- self.elb_attributes = self.get_elb_attributes()
- self.elb['tags'] = self.get_elb_tags()
- else:
- self.elb_attributes = None
-
- def wait_for_status(self, elb_arn):
- """
- Wait for load balancer to reach 'active' status
-
- :param elb_arn: The load balancer ARN
- :return:
- """
-
- try:
- waiter = self.connection.get_waiter('load_balancer_available')
- waiter.wait(LoadBalancerArns=[elb_arn])
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- def get_elb_attributes(self):
- """
- Get load balancer attributes
-
- :return:
- """
-
- try:
- attr_list = AWSRetry.jittered_backoff()(
- self.connection.describe_load_balancer_attributes
- )(LoadBalancerArn=self.elb['LoadBalancerArn'])['Attributes']
-
- elb_attributes = boto3_tag_list_to_ansible_dict(attr_list)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- # Replace '.' with '_' in attribute key names to make it more Ansibley
- return dict((k.replace('.', '_'), v) for k, v in elb_attributes.items())
-
- def update_elb_attributes(self):
- """
- Update the elb_attributes parameter
- :return:
- """
- self.elb_attributes = self.get_elb_attributes()
-
- def get_elb_tags(self):
- """
- Get load balancer tags
-
- :return:
- """
-
- try:
- return AWSRetry.jittered_backoff()(
- self.connection.describe_tags
- )(ResourceArns=[self.elb['LoadBalancerArn']])['TagDescriptions'][0]['Tags']
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- def delete_tags(self, tags_to_delete):
- """
- Delete elb tags
-
- :return:
- """
-
- try:
- AWSRetry.jittered_backoff()(
- self.connection.remove_tags
- )(ResourceArns=[self.elb['LoadBalancerArn']], TagKeys=tags_to_delete)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- self.changed = True
-
- def modify_tags(self):
- """
- Modify elb tags
-
- :return:
- """
-
- try:
- AWSRetry.jittered_backoff()(
- self.connection.add_tags
- )(ResourceArns=[self.elb['LoadBalancerArn']], Tags=self.tags)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- self.changed = True
-
- def delete(self):
- """
- Delete elb
- :return:
- """
-
- try:
- AWSRetry.jittered_backoff()(
- self.connection.delete_load_balancer
- )(LoadBalancerArn=self.elb['LoadBalancerArn'])
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- self.changed = True
-
- def compare_subnets(self):
- """
- Compare user subnets with current ELB subnets
-
- :return: bool True if they match otherwise False
- """
-
- subnet_mapping_id_list = []
- subnet_mappings = []
-
- # Check if we're dealing with subnets or subnet_mappings
- if self.subnets is not None:
- # Convert subnets to subnet_mappings format for comparison
- for subnet in self.subnets:
- subnet_mappings.append({'SubnetId': subnet})
-
- if self.subnet_mappings is not None:
- # Use this directly since we're comparing as a mapping
- subnet_mappings = self.subnet_mappings
-
- # Build a subnet_mapping-style structure of what's currently
- # on the load balancer
- for subnet in self.elb['AvailabilityZones']:
- this_mapping = {'SubnetId': subnet['SubnetId']}
- for address in subnet.get('LoadBalancerAddresses', []):
- if 'AllocationId' in address:
- this_mapping['AllocationId'] = address['AllocationId']
- break
-
- subnet_mapping_id_list.append(this_mapping)
-
- return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set(frozenset(mapping.items()) for mapping in subnet_mappings)
-
- def modify_subnets(self):
- """
- Modify elb subnets to match module parameters
- :return:
- """
-
- try:
- AWSRetry.jittered_backoff()(
- self.connection.set_subnets
- )(LoadBalancerArn=self.elb['LoadBalancerArn'], Subnets=self.subnets)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- self.changed = True
-
- def update(self):
- """
- Update the elb from AWS
- :return:
- """
-
- self.elb = get_elb(self.connection, self.module, self.module.params.get("name"))
- self.elb['tags'] = self.get_elb_tags()
-
-
-class ApplicationLoadBalancer(ElasticLoadBalancerV2):
-
- def __init__(self, connection, connection_ec2, module):
- """
-
- :param connection: boto3 connection
- :param module: Ansible module
- """
- super(ApplicationLoadBalancer, self).__init__(connection, module)
-
- self.connection_ec2 = connection_ec2
-
- # Ansible module parameters specific to ALBs
- self.type = 'application'
- if module.params.get('security_groups') is not None:
- try:
- self.security_groups = AWSRetry.jittered_backoff()(
- get_ec2_security_group_ids_from_names
- )(module.params.get('security_groups'), self.connection_ec2, boto3=True)
- except ValueError as e:
- self.module.fail_json(msg=str(e), exception=traceback.format_exc())
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
- else:
- self.security_groups = module.params.get('security_groups')
- self.access_logs_enabled = module.params.get("access_logs_enabled")
- self.access_logs_s3_bucket = module.params.get("access_logs_s3_bucket")
- self.access_logs_s3_prefix = module.params.get("access_logs_s3_prefix")
- self.idle_timeout = module.params.get("idle_timeout")
- self.http2 = module.params.get("http2")
-
- if self.elb is not None and self.elb['Type'] != 'application':
- self.module.fail_json(msg="The load balancer type you are trying to manage is not application. Try elb_network_lb module instead.")
-
- def create_elb(self):
- """
- Create a load balancer
- :return:
- """
-
- # Required parameters
- params = dict()
- params['Name'] = self.name
- params['Type'] = self.type
-
- # Other parameters
- if self.subnets is not None:
- params['Subnets'] = self.subnets
- if self.subnet_mappings is not None:
- params['SubnetMappings'] = self.subnet_mappings
- if self.security_groups is not None:
- params['SecurityGroups'] = self.security_groups
- params['Scheme'] = self.scheme
- if self.tags:
- params['Tags'] = self.tags
-
- try:
- self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0]
- self.changed = True
- self.new_load_balancer = True
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- if self.wait:
- self.wait_for_status(self.elb['LoadBalancerArn'])
-
- def modify_elb_attributes(self):
- """
- Update Application ELB attributes if required
-
- :return:
- """
-
- update_attributes = []
-
- if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']:
- update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()})
- if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']:
- update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket})
- if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']:
- update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix})
- if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
- update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
- if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']:
- update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)})
- if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']:
- update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()})
-
- if update_attributes:
- try:
- AWSRetry.jittered_backoff()(
- self.connection.modify_load_balancer_attributes
- )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes)
- self.changed = True
- except (BotoCoreError, ClientError) as e:
- # Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
- if self.new_load_balancer:
- AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
- self.module.fail_json_aws(e)
-
- def compare_security_groups(self):
- """
- Compare user security groups with current ELB security groups
-
- :return: bool True if they match otherwise False
- """
-
- return set(self.elb['SecurityGroups']) == set(self.security_groups)
-
- def modify_security_groups(self):
- """
- Modify elb security groups to match module parameters
- :return:
- """
-
- try:
- AWSRetry.jittered_backoff()(
- self.connection.set_security_groups
- )(LoadBalancerArn=self.elb['LoadBalancerArn'], SecurityGroups=self.security_groups)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- self.changed = True
-
-
-class NetworkLoadBalancer(ElasticLoadBalancerV2):
-
- def __init__(self, connection, connection_ec2, module):
-
- """
-
- :param connection: boto3 connection
- :param module: Ansible module
- """
- super(NetworkLoadBalancer, self).__init__(connection, module)
-
- self.connection_ec2 = connection_ec2
-
- # Ansible module parameters specific to NLBs
- self.type = 'network'
- self.cross_zone_load_balancing = module.params.get('cross_zone_load_balancing')
-
- if self.elb is not None and self.elb['Type'] != 'network':
- self.module.fail_json(msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead.")
-
- def create_elb(self):
- """
- Create a load balancer
- :return:
- """
-
- # Required parameters
- params = dict()
- params['Name'] = self.name
- params['Type'] = self.type
-
- # Other parameters
- if self.subnets is not None:
- params['Subnets'] = self.subnets
- if self.subnet_mappings is not None:
- params['SubnetMappings'] = self.subnet_mappings
- params['Scheme'] = self.scheme
- if self.tags:
- params['Tags'] = self.tags
-
- try:
- self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0]
- self.changed = True
- self.new_load_balancer = True
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- if self.wait:
- self.wait_for_status(self.elb['LoadBalancerArn'])
-
- def modify_elb_attributes(self):
- """
- Update Network ELB attributes if required
-
- :return:
- """
-
- update_attributes = []
-
- if self.cross_zone_load_balancing is not None and str(self.cross_zone_load_balancing).lower() != \
- self.elb_attributes['load_balancing_cross_zone_enabled']:
- update_attributes.append({'Key': 'load_balancing.cross_zone.enabled', 'Value': str(self.cross_zone_load_balancing).lower()})
- if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
- update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
-
- if update_attributes:
- try:
- AWSRetry.jittered_backoff()(
- self.connection.modify_load_balancer_attributes
- )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes)
- self.changed = True
- except (BotoCoreError, ClientError) as e:
- # Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
- if self.new_load_balancer:
- AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
- self.module.fail_json_aws(e)
-
- def modify_subnets(self):
- """
- Modify elb subnets to match module parameters (unsupported for NLB)
- :return:
- """
-
- self.module.fail_json(msg='Modifying subnets and elastic IPs is not supported for Network Load Balancer')
-
-
-class ELBListeners(object):
-
- def __init__(self, connection, module, elb_arn):
-
- self.connection = connection
- self.module = module
- self.elb_arn = elb_arn
- listeners = module.params.get("listeners")
- if listeners is not None:
- # Remove suboption argspec defaults of None from each listener
- listeners = [dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None) for listener_dict in listeners]
- self.listeners = self._ensure_listeners_default_action_has_arn(listeners)
- self.current_listeners = self._get_elb_listeners()
- self.purge_listeners = module.params.get("purge_listeners")
- self.changed = False
-
- def update(self):
- """
- Update the listeners for the ELB
-
- :return:
- """
- self.current_listeners = self._get_elb_listeners()
-
- def _get_elb_listeners(self):
- """
- Get ELB listeners
-
- :return:
- """
-
- try:
- listener_paginator = self.connection.get_paginator('describe_listeners')
- return (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=self.elb_arn).build_full_result())['Listeners']
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- def _ensure_listeners_default_action_has_arn(self, listeners):
- """
- If a listener DefaultAction has been passed with a Target Group Name instead of an ARN, look up the ARN and
- replace the name.
-
- :param listeners: a list of listener dicts
- :return: the same list of dicts, ensuring that each listener DefaultActions dict has a TargetGroupArn key. If a TargetGroupName key exists, it is removed.
- """
-
- if not listeners:
- listeners = []
-
- fixed_listeners = []
- for listener in listeners:
- fixed_actions = []
- for action in listener['DefaultActions']:
- if 'TargetGroupName' in action:
- action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection,
- self.module,
- action['TargetGroupName'])
- del action['TargetGroupName']
- fixed_actions.append(action)
- listener['DefaultActions'] = fixed_actions
- fixed_listeners.append(listener)
-
- return fixed_listeners
-
- def compare_listeners(self):
- """
-
- :return:
- """
- listeners_to_modify = []
- listeners_to_delete = []
- listeners_to_add = deepcopy(self.listeners)
-
- # Check each current listener port to see if it's been passed to the module
- for current_listener in self.current_listeners:
- current_listener_passed_to_module = False
- for new_listener in self.listeners[:]:
- new_listener['Port'] = int(new_listener['Port'])
- if current_listener['Port'] == new_listener['Port']:
- current_listener_passed_to_module = True
- # Remove what we match so that what is left can be marked as 'to be added'
- listeners_to_add.remove(new_listener)
- modified_listener = self._compare_listener(current_listener, new_listener)
- if modified_listener:
- modified_listener['Port'] = current_listener['Port']
- modified_listener['ListenerArn'] = current_listener['ListenerArn']
- listeners_to_modify.append(modified_listener)
- break
-
- # If the current listener was not matched against passed listeners and purge is True, mark for removal
- if not current_listener_passed_to_module and self.purge_listeners:
- listeners_to_delete.append(current_listener['ListenerArn'])
-
- return listeners_to_add, listeners_to_modify, listeners_to_delete
-
- def _compare_listener(self, current_listener, new_listener):
- """
- Compare two listeners.
-
- :param current_listener:
- :param new_listener:
- :return:
- """
-
- modified_listener = {}
-
- # Port
- if current_listener['Port'] != new_listener['Port']:
- modified_listener['Port'] = new_listener['Port']
-
- # Protocol
- if current_listener['Protocol'] != new_listener['Protocol']:
- modified_listener['Protocol'] = new_listener['Protocol']
-
- # If Protocol is HTTPS, check additional attributes
- if current_listener['Protocol'] == 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
- # Cert
- if current_listener['SslPolicy'] != new_listener['SslPolicy']:
- modified_listener['SslPolicy'] = new_listener['SslPolicy']
- if current_listener['Certificates'][0]['CertificateArn'] != new_listener['Certificates'][0]['CertificateArn']:
- modified_listener['Certificates'] = []
- modified_listener['Certificates'].append({})
- modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']
- elif current_listener['Protocol'] != 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
- modified_listener['SslPolicy'] = new_listener['SslPolicy']
- modified_listener['Certificates'] = []
- modified_listener['Certificates'].append({})
- modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']
-
- # Default action
-
- # Check proper rule format on current listener
- if len(current_listener['DefaultActions']) > 1:
- for action in current_listener['DefaultActions']:
- if 'Order' not in action:
- self.module.fail_json(msg="'Order' key not found in actions. "
- "installed version of botocore does not support "
- "multiple actions, please upgrade botocore to version "
- "1.10.30 or higher")
-
- # If the lengths of the actions are the same, we'll have to verify that the
- # contents of those actions are the same
- if len(current_listener['DefaultActions']) == len(new_listener['DefaultActions']):
- # if actions have just one element, compare the contents and then update if
- # they're different
- if len(current_listener['DefaultActions']) == 1 and len(new_listener['DefaultActions']) == 1:
- if current_listener['DefaultActions'] != new_listener['DefaultActions']:
- modified_listener['DefaultActions'] = new_listener['DefaultActions']
- # if actions have multiple elements, we'll have to order them first before comparing.
- # multiple actions will have an 'Order' key for this purpose
- else:
- current_actions_sorted = sorted(current_listener['DefaultActions'], key=lambda x: x['Order'])
- new_actions_sorted = sorted(new_listener['DefaultActions'], key=lambda x: x['Order'])
-
- # the AWS api won't return the client secret, so we'll have to remove it
- # or the module will always see the new and current actions as different
- # and try to apply the same config
- new_actions_sorted_no_secret = []
- for action in new_actions_sorted:
- # the secret is currently only defined in the oidc config
- if action['Type'] == 'authenticate-oidc':
- action['AuthenticateOidcConfig'].pop('ClientSecret')
- new_actions_sorted_no_secret.append(action)
- else:
- new_actions_sorted_no_secret.append(action)
-
- if current_actions_sorted != new_actions_sorted_no_secret:
- modified_listener['DefaultActions'] = new_listener['DefaultActions']
- # If the action lengths are different, then replace with the new actions
- else:
- modified_listener['DefaultActions'] = new_listener['DefaultActions']
-
- return modified_listener or None
-
-
-class ELBListener(object):
-
- def __init__(self, connection, module, listener, elb_arn):
- """
-
- :param connection:
- :param module:
- :param listener:
- :param elb_arn:
- """
-
- self.connection = connection
- self.module = module
- self.listener = listener
- self.elb_arn = elb_arn
-
- def add(self):
-
- try:
- # Rules is not a valid parameter for create_listener
- if 'Rules' in self.listener:
- self.listener.pop('Rules')
- AWSRetry.jittered_backoff()(self.connection.create_listener)(LoadBalancerArn=self.elb_arn, **self.listener)
- except (BotoCoreError, ClientError) as e:
- if '"Order", must be one of: Type, TargetGroupArn' in str(e):
- self.module.fail_json(msg="installed version of botocore does not support "
- "multiple actions, please upgrade botocore to version "
- "1.10.30 or higher")
- else:
- self.module.fail_json_aws(e)
-
- def modify(self):
-
- try:
- # Rules is not a valid parameter for modify_listener
- if 'Rules' in self.listener:
- self.listener.pop('Rules')
- AWSRetry.jittered_backoff()(self.connection.modify_listener)(**self.listener)
- except (BotoCoreError, ClientError) as e:
- if '"Order", must be one of: Type, TargetGroupArn' in str(e):
- self.module.fail_json(msg="installed version of botocore does not support "
- "multiple actions, please upgrade botocore to version "
- "1.10.30 or higher")
- else:
- self.module.fail_json_aws(e)
-
- def delete(self):
-
- try:
- AWSRetry.jittered_backoff()(self.connection.delete_listener)(ListenerArn=self.listener)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
-
-class ELBListenerRules(object):
-
- def __init__(self, connection, module, elb_arn, listener_rules, listener_port):
-
- self.connection = connection
- self.module = module
- self.elb_arn = elb_arn
- self.rules = self._ensure_rules_action_has_arn(listener_rules)
- self.changed = False
-
- # Get listener based on port so we can use ARN
- self.current_listener = get_elb_listener(connection, module, elb_arn, listener_port)
- self.listener_arn = self.current_listener['ListenerArn']
- self.rules_to_add = deepcopy(self.rules)
- self.rules_to_modify = []
- self.rules_to_delete = []
-
- # If the listener exists (i.e. has an ARN) get rules for the listener
- if 'ListenerArn' in self.current_listener:
- self.current_rules = self._get_elb_listener_rules()
- else:
- self.current_rules = []
-
- def _ensure_rules_action_has_arn(self, rules):
- """
- If a rule Action has been passed with a Target Group Name instead of an ARN, look up the ARN and
- replace the name.
-
- :param rules: a list of rule dicts
- :return: the same list of dicts, ensuring that each rule Actions dict has a TargetGroupArn key. If a TargetGroupName key exists, it is removed.
- """
-
- fixed_rules = []
- for rule in rules:
- fixed_actions = []
- for action in rule['Actions']:
- if 'TargetGroupName' in action:
- action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, self.module, action['TargetGroupName'])
- del action['TargetGroupName']
- fixed_actions.append(action)
- rule['Actions'] = fixed_actions
- fixed_rules.append(rule)
-
- return fixed_rules
-
- def _get_elb_listener_rules(self):
-
- try:
- return AWSRetry.jittered_backoff()(self.connection.describe_rules)(ListenerArn=self.current_listener['ListenerArn'])['Rules']
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- def _compare_condition(self, current_conditions, condition):
- """
-
- :param current_conditions:
- :param condition:
- :return:
- """
-
- condition_found = False
-
- for current_condition in current_conditions:
- if current_condition.get('SourceIpConfig'):
- if (current_condition['Field'] == condition['Field'] and
- current_condition['SourceIpConfig']['Values'][0] == condition['SourceIpConfig']['Values'][0]):
- condition_found = True
- break
- elif current_condition['Field'] == condition['Field'] and sorted(current_condition['Values']) == sorted(condition['Values']):
- condition_found = True
- break
-
- return condition_found
-
- def _compare_rule(self, current_rule, new_rule):
- """
-
- :return:
- """
-
- modified_rule = {}
-
- # Priority
- if int(current_rule['Priority']) != int(new_rule['Priority']):
- modified_rule['Priority'] = new_rule['Priority']
-
- # Actions
-
- # Check proper rule format on current listener
- if len(current_rule['Actions']) > 1:
- for action in current_rule['Actions']:
- if 'Order' not in action:
- self.module.fail_json(msg="'Order' key not found in actions. "
- "installed version of botocore does not support "
- "multiple actions, please upgrade botocore to version "
- "1.10.30 or higher")
-
- # If the lengths of the actions are the same, we'll have to verify that the
- # contents of those actions are the same
- if len(current_rule['Actions']) == len(new_rule['Actions']):
- # if actions have just one element, compare the contents and then update if
- # they're different
- if len(current_rule['Actions']) == 1 and len(new_rule['Actions']) == 1:
- if current_rule['Actions'] != new_rule['Actions']:
- modified_rule['Actions'] = new_rule['Actions']
- # if actions have multiple elements, we'll have to order them first before comparing.
- # multiple actions will have an 'Order' key for this purpose
- else:
- current_actions_sorted = sorted(current_rule['Actions'], key=lambda x: x['Order'])
- new_actions_sorted = sorted(new_rule['Actions'], key=lambda x: x['Order'])
-
- # the AWS api won't return the client secret, so we'll have to remove it
- # or the module will always see the new and current actions as different
- # and try to apply the same config
- new_actions_sorted_no_secret = []
- for action in new_actions_sorted:
- # the secret is currently only defined in the oidc config
- if action['Type'] == 'authenticate-oidc':
- action['AuthenticateOidcConfig'].pop('ClientSecret')
- new_actions_sorted_no_secret.append(action)
- else:
- new_actions_sorted_no_secret.append(action)
-
- if current_actions_sorted != new_actions_sorted_no_secret:
- modified_rule['Actions'] = new_rule['Actions']
- # If the action lengths are different, then replace with the new actions
- else:
- modified_rule['Actions'] = new_rule['Actions']
-
- # Conditions
- modified_conditions = []
- for condition in new_rule['Conditions']:
- if not self._compare_condition(current_rule['Conditions'], condition):
- modified_conditions.append(condition)
-
- if modified_conditions:
- modified_rule['Conditions'] = modified_conditions
-
- return modified_rule
-
- def compare_rules(self):
- """
-
- :return:
- """
-
- rules_to_modify = []
- rules_to_delete = []
- rules_to_add = deepcopy(self.rules)
-
- for current_rule in self.current_rules:
- current_rule_passed_to_module = False
- for new_rule in self.rules[:]:
- if current_rule['Priority'] == str(new_rule['Priority']):
- current_rule_passed_to_module = True
- # Remove what we match so that what is left can be marked as 'to be added'
- rules_to_add.remove(new_rule)
- modified_rule = self._compare_rule(current_rule, new_rule)
- if modified_rule:
- modified_rule['Priority'] = int(current_rule['Priority'])
- modified_rule['RuleArn'] = current_rule['RuleArn']
- modified_rule['Actions'] = new_rule['Actions']
- modified_rule['Conditions'] = new_rule['Conditions']
- rules_to_modify.append(modified_rule)
- break
-
- # If the current rule was not matched against passed rules, mark for removal
- if not current_rule_passed_to_module and not current_rule['IsDefault']:
- rules_to_delete.append(current_rule['RuleArn'])
-
- return rules_to_add, rules_to_modify, rules_to_delete
-
-
-class ELBListenerRule(object):
-
- def __init__(self, connection, module, rule, listener_arn):
-
- self.connection = connection
- self.module = module
- self.rule = rule
- self.listener_arn = listener_arn
- self.changed = False
-
- def create(self):
- """
- Create a listener rule
-
- :return:
- """
-
- try:
- self.rule['ListenerArn'] = self.listener_arn
- self.rule['Priority'] = int(self.rule['Priority'])
- AWSRetry.jittered_backoff()(self.connection.create_rule)(**self.rule)
- except (BotoCoreError, ClientError) as e:
- if '"Order", must be one of: Type, TargetGroupArn' in str(e):
- self.module.fail_json(msg="installed version of botocore does not support "
- "multiple actions, please upgrade botocore to version "
- "1.10.30 or higher")
- else:
- self.module.fail_json_aws(e)
-
- self.changed = True
-
- def modify(self):
- """
- Modify a listener rule
-
- :return:
- """
-
- try:
- del self.rule['Priority']
- AWSRetry.jittered_backoff()(self.connection.modify_rule)(**self.rule)
- except (BotoCoreError, ClientError) as e:
- if '"Order", must be one of: Type, TargetGroupArn' in str(e):
- self.module.fail_json(msg="installed version of botocore does not support "
- "multiple actions, please upgrade botocore to version "
- "1.10.30 or higher")
- else:
- self.module.fail_json_aws(e)
-
- self.changed = True
-
- def delete(self):
- """
- Delete a listener rule
-
- :return:
- """
-
- try:
- AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule['RuleArn'])
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e)
-
- self.changed = True
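The classes above separate a compare step (ELBListeners.compare_listeners returns add/modify/delete lists) from an apply step (ELBListener performs the API calls). A hedged sketch of the driving loop a consumer module might run; the wiring is illustrative rather than the exact code of elb_application_lb:

# Illustrative driver; assumes 'connection', 'module' and an existing 'elb' dict.
listeners_obj = ELBListeners(connection, module, elb['LoadBalancerArn'])
to_add, to_modify, to_delete = listeners_obj.compare_listeners()

for listener in to_add:
    ELBListener(connection, module, listener, elb['LoadBalancerArn']).add()
for listener in to_modify:
    ELBListener(connection, module, listener, elb['LoadBalancerArn']).modify()
for listener_arn in to_delete:
    # delete() takes the listener ARN rather than a listener dict
    ELBListener(connection, module, listener_arn, elb['LoadBalancerArn']).delete()

changed = bool(to_add or to_modify or to_delete)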
diff --git a/lib/ansible/module_utils/aws/iam.py b/lib/ansible/module_utils/aws/iam.py
deleted file mode 100644
index f05999aa37..0000000000
--- a/lib/ansible/module_utils/aws/iam.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
- import re
- import traceback
-
-try:
- from botocore.exceptions import ClientError, NoCredentialsError
-except ImportError:
- pass # caught by HAS_BOTO3
-
-from ansible.module_utils._text import to_native
-
-
-def get_aws_account_id(module):
- """ Given AnsibleAWSModule instance, get the active AWS account ID
-
- get_aws_account_id tries to find out the account that we are working
- on. It's not guaranteed that this will be easy, so we try in
- several different ways. Giving either IAM or STS privileges to
- the account should be enough to permit this.
- """
- account_id = None
- try:
- sts_client = module.client('sts')
- account_id = sts_client.get_caller_identity().get('Account')
- # non-STS sessions may also get NoCredentialsError from this STS call, so
- # we must catch that too and try the IAM version
- except (ClientError, NoCredentialsError):
- try:
- iam_client = module.client('iam')
- account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
- except ClientError as e:
- if (e.response['Error']['Code'] == 'AccessDenied'):
- except_msg = to_native(e)
- # don't match on `arn:aws` because of China region `arn:aws-cn` and similar
- match = re.search(r"arn:\w+:iam::([0-9]{12,32}):\w+/", except_msg)
- if match:
- account_id = match.group(1)
- if account_id is None:
- module.fail_json_aws(e, msg="Could not get AWS account information")
- except Exception as e:
- module.fail_json(
- msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
- exception=traceback.format_exc()
- )
- if not account_id:
- module.fail_json(msg="Failed while determining AWS account ID. Try allowing sts:GetCallerIdentity or iam:GetUser permissions.")
- return to_native(account_id)
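Because the lookup falls back from STS to IAM and finally to parsing the AccessDenied error text, callers need only the single entry point. A minimal sketch, assuming module is an AnsibleAWSModule instance:

# Hypothetical caller inside a module's main().
account_id = get_aws_account_id(module)
module.exit_json(changed=False, account_id=account_id)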
diff --git a/lib/ansible/module_utils/aws/rds.py b/lib/ansible/module_utils/aws/rds.py
deleted file mode 100644
index a0f6cb9ffe..0000000000
--- a/lib/ansible/module_utils/aws/rds.py
+++ /dev/null
@@ -1,232 +0,0 @@
-# Copyright: (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.aws.waiters import get_waiter
-from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
-from ansible.module_utils.ec2 import compare_aws_tags, AWSRetry, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError, WaiterError
-except ImportError:
- pass
-
-from collections import namedtuple
-from time import sleep
-
-
-Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'cluster', 'instance'])
-# Whitelist boto3 client methods for cluster and instance resources
-cluster_method_names = [
- 'create_db_cluster', 'restore_db_cluster_from_db_snapshot', 'restore_db_cluster_from_s3',
- 'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource',
- 'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster'
-]
-instance_method_names = [
- 'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3',
- 'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance',
- 'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource',
- 'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance'
-]
-
-
-def get_rds_method_attribute(method_name, module):
- readable_op = method_name.replace('_', ' ').replace('db', 'DB')
- if method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params:
- cluster = True
- instance = False
- if method_name == 'delete_db_cluster':
- waiter = 'cluster_deleted'
- else:
- waiter = 'cluster_available'
- elif method_name in instance_method_names and 'new_db_instance_identifier' in module.params:
- cluster = False
- instance = True
- if method_name == 'delete_db_instance':
- waiter = 'db_instance_deleted'
- elif method_name == 'stop_db_instance':
- waiter = 'db_instance_stopped'
- else:
- waiter = 'db_instance_available'
- else:
- raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/aws/rds.py".format(method_name))
-
- return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op, cluster=cluster, instance=instance)
-
-
-def get_final_identifier(method_name, module):
- apply_immediately = module.params['apply_immediately']
- if get_rds_method_attribute(method_name, module).cluster:
- identifier = module.params['db_cluster_identifier']
- updated_identifier = module.params['new_db_cluster_identifier']
- elif get_rds_method_attribute(method_name, module).instance:
- identifier = module.params['db_instance_identifier']
- updated_identifier = module.params['new_db_instance_identifier']
- else:
- raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/aws/rds.py".format(method_name))
- if not module.check_mode and updated_identifier and apply_immediately:
- identifier = updated_identifier
- return identifier
-
-
-def handle_errors(module, exception, method_name, parameters):
-
- if not isinstance(exception, ClientError):
- module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters))
-
- changed = True
- error_code = exception.response['Error']['Code']
- if method_name == 'modify_db_instance' and error_code == 'InvalidParameterCombination':
- if 'No modifications were requested' in to_text(exception):
- changed = False
- elif 'ModifyDbCluster API' in to_text(exception):
- module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster')
- else:
- module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
- elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState':
- if 'DB Instance is not a read replica' in to_text(exception):
- changed = False
- else:
- module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
- elif method_name == 'create_db_instance' and exception.response['Error']['Code'] == 'InvalidParameterValue':
- accepted_engines = [
- 'aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-se',
- 'oracle-se1', 'oracle-se2', 'postgres', 'sqlserver-ee', 'sqlserver-ex', 'sqlserver-se', 'sqlserver-web'
- ]
- if parameters.get('Engine') not in accepted_engines:
- module.fail_json_aws(exception, msg='DB engine {0} should be one of {1}'.format(parameters.get('Engine'), accepted_engines))
- else:
- module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
- else:
- module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
-
- return changed
-
-
-def call_method(client, module, method_name, parameters):
- result = {}
- changed = True
- if not module.check_mode:
- wait = module.params['wait']
- # TODO: stabilize by adding get_rds_method_attribute(method_name).extra_retry_codes
- method = getattr(client, method_name)
- try:
- if method_name == 'modify_db_instance':
- # check if instance is in an available state first, if possible
- if wait:
- wait_for_status(client, module, module.params['db_instance_identifier'], method_name)
- result = AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidDBInstanceState'])(method)(**parameters)
- else:
- result = AWSRetry.jittered_backoff()(method)(**parameters)
- except (BotoCoreError, ClientError) as e:
- changed = handle_errors(module, e, method_name, parameters)
-
- if wait and changed:
- identifier = get_final_identifier(method_name, module)
- wait_for_status(client, module, identifier, method_name)
- return result, changed
-
-
-def wait_for_instance_status(client, module, db_instance_id, waiter_name):
- def wait(client, db_instance_id, waiter_name, extra_retry_codes):
- retry = AWSRetry.jittered_backoff(catch_extra_error_codes=extra_retry_codes)
- try:
- waiter = client.get_waiter(waiter_name)
- except ValueError:
- # using a waiter in ansible.module_utils.aws.waiters
- waiter = get_waiter(client, waiter_name)
- waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id)
-
- waiter_expected_status = {
- 'db_instance_deleted': 'deleted',
- 'db_instance_stopped': 'stopped',
- }
- expected_status = waiter_expected_status.get(waiter_name, 'available')
- if expected_status == 'available':
- extra_retry_codes = ['DBInstanceNotFound']
- else:
- extra_retry_codes = []
- for attempt_to_wait in range(0, 10):
- try:
- wait(client, db_instance_id, waiter_name, extra_retry_codes)
- break
- except WaiterError as e:
- # Instance may be renamed and AWSRetry doesn't handle WaiterError
- if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound':
- sleep(10)
- continue
- module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format(
- db_instance_id, expected_status)
- )
-
-
-def wait_for_cluster_status(client, module, db_cluster_id, waiter_name):
- try:
- get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id)
- except WaiterError as e:
- if waiter_name == 'cluster_deleted':
- msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id)
- else:
- msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id)
- module.fail_json_aws(e, msg=msg)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id))
-
-
-def wait_for_status(client, module, identifier, method_name):
- waiter_name = get_rds_method_attribute(method_name, module).waiter
- if get_rds_method_attribute(method_name, module).cluster:
- wait_for_cluster_status(client, module, identifier, waiter_name)
- elif get_rds_method_attribute(method_name, module).instance:
- wait_for_instance_status(client, module, identifier, waiter_name)
- else:
- raise NotImplementedError("method {0} hasn't been added to the whitelist of handled methods".format(method_name))
-
-
-def get_tags(client, module, cluster_arn):
- try:
- return boto3_tag_list_to_ansible_dict(
- client.list_tags_for_resource(ResourceName=cluster_arn)['TagList']
- )
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to describe tags")
-
-
-def arg_spec_to_rds_params(options_dict):
- tags = options_dict.pop('tags')
- has_processor_features = False
- if 'processor_features' in options_dict:
- has_processor_features = True
- processor_features = options_dict.pop('processor_features')
- camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True)
- for key in list(camel_options.keys()):
- new_key = key
- for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')):
- new_key = new_key.replace(old, new)
- if new_key != key:
- # Rename once so keys matching more than one pattern don't raise KeyError
- camel_options[new_key] = camel_options.pop(key)
- camel_options['Tags'] = tags
- if has_processor_features:
- camel_options['ProcessorFeatures'] = processor_features
- return camel_options
-
-
-def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
- if tags is None:
- return False
- tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
- changed = bool(tags_to_add or tags_to_remove)
- if tags_to_add:
- call_method(
- client, module, method_name='add_tags_to_resource',
- parameters={'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)}
- )
- if tags_to_remove:
- call_method(
- client, module, method_name='remove_tags_from_resource',
- parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove}
- )
- return changed
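arg_spec_to_rds_params is a pure transformation, so its behaviour is easiest to see on concrete data. A sketch of the expected mapping; the option names are illustrative:

# Illustrative input/output for arg_spec_to_rds_params.
options = {
    'db_instance_identifier': 'test-db',
    'iam_database_authentication_enabled': True,
    'tags': {'Env': 'dev'},
}
params = arg_spec_to_rds_params(options)
# Expected result, after the Db/Iam/Az renames:
# {'DBInstanceIdentifier': 'test-db',
#  'IAMDatabaseAuthenticationEnabled': True,
#  'Tags': {'Env': 'dev'}}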
diff --git a/lib/ansible/module_utils/aws/s3.py b/lib/ansible/module_utils/aws/s3.py
deleted file mode 100644
index 2185869d49..0000000000
--- a/lib/ansible/module_utils/aws/s3.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (c) 2018 Red Hat, Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # Handled by the calling module
-
-HAS_MD5 = True
-try:
- from hashlib import md5
-except ImportError:
- try:
- from md5 import md5
- except ImportError:
- HAS_MD5 = False
-
-
-def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
- if not HAS_MD5:
- return None
-
- if '-' in etag:
- # Multi-part ETag; a hash of the hashes of each part.
- parts = int(etag[1:-1].split('-')[1])
- digests = []
-
- s3_kwargs = dict(
- Bucket=bucket,
- Key=obj,
- )
- if version:
- s3_kwargs['VersionId'] = version
-
- with open(filename, 'rb') as f:
- for part_num in range(1, parts + 1):
- s3_kwargs['PartNumber'] = part_num
- try:
- head = s3.head_object(**s3_kwargs)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get head object")
- digests.append(md5(f.read(int(head['ContentLength']))))
-
- digest_squared = md5(b''.join(m.digest() for m in digests))
- return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
- else: # Compute the MD5 sum normally
- return '"{0}"'.format(module.md5(filename))
diff --git a/lib/ansible/module_utils/aws/urls.py b/lib/ansible/module_utils/aws/urls.py
deleted file mode 100644
index f4db08a064..0000000000
--- a/lib/ansible/module_utils/aws/urls.py
+++ /dev/null
@@ -1,210 +0,0 @@
-# Copyright: (c) 2018, Aaron Haaf <aabonh@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import datetime
-import hashlib
-import hmac
-import operator
-
-from ansible.module_utils.urls import open_url
-from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, HAS_BOTO3
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-
-try:
- from boto3 import session
-except ImportError:
- pass
-
-
-def hexdigest(s):
- """
- Returns the sha256 hexdigest of a string after encoding.
- """
-
- return hashlib.sha256(s.encode("utf-8")).hexdigest()
-
-
-def format_querystring(params=None):
- """
- Returns a properly url-encoded query string from the provided params dict.
-
- The parameters are sorted by name, as required for canonical requests.
- """
-
- if not params:
- return ""
-
- # Query string values must be URL-encoded (space=%20). The parameters must be sorted by name.
- return urlencode(sorted(params.items(), key=operator.itemgetter(0)))
-
-
-# Key derivation functions. See:
-# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
-def sign(key, msg):
- '''
- Return digest for key applied to msg
- '''
-
- return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
-
-
-def get_signature_key(key, dateStamp, regionName, serviceName):
- '''
- Returns signature key for AWS resource
- '''
-
- kDate = sign(("AWS4" + key).encode("utf-8"), dateStamp)
- kRegion = sign(kDate, regionName)
- kService = sign(kRegion, serviceName)
- kSigning = sign(kService, "aws4_request")
- return kSigning
-
-
-def get_aws_credentials_object(module):
- '''
- Returns a botocore Credentials object (access key, secret key and session token) for a module.
- '''
-
- if not HAS_BOTO3:
- module.fail_json("get_aws_credentials_object requires boto3")
-
- dummy, dummy, boto_params = get_aws_connection_info(module, boto3=True)
- s = session.Session(**boto_params)
-
- return s.get_credentials()
-
-
-# Reference: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
-def signed_request(
- module=None,
- method="GET", service=None, host=None, uri=None,
- query=None, body="", headers=None,
- session_in_header=True, session_in_query=False
-):
- """Generate a SigV4 request to an AWS resource for a module
-
- This is used if you wish to authenticate, with AWS credentials, to a secure endpoint such as an Elasticsearch domain.
-
- Returns :class:`HTTPResponse` object.
-
- Example:
- result = signed_request(
- module=this,
- service="es",
- host="search-recipes1-xxxxxxxxx.us-west-2.es.amazonaws.com",
- )
-
- :kwarg host: endpoint to talk to
- :kwarg service: AWS id of service (like `ec2` or `es`)
- :kwarg module: An AnsibleAWSModule to gather connection info from
-
- :kwarg body: (optional) Payload to send
- :kwarg method: (optional) HTTP verb to use
- :kwarg query: (optional) dict of query params to handle
- :kwarg uri: (optional) Resource path without query parameters
-
- :kwarg session_in_header: (optional) Add the session token to the headers
- :kwarg session_in_query: (optional) Add the session token to the query parameters
-
- :returns: HTTPResponse
- """
-
- if not HAS_BOTO3:
- module.fail_json("A sigv4 signed_request requires boto3")
-
- # "Constants"
-
- t = datetime.datetime.utcnow()
- amz_date = t.strftime("%Y%m%dT%H%M%SZ")
- datestamp = t.strftime("%Y%m%d") # Date w/o time, used in credential scope
- algorithm = "AWS4-HMAC-SHA256"
-
- # AWS stuff
-
- region, dummy, dummy = get_aws_connection_info(module, boto3=True)
- credentials = get_aws_credentials_object(module)
- access_key = credentials.access_key
- secret_key = credentials.secret_key
- session_token = credentials.token
-
- if not access_key:
- module.fail_json(msg="aws_access_key_id is missing")
- if not secret_key:
- module.fail_json(msg="aws_secret_access_key is missing")
-
- credential_scope = "/".join([datestamp, region, service, "aws4_request"])
-
- # Argument Defaults
-
- uri = uri or "/"
- query_string = format_querystring(query) if query else ""
-
- headers = headers or dict()
- query = query or dict()
-
- headers.update({
- "host": host,
- "x-amz-date": amz_date,
- })
-
- # Handle adding of session_token if present
- if session_token:
- if session_in_header:
- headers["X-Amz-Security-Token"] = session_token
- if session_in_query:
- query["X-Amz-Security-Token"] = session_token
-
- if method == "GET":
- body = ""
-
- # Derived data
-
- body_hash = hexdigest(body)
- signed_headers = ";".join(sorted(headers.keys()))
-
- # Setup Cannonical request to generate auth token
-
- cannonical_headers = "\n".join([
- key.lower().strip() + ":" + value for key, value in headers.items()
- ]) + "\n" # Note additional trailing newline
-
- cannonical_request = "\n".join([
- method,
- uri,
- query_string,
- cannonical_headers,
- signed_headers,
- body_hash,
- ])
-
- string_to_sign = "\n".join([algorithm, amz_date, credential_scope, hexdigest(cannonical_request)])
-
- # Sign the Cannonical request
-
- signing_key = get_signature_key(secret_key, datestamp, region, service)
- signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
-
- # Make auth header with that info
-
- authorization_header = "{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}".format(
- algorithm, access_key, credential_scope, signed_headers, signature
- )
-
- # PERFORM THE REQUEST!
-
- url = "https://" + host + uri
-
- if query_string != "":
- url = url + "?" + query_string
-
- final_headers = {
- "x-amz-date": amz_date,
- "Authorization": authorization_header,
- }
-
- final_headers.update(headers)
-
- return open_url(url, method=method, data=body, headers=final_headers)
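End to end, signed_request hashes the body, builds the canonical request, derives the scoped signing key, and issues the call through open_url. A hedged usage sketch; the domain and path are placeholders:

# Hypothetical call from a module; 'module' is an AnsibleAWSModule.
response = signed_request(
    module=module,
    method='GET',
    service='es',
    host='search-mydomain-abc123.us-east-1.es.amazonaws.com',  # placeholder
    uri='/_cluster/health',
    query={'pretty': 'true'},
)
health = response.read()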
diff --git a/lib/ansible/module_utils/aws/waf.py b/lib/ansible/module_utils/aws/waf.py
deleted file mode 100644
index d4a17efcbd..0000000000
--- a/lib/ansible/module_utils/aws/waf.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# Copyright (c) 2017 Will Thames
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-"""
-This module adds shared support for Web Application Firewall modules
-"""
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
-from ansible.module_utils.aws.waiters import get_waiter
-
-try:
- import botocore
-except ImportError:
- pass # caught by imported HAS_BOTO3
-
-
-MATCH_LOOKUP = {
- 'byte': {
- 'method': 'byte_match_set',
- 'conditionset': 'ByteMatchSet',
- 'conditiontuple': 'ByteMatchTuple',
- 'type': 'ByteMatch'
- },
- 'geo': {
- 'method': 'geo_match_set',
- 'conditionset': 'GeoMatchSet',
- 'conditiontuple': 'GeoMatchConstraint',
- 'type': 'GeoMatch'
- },
- 'ip': {
- 'method': 'ip_set',
- 'conditionset': 'IPSet',
- 'conditiontuple': 'IPSetDescriptor',
- 'type': 'IPMatch'
- },
- 'regex': {
- 'method': 'regex_match_set',
- 'conditionset': 'RegexMatchSet',
- 'conditiontuple': 'RegexMatchTuple',
- 'type': 'RegexMatch'
- },
- 'size': {
- 'method': 'size_constraint_set',
- 'conditionset': 'SizeConstraintSet',
- 'conditiontuple': 'SizeConstraint',
- 'type': 'SizeConstraint'
- },
- 'sql': {
- 'method': 'sql_injection_match_set',
- 'conditionset': 'SqlInjectionMatchSet',
- 'conditiontuple': 'SqlInjectionMatchTuple',
- 'type': 'SqlInjectionMatch',
- },
- 'xss': {
- 'method': 'xss_match_set',
- 'conditionset': 'XssMatchSet',
- 'conditiontuple': 'XssMatchTuple',
- 'type': 'XssMatch'
- },
-}
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_rule_with_backoff(client, rule_id):
- return client.get_rule(RuleId=rule_id)['Rule']
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_byte_match_set_with_backoff(client, byte_match_set_id):
- return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)['ByteMatchSet']
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_ip_set_with_backoff(client, ip_set_id):
- return client.get_ip_set(IPSetId=ip_set_id)['IPSet']
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_size_constraint_set_with_backoff(client, size_constraint_set_id):
- return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)['SizeConstraintSet']
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_sql_injection_match_set_with_backoff(client, sql_injection_match_set_id):
- return client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)['SqlInjectionMatchSet']
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_xss_match_set_with_backoff(client, xss_match_set_id):
- return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)['XssMatchSet']
-
-
-def get_rule(client, module, rule_id):
- try:
- rule = get_rule_with_backoff(client, rule_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain waf rule")
-
- match_sets = {
- 'ByteMatch': get_byte_match_set_with_backoff,
- 'IPMatch': get_ip_set_with_backoff,
- 'SizeConstraint': get_size_constraint_set_with_backoff,
- 'SqlInjectionMatch': get_sql_injection_match_set_with_backoff,
- 'XssMatch': get_xss_match_set_with_backoff
- }
- if 'Predicates' in rule:
- for predicate in rule['Predicates']:
- if predicate['Type'] in match_sets:
- predicate.update(match_sets[predicate['Type']](client, predicate['DataId']))
- # DataId is replaced by the Id from the relevant MatchSet
- del predicate['DataId']
- return rule
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_web_acl_with_backoff(client, web_acl_id):
- return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
-
-
-def get_web_acl(client, module, web_acl_id):
- try:
- web_acl = get_web_acl_with_backoff(client, web_acl_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain web acl")
-
- if web_acl:
- try:
- for rule in web_acl['Rules']:
- rule.update(get_rule(client, module, rule['RuleId']))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain web acl rule")
- return camel_dict_to_snake_dict(web_acl)
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def list_rules_with_backoff(client):
- paginator = client.get_paginator('list_rules')
- return paginator.paginate().build_full_result()['Rules']
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def list_regional_rules_with_backoff(client):
- resp = client.list_rules()
- rules = []
- while resp:
- rules += resp['Rules']
- resp = client.list_rules(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None
- return rules
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def list_web_acls_with_backoff(client):
- paginator = client.get_paginator('list_web_acls')
- return paginator.paginate().build_full_result()['WebACLs']
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def list_regional_web_acls_with_backoff(client):
- resp = client.list_web_acls()
- acls = []
- while resp:
- acls += resp['WebACLs']
- resp = client.list_web_acls(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None
- return acls
-
-
-def list_web_acls(client, module):
- try:
- if client.__class__.__name__ == 'WAF':
- return list_web_acls_with_backoff(client)
- elif client.__class__.__name__ == 'WAFRegional':
- return list_regional_web_acls_with_backoff(client)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain web acls")
-
-
-def get_change_token(client, module):
- try:
- token = client.get_change_token()
- return token['ChangeToken']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain change token")
-
-
-@AWSRetry.backoff(tries=10, delay=2, backoff=2.0, catch_extra_error_codes=['WAFStaleDataException'])
-def run_func_with_change_token_backoff(client, module, params, func, wait=False):
- params['ChangeToken'] = get_change_token(client, module)
- result = func(**params)
- if wait:
- get_waiter(
- client, 'change_token_in_sync',
- ).wait(
- ChangeToken=result['ChangeToken']
- )
- return result
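A usage sketch for the change-token helper above; the client, module, IPSetId and Updates payload are assumptions for illustration, only run_func_with_change_token_backoff itself comes from this file:

    # 'client' is a boto3 WAF client and 'module' an AnsibleAWSModule.
    params = {
        'IPSetId': 'abcd-1234',
        'Updates': [{
            'Action': 'INSERT',
            'IPSetDescriptor': {'Type': 'IPV4', 'Value': '203.0.113.0/24'},
        }],
    }
    # Fetches a fresh ChangeToken on every attempt, retries on
    # WAFStaleDataException, and with wait=True blocks until
    # GetChangeTokenStatus reports INSYNC.
    result = run_func_with_change_token_backoff(
        client, module, params, client.update_ip_set, wait=True)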
diff --git a/lib/ansible/module_utils/aws/waiters.py b/lib/ansible/module_utils/aws/waiters.py
deleted file mode 100644
index 25db598bcb..0000000000
--- a/lib/ansible/module_utils/aws/waiters.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# Copyright: (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-try:
- import botocore.waiter as core_waiter
-except ImportError:
- pass # caught by HAS_BOTO3
-
-
-ec2_data = {
- "version": 2,
- "waiters": {
- "InternetGatewayExists": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeInternetGateways",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(InternetGateways) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidInternetGatewayID.NotFound",
- "state": "retry"
- },
- ]
- },
- "RouteTableExists": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeRouteTables",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(RouteTables[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidRouteTableID.NotFound",
- "state": "retry"
- },
- ]
- },
- "SecurityGroupExists": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSecurityGroups",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(SecurityGroups[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidGroup.NotFound",
- "state": "retry"
- },
- ]
- },
- "SubnetExists": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(Subnets[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidSubnetID.NotFound",
- "state": "retry"
- },
- ]
- },
- "SubnetHasMapPublic": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "pathAll",
- "expected": True,
- "argument": "Subnets[].MapPublicIpOnLaunch",
- "state": "success"
- },
- ]
- },
- "SubnetNoMapPublic": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "pathAll",
- "expected": False,
- "argument": "Subnets[].MapPublicIpOnLaunch",
- "state": "success"
- },
- ]
- },
- "SubnetHasAssignIpv6": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "pathAll",
- "expected": True,
- "argument": "Subnets[].AssignIpv6AddressOnCreation",
- "state": "success"
- },
- ]
- },
- "SubnetNoAssignIpv6": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "pathAll",
- "expected": False,
- "argument": "Subnets[].AssignIpv6AddressOnCreation",
- "state": "success"
- },
- ]
- },
- "SubnetDeleted": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeSubnets",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(Subnets[]) > `0`",
- "state": "retry"
- },
- {
- "matcher": "error",
- "expected": "InvalidSubnetID.NotFound",
- "state": "success"
- },
- ]
- },
- "VpnGatewayExists": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeVpnGateways",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "length(VpnGateways[]) > `0`",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "InvalidVpnGatewayID.NotFound",
- "state": "retry"
- },
- ]
- },
- "VpnGatewayDetached": {
- "delay": 5,
- "maxAttempts": 40,
- "operation": "DescribeVpnGateways",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "VpnGateways[0].State == 'available'",
- "state": "success"
- },
- ]
- },
- }
-}
-
-
-waf_data = {
- "version": 2,
- "waiters": {
- "ChangeTokenInSync": {
- "delay": 20,
- "maxAttempts": 60,
- "operation": "GetChangeTokenStatus",
- "acceptors": [
- {
- "matcher": "path",
- "expected": True,
- "argument": "ChangeTokenStatus == 'INSYNC'",
- "state": "success"
- },
- {
- "matcher": "error",
- "expected": "WAFInternalErrorException",
- "state": "retry"
- }
- ]
- }
- }
-}
-
-eks_data = {
- "version": 2,
- "waiters": {
- "ClusterActive": {
- "delay": 20,
- "maxAttempts": 60,
- "operation": "DescribeCluster",
- "acceptors": [
- {
- "state": "success",
- "matcher": "path",
- "argument": "cluster.status",
- "expected": "ACTIVE"
- },
- {
- "state": "retry",
- "matcher": "error",
- "expected": "ResourceNotFoundException"
- }
- ]
- },
- "ClusterDeleted": {
- "delay": 20,
- "maxAttempts": 60,
- "operation": "DescribeCluster",
- "acceptors": [
- {
- "state": "retry",
- "matcher": "path",
- "argument": "cluster.status != 'DELETED'",
- "expected": True
- },
- {
- "state": "success",
- "matcher": "error",
- "expected": "ResourceNotFoundException"
- }
- ]
- }
- }
-}
-
-
-rds_data = {
- "version": 2,
- "waiters": {
- "DBInstanceStopped": {
- "delay": 20,
- "maxAttempts": 60,
- "operation": "DescribeDBInstances",
- "acceptors": [
- {
- "state": "success",
- "matcher": "pathAll",
- "argument": "DBInstances[].DBInstanceStatus",
- "expected": "stopped"
- },
- ]
- }
- }
-}
-
-
-def ec2_model(name):
- ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data)
- return ec2_models.get_waiter(name)
-
-
-def waf_model(name):
- waf_models = core_waiter.WaiterModel(waiter_config=waf_data)
- return waf_models.get_waiter(name)
-
-
-def eks_model(name):
- eks_models = core_waiter.WaiterModel(waiter_config=eks_data)
- return eks_models.get_waiter(name)
-
-
-def rds_model(name):
- rds_models = core_waiter.WaiterModel(waiter_config=rds_data)
- return rds_models.get_waiter(name)
-
-
-waiters_by_name = {
- ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter(
- 'internet_gateway_exists',
- ec2_model('InternetGatewayExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_internet_gateways
- )),
- ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
- 'route_table_exists',
- ec2_model('RouteTableExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_route_tables
- )),
- ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
- 'security_group_exists',
- ec2_model('SecurityGroupExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_security_groups
- )),
- ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
- 'subnet_exists',
- ec2_model('SubnetExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
- 'subnet_has_map_public',
- ec2_model('SubnetHasMapPublic'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
- 'subnet_no_map_public',
- ec2_model('SubnetNoMapPublic'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
- 'subnet_has_assign_ipv6',
- ec2_model('SubnetHasAssignIpv6'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
- 'subnet_no_assign_ipv6',
- ec2_model('SubnetNoAssignIpv6'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
- 'subnet_deleted',
- ec2_model('SubnetDeleted'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_subnets
- )),
- ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
- 'vpn_gateway_exists',
- ec2_model('VpnGatewayExists'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_vpn_gateways
- )),
- ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter(
- 'vpn_gateway_detached',
- ec2_model('VpnGatewayDetached'),
- core_waiter.NormalizedOperationMethod(
- ec2.describe_vpn_gateways
- )),
- ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
- 'change_token_in_sync',
- waf_model('ChangeTokenInSync'),
- core_waiter.NormalizedOperationMethod(
- waf.get_change_token_status
- )),
- ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
- 'change_token_in_sync',
- waf_model('ChangeTokenInSync'),
- core_waiter.NormalizedOperationMethod(
- waf.get_change_token_status
- )),
- ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
- 'cluster_active',
- eks_model('ClusterActive'),
- core_waiter.NormalizedOperationMethod(
- eks.describe_cluster
- )),
- ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
- 'cluster_deleted',
- eks_model('ClusterDeleted'),
- core_waiter.NormalizedOperationMethod(
- eks.describe_cluster
- )),
- ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
- 'db_instance_stopped',
- rds_model('DBInstanceStopped'),
- core_waiter.NormalizedOperationMethod(
- rds.describe_db_instances
- )),
-}
-
-
-def get_waiter(client, waiter_name):
- try:
- return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
- except KeyError:
- raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
- waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
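As a usage sketch (the region and subnet id are illustrative): waiters are looked up by the client's class name plus the waiter key, then driven with the underlying operation's usual kwargs:

    import boto3

    ec2 = boto3.client('ec2', region_name='us-east-1')
    # Resolves ('EC2', 'subnet_exists') to the SubnetExists model above and
    # polls DescribeSubnets every 5 seconds, up to 40 attempts.
    get_waiter(ec2, 'subnet_exists').wait(
        SubnetIds=['subnet-0123456789abcdef0'])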
diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py
deleted file mode 100644
index 5599ee7ea3..0000000000
--- a/lib/ansible/module_utils/ec2.py
+++ /dev/null
@@ -1,758 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-import re
-import sys
-import traceback
-
-from ansible.module_utils.ansible_release import __version__
-from ansible.module_utils.basic import missing_required_lib, env_fallback
-from ansible.module_utils._text import to_native, to_text
-from ansible.module_utils.cloud import CloudRetry
-from ansible.module_utils.six import string_types, binary_type, text_type
-from ansible.module_utils.common.dict_transformations import (
- camel_dict_to_snake_dict, snake_dict_to_camel_dict,
- _camel_to_snake, _snake_to_camel,
-)
-
-BOTO_IMP_ERR = None
-try:
- import boto
- import boto.ec2 # boto does weird import stuff
- HAS_BOTO = True
-except ImportError:
- BOTO_IMP_ERR = traceback.format_exc()
- HAS_BOTO = False
-
-BOTO3_IMP_ERR = None
-try:
- import boto3
- import botocore
- HAS_BOTO3 = True
-except Exception:
- BOTO3_IMP_ERR = traceback.format_exc()
- HAS_BOTO3 = False
-
-try:
- # Although this import exists to let Python 3 use the custom comparison function as a sort key,
- # Python 2.7 also provides it (and it works as expected). Python 2.6 will trigger the ImportError.
- from functools import cmp_to_key
- PY3_COMPARISON = True
-except ImportError:
- PY3_COMPARISON = False
-
-
-class AnsibleAWSError(Exception):
- pass
-
-
-def _botocore_exception_maybe():
- """
- Allow for boto3 not being installed when using these utils by wrapping
- botocore.exceptions instead of assigning from it directly.
- """
- if HAS_BOTO3:
- return botocore.exceptions.ClientError
- return type(None)
-
-
-class AWSRetry(CloudRetry):
- base_class = _botocore_exception_maybe()
-
- @staticmethod
- def status_code_from_exception(error):
- return error.response['Error']['Code']
-
- @staticmethod
- def found(response_code, catch_extra_error_codes=None):
- # This list of failures is based on this API Reference
- # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
- #
- # TooManyRequestsException comes from inside botocore when it
- # does its own retries; unfortunately it does not retry long
- # enough to allow some services, such as API Gateway, to
- # complete configuration. At the time of writing there is a
- # botocore/boto3 bug open to fix this.
- #
- # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
- retry_on = [
- 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
- 'InternalFailure', 'InternalError', 'TooManyRequestsException',
- 'Throttling'
- ]
- if catch_extra_error_codes:
- retry_on.extend(catch_extra_error_codes)
-
- return response_code in retry_on
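For illustration, a minimal sketch of wrapping a throttled call with this decorator; the wrapper function and the extra error code are hypothetical:

    @AWSRetry.backoff(tries=5, delay=2, backoff=2.0,
                      catch_extra_error_codes=['RequestInProgressException'])
    def describe_instances_with_backoff(client, **kwargs):
        # Retried with exponential backoff whenever the ClientError code is
        # in the retry_on list above or in catch_extra_error_codes.
        return client.describe_instances(**kwargs)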
-
-
-def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
- try:
- return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
- except ValueError as e:
- module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
- botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
- module.fail_json(msg=to_native(e))
- except botocore.exceptions.NoRegionError as e:
- module.fail_json(msg="The %s module requires a region and none was found in configuration, "
- "environment variables or module parameters" % module._name)
-
-
-def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
- profile = params.pop('profile_name', None)
-
- if conn_type not in ['both', 'resource', 'client']:
- raise ValueError("There is an issue in the calling code. You "
- "must pass 'both', 'resource', or 'client' as "
- "the conn_type parameter in the boto3_conn "
- "function call")
-
- config = botocore.config.Config(
- user_agent_extra='Ansible/{0}'.format(__version__),
- )
-
- if params.get('config') is not None:
- config = config.merge(params.pop('config'))
- if params.get('aws_config') is not None:
- config = config.merge(params.pop('aws_config'))
-
- session = boto3.session.Session(
- profile_name=profile,
- )
-
- if conn_type == 'resource':
- return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
- elif conn_type == 'client':
- return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
- else:
- client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
- resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
- return client, resource
-
-
-boto3_inventory_conn = _boto3_conn
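A typical call site, sketched under the assumption that 'module' is an AnsibleModule built with the ec2_argument_spec() defined below; get_aws_connection_info is also defined later in this file:

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    # conn_type='client' returns a low-level botocore client for the service.
    ec2 = boto3_conn(module, conn_type='client', resource='ec2',
                     region=region, endpoint=ec2_url, **aws_connect_kwargs)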
-
-
-def boto_exception(err):
- """
- Extracts the error message from a boto exception.
-
- :param err: Exception from boto
- :return: Error message
- """
- if hasattr(err, 'error_message'):
- error = err.error_message
- elif hasattr(err, 'message'):
- error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
- else:
- error = '%s: %s' % (Exception, err)
-
- return error
-
-
-def aws_common_argument_spec():
- return dict(
- debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
- ec2_url=dict(),
- aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
- aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
- validate_certs=dict(default=True, type='bool'),
- security_token=dict(aliases=['access_token'], no_log=True),
- profile=dict(),
- aws_config=dict(type='dict'),
- )
-
-
-def ec2_argument_spec():
- spec = aws_common_argument_spec()
- spec.update(
- dict(
- region=dict(aliases=['aws_region', 'ec2_region']),
- )
- )
- return spec
-
-
-def get_aws_region(module, boto3=False):
- region = module.params.get('region')
-
- if region:
- return region
-
- if 'AWS_REGION' in os.environ:
- return os.environ['AWS_REGION']
- if 'AWS_DEFAULT_REGION' in os.environ:
- return os.environ['AWS_DEFAULT_REGION']
- if 'EC2_REGION' in os.environ:
- return os.environ['EC2_REGION']
-
- if not boto3:
- if not HAS_BOTO:
- module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR)
- # boto.config.get returns None if config not found
- region = boto.config.get('Boto', 'aws_region')
- if region:
- return region
- return boto.config.get('Boto', 'ec2_region')
-
- if not HAS_BOTO3:
- module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
-
- # No additional API call is needed here; downstream code falls back to 'us-east-1' if the lookup below returns None.
- try:
- profile_name = module.params.get('profile')
- return botocore.session.Session(profile=profile_name).get_config_variable('region')
- except botocore.exceptions.ProfileNotFound:
- return None
-
-
-def get_aws_connection_info(module, boto3=False):
-
- # Check module args for credentials, then environment variables, then the boto config file
-
- ec2_url = module.params.get('ec2_url')
- access_key = module.params.get('aws_access_key')
- secret_key = module.params.get('aws_secret_key')
- security_token = module.params.get('security_token')
- region = get_aws_region(module, boto3)
- profile_name = module.params.get('profile')
- validate_certs = module.params.get('validate_certs')
- config = module.params.get('aws_config')
-
- if not ec2_url:
- if 'AWS_URL' in os.environ:
- ec2_url = os.environ['AWS_URL']
- elif 'EC2_URL' in os.environ:
- ec2_url = os.environ['EC2_URL']
-
- if not access_key:
- if os.environ.get('AWS_ACCESS_KEY_ID'):
- access_key = os.environ['AWS_ACCESS_KEY_ID']
- elif os.environ.get('AWS_ACCESS_KEY'):
- access_key = os.environ['AWS_ACCESS_KEY']
- elif os.environ.get('EC2_ACCESS_KEY'):
- access_key = os.environ['EC2_ACCESS_KEY']
- elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
- access_key = boto.config.get('Credentials', 'aws_access_key_id')
- elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
- access_key = boto.config.get('default', 'aws_access_key_id')
- else:
- # in case access_key came in as empty string
- access_key = None
-
- if not secret_key:
- if os.environ.get('AWS_SECRET_ACCESS_KEY'):
- secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
- elif os.environ.get('AWS_SECRET_KEY'):
- secret_key = os.environ['AWS_SECRET_KEY']
- elif os.environ.get('EC2_SECRET_KEY'):
- secret_key = os.environ['EC2_SECRET_KEY']
- elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
- secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
- elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
- secret_key = boto.config.get('default', 'aws_secret_access_key')
- else:
- # in case secret_key came in as empty string
- secret_key = None
-
- if not security_token:
- if os.environ.get('AWS_SECURITY_TOKEN'):
- security_token = os.environ['AWS_SECURITY_TOKEN']
- elif os.environ.get('AWS_SESSION_TOKEN'):
- security_token = os.environ['AWS_SESSION_TOKEN']
- elif os.environ.get('EC2_SECURITY_TOKEN'):
- security_token = os.environ['EC2_SECURITY_TOKEN']
- elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
- security_token = boto.config.get('Credentials', 'aws_security_token')
- elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
- security_token = boto.config.get('default', 'aws_security_token')
- else:
- # in case security_token came in as empty string
- security_token = None
-
- if HAS_BOTO3 and boto3:
- boto_params = dict(aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- aws_session_token=security_token)
- boto_params['verify'] = validate_certs
-
- if profile_name:
- boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
- boto_params['profile_name'] = profile_name
-
- else:
- boto_params = dict(aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- security_token=security_token)
-
- # only set profile_name if passed as an argument
- if profile_name:
- boto_params['profile_name'] = profile_name
-
- boto_params['validate_certs'] = validate_certs
-
- if config is not None:
- if HAS_BOTO3 and boto3:
- boto_params['aws_config'] = botocore.config.Config(**config)
- elif HAS_BOTO and not boto3:
- if 'user_agent' in config:
- sys.modules["boto.connection"].UserAgent = config['user_agent']
-
- for param, value in boto_params.items():
- if isinstance(value, binary_type):
- boto_params[param] = text_type(value, 'utf-8', 'strict')
-
- return region, ec2_url, boto_params
-
-
-def get_ec2_creds(module):
- ''' For compatibility with old modules that don't/can't yet
- use the ec2_connect method. '''
- region, ec2_url, boto_params = get_aws_connection_info(module)
- return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
-
-
-def boto_fix_security_token_in_profile(conn, profile_name):
- ''' monkey patch for boto issue boto/boto#2100 '''
- profile = 'profile ' + profile_name
- if boto.config.has_option(profile, 'aws_security_token'):
- conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
- return conn
-
-
-def connect_to_aws(aws_module, region, **params):
- try:
- conn = aws_module.connect_to_region(region, **params)
- except boto.provider.ProfileNotFoundError:
- raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
- if not conn:
- if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
- raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
- "boto or extend with endpoints_path" % (region, aws_module.__name__))
- else:
- raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
- if params.get('profile_name'):
- conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
- return conn
-
-
-def ec2_connect(module):
-
- """ Return an ec2 connection"""
-
- region, ec2_url, boto_params = get_aws_connection_info(module)
-
- # If we have a region specified, connect to its endpoint.
- if region:
- try:
- ec2 = connect_to_aws(boto.ec2, region, **boto_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
- module.fail_json(msg=str(e))
- # Otherwise, no region so we fallback to the old connection method
- elif ec2_url:
- try:
- ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
- module.fail_json(msg=str(e))
- else:
- module.fail_json(msg="Either region or ec2_url must be specified")
-
- return ec2
-
-
-def ansible_dict_to_boto3_filter_list(filters_dict):
-
- """ Convert an Ansible dict of filters to list of dicts that boto3 can use
- Args:
- filters_dict (dict): Dict of AWS filters.
- Basic Usage:
- >>> filters = {'some-aws-id': 'i-01234567'}
- >>> ansible_dict_to_boto3_filter_list(filters)
- [
- {
- 'Name': 'some-aws-id',
- 'Values': [
- 'i-01234567',
- ]
- }
- ]
- Returns:
- List: List of AWS filters and their values
- [
- {
- 'Name': 'some-aws-id',
- 'Values': [
- 'i-01234567',
- ]
- }
- ]
- """
-
- filters_list = []
- for k, v in filters_dict.items():
- filter_dict = {'Name': k}
- if isinstance(v, string_types):
- filter_dict['Values'] = [v]
- else:
- filter_dict['Values'] = v
-
- filters_list.append(filter_dict)
-
- return filters_list
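A short sketch of the conversion in use; 'ec2' is assumed to be a boto3 EC2 client, and the filter name follows the DescribeInstances API:

    filters = ansible_dict_to_boto3_filter_list(
        {'instance-state-name': ['running', 'pending']})
    # -> [{'Name': 'instance-state-name', 'Values': ['running', 'pending']}]
    reservations = ec2.describe_instances(Filters=filters)['Reservations']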
-
-
-def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
-
- """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
- Args:
- tags_list (list): List of dicts representing AWS tags.
- tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
- tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
- Basic Usage:
- >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
- >>> boto3_tag_list_to_ansible_dict(tags_list)
- {
- 'MyTagKey': 'MyTagValue'
- }
- Returns:
- Dict: Dict of key:value pairs representing AWS tags
- {
- 'MyTagKey': 'MyTagValue',
- }
- """
-
- if tag_name_key_name and tag_value_key_name:
- tag_candidates = {tag_name_key_name: tag_value_key_name}
- else:
- tag_candidates = {'key': 'value', 'Key': 'Value'}
-
- if not tags_list:
- return {}
- for k, v in tag_candidates.items():
- if k in tags_list[0] and v in tags_list[0]:
- return dict((tag[k], tag[v]) for tag in tags_list)
- raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
-
-
-def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
-
- """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
- Args:
- tags_dict (dict): Dict representing AWS resource tags.
- tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
- tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
- Basic Usage:
- >>> tags_dict = {'MyTagKey': 'MyTagValue'}
- >>> ansible_dict_to_boto3_tag_list(tags_dict)
- [
- {
- 'Key': 'MyTagKey',
- 'Value': 'MyTagValue'
- }
- ]
- Returns:
- List: List of dicts containing tag keys and values
- [
- {
- 'Key': 'MyTagKey',
- 'Value': 'MyTagValue'
- }
- ]
- """
-
- tags_list = []
- for k, v in tags_dict.items():
- tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
-
- return tags_list
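The two tag helpers round-trip cleanly, as this small sketch with illustrative tags shows:

    boto3_tags = [{'Key': 'Name', 'Value': 'web-01'},
                  {'Key': 'env', 'Value': 'prod'}]
    as_dict = boto3_tag_list_to_ansible_dict(boto3_tags)
    # as_dict == {'Name': 'web-01', 'env': 'prod'}
    # On Python 3.7+ dict insertion order is preserved, so the round trip is exact:
    assert ansible_dict_to_boto3_tag_list(as_dict) == boto3_tags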
-
-
-def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
-
- """ Return list of security group IDs from security group names. Note that security group names are not unique
- across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
- will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
- a try block
- """
-
- def get_sg_name(sg, boto3):
-
- if boto3:
- return sg['GroupName']
- else:
- return sg.name
-
- def get_sg_id(sg, boto3):
-
- if boto3:
- return sg['GroupId']
- else:
- return sg.id
-
- sec_group_id_list = []
-
- if isinstance(sec_group_list, string_types):
- sec_group_list = [sec_group_list]
-
- # Get all security groups
- if boto3:
- if vpc_id:
- filters = [
- {
- 'Name': 'vpc-id',
- 'Values': [
- vpc_id,
- ]
- }
- ]
- all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
- else:
- all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
- else:
- if vpc_id:
- filters = {'vpc-id': vpc_id}
- all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
- else:
- all_sec_groups = ec2_connection.get_all_security_groups()
-
- unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
- sec_group_name_list = list(set(sec_group_list) - set(unmatched))
-
- if len(unmatched) > 0:
- # If we have unmatched names that look like an ID, assume they are
- sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
- still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
- if len(still_unmatched) > 0:
- raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
-
- sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
-
- return sec_group_id_list
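A hedged usage sketch; the client, group names, and VPC id are illustrative:

    # 'ec2' is assumed to be a boto3 EC2 client.
    group_ids = get_ec2_security_group_ids_from_names(
        ['default', 'sg-0123456789abcdef0'], ec2, vpc_id='vpc-0abc1234')
    # Plain names are resolved via DescribeSecurityGroups; anything already
    # shaped like an sg- id is passed through unchanged.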
-
-
-def _hashable_policy(policy, policy_list):
- """
- Takes a policy and returns a list, the contents of which are all hashable and sorted.
- Example input policy:
- {'Version': '2012-10-17',
- 'Statement': [{'Action': 's3:PutObjectAcl',
- 'Sid': 'AddCannedAcl2',
- 'Resource': 'arn:aws:s3:::test_policy/*',
- 'Effect': 'Allow',
- 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
- }]}
- Returned value:
- [('Statement', ((('Action', (u's3:PutObjectAcl',)),
- ('Effect', (u'Allow',)),
- ('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
- ('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
- ('Version', (u'2012-10-17',)))]
-
- """
- # Amazon will automatically convert bool and int to strings for us
- if isinstance(policy, bool):
- return tuple([str(policy).lower()])
- elif isinstance(policy, int):
- return tuple([str(policy)])
-
- if isinstance(policy, list):
- for each in policy:
- tupleified = _hashable_policy(each, [])
- if isinstance(tupleified, list):
- tupleified = tuple(tupleified)
- policy_list.append(tupleified)
- elif isinstance(policy, string_types) or isinstance(policy, binary_type):
- policy = to_text(policy)
- # convert root account ARNs to just account IDs
- if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
- policy = policy.split(':')[4]
- return [policy]
- elif isinstance(policy, dict):
- sorted_keys = list(policy.keys())
- sorted_keys.sort()
- for key in sorted_keys:
- tupleified = _hashable_policy(policy[key], [])
- if isinstance(tupleified, list):
- tupleified = tuple(tupleified)
- policy_list.append((key, tupleified))
-
- # ensure we aren't returning deeply nested structures of length 1
- if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
- policy_list = policy_list[0]
- if isinstance(policy_list, list):
- if PY3_COMPARISON:
- policy_list.sort(key=cmp_to_key(py3cmp))
- else:
- policy_list.sort()
- return policy_list
-
-
-def py3cmp(a, b):
- """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
- try:
- if a > b:
- return 1
- elif a < b:
- return -1
- else:
- return 0
- except TypeError as e:
- # check to see if they're tuple-string
- # always say strings are less than tuples (to maintain compatibility with python2)
- str_ind = to_text(e).find('str')
- tup_ind = to_text(e).find('tuple')
- if -1 not in (str_ind, tup_ind):
- if str_ind < tup_ind:
- return -1
- elif tup_ind < str_ind:
- return 1
- raise
-
-
-def compare_policies(current_policy, new_policy):
- """ Compares the existing policy and the updated policy
- Returns True if there is a difference between policies.
- """
- return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
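For example, two policies that differ only in key order and in the root-ARN versus account-id spelling of the principal compare as equal (the policy values are illustrative):

    old = {'Version': '2012-10-17',
           'Statement': [{'Effect': 'Allow',
                          'Action': 's3:GetObject',
                          'Resource': 'arn:aws:s3:::example/*',
                          'Principal': {'AWS': 'arn:aws:iam::123456789012:root'}}]}
    new = {'Version': '2012-10-17',
           'Statement': [{'Principal': {'AWS': '123456789012'},
                          'Resource': 'arn:aws:s3:::example/*',
                          'Action': 's3:GetObject',
                          'Effect': 'Allow'}]}
    # _hashable_policy sorts keys and collapses root ARNs to account ids,
    # so no difference is reported:
    assert compare_policies(old, new) is False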
-
-
-def sort_json_policy_dict(policy_dict):
-
- """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
- different orders will return true
- Args:
- policy_dict (dict): Dict representing IAM JSON policy.
- Basic Usage:
- >>> my_iam_policy = {'Principal': {'AWS': ["31", "7", "14", "101"]}}
- >>> sort_json_policy_dict(my_iam_policy)
- Returns:
- Dict: Will return a copy of the policy as a Dict but any List will be sorted
- {
- 'Principal': {
- 'AWS': [ '101', '14', '31', '7' ]
- }
- }
- """
-
- def value_is_list(my_list):
-
- checked_list = []
- for item in my_list:
- if isinstance(item, dict):
- checked_list.append(sort_json_policy_dict(item))
- elif isinstance(item, list):
- checked_list.append(value_is_list(item))
- else:
- checked_list.append(item)
-
- # Sort list. If it's a list of dictionaries, sort by tuple of key-value
- # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
- checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
- return checked_list
-
- ordered_policy_dict = {}
- for key, value in policy_dict.items():
- if isinstance(value, dict):
- ordered_policy_dict[key] = sort_json_policy_dict(value)
- elif isinstance(value, list):
- ordered_policy_dict[key] = value_is_list(value)
- else:
- ordered_policy_dict[key] = value
-
- return ordered_policy_dict
-
-
-def map_complex_type(complex_type, type_map):
- """
- Casts elements within a dictionary to a specific type
- Example of usage:
-
- DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
- 'maximum_percent': 'int',
- 'minimum_healthy_percent': 'int'
- }
-
- deployment_configuration = map_complex_type(module.params['deployment_configuration'],
- DEPLOYMENT_CONFIGURATION_TYPE_MAP)
-
- This ensures all keys within the root element are cast to valid integers
- """
-
- if complex_type is None:
- return
- new_type = type(complex_type)()
- if isinstance(complex_type, dict):
- for key in complex_type:
- if key in type_map:
- if isinstance(type_map[key], list):
- new_type[key] = map_complex_type(
- complex_type[key],
- type_map[key][0])
- else:
- new_type[key] = map_complex_type(
- complex_type[key],
- type_map[key])
- else:
- return complex_type
- elif isinstance(complex_type, list):
- for i in range(len(complex_type)):
- new_type.append(map_complex_type(
- complex_type[i],
- type_map))
- elif type_map:
- return globals()['__builtins__'][type_map](complex_type)
- return new_type
-
-
-def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
- """
- Compare two dicts of AWS tags. Dicts are expected to have been created using the 'boto3_tag_list_to_ansible_dict' helper function.
- Two values are returned - the first is a dict of tags to set, the second is a list of tag keys to remove. Since the AWS APIs
- differ, these may not be usable out of the box.
-
- :param current_tags_dict:
- :param new_tags_dict:
- :param purge_tags:
- :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
- :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
- """
-
- tag_key_value_pairs_to_set = {}
- tag_keys_to_unset = []
-
- for key in current_tags_dict.keys():
- if key not in new_tags_dict and purge_tags:
- tag_keys_to_unset.append(key)
-
- for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
- if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
- tag_key_value_pairs_to_set[key] = new_tags_dict[key]
-
- return tag_key_value_pairs_to_set, tag_keys_to_unset
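A worked example with illustrative tag dicts:

    current = {'Name': 'web-01', 'env': 'staging', 'owner': 'ops'}
    desired = {'Name': 'web-01', 'env': 'prod'}
    to_set, to_unset = compare_aws_tags(current, desired, purge_tags=True)
    # to_set == {'env': 'prod'}   (value changed)
    # to_unset == ['owner']       (absent from desired, purge_tags=True)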
diff --git a/lib/ansible/modules/cloud/amazon/_aws_az_facts.py b/lib/ansible/modules/cloud/amazon/_aws_az_facts.py
deleted file mode 120000
index 62447531e0..0000000000
--- a/lib/ansible/modules/cloud/amazon/_aws_az_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-aws_az_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_aws_caller_facts.py b/lib/ansible/modules/cloud/amazon/_aws_caller_facts.py
deleted file mode 120000
index 2f8c5b8177..0000000000
--- a/lib/ansible/modules/cloud/amazon/_aws_caller_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-aws_caller_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_cloudformation_facts.py b/lib/ansible/modules/cloud/amazon/_cloudformation_facts.py
deleted file mode 120000
index 19e1b826c7..0000000000
--- a/lib/ansible/modules/cloud/amazon/_cloudformation_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-cloudformation_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_ami_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_ami_facts.py
deleted file mode 120000
index ba1561bf24..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_ami_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_ami_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_eni_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_eni_facts.py
deleted file mode 120000
index fe99939153..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_eni_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_eni_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_group_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_group_facts.py
deleted file mode 120000
index 2961fa7568..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_group_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_group_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_snapshot_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_snapshot_facts.py
deleted file mode 120000
index eaaf612d83..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_snapshot_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_snapshot_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vol_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vol_facts.py
deleted file mode 120000
index aa871abac3..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vol_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vol_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc_dhcp_option_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc_dhcp_option_facts.py
deleted file mode 120000
index d143c2eded..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc_dhcp_option_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vpc_dhcp_option_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc_net_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc_net_facts.py
deleted file mode 120000
index 19f6e9d373..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc_net_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vpc_net_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc_subnet_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc_subnet_facts.py
deleted file mode 120000
index 47d72315d2..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc_subnet_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vpc_subnet_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/aws_az_info.py b/lib/ansible/modules/cloud/amazon/aws_az_info.py
deleted file mode 100644
index eccbf4d7d4..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_az_info.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'supported_by': 'community',
- 'status': ['preview']
-}
-
-DOCUMENTATION = '''
-module: aws_az_info
-short_description: Gather information about availability zones in AWS.
-description:
- - Gather information about availability zones in AWS.
- - This module was called C(aws_az_facts) before Ansible 2.9. The usage did not change.
-version_added: '2.5'
-author: 'Henrique Rodrigues (@Sodki)'
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
- U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for
- possible filters. Filter names and values are case sensitive. You can also use underscores
- instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
- required: false
- default: {}
- type: dict
-extends_documentation_fragment:
- - aws
- - ec2
-requirements: [botocore, boto3]
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all availability zones
-- aws_az_info:
-
-# Gather information about a single availability zone
-- aws_az_info:
- filters:
- zone-name: eu-west-1a
-'''
-
-RETURN = '''
-availability_zones:
- returned: on success
- description: >
- Availability zones that match the provided filters. Each element consists of a dict with all the information
- related to that availability zone.
- type: list
- sample: "[
- {
- 'messages': [],
- 'region_name': 'us-west-1',
- 'state': 'available',
- 'zone_name': 'us-west-1b'
- },
- {
- 'messages': [],
- 'region_name': 'us-west-1',
- 'state': 'available',
- 'zone_name': 'us-west-1c'
- }
- ]"
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-
-def main():
- argument_spec = dict(
- filters=dict(default={}, type='dict')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
- if module._name == 'aws_az_facts':
- module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'", version='2.14')
-
- connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
-
- # Replace filter key underscores with dashes, for compatibility
- sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items())
-
- try:
- availability_zones = connection.describe_availability_zones(
- Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
- )
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to describe availability zones.")
-
- # Turn the boto3 result into ansible_friendly_snaked_names
- snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]
-
- module.exit_json(availability_zones=snaked_availability_zones)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_caller_info.py b/lib/ansible/modules/cloud/amazon/aws_caller_info.py
deleted file mode 100644
index b3e145d0c1..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_caller_info.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = '''
----
-module: aws_caller_info
-short_description: Get information about the user and account being used to make AWS calls.
-description:
- - This module returns information about the account and user / role from which the AWS access tokens originate.
- - The primary use of this is to get the account id for templating into ARNs or similar to avoid needing to specify this information in inventory.
- - This module was called C(aws_caller_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.6"
-
-author:
- - Ed Costello (@orthanc)
- - Stijn Dubrul (@sdubrul)
-
-requirements: [ 'botocore', 'boto3' ]
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Get the current caller identity information
- aws_caller_info:
- register: caller_info
-'''
-
-RETURN = '''
-account:
- description: The account id the access credentials are associated with.
- returned: success
- type: str
- sample: "123456789012"
-account_alias:
- description: The account alias the access credentials are associated with.
- returned: when caller has the iam:ListAccountAliases permission
- type: str
- sample: "acme-production"
-arn:
- description: The arn identifying the user the credentials are associated with.
- returned: success
- type: str
- sample: arn:aws:sts::123456789012:federated-user/my-federated-user-name
-user_id:
- description: |
- The user id the access credentials are associated with. Note that this may not correspond to
- anything you can look up in the case of roles or federated identities.
- returned: success
- type: str
- sample: 123456789012:my-federated-user-name
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def main():
- module = AnsibleAWSModule(
- argument_spec={},
- supports_check_mode=True,
- )
- if module._name == 'aws_caller_facts':
- module.deprecate("The 'aws_caller_facts' module has been renamed to 'aws_caller_info'", version='2.13')
-
- client = module.client('sts')
-
- try:
- caller_info = client.get_caller_identity()
- caller_info.pop('ResponseMetadata', None)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to retrieve caller identity')
-
- iam_client = module.client('iam')
-
- try:
- # Although list_account_aliases returns a list, AWS supports a maximum of one alias per account.
- # If an alias is defined it will be returned; otherwise account_alias is set to a blank string.
- # see https://docs.aws.amazon.com/cli/latest/reference/iam/list-account-aliases.html#output
- response = iam_client.list_account_aliases()
- if response and response['AccountAliases']:
- caller_info['account_alias'] = response['AccountAliases'][0]
- else:
- caller_info['account_alias'] = ''
- except (BotoCoreError, ClientError) as e:
- # The iam:ListAccountAliases permission is required for this operation to succeed.
- # Lacking this permission is handled gracefully by not returning the account_alias.
- pass
-
- module.exit_json(
- changed=False,
- **camel_dict_to_snake_dict(caller_info))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_s3.py b/lib/ansible/modules/cloud/amazon/aws_s3.py
deleted file mode 100644
index 54874f05ce..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_s3.py
+++ /dev/null
@@ -1,925 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: aws_s3
-short_description: manage objects in S3.
-description:
- - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and
- deleting both objects and buckets, retrieving objects as files or strings and generating download links.
- This module has a dependency on boto3 and botocore.
-notes:
- - In 2.4, this module was renamed from C(s3) to M(aws_s3).
-version_added: "1.1"
-options:
- bucket:
- description:
- - Bucket name.
- required: true
- type: str
- dest:
- description:
- - The destination file path when downloading an object/key with a GET operation.
- version_added: "1.3"
- type: path
- encrypt:
- description:
- - When set for PUT mode, asks for server-side encryption.
- default: true
- version_added: "2.0"
- type: bool
- encryption_mode:
- description:
- - What encryption mode to use if I(encrypt=true).
- default: AES256
- choices:
- - AES256
- - aws:kms
- version_added: "2.7"
- type: str
- expiry:
- description:
- - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation.
- default: 600
- aliases: ['expiration']
- type: int
- headers:
- description:
- - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
- version_added: "2.0"
- type: dict
- marker:
- description:
- - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key that follows the marker.
- version_added: "2.0"
- type: str
- max_keys:
- description:
- - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
- default: 1000
- version_added: "2.0"
- type: int
- metadata:
- description:
- - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
- version_added: "1.6"
- type: dict
- mode:
- description:
- - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+),
- getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket),
- and delobj (delete object, Ansible 2.0+).
- required: true
- choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
- type: str
- object:
- description:
- - Key name of the object inside the bucket. Can be used to create "virtual directories"; see the examples.
- type: str
- permission:
- description:
- - This option lets the user set the canned permissions on the object/bucket that are created.
- The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or
- C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read),
- C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list.
- default: ['private']
- version_added: "2.0"
- type: list
- elements: str
- prefix:
- description:
- - Limits the response to keys that begin with the specified prefix for list mode.
- default: ""
- version_added: "2.0"
- type: str
- version:
- description:
- - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
- version_added: "2.0"
- type: str
- overwrite:
- description:
- - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
- Accepts a boolean or one of C(always), C(never), C(different); C(true) is equal to C(always) and C(false) to C(never), new in 2.0.
- When this is set to C(different), the md5 sum of the local file is compared with the 'ETag' of the object/key in S3.
- The ETag may or may not be an MD5 digest of the object data. See the ETag response header at
- U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html).
- default: 'always'
- aliases: ['force']
- version_added: "1.2"
- type: str
- retries:
- description:
- - On recoverable failure, how many times to retry before actually failing.
- default: 0
- version_added: "2.0"
- type: int
- aliases: ['retry']
- s3_url:
- description:
- - S3 URL endpoint for use with Ceph, Eucalyptus, fakes3, etc. Otherwise assumes AWS.
- aliases: [ S3_URL ]
- type: str
- dualstack:
- description:
- - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
- - Requires at least botocore version 1.4.45.
- type: bool
- default: false
- version_added: "2.7"
- rgw:
- description:
- - Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url).
- default: false
- version_added: "2.2"
- type: bool
- src:
- description:
- - The source file path when performing a PUT operation.
- version_added: "1.3"
- type: str
- ignore_nonexistent_bucket:
- description:
- - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the
- GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying
- I(ignore_nonexistent_bucket=true)."
- version_added: "2.3"
- type: bool
- encryption_kms_key_id:
- description:
- - KMS key id to use when encrypting objects using I(encryption_mode=aws:kms). Ignored if I(encryption_mode) is not C(aws:kms).
- version_added: "2.7"
- type: str
-requirements: [ "boto3", "botocore" ]
-author:
- - "Lester Wade (@lwade)"
- - "Sloane Hertel (@s-hertel)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Simple PUT operation
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- src: /usr/local/myfile.txt
- mode: put
-
-- name: Simple PUT operation in Ceph RGW S3
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- src: /usr/local/myfile.txt
- mode: put
- rgw: true
- s3_url: "http://localhost:8000"
-
-- name: Simple GET operation
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- dest: /usr/local/myfile.txt
- mode: get
-
-- name: Get a specific version of an object.
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- version: 48c9ee5131af7a716edc22df9772aa6f
- dest: /usr/local/myfile.txt
- mode: get
-
-- name: PUT/upload with metadata
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- src: /usr/local/myfile.txt
- mode: put
- metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
-
-- name: PUT/upload with custom headers
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- src: /usr/local/myfile.txt
- mode: put
- headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'
-
-- name: List keys simple
- aws_s3:
- bucket: mybucket
- mode: list
-
-- name: List keys all options
- aws_s3:
- bucket: mybucket
- mode: list
- prefix: /my/desired/
- marker: /my/desired/0023.txt
- max_keys: 472
-
-- name: Create an empty bucket
- aws_s3:
- bucket: mybucket
- mode: create
- permission: public-read
-
-- name: Create a bucket with key as directory, in the EU region
- aws_s3:
- bucket: mybucket
- object: /my/directory/path
- mode: create
- region: eu-west-1
-
-- name: Delete a bucket and all contents
- aws_s3:
- bucket: mybucket
- mode: delete
-
-- name: GET an object but don't download if the file checksums match. New in 2.0
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- dest: /usr/local/myfile.txt
- mode: get
- overwrite: different
-
-- name: Delete an object from a bucket
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- mode: delobj
-'''
-
-RETURN = '''
-msg:
- description: Message indicating the status of the operation.
- returned: always
- type: str
- sample: PUT operation complete
-url:
- description: URL of the object.
- returned: (for put and geturl operations)
- type: str
- sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature>
-expiry:
- description: Number of seconds the presigned url is valid for.
- returned: (for geturl operation)
- type: int
- sample: 600
-contents:
- description: Contents of the object as string.
- returned: (for getstr operation)
- type: str
- sample: "Hello, world!"
-s3_keys:
- description: List of object keys.
- returned: (for list operation)
- type: list
- elements: str
- sample:
- - prefix1/
- - prefix1/key1
- - prefix1/key2
-'''
-
-import mimetypes
-import os
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ssl import SSLError
-from ansible.module_utils.basic import to_text, to_native
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.s3 import calculate_etag, HAS_MD5
-from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
-
-try:
- import botocore
-except ImportError:
- pass # will be detected by imported AnsibleAWSModule
-
-IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented']
-
-
-class Sigv4Required(Exception):
- pass
-
-
-def key_check(module, s3, bucket, obj, version=None, validate=True):
- exists = True
- try:
- if version:
- s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
- else:
- s3.head_object(Bucket=bucket, Key=obj)
- except botocore.exceptions.ClientError as e:
- # if a client error is thrown, check if it's a 404 error
- # if it's a 404 error, then the object does not exist
- error_code = int(e.response['Error']['Code'])
- if error_code == 404:
- exists = False
- elif error_code == 403 and validate is False:
- pass
- else:
- module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
- return exists
-
-
-def etag_compare(module, local_file, s3, bucket, obj, version=None):
- s3_etag = get_etag(s3, bucket, obj, version=version)
- local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)
-
- return s3_etag == local_etag
-
-
-def get_etag(s3, bucket, obj, version=None):
- if version:
- key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
- else:
- key_check = s3.head_object(Bucket=bucket, Key=obj)
- if not key_check:
- return None
- return key_check['ETag']
-
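-# Editorial note (assumption, not module behaviour): a single-part upload's
-# ETag is the quoted hex MD5 of the body, while a multipart upload's looks
-# like "<md5-of-concatenated-part-md5s>-<part count>". That is why
-# calculate_etag() (imported above) takes the s3 client and object details:
-# it may need the part layout to recompute a comparable local ETag.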
-
-def bucket_check(module, s3, bucket, validate=True):
- exists = True
- try:
- s3.head_bucket(Bucket=bucket)
- except botocore.exceptions.ClientError as e:
- # If a client error is thrown, then check that it was a 404 error.
- # If it was a 404 error, then the bucket does not exist.
- error_code = int(e.response['Error']['Code'])
- if error_code == 404:
- exists = False
- elif error_code == 403 and validate is False:
- pass
- else:
- module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
- except botocore.exceptions.EndpointConnectionError as e:
- module.fail_json_aws(e, msg="Invalid endpoint provided")
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
- return exists
-
-
-def create_bucket(module, s3, bucket, location=None):
- if module.check_mode:
- module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True)
- configuration = {}
- if location not in ('us-east-1', None):
- configuration['LocationConstraint'] = location
- try:
- if len(configuration) > 0:
- s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration)
- else:
- s3.create_bucket(Bucket=bucket)
- if module.params.get('permission'):
- # Wait for the bucket to exist before setting ACLs
- s3.get_waiter('bucket_exists').wait(Bucket=bucket)
- for acl in module.params.get('permission'):
- s3.put_bucket_acl(ACL=acl, Bucket=bucket)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
- module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
- else:
- module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
-
- if bucket:
- return True
-
-
-def paginated_list(s3, **pagination_params):
- pg = s3.get_paginator('list_objects_v2')
- for page in pg.paginate(**pagination_params):
- yield [data['Key'] for data in page.get('Contents', [])]
-
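-# A minimal usage sketch of the paginator above (hypothetical helper, not
-# called anywhere in the module): each page holds at most 1000 keys, and
-# list_keys() below flattens the pages the same way via sum(..., []).
-def _example_collect_all_keys(s3, bucket):
- # concatenate the per-page key lists into a single flat list
- return sum(paginated_list(s3, Bucket=bucket), [])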
-
-def paginated_versioned_list_with_fallback(s3, **pagination_params):
- try:
- versioned_pg = s3.get_paginator('list_object_versions')
- for page in versioned_pg.paginate(**pagination_params):
- delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])]
- current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])]
- yield delete_markers + current_objects
- except botocore.exceptions.ClientError as e:
- if to_text(e.response['Error']['Code']) in IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']:
- for page in paginated_list(s3, **pagination_params):
- yield [{'Key': data['Key']} for data in page]
-
-
-def list_keys(module, s3, bucket, prefix, marker, max_keys):
- pagination_params = {'Bucket': bucket}
- for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)):
- pagination_params[param_name] = param_value
- try:
- keys = sum(paginated_list(s3, **pagination_params), [])
- module.exit_json(msg="LIST operation complete", s3_keys=keys)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket))
-
-
-def delete_bucket(module, s3, bucket):
- if module.check_mode:
- module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
- try:
- exists = bucket_check(module, s3, bucket)
- if exists is False:
- return False
- # if there are contents then we need to delete them before we can delete the bucket
- for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket):
- if keys:
- s3.delete_objects(Bucket=bucket, Delete={'Objects': keys})
- s3.delete_bucket(Bucket=bucket)
- return True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket)
-
-
-def delete_key(module, s3, bucket, obj):
- if module.check_mode:
- module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
- try:
- s3.delete_object(Bucket=bucket, Key=obj)
- module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj)
-
-
-def create_dirkey(module, s3, bucket, obj, encrypt):
- if module.check_mode:
- module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
- try:
- params = {'Bucket': bucket, 'Key': obj, 'Body': b''}
- if encrypt:
- params['ServerSideEncryption'] = module.params['encryption_mode']
- if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
- params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
-
- s3.put_object(**params)
- for acl in module.params.get('permission'):
- s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
- module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning")
- else:
- module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
- module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True)
-
-
-def path_check(path):
- if os.path.exists(path):
- return True
- else:
- return False
-
-
-def option_in_extra_args(option):
- temp_option = option.replace('-', '').lower()
-
- allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition',
- 'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage',
- 'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl',
- 'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP',
- 'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption',
- 'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey',
- 'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'}
-
- if temp_option in allowed_extra_args:
- return allowed_extra_args[temp_option]
-
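-# Editorial example of the normalisation above: metadata keys are matched
-# case- and dash-insensitively, so 'Content-Encoding', 'content-encoding' and
-# 'ContentEncoding' all map to the boto3 ExtraArgs key 'ContentEncoding';
-# anything unmatched (e.g. 'x-custom') returns None and is stored by
-# upload_s3file() below as plain user metadata instead.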
-
-def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
- if module.check_mode:
- module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
- try:
- extra = {}
- if encrypt:
- extra['ServerSideEncryption'] = module.params['encryption_mode']
- if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
- extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
- if metadata:
- extra['Metadata'] = {}
-
- # determine object metadata and extra arguments
- for option in metadata:
- extra_args_option = option_in_extra_args(option)
- if extra_args_option is not None:
- extra[extra_args_option] = metadata[option]
- else:
- extra['Metadata'][option] = metadata[option]
-
- if 'ContentType' not in extra:
- content_type = mimetypes.guess_type(src)[0]
- if content_type is None:
- # s3 default content type
- content_type = 'binary/octet-stream'
- extra['ContentType'] = content_type
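- # e.g. mimetypes.guess_type('/usr/local/myfile.txt') returns
- # ('text/plain', None), so plain .txt uploads get ContentType 'text/plain'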
-
- s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to complete PUT operation.")
- try:
- for acl in module.params.get('permission'):
- s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
- module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
- else:
- module.fail_json_aws(e, msg="Unable to set object ACL")
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Unable to set object ACL")
- try:
- url = s3.generate_presigned_url(ClientMethod='put_object',
- Params={'Bucket': bucket, 'Key': obj},
- ExpiresIn=expiry)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to generate presigned URL")
- module.exit_json(msg="PUT operation complete", url=url, changed=True)
-
-
-def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
- if module.check_mode:
- module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
- # retries is the number of retry attempts; the range() below iterates
- # retries + 1 times so that the initial attempt is included.
- try:
- if version:
- key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
- else:
- key = s3.get_object(Bucket=bucket, Key=obj)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
- raise Sigv4Required()
- elif e.response['Error']['Code'] not in ("403", "404"):
- # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
- # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
- module.fail_json_aws(e, msg="Could not find the key %s." % obj)
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Could not find the key %s." % obj)
-
- optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
- for x in range(0, retries + 1):
- try:
- s3.download_file(bucket, obj, dest, **optional_kwargs)
- module.exit_json(msg="GET operation complete", changed=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- # actually fail on last pass through the loop.
- if x >= retries:
- module.fail_json_aws(e, msg="Failed while downloading %s." % obj)
- # otherwise, try again, this may be a transient timeout.
- except SSLError as e: # will ClientError catch SSLError?
- # actually fail on last pass through the loop.
- if x >= retries:
- module.fail_json_aws(e, msg="s3 download failed")
- # otherwise, try again, this may be a transient timeout.
-
-
-def download_s3str(module, s3, bucket, obj, version=None, validate=True):
- if module.check_mode:
- module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
- try:
- if version:
- contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read())
- else:
- contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read())
- module.exit_json(msg="GET operation complete", contents=contents, changed=True)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
- raise Sigv4Required()
- else:
- module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
-
-
-def get_download_url(module, s3, bucket, obj, expiry, changed=True):
- try:
- url = s3.generate_presigned_url(ClientMethod='get_object',
- Params={'Bucket': bucket, 'Key': obj},
- ExpiresIn=expiry)
- module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed while getting download url.")
-
-
-def is_fakes3(s3_url):
- """ Return True if s3_url has scheme fakes3:// """
- if s3_url is not None:
- return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
- else:
- return False
-
-
-def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False):
- if s3_url and rgw: # TODO - test this
- rgw_url = urlparse(s3_url)
- params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw_url.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
- elif is_fakes3(s3_url):
- fakes3 = urlparse(s3_url)
- port = fakes3.port
- if fakes3.scheme == 'fakes3s':
- protocol = "https"
- if port is None:
- port = 443
- else:
- protocol = "http"
- if port is None:
- port = 80
- params = dict(module=module, conn_type='client', resource='s3', region=location,
- endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
- use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
- else:
- params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
- if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
- params['config'] = botocore.client.Config(signature_version='s3v4')
- elif module.params['mode'] in ('get', 'getstr') and sig_4:
- params['config'] = botocore.client.Config(signature_version='s3v4')
- if module.params['dualstack']:
- dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
- if 'config' in params:
- params['config'] = params['config'].merge(dualconf)
- else:
- params['config'] = dualconf
- return boto3_conn(**params)
-
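-# Editorial sketch of the Config selection above (assumed values): a PUT with
-# encryption_mode='aws:kms' plus dualstack=true ends up with the merged
-# equivalent of
-# botocore.client.Config(signature_version='s3v4').merge(
-#     botocore.client.Config(s3={'use_dualstack_endpoint': True}))
-# i.e. SigV4 signing and the dualstack (IPv4/IPv6) endpoint together.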
-
-def main():
- argument_spec = dict(
- bucket=dict(required=True),
- dest=dict(default=None, type='path'),
- encrypt=dict(default=True, type='bool'),
- encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
- expiry=dict(default=600, type='int', aliases=['expiration']),
- headers=dict(type='dict'),
- marker=dict(default=""),
- max_keys=dict(default=1000, type='int'),
- metadata=dict(type='dict'),
- mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
- object=dict(),
- permission=dict(type='list', default=['private']),
- version=dict(default=None),
- overwrite=dict(aliases=['force'], default='always'),
- prefix=dict(default=""),
- retries=dict(aliases=['retry'], type='int', default=0),
- s3_url=dict(aliases=['S3_URL']),
- dualstack=dict(default='no', type='bool'),
- rgw=dict(default='no', type='bool'),
- src=dict(),
- ignore_nonexistent_bucket=dict(default=False, type='bool'),
- encryption_kms_key_id=dict()
- )
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- required_if=[['mode', 'put', ['src', 'object']],
- ['mode', 'get', ['dest', 'object']],
- ['mode', 'getstr', ['object']],
- ['mode', 'geturl', ['object']]],
- )
-
- bucket = module.params.get('bucket')
- encrypt = module.params.get('encrypt')
- expiry = module.params.get('expiry')
- dest = module.params.get('dest', '')
- headers = module.params.get('headers')
- marker = module.params.get('marker')
- max_keys = module.params.get('max_keys')
- metadata = module.params.get('metadata')
- mode = module.params.get('mode')
- obj = module.params.get('object')
- version = module.params.get('version')
- overwrite = module.params.get('overwrite')
- prefix = module.params.get('prefix')
- retries = module.params.get('retries')
- s3_url = module.params.get('s3_url')
- dualstack = module.params.get('dualstack')
- rgw = module.params.get('rgw')
- src = module.params.get('src')
- ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')
-
- object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
- bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]
-
- if overwrite not in ['always', 'never', 'different']:
- if module.boolean(overwrite):
- overwrite = 'always'
- else:
- overwrite = 'never'
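- # editorial note: legacy boolean values are still accepted here, so
- # overwrite=yes behaves like 'always' and overwrite=no like 'never';
- # only the string 'different' enables the ETag comparison path below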
-
- if overwrite == 'different' and not HAS_MD5:
- module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
-
- if region in ('us-east-1', '', None):
- # default to US Standard region
- location = 'us-east-1'
- else:
- # Boto uses symbolic names for locations but region strings will
- # actually work fine for everything except us-east-1 (US Standard)
- location = region
-
- if module.params.get('object'):
- obj = module.params['object']
- # If the object path starts with /, strip the leading character to
- # maintain compatibility with Ansible versions < 2.4
- if obj.startswith('/'):
- obj = obj[1:]
-
- # Bucket deletion does not require obj. Prevents ambiguity with delobj.
- if obj and mode == "delete":
- module.fail_json(msg='Parameter object cannot be used with mode=delete')
-
- # allow eucarc environment variables to be used if ansible vars aren't set
- if not s3_url and 'S3_URL' in os.environ:
- s3_url = os.environ['S3_URL']
-
- if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
- module.fail_json(msg='dualstack only applies to AWS S3')
-
- if dualstack and not module.botocore_at_least('1.4.45'):
- module.fail_json(msg='dualstack requires botocore >= 1.4.45')
-
- # rgw requires an explicit url
- if rgw and not s3_url:
- module.fail_json(msg='rgw flavour requires s3_url')
-
- # Look at s3_url and tweak connection settings
- # if connecting to RGW, Walrus or fakes3
- if s3_url:
- for key in ['validate_certs', 'security_token', 'profile_name']:
- aws_connect_kwargs.pop(key, None)
- s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)
-
- validate = not ignore_nonexistent_bucket
-
- # separate types of ACLs
- bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
- object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
- error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
- if error_acl:
- module.fail_json(msg='Unknown permission specified: %s' % error_acl)
-
- # First, we check to see if the bucket exists, we get "bucket" returned.
- bucketrtn = bucket_check(module, s3, bucket, validate=validate)
-
- if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
- module.fail_json(msg="Source bucket cannot be found.")
-
- if mode == 'get':
- keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
- if keyrtn is False:
- if version:
- module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
- else:
- module.fail_json(msg="Key %s does not exist." % obj)
-
- if path_check(dest) and overwrite != 'always':
- if overwrite == 'never':
- module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
- if etag_compare(module, dest, s3, bucket, obj, version=version):
- module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
-
- try:
- download_s3file(module, s3, bucket, obj, dest, retries, version=version)
- except Sigv4Required:
- s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
- download_s3file(module, s3, bucket, obj, dest, retries, version=version)
-
- if mode == 'put':
-
- # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
- # these were separated into the variables bucket_acl and object_acl above
-
- if not path_check(src):
- module.fail_json(msg="Local object for PUT does not exist")
-
- if bucketrtn:
- keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
- else:
- # If the bucket doesn't exist we should create it.
- # only use valid bucket acls for create_bucket function
- module.params['permission'] = bucket_acl
- create_bucket(module, s3, bucket, location)
- # the bucket was just created, so the object cannot already exist in it
- keyrtn = False
-
- if keyrtn and overwrite != 'always':
- if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj):
- # Return the download URL for the existing object
- get_download_url(module, s3, bucket, obj, expiry, changed=False)
-
- # only use valid object acls for the upload_s3file function
- module.params['permission'] = object_acl
- upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
-
- # Delete an object from a bucket, not the entire bucket
- if mode == 'delobj':
- if obj is None:
- module.fail_json(msg="object parameter is required")
- if bucket:
- deletertn = delete_key(module, s3, bucket, obj)
- if deletertn is True:
- module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
- else:
- module.fail_json(msg="Bucket parameter is required.")
-
- # Delete an entire bucket, including all objects in the bucket
- if mode == 'delete':
- if bucket:
- deletertn = delete_bucket(module, s3, bucket)
- if deletertn is True:
- module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
- else:
- module.fail_json(msg="Bucket parameter is required.")
-
- # Support for listing a set of keys
- if mode == 'list':
- exists = bucket_check(module, s3, bucket)
-
- # If the bucket does not exist then bail out
- if not exists:
- module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)
-
- list_keys(module, s3, bucket, prefix, marker, max_keys)
-
- # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
- # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
- if mode == 'create':
-
- # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
- # these were separated above into the variables bucket_acl and object_acl
-
- if bucket and not obj:
- if bucketrtn:
- module.exit_json(msg="Bucket already exists.", changed=False)
- else:
- # only use valid bucket acls when creating the bucket
- module.params['permission'] = bucket_acl
- module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
- if bucket and obj:
- if obj.endswith('/'):
- dirobj = obj
- else:
- dirobj = obj + "/"
- if bucketrtn:
- if key_check(module, s3, bucket, dirobj):
- module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
- else:
- # setting valid object acls for the create_dirkey function
- module.params['permission'] = object_acl
- create_dirkey(module, s3, bucket, dirobj, encrypt)
- else:
- # only use valid bucket acls for the create_bucket function
- module.params['permission'] = bucket_acl
- created = create_bucket(module, s3, bucket, location)
- # only use valid object acls for the create_dirkey function
- module.params['permission'] = object_acl
- create_dirkey(module, s3, bucket, dirobj, encrypt)
-
- # Support for grabbing the time-expired URL for an object in S3/Walrus.
- if mode == 'geturl':
- if not bucket or not obj:
- module.fail_json(msg="Bucket and Object parameters must be set")
-
- keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
- if keyrtn:
- get_download_url(module, s3, bucket, obj, expiry)
- else:
- module.fail_json(msg="Key %s does not exist." % obj)
-
- if mode == 'getstr':
- if bucket and obj:
- keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
- if keyrtn:
- try:
- download_s3str(module, s3, bucket, obj, version=version)
- except Sigv4Required:
- s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
- download_s3str(module, s3, bucket, obj, version=version)
- elif version is not None:
- module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
- else:
- module.fail_json(msg="Key %s does not exist." % obj)
-
- module.exit_json(failed=False)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/cloudformation.py b/lib/ansible/modules/cloud/amazon/cloudformation.py
deleted file mode 100644
index cd03146501..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudformation.py
+++ /dev/null
@@ -1,837 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: cloudformation
-short_description: Create or delete an AWS CloudFormation stack
-description:
- - Launches or updates an AWS CloudFormation stack and waits for it to complete.
-notes:
- - CloudFormation features change often, and this module tries to keep up. That means your botocore version should be fresh.
- The version listed in the requirements is the oldest version that works with the module as a whole.
- Some features may require recent versions, and we do not pinpoint a minimum version for each feature.
- Instead of relying on the minimum version, keep botocore up to date. AWS is always releasing features and fixing bugs.
-version_added: "1.1"
-options:
- stack_name:
- description:
- - Name of the CloudFormation stack.
- required: true
- type: str
- disable_rollback:
- description:
- - If a stack fails to form, rollback will remove the stack.
- default: false
- type: bool
- on_create_failure:
- description:
- - Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option.
- choices:
- - DO_NOTHING
- - ROLLBACK
- - DELETE
- version_added: "2.8"
- type: str
- create_timeout:
- description:
- - The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED.
- version_added: "2.6"
- type: int
- template_parameters:
- description:
- - A list of hashes of all the template variables for the stack. The value can be a string or a dict.
- - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
- default: {}
- type: dict
- state:
- description:
- - If I(state=present), stack will be created.
- - If I(state=present) and if stack exists and template has changed, it will be updated.
- - If I(state=absent), stack will be removed.
- default: present
- choices: [ present, absent ]
- type: str
- template:
- description:
- - The local path of the CloudFormation template.
- - This must be the full path to the file, relative to the working directory. If using roles this may look
- like C(roles/cloudformation/files/cloudformation-example.json).
- - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
- must be specified (but only one of them).
- - If I(state=present), the stack does exist, and neither I(template),
- I(template_body) nor I(template_url) are specified, the previous template will be reused.
- type: path
- notification_arns:
- description:
- - A comma separated list of Simple Notification Service (SNS) topic ARNs to publish stack related events.
- version_added: "2.0"
- type: str
- stack_policy:
- description:
- - The path of the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified.
- For instance, you can allow all updates: U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
- version_added: "1.9"
- type: str
- tags:
- description:
- - Dictionary of tags to associate with stack and its resources during stack creation.
- - Can be updated later, updating tags removes previous entries.
- version_added: "1.4"
- type: dict
- template_url:
- description:
- - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an
- S3 bucket in the same region as the stack.
- - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
- must be specified (but only one of them).
- - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified,
- the previous template will be reused.
- version_added: "2.0"
- type: str
- create_changeset:
- description:
- - "If stack already exists create a changeset instead of directly applying changes. See the AWS Change Sets docs
- U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)."
- - "WARNING: if the stack does not exist, it will be created without changeset. If I(state=absent), the stack will be
- deleted immediately with no changeset."
- type: bool
- default: false
- version_added: "2.4"
- changeset_name:
- description:
- - Name given to the changeset when creating a changeset.
- - Only used when I(create_changeset=true).
- - By default a name prefixed with Ansible-STACKNAME is generated based on input parameters.
- See the AWS Change Sets docs for more information
- U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
- version_added: "2.4"
- type: str
- template_format:
- description:
- - This parameter is ignored since Ansible 2.3 and will be removed in Ansible 2.14.
- - Templates are now passed raw to CloudFormation regardless of format.
- version_added: "2.0"
- type: str
- role_arn:
- description:
- - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
- docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
- version_added: "2.3"
- type: str
- termination_protection:
- description:
- - Enable or disable termination protection on the stack. Only works with botocore >= 1.7.18.
- type: bool
- version_added: "2.5"
- template_body:
- description:
- - Template body. Use this to pass in the actual body of the CloudFormation template.
- - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
- must be specified (but only one of them).
- - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
- are specified, the previous template will be reused.
- version_added: "2.5"
- type: str
- events_limit:
- description:
- - Maximum number of CloudFormation events to fetch from a stack when creating or updating it.
- default: 200
- version_added: "2.7"
- type: int
- backoff_delay:
- description:
- - Number of seconds to wait for the next retry.
- default: 3
- version_added: "2.8"
- type: int
- required: False
- backoff_max_delay:
- description:
- - Maximum amount of time to wait between retries.
- default: 30
- version_added: "2.8"
- type: int
- required: False
- backoff_retries:
- description:
- - Number of times to retry operation.
- - The AWS API throttling mechanism can cause CloudFormation calls to fail, so the module retries a couple of times.
- default: 10
- version_added: "2.8"
- type: int
- required: False
- capabilities:
- description:
- - Specify capabilities that stack template contains.
- - Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND).
- type: list
- elements: str
- version_added: "2.8"
- default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ]
-
-author: "James S. Martin (@jsmartin)"
-extends_documentation_fragment:
-- aws
-- ec2
-requirements: [ boto3, botocore>=1.5.45 ]
-'''
-
-EXAMPLES = '''
-- name: create a cloudformation stack
- cloudformation:
- stack_name: "ansible-cloudformation"
- state: "present"
- region: "us-east-1"
- disable_rollback: true
- template: "files/cloudformation-example.json"
- template_parameters:
- KeyName: "jmartin"
- DiskType: "ephemeral"
- InstanceType: "m1.small"
- ClusterSize: 3
- tags:
- Stack: "ansible-cloudformation"
-
-# Basic role example
-- name: create a stack, specify role that cloudformation assumes
- cloudformation:
- stack_name: "ansible-cloudformation"
- state: "present"
- region: "us-east-1"
- disable_rollback: true
- template: "roles/cloudformation/files/cloudformation-example.json"
- role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
-
-- name: delete a stack
- cloudformation:
- stack_name: "ansible-cloudformation-old"
- state: "absent"
-
-# Create a stack, pass in template from a URL, disable rollback if stack creation fails,
-# pass in some parameters to the template, provide tags for resources created
-- name: create a stack, pass in the template via an URL
- cloudformation:
- stack_name: "ansible-cloudformation"
- state: present
- region: us-east-1
- disable_rollback: true
- template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
- template_parameters:
- KeyName: jmartin
- DiskType: ephemeral
- InstanceType: m1.small
- ClusterSize: 3
- tags:
- Stack: ansible-cloudformation
-
-# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails,
-# pass in some parameters to the template, provide tags for resources created
-- name: create a stack, pass in the template body via lookup template
- cloudformation:
- stack_name: "ansible-cloudformation"
- state: present
- region: us-east-1
- disable_rollback: true
- template_body: "{{ lookup('template', 'cloudformation.j2') }}"
- template_parameters:
- KeyName: jmartin
- DiskType: ephemeral
- InstanceType: m1.small
- ClusterSize: 3
- tags:
- Stack: ansible-cloudformation
-
-# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute
-# When use_previous_value is set to True, the given value will be ignored and
-# CloudFormation will use the value from a previously submitted template.
-# If use_previous_value is set to False (default) the given value is used.
-- cloudformation:
- stack_name: "ansible-cloudformation"
- state: "present"
- region: "us-east-1"
- template: "files/cloudformation-example.json"
- template_parameters:
- DBSnapshotIdentifier:
- use_previous_value: True
- value: arn:aws:rds:es-east-1:000000000000:snapshot:rds:my-db-snapshot
- DBName:
- use_previous_value: True
- tags:
- Stack: "ansible-cloudformation"
-
-# Enable termination protection on a stack.
-# If the stack already exists, this will update its termination protection
-- name: enable termination protection during stack creation
- cloudformation:
- stack_name: my_stack
- state: present
- template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
- termination_protection: yes
-
-# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED
-# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back.
-- name: enable termination protection during stack creation
- cloudformation:
- stack_name: my_stack
- state: present
- template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
- create_timeout: 5
-
-# Configure rollback behaviour on the unsuccessful creation of a stack allowing
-# CloudFormation to clean up, or do nothing in the event of an unsuccessful
-# deployment
-# In this case, if on_create_failure is set to "DELETE", it will clean up the stack if
-# it fails to create
-- name: create stack which will delete on creation failure
- cloudformation:
- stack_name: my_stack
- state: present
- template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
- on_create_failure: DELETE
-'''
-
-RETURN = '''
-events:
- type: list
- description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
- returned: always
- sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
-log:
- description: Debugging logs. Useful when modifying or finding an error.
- returned: always
- type: list
- sample: ["updating stack"]
-change_set_id:
- description: The ID of the stack change set if one was created
- returned: I(state=present) and I(create_changeset=true)
- type: str
- sample: "arn:aws:cloudformation:us-east-1:012345678901:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0"
-stack_resources:
- description: AWS stack resources and their status. List of dictionaries, one dict per resource.
- returned: state == present
- type: list
- sample: [
- {
- "last_updated_time": "2016-10-11T19:40:14.979000+00:00",
- "logical_resource_id": "CFTestSg",
- "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
- "resource_type": "AWS::EC2::SecurityGroup",
- "status": "UPDATE_COMPLETE",
- "status_reason": null
- }
- ]
-stack_outputs:
- type: dict
- description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
- returned: state == present
- sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
-''' # NOQA
-
-import json
-import time
-import uuid
-import traceback
-from hashlib import sha1
-
-try:
- import boto3
- import botocore
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, boto3_conn, boto_exception, ec2_argument_spec, get_aws_connection_info
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_bytes, to_native
-
-
-def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
- '''The event data returned before v2.3 was never correct; it only worked as a side effect, so the v2.3 format is different.'''
- ret = {'events': [], 'log': []}
-
- try:
- pg = cfn.get_paginator(
- 'describe_stack_events'
- ).paginate(
- StackName=stack_name,
- PaginationConfig={'MaxItems': events_limit}
- )
- if token_filter is not None:
- events = list(pg.search(
- "StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
- ))
- else:
- events = list(pg.search("StackEvents[*]"))
- except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
- error_msg = boto_exception(err)
- if 'does not exist' in error_msg:
- # missing stack, don't bail.
- ret['log'].append('Stack does not exist.')
- return ret
- ret['log'].append('Unknown error: ' + str(error_msg))
- return ret
-
- for e in events:
- eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e)
- ret['events'].append(eventline)
-
- if e['ResourceStatus'].endswith('FAILED'):
- failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e)
- ret['log'].append(failline)
-
- return ret
-
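-# Editorial example of the lines built above (illustrative values): an entry
-# in ret['events'] looks like
-# 'StackEvent AWS::CloudFormation::Stack mystack UPDATE_COMPLETE'
-# and a failure additionally appends to ret['log'] something like
-# 'AWS::EC2::SecurityGroup CFTestSg CREATE_FAILED: Resource creation cancelled'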
-
-def create_stack(module, stack_params, cfn, events_limit):
- if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
- module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.")
-
- # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and
- # 'OnFailure' only apply on creation, not update.
- if module.params.get('on_create_failure') is not None:
- stack_params['OnFailure'] = module.params['on_create_failure']
- else:
- stack_params['DisableRollback'] = module.params['disable_rollback']
-
- if module.params.get('create_timeout') is not None:
- stack_params['TimeoutInMinutes'] = module.params['create_timeout']
- if module.params.get('termination_protection') is not None:
- if boto_supports_termination_protection(cfn):
- stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
- else:
- module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
-
- try:
- response = cfn.create_stack(**stack_params)
- # Use stack ID to follow stack state in case of on_create_failure = DELETE
- result = stack_operation(cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None))
- except Exception as err:
- error_msg = boto_exception(err)
- module.fail_json(msg="Failed to create stack {0}: {1}.".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
- if not result:
- module.fail_json(msg="empty result")
- return result
-
-
-def list_changesets(cfn, stack_name):
- res = cfn.list_change_sets(StackName=stack_name)
- return [cs['ChangeSetName'] for cs in res['Summaries']]
-
-
-def create_changeset(module, stack_params, cfn, events_limit):
- if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
- module.fail_json(msg="Either 'template' or 'template_url' is required.")
- if module.params['changeset_name'] is not None:
- stack_params['ChangeSetName'] = module.params['changeset_name']
-
- # changesets don't accept ClientRequestToken parameters
- stack_params.pop('ClientRequestToken', None)
-
- try:
- changeset_name = build_changeset_name(stack_params)
- stack_params['ChangeSetName'] = changeset_name
-
- # Determine if this changeset already exists
- pending_changesets = list_changesets(cfn, stack_params['StackName'])
- if changeset_name in pending_changesets:
- warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
- result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
- else:
- cs = cfn.create_change_set(**stack_params)
- # Make sure we don't enter an infinite loop
- time_end = time.time() + 600
- while time.time() < time_end:
- try:
- newcs = cfn.describe_change_set(ChangeSetName=cs['Id'])
- except botocore.exceptions.BotoCoreError as err:
- error_msg = boto_exception(err)
- module.fail_json(msg=error_msg)
- if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
- time.sleep(1)
- elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']:
- cfn.delete_change_set(ChangeSetName=cs['Id'])
- result = dict(changed=False,
- output='The created Change Set did not contain any changes to this stack and was deleted.')
- # a failed change set does not trigger any stack events, so skip any
- # further processing of the result and return it directly
- return result
- else:
- break
- # Let's not hog the CPU or spam the AWS API
- time.sleep(1)
- result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit)
- result['change_set_id'] = cs['Id']
- result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
- 'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
- 'NOTE that dependencies on this stack might fail due to pending changes!']
- except Exception as err:
- error_msg = boto_exception(err)
- if 'No updates are to be performed.' in error_msg:
- result = dict(changed=False, output='Stack is already up-to-date.')
- else:
- module.fail_json(msg="Failed to create change set: {0}".format(error_msg), exception=traceback.format_exc())
-
- if not result:
- module.fail_json(msg="empty result")
- return result
-
-
-def update_stack(module, stack_params, cfn, events_limit):
- if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
- stack_params['UsePreviousTemplate'] = True
-
- # if the state is present and the stack already exists, we try to update it.
- # AWS will tell us if the stack template and parameters are the same and
- # don't need to be updated.
- try:
- cfn.update_stack(**stack_params)
- result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None))
- except Exception as err:
- error_msg = boto_exception(err)
- if 'No updates are to be performed.' in error_msg:
- result = dict(changed=False, output='Stack is already up-to-date.')
- else:
- module.fail_json(msg="Failed to update stack {0}: {1}".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
- if not result:
- module.fail_json(msg="empty result")
- return result
-
-
-def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state):
- '''updates termination protection of a stack'''
- if not boto_supports_termination_protection(cfn):
- module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
- stack = get_stack_facts(cfn, stack_name)
- if stack:
- if stack['EnableTerminationProtection'] is not desired_termination_protection_state:
- try:
- cfn.update_termination_protection(
- EnableTerminationProtection=desired_termination_protection_state,
- StackName=stack_name)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=boto_exception(e), exception=traceback.format_exc())
-
-
-def boto_supports_termination_protection(cfn):
- '''termination protection was added in botocore 1.7.18'''
- return hasattr(cfn, "update_termination_protection")
-
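-# Editorial note: this is feature detection rather than a version check --
-# botocore builds client methods from its bundled service definitions, so
-# hasattr() on the client tells us whether the installed botocore (>= 1.7.18)
-# knows about the UpdateTerminationProtection API at all.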
-
-def stack_operation(cfn, stack_name, operation, events_limit, op_token=None):
- '''gets the status of a stack while it is created/updated/deleted'''
- existed = []
- while True:
- try:
- stack = get_stack_facts(cfn, stack_name)
- existed.append('yes')
- except Exception:
- # If the stack previously existed, and now can't be found then it's
- # been deleted successfully.
- if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
- ret = get_stack_events(cfn, stack_name, events_limit, op_token)
- ret.update({'changed': True, 'output': 'Stack Deleted'})
- return ret
- else:
- return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
- ret = get_stack_events(cfn, stack_name, events_limit, op_token)
- if not stack:
- if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
- ret = get_stack_events(cfn, stack_name, events_limit, op_token)
- ret.update({'changed': True, 'output': 'Stack Deleted'})
- return ret
- else:
- ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
- return ret
- # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
- # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
- elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
- ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
- return ret
- elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE':
- ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'})
- return ret
- # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases.
- elif stack['StackStatus'].endswith('_COMPLETE'):
- ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
- return ret
- elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
- ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
- return ret
- # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
- elif stack['StackStatus'].endswith('_FAILED'):
- ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
- return ret
- else:
- # this can loop forever :/
- time.sleep(5)
- return {'failed': True, 'output': 'Failed for unknown reasons.'}
-
-
-def build_changeset_name(stack_params):
- if 'ChangeSetName' in stack_params:
- return stack_params['ChangeSetName']
-
- json_params = json.dumps(stack_params, sort_keys=True)
-
- return 'Ansible-{0}-{1}'.format(
- stack_params['StackName'],
- sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest()
- )
-
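-# Editorial example (hash shown is illustrative): for StackName 'mystack' the
-# generated fallback name looks like
-# 'Ansible-mystack-f4496805bd1b2be824d1e315c6884247ede41eb0'
-# so identical inputs always yield the same changeset name, which is how
-# create_changeset() above spots an already-pending duplicate.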
-
-def check_mode_changeset(module, stack_params, cfn):
- """Create a change set, describe it and delete it before returning check mode outputs."""
- stack_params['ChangeSetName'] = build_changeset_name(stack_params)
- # changesets don't accept ClientRequestToken parameters
- stack_params.pop('ClientRequestToken', None)
-
- try:
- change_set = cfn.create_change_set(**stack_params)
- for i in range(60): # total time 5 min
- description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
- if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
- break
- time.sleep(5)
- else:
- # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail
- module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName'])
-
- cfn.delete_change_set(ChangeSetName=change_set['Id'])
-
- reason = description.get('StatusReason')
-
- if description['Status'] == 'FAILED' and "didn't contain changes" in description['StatusReason']:
- return {'changed': False, 'msg': reason, 'meta': description['StatusReason']}
- return {'changed': True, 'msg': reason, 'meta': description['Changes']}
-
- except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
- error_msg = boto_exception(err)
- module.fail_json(msg=error_msg, exception=traceback.format_exc())
-
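-# Editorial note: check mode still makes real API calls here -- a changeset is
-# created, described and then deleted, so Ansible can report what *would*
-# change without ever executing the changeset against the stack.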
-
-def get_stack_facts(cfn, stack_name):
- try:
- stack_response = cfn.describe_stacks(StackName=stack_name)
- stack_info = stack_response['Stacks'][0]
- except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
- error_msg = boto_exception(err)
- if 'does not exist' in error_msg:
- # missing stack, don't bail.
- return None
-
- # other error, bail.
- raise err
-
- if stack_response and stack_response.get('Stacks', None):
- stacks = stack_response['Stacks']
- if len(stacks):
- stack_info = stacks[0]
-
- return stack_info
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- stack_name=dict(required=True),
- template_parameters=dict(required=False, type='dict', default={}),
- state=dict(default='present', choices=['present', 'absent']),
- template=dict(default=None, required=False, type='path'),
- notification_arns=dict(default=None, required=False),
- stack_policy=dict(default=None, required=False),
- disable_rollback=dict(default=False, type='bool'),
- on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
- create_timeout=dict(default=None, type='int'),
- template_url=dict(default=None, required=False),
- template_body=dict(default=None, required=False),
- template_format=dict(removed_in_version='2.14'),
- create_changeset=dict(default=False, type='bool'),
- changeset_name=dict(default=None, required=False),
- role_arn=dict(default=None, required=False),
- tags=dict(default=None, type='dict'),
- termination_protection=dict(default=None, type='bool'),
- events_limit=dict(default=200, type='int'),
- backoff_retries=dict(type='int', default=10, required=False),
- backoff_delay=dict(type='int', default=3, required=False),
- backoff_max_delay=dict(type='int', default=30, required=False),
- capabilities=dict(type='list', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- mutually_exclusive=[['template_url', 'template', 'template_body'],
- ['disable_rollback', 'on_create_failure']],
- supports_check_mode=True
- )
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 and botocore are required for this module')
-
- invalid_capabilities = []
- user_capabilities = module.params.get('capabilities')
- for user_cap in user_capabilities:
- if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
- invalid_capabilities.append(user_cap)
-
- if invalid_capabilities:
- module.fail_json(msg="Specified capabilities are invalid : %r,"
- " please check documentation for valid capabilities" % invalid_capabilities)
-
- # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
- stack_params = {
- 'Capabilities': user_capabilities,
- 'ClientRequestToken': to_native(uuid.uuid4()),
- }
- state = module.params['state']
- stack_params['StackName'] = module.params['stack_name']
-
- if module.params['template'] is not None:
- with open(module.params['template'], 'r') as template_fh:
- stack_params['TemplateBody'] = template_fh.read()
- elif module.params['template_body'] is not None:
- stack_params['TemplateBody'] = module.params['template_body']
- elif module.params['template_url'] is not None:
- stack_params['TemplateURL'] = module.params['template_url']
-
- if module.params.get('notification_arns'):
- stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
- else:
- stack_params['NotificationARNs'] = []
-
- # can't check the policy when verifying.
- if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
- with open(module.params['stack_policy'], 'r') as stack_policy_fh:
- stack_params['StackPolicyBody'] = stack_policy_fh.read()
-
- template_parameters = module.params['template_parameters']
-
- stack_params['Parameters'] = []
- for k, v in template_parameters.items():
- if isinstance(v, dict):
- # set parameter based on a dict to allow additional CFN Parameter Attributes
- param = dict(ParameterKey=k)
-
- if 'value' in v:
- param['ParameterValue'] = str(v['value'])
-
- if 'use_previous_value' in v and bool(v['use_previous_value']):
- param['UsePreviousValue'] = True
- param.pop('ParameterValue', None)
-
- stack_params['Parameters'].append(param)
- else:
- # allow default k/v configuration to set a template parameter
- stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
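-
- # editorial example (assumed input): template_parameters of
- # {'KeyName': 'jmartin', 'DBName': {'use_previous_value': True}}
- # is translated by the loop above into
- # [{'ParameterKey': 'KeyName', 'ParameterValue': 'jmartin'},
- # {'ParameterKey': 'DBName', 'UsePreviousValue': True}]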
-
- if isinstance(module.params.get('tags'), dict):
- stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
-
- if module.params.get('role_arn'):
- stack_params['RoleARN'] = module.params['role_arn']
-
- result = {}
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- cfn = boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg=boto_exception(e))
-
- # Wrap the cloudformation client methods that this module uses with
- # automatic backoff / retry for throttling error codes
- backoff_wrapper = AWSRetry.jittered_backoff(
- retries=module.params.get('backoff_retries'),
- delay=module.params.get('backoff_delay'),
- max_delay=module.params.get('backoff_max_delay')
- )
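- # editorial note: jittered_backoff retries calls that fail with AWS
- # throttling error codes, sleeping a randomised ("jittered") interval that
- # grows from backoff_delay up to backoff_max_delay between attempts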
- cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
- cfn.create_stack = backoff_wrapper(cfn.create_stack)
- cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
- cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
- cfn.update_stack = backoff_wrapper(cfn.update_stack)
- cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
- cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
- cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
- if boto_supports_termination_protection(cfn):
- cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)
-
- stack_info = get_stack_facts(cfn, stack_params['StackName'])
-
- if module.check_mode:
- if state == 'absent' and stack_info:
- module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
- elif state == 'absent' and not stack_info:
- module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
- elif state == 'present' and not stack_info:
- module.exit_json(changed=True, msg='New stack would be created', meta=[])
- else:
- module.exit_json(**check_mode_changeset(module, stack_params, cfn))
-
- if state == 'present':
- if not stack_info:
- result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
- elif module.params.get('create_changeset'):
- result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
- else:
- if module.params.get('termination_protection') is not None:
- update_termination_protection(module, cfn, stack_params['StackName'],
- bool(module.params.get('termination_protection')))
- result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))
-
- # format the stack output
-
- stack = get_stack_facts(cfn, stack_params['StackName'])
- if stack is not None:
- if result.get('stack_outputs') is None:
- # always define stack_outputs, but it may be empty
- result['stack_outputs'] = {}
- for output in stack.get('Outputs', []):
- result['stack_outputs'][output['OutputKey']] = output['OutputValue']
- stack_resources = []
- reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
- for res in reslist.get('StackResourceSummaries', []):
- stack_resources.append({
- "logical_resource_id": res['LogicalResourceId'],
- "physical_resource_id": res.get('PhysicalResourceId', ''),
- "resource_type": res['ResourceType'],
- "last_updated_time": res['LastUpdatedTimestamp'],
- "status": res['ResourceStatus'],
- "status_reason": res.get('ResourceStatusReason') # can be blank, apparently
- })
- result['stack_resources'] = stack_resources
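-        # Each entry above has an illustrative shape like (values invented):
-        #   {'logical_resource_id': 'S3Bucket',
-        #    'physical_resource_id': 'mystack-s3bucket-abc123',
-        #    'resource_type': 'AWS::S3::Bucket',
-        #    'status': 'CREATE_COMPLETE', 'status_reason': None, ...}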
-
- elif state == 'absent':
- # absent state is different because of the way delete_stack works.
-        # the problem is it doesn't give an error if the stack isn't found,
-        # so we must describe the stack first
-
- try:
- stack = get_stack_facts(cfn, stack_params['StackName'])
- if not stack:
- result = {'changed': False, 'output': 'Stack not found.'}
- else:
- if stack_params.get('RoleARN') is None:
- cfn.delete_stack(StackName=stack_params['StackName'])
- else:
- cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
- result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
- stack_params.get('ClientRequestToken', None))
- except Exception as err:
- module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/cloudformation_info.py b/lib/ansible/modules/cloud/amazon/cloudformation_info.py
deleted file mode 100644
index f62b80235d..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudformation_info.py
+++ /dev/null
@@ -1,354 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: cloudformation_info
-short_description: Obtain information about an AWS CloudFormation stack
-description:
- - Gets information about an AWS CloudFormation stack.
- - This module was called C(cloudformation_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(cloudformation_info) module no longer returns C(ansible_facts)!
-requirements:
- - boto3 >= 1.0.0
- - python >= 2.6
-version_added: "2.2"
-author:
- - Justin Menga (@jmenga)
- - Kevin Coming (@waffie1)
-options:
- stack_name:
- description:
- - The name or id of the CloudFormation stack. Gathers information on all stacks by default.
- type: str
- all_facts:
- description:
- - Get all stack information for the stack.
- type: bool
- default: false
- stack_events:
- description:
- - Get stack events for the stack.
- type: bool
- default: false
- stack_template:
- description:
- - Get stack template body for the stack.
- type: bool
- default: false
- stack_resources:
- description:
- - Get stack resources for the stack.
- type: bool
- default: false
- stack_policy:
- description:
- - Get stack policy for the stack.
- type: bool
- default: false
- stack_change_sets:
- description:
-      - Get stack change sets for the stack.
- type: bool
- default: false
- version_added: '2.10'
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Get summary information about a stack
-- cloudformation_info:
- stack_name: my-cloudformation-stack
- register: output
-
-- debug:
- msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}"
-
-# When the module is called as cloudformation_facts, return values are published
-# in ansible_facts['cloudformation'][<stack_name>] and can be used as follows.
-# Note that this is deprecated and will stop working in Ansible 2.13.
-
-- cloudformation_facts:
- stack_name: my-cloudformation-stack
-
-- debug:
- msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}"
-
-# Get stack outputs, when you have the stack name available as a fact
-- set_fact:
- stack_name: my-awesome-stack
-
-- cloudformation_info:
- stack_name: "{{ stack_name }}"
- register: my_stack
-
-- debug:
- msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}"
-
-# Get all stack information about a stack
-- cloudformation_info:
- stack_name: my-cloudformation-stack
- all_facts: true
-
-# Get stack resource and stack policy information about a stack
-- cloudformation_info:
- stack_name: my-cloudformation-stack
- stack_resources: true
- stack_policy: true
-
-# Fail if the stack doesn't exist
-- name: try to get facts about a stack but fail if it doesn't exist
- cloudformation_info:
- stack_name: nonexistent-stack
- all_facts: yes
- failed_when: cloudformation['nonexistent-stack'] is undefined
-'''
-
-RETURN = '''
-stack_description:
- description: Summary facts about the stack
- returned: if the stack exists
- type: dict
-stack_outputs:
- description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each
- output 'OutputValue' parameter
- returned: if the stack exists
- type: dict
- sample:
- ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com
-stack_parameters:
- description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of
- each parameter 'ParameterValue' parameter
- returned: if the stack exists
- type: dict
- sample:
- DatabaseEngine: mysql
- DatabasePassword: "***"
-stack_events:
- description: All stack events for the stack
- returned: only if all_facts or stack_events is true and the stack exists
- type: list
-stack_policy:
- description: Describes the stack policy for the stack
- returned: only if all_facts or stack_policy is true and the stack exists
- type: dict
-stack_template:
- description: Describes the stack template for the stack
- returned: only if all_facts or stack_template is true and the stack exists
- type: dict
-stack_resource_list:
- description: Describes stack resources for the stack
-  returned: only if all_facts or stack_resources is true and the stack exists
- type: list
-stack_resources:
- description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each
- resource 'PhysicalResourceId' parameter
-  returned: only if all_facts or stack_resources is true and the stack exists
- type: dict
- sample:
- AutoScalingGroup: "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7"
- AutoScalingSecurityGroup: "sg-abcd1234"
- ApplicationDatabase: "dazvlpr01xj55a"
-stack_change_sets:
-  description: A list of stack change sets. Each item in the list represents the details of a specific changeset.
-  returned: only if all_facts or stack_change_sets is true and the stack exists
- type: list
-'''
-
-import json
-import traceback
-
-from functools import partial
-from ansible.module_utils._text import to_native
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry, boto3_tag_list_to_ansible_dict)
-
-try:
- import botocore
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-
-class CloudFormationServiceManager:
- """Handles CloudFormation Services"""
-
- def __init__(self, module):
- self.module = module
- self.client = module.client('cloudformation')
-
- @AWSRetry.exponential_backoff(retries=5, delay=5)
- def describe_stacks_with_backoff(self, **kwargs):
- paginator = self.client.get_paginator('describe_stacks')
- return paginator.paginate(**kwargs).build_full_result()['Stacks']
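-    # Note: build_full_result() drains every page from the paginator and
-    # merges them, so callers receive the complete 'Stacks' list rather than
-    # a single page of results.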
-
- def describe_stacks(self, stack_name=None):
- try:
- kwargs = {'StackName': stack_name} if stack_name else {}
- response = self.describe_stacks_with_backoff(**kwargs)
- if response is not None:
- return response
- self.module.fail_json(msg="Error describing stack(s) - an empty response was returned")
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
-            # only ClientError carries a parsed AWS error response
-            if isinstance(e, botocore.exceptions.ClientError) and 'does not exist' in e.response['Error']['Message']:
- # missing stack, don't bail.
- return {}
- self.module.fail_json_aws(e, msg="Error describing stack " + stack_name)
-
- @AWSRetry.exponential_backoff(retries=5, delay=5)
- def list_stack_resources_with_backoff(self, stack_name):
- paginator = self.client.get_paginator('list_stack_resources')
- return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries']
-
- def list_stack_resources(self, stack_name):
- try:
- return self.list_stack_resources_with_backoff(stack_name)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name)
-
- @AWSRetry.exponential_backoff(retries=5, delay=5)
- def describe_stack_events_with_backoff(self, stack_name):
- paginator = self.client.get_paginator('describe_stack_events')
- return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents']
-
- def describe_stack_events(self, stack_name):
- try:
- return self.describe_stack_events_with_backoff(stack_name)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name)
-
- @AWSRetry.exponential_backoff(retries=5, delay=5)
- def list_stack_change_sets_with_backoff(self, stack_name):
- paginator = self.client.get_paginator('list_change_sets')
- return paginator.paginate(StackName=stack_name).build_full_result()['Summaries']
-
- @AWSRetry.exponential_backoff(retries=5, delay=5)
- def describe_stack_change_set_with_backoff(self, **kwargs):
- paginator = self.client.get_paginator('describe_change_set')
- return paginator.paginate(**kwargs).build_full_result()
-
- def describe_stack_change_sets(self, stack_name):
- changes = []
- try:
- change_sets = self.list_stack_change_sets_with_backoff(stack_name)
- for item in change_sets:
- changes.append(self.describe_stack_change_set_with_backoff(
- StackName=stack_name,
- ChangeSetName=item['ChangeSetName']))
- return changes
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name)
-
- @AWSRetry.exponential_backoff(retries=5, delay=5)
- def get_stack_policy_with_backoff(self, stack_name):
- return self.client.get_stack_policy(StackName=stack_name)
-
- def get_stack_policy(self, stack_name):
- try:
- response = self.get_stack_policy_with_backoff(stack_name)
- stack_policy = response.get('StackPolicyBody')
- if stack_policy:
- return json.loads(stack_policy)
- return dict()
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name)
-
- @AWSRetry.exponential_backoff(retries=5, delay=5)
- def get_template_with_backoff(self, stack_name):
- return self.client.get_template(StackName=stack_name)
-
- def get_template(self, stack_name):
- try:
- response = self.get_template_with_backoff(stack_name)
- return response.get('TemplateBody')
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name)
-
-
-def to_dict(items, key, value):
- ''' Transforms a list of items to a Key/Value dictionary '''
- if items:
- return dict(zip([i.get(key) for i in items], [i.get(value) for i in items]))
- else:
- return dict()
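-
-# Illustrative call (values invented):
-#   to_dict([{'OutputKey': 'VpcId', 'OutputValue': 'vpc-123'}],
-#           'OutputKey', 'OutputValue')  ->  {'VpcId': 'vpc-123'}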
-
-
-def main():
- argument_spec = dict(
- stack_name=dict(),
- all_facts=dict(required=False, default=False, type='bool'),
- stack_policy=dict(required=False, default=False, type='bool'),
- stack_events=dict(required=False, default=False, type='bool'),
- stack_resources=dict(required=False, default=False, type='bool'),
- stack_template=dict(required=False, default=False, type='bool'),
- stack_change_sets=dict(required=False, default=False, type='bool'),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
- is_old_facts = module._name == 'cloudformation_facts'
- if is_old_facts:
- module.deprecate("The 'cloudformation_facts' module has been renamed to 'cloudformation_info', "
- "and the renamed one no longer returns ansible_facts", version='2.13')
-
- service_mgr = CloudFormationServiceManager(module)
-
- if is_old_facts:
- result = {'ansible_facts': {'cloudformation': {}}}
- else:
- result = {'cloudformation': {}}
-
- for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')):
- facts = {'stack_description': stack_description}
- stack_name = stack_description.get('StackName')
-
- # Create stack output and stack parameter dictionaries
- if facts['stack_description']:
- facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
- facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'),
- 'ParameterKey', 'ParameterValue')
- facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags'))
-
- # Create optional stack outputs
- all_facts = module.params.get('all_facts')
- if all_facts or module.params.get('stack_resources'):
- facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
- facts['stack_resources'] = to_dict(facts.get('stack_resource_list'),
- 'LogicalResourceId', 'PhysicalResourceId')
- if all_facts or module.params.get('stack_template'):
- facts['stack_template'] = service_mgr.get_template(stack_name)
- if all_facts or module.params.get('stack_policy'):
- facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
- if all_facts or module.params.get('stack_events'):
- facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
- if all_facts or module.params.get('stack_change_sets'):
- facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name)
-
- if is_old_facts:
- result['ansible_facts']['cloudformation'][stack_name] = facts
- else:
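-            # camel_dict_to_snake_dict rewrites keys such as 'StackStatus'
-            # into 'stack_status'; the ignore_list entries are skipped so
-            # that user-defined key names (outputs, parameters, tags, ...)
-            # survive unchanged.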
- result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs',
- 'stack_parameters',
- 'stack_policy',
- 'stack_resources',
- 'stack_tags',
- 'stack_template'))
-
- module.exit_json(changed=False, **result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2.py b/lib/ansible/modules/cloud/amazon/ec2.py
deleted file mode 100644
index 91503bbf8e..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2.py
+++ /dev/null
@@ -1,1766 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: ec2
-short_description: create, terminate, start or stop an instance in ec2
-description:
- - Creates or terminates ec2 instances.
- - >
- Note: This module uses the older boto Python module to interact with the EC2 API.
- M(ec2) will still receive bug fixes, but no new features.
- Consider using the M(ec2_instance) module instead.
- If M(ec2_instance) does not support a feature you need that is available in M(ec2), please
- file a feature request.
-version_added: "0.9"
-options:
- key_name:
- description:
- - Key pair to use on the instance.
- - The SSH key must already exist in AWS in order to use this argument.
- - Keys can be created / deleted using the M(ec2_key) module.
- aliases: ['keypair']
- type: str
- id:
- version_added: "1.1"
- description:
- - Identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances.
- - This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on.
- - For details, see the description of client token at U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
- type: str
- group:
- description:
- - Security group (or list of groups) to use with the instance.
- aliases: [ 'groups' ]
- type: list
- elements: str
- group_id:
- version_added: "1.1"
- description:
- - Security group id (or list of ids) to use with the instance.
- type: list
- elements: str
- zone:
- version_added: "1.2"
- description:
- - AWS availability zone in which to launch the instance.
- aliases: [ 'aws_zone', 'ec2_zone' ]
- type: str
- instance_type:
- description:
- - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
- - Required when creating a new instance.
- type: str
- aliases: ['type']
- tenancy:
- version_added: "1.9"
- description:
- - An instance with a tenancy of C(dedicated) runs on single-tenant hardware and can only be launched into a VPC.
- - Note that to use dedicated tenancy you MUST specify a I(vpc_subnet_id) as well.
- - Dedicated tenancy is not available for EC2 "micro" instances.
- default: default
- choices: [ "default", "dedicated" ]
- type: str
- spot_price:
- version_added: "1.5"
- description:
- - Maximum spot price to bid. If not set, a regular on-demand instance is requested.
- - A spot request is made with this maximum bid. When it is filled, the instance is started.
- type: str
- spot_type:
- version_added: "2.0"
- description:
- - The type of spot request.
- - After being interrupted a C(persistent) spot instance will be started once there is capacity to fill the request again.
- default: "one-time"
- choices: [ "one-time", "persistent" ]
- type: str
- image:
- description:
- - I(ami) ID to use for the instance.
- - Required when I(state=present).
- type: str
- kernel:
- description:
- - Kernel eki to use for the instance.
- type: str
- ramdisk:
- description:
- - Ramdisk eri to use for the instance.
- type: str
- wait:
- description:
- - Wait for the instance to reach its desired state before returning.
- - Does not wait for SSH, see the 'wait_for_connection' example for details.
- type: bool
- default: false
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- default: 300
- type: int
- spot_wait_timeout:
- version_added: "1.5"
- description:
- - How long to wait for the spot instance request to be fulfilled. Affects 'Request valid until' for setting spot request lifespan.
- default: 600
- type: int
- count:
- description:
- - Number of instances to launch.
- default: 1
- type: int
- monitoring:
- version_added: "1.1"
- description:
- - Enable detailed monitoring (CloudWatch) for instance.
- type: bool
- default: false
- user_data:
- version_added: "0.9"
- description:
- - Opaque blob of data which is made available to the EC2 instance.
- type: str
- instance_tags:
- version_added: "1.0"
- description:
- - A hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'.
- type: dict
- placement_group:
- version_added: "1.3"
- description:
- - Placement group for the instance when using EC2 Clustered Compute.
- type: str
- vpc_subnet_id:
- version_added: "1.1"
- description:
- - the subnet ID in which to launch the instance (VPC).
- type: str
- assign_public_ip:
- version_added: "1.5"
- description:
- - When provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+.
- type: bool
- private_ip:
- version_added: "1.2"
- description:
- - The private ip address to assign the instance (from the vpc subnet).
- type: str
- instance_profile_name:
- version_added: "1.3"
- description:
- - Name of the IAM instance profile (i.e. what the EC2 console refers to as an "IAM Role") to use. Boto library must be 2.5.0+.
- type: str
- instance_ids:
- version_added: "1.3"
- description:
- - "list of instance ids, currently used for states: absent, running, stopped"
- aliases: ['instance_id']
- type: list
- elements: str
- source_dest_check:
- version_added: "1.6"
- description:
- - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers).
- When initially creating an instance the EC2 API defaults this to C(True).
- type: bool
- termination_protection:
- version_added: "2.0"
- description:
- - Enable or Disable the Termination Protection.
- type: bool
- default: false
- instance_initiated_shutdown_behavior:
- version_added: "2.2"
- description:
-      - Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store
- images (which require termination on shutdown).
- default: 'stop'
- choices: [ "stop", "terminate" ]
- type: str
- state:
- version_added: "1.3"
- description:
- - Create, terminate, start, stop or restart instances. The state 'restarted' was added in Ansible 2.2.
- - When I(state=absent), I(instance_ids) is required.
- - When I(state=running), I(state=stopped) or I(state=restarted) then either I(instance_ids) or I(instance_tags) is required.
- default: 'present'
- choices: ['absent', 'present', 'restarted', 'running', 'stopped']
- type: str
- volumes:
- version_added: "1.5"
- description:
- - A list of hash/dictionaries of volumes to add to the new instance.
- type: list
- elements: dict
- suboptions:
- device_name:
- type: str
- required: true
- description:
- - A name for the device (For example C(/dev/sda)).
- delete_on_termination:
- type: bool
- default: false
- description:
- - Whether the volume should be automatically deleted when the instance is terminated.
- ephemeral:
- type: str
- description:
- - Whether the volume should be ephemeral.
- - Data on ephemeral volumes is lost when the instance is stopped.
- - Mutually exclusive with the I(snapshot) parameter.
- encrypted:
- type: bool
- default: false
- description:
- - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK.
- snapshot:
- type: str
- description:
- - The ID of an EBS snapshot to copy when creating the volume.
- - Mutually exclusive with the I(ephemeral) parameter.
- volume_type:
- type: str
- description:
- - The type of volume to create.
- - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types.
- volume_size:
- type: int
- description:
- - The size of the volume (in GiB).
- iops:
- type: int
- description:
- - The number of IOPS per second to provision for the volume.
- - Required when I(volume_type=io1).
- ebs_optimized:
- version_added: "1.6"
- description:
- - Whether instance is using optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
- default: false
- type: bool
- exact_count:
- version_added: "1.5"
- description:
-      - An integer value which indicates how many instances matching the 'count_tag' parameter should be running.
- Instances are either created or terminated based on this value.
- type: int
- count_tag:
- version_added: "1.5"
- description:
- - Used with I(exact_count) to determine how many nodes based on a specific tag criteria should be running.
- This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers
- that are tagged with "class=webserver". The specified tag must already exist or be passed in as the I(instance_tags) option.
- type: raw
- network_interfaces:
- version_added: "2.0"
- description:
- - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces,
- none of the I(assign_public_ip), I(private_ip), I(vpc_subnet_id), I(group), or I(group_id) parameters may be used. (Those parameters are
- for creating a new network interface at launch.)
- aliases: ['network_interface']
- type: list
- elements: str
- spot_launch_group:
- version_added: "2.1"
- description:
- - Launch group for spot requests, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group).
- type: str
-author:
- - "Tim Gerla (@tgerla)"
- - "Lester Wade (@lwade)"
- - "Seth Vidal (@skvidal)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Basic provisioning example
-- ec2:
- key_name: mykey
- instance_type: t2.micro
- image: ami-123456
- wait: yes
- group: webserver
- count: 3
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-# Advanced example with tagging and CloudWatch
-- ec2:
- key_name: mykey
- group: databases
- instance_type: t2.micro
- image: ami-123456
- wait: yes
- wait_timeout: 500
- count: 5
- instance_tags:
- db: postgres
- monitoring: yes
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-# Single instance with additional IOPS volume from snapshot and volume delete on termination
-- ec2:
- key_name: mykey
- group: webserver
- instance_type: c3.medium
- image: ami-123456
- wait: yes
- wait_timeout: 500
- volumes:
- - device_name: /dev/sdb
- snapshot: snap-abcdef12
- volume_type: io1
- iops: 1000
- volume_size: 100
- delete_on_termination: true
- monitoring: yes
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-# Single instance with ssd gp2 root volume
-- ec2:
- key_name: mykey
- group: webserver
- instance_type: c3.medium
- image: ami-123456
- wait: yes
- wait_timeout: 500
- volumes:
- - device_name: /dev/xvda
- volume_type: gp2
- volume_size: 8
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
- count_tag:
- Name: dbserver
- exact_count: 1
-
-# Multiple groups example
-- ec2:
- key_name: mykey
- group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
- instance_type: m1.large
- image: ami-6e649707
- wait: yes
- wait_timeout: 500
- count: 5
- instance_tags:
- db: postgres
- monitoring: yes
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-# Multiple instances with additional volume from snapshot
-- ec2:
- key_name: mykey
- group: webserver
- instance_type: m1.large
- image: ami-6e649707
- wait: yes
- wait_timeout: 500
- count: 5
- volumes:
- - device_name: /dev/sdb
- snapshot: snap-abcdef12
- volume_size: 10
- monitoring: yes
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-# Dedicated tenancy example
-- local_action:
- module: ec2
- assign_public_ip: yes
- group_id: sg-1dc53f72
- key_name: mykey
- image: ami-6e649707
- instance_type: m1.small
- tenancy: dedicated
- vpc_subnet_id: subnet-29e63245
- wait: yes
-
-# Spot instance example
-- ec2:
- spot_price: 0.24
- spot_wait_timeout: 600
- keypair: mykey
- group_id: sg-1dc53f72
- instance_type: m1.small
- image: ami-6e649707
- wait: yes
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
- spot_launch_group: report_generators
- instance_initiated_shutdown_behavior: terminate
-
-# Examples using pre-existing network interfaces
-- ec2:
- key_name: mykey
- instance_type: t2.small
- image: ami-f005ba11
- network_interface: eni-deadbeef
-
-- ec2:
- key_name: mykey
- instance_type: t2.small
- image: ami-f005ba11
- network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']
-
-# Launch instances, run some tasks
-# and then terminate them
-
-- name: Create a sandbox instance
- hosts: localhost
- gather_facts: False
- vars:
- keypair: my_keypair
- instance_type: m1.small
- security_group: my_securitygroup
- image: my_ami_id
- region: us-east-1
- tasks:
- - name: Launch instance
- ec2:
- key_name: "{{ keypair }}"
- group: "{{ security_group }}"
- instance_type: "{{ instance_type }}"
- image: "{{ image }}"
- wait: true
- region: "{{ region }}"
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
- register: ec2
-
- - name: Add new instance to host group
- add_host:
- hostname: "{{ item.public_ip }}"
- groupname: launched
- loop: "{{ ec2.instances }}"
-
- - name: Wait for SSH to come up
- delegate_to: "{{ item.public_dns_name }}"
- wait_for_connection:
- delay: 60
- timeout: 320
- loop: "{{ ec2.instances }}"
-
-- name: Configure instance(s)
- hosts: launched
- become: True
- gather_facts: True
- roles:
- - my_awesome_role
- - my_awesome_test
-
-- name: Terminate instances
- hosts: localhost
- tasks:
- - name: Terminate instances that were previously launched
- ec2:
- state: 'absent'
- instance_ids: '{{ ec2.instance_ids }}'
-
-# Start a few existing instances, run some tasks
-# and stop the instances
-
-- name: Start sandbox instances
- hosts: localhost
- gather_facts: false
- vars:
- instance_ids:
- - 'i-xxxxxx'
- - 'i-xxxxxx'
- - 'i-xxxxxx'
- region: us-east-1
- tasks:
- - name: Start the sandbox instances
- ec2:
- instance_ids: '{{ instance_ids }}'
- region: '{{ region }}'
- state: running
- wait: True
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
- roles:
- - do_neat_stuff
- - do_more_neat_stuff
-
-- name: Stop sandbox instances
- hosts: localhost
- gather_facts: false
- vars:
- instance_ids:
- - 'i-xxxxxx'
- - 'i-xxxxxx'
- - 'i-xxxxxx'
- region: us-east-1
- tasks:
- - name: Stop the sandbox instances
- ec2:
- instance_ids: '{{ instance_ids }}'
- region: '{{ region }}'
- state: stopped
- wait: True
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-#
-# Start stopped instances specified by tag
-#
-- local_action:
- module: ec2
- instance_tags:
- Name: ExtraPower
- state: running
-
-#
-# Restart instances specified by tag
-#
-- local_action:
- module: ec2
- instance_tags:
- Name: ExtraPower
- state: restarted
-
-#
-# Enforce that 5 instances with a tag "foo" are running
-# (Highly recommended!)
-#
-
-- ec2:
- key_name: mykey
- instance_type: c1.medium
- image: ami-40603AD1
- wait: yes
- group: webserver
- instance_tags:
- foo: bar
- exact_count: 5
- count_tag: foo
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-#
-# Enforce that 5 running instances named "database" with a "dbtype" of "postgres"
-#
-
-- ec2:
- key_name: mykey
- instance_type: c1.medium
- image: ami-40603AD1
- wait: yes
- group: webserver
- instance_tags:
- Name: database
- dbtype: postgres
- exact_count: 5
- count_tag:
- Name: database
- dbtype: postgres
- vpc_subnet_id: subnet-29e63245
- assign_public_ip: yes
-
-#
-# count_tag complex argument examples
-#
-
- # instances with tag foo
-- ec2:
- count_tag:
- foo:
-
- # instances with tag foo=bar
-- ec2:
- count_tag:
- foo: bar
-
- # instances with tags foo=bar & baz
-- ec2:
- count_tag:
- foo: bar
- baz:
-
- # instances with tags foo & bar & baz=bang
-- ec2:
- count_tag:
- - foo
- - bar
- - baz: bang
-
-'''
-
-import time
-import datetime
-import traceback
-from ast import literal_eval
-from distutils.version import LooseVersion
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, ec2_connect
-from ansible.module_utils.six import get_function_code, string_types
-from ansible.module_utils._text import to_bytes, to_text
-
-try:
- import boto.ec2
- from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
- from boto.exception import EC2ResponseError
- from boto import connect_ec2_endpoint
- from boto import connect_vpc
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-
-def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None):
-
- # get reservations for instances that match tag(s) and are in the desired state
- state = module.params.get('state')
- if state not in ['running', 'stopped']:
- state = None
- reservations = get_reservations(module, ec2, vpc, tags=count_tag, state=state, zone=zone)
-
- instances = []
- for res in reservations:
- if hasattr(res, 'instances'):
- for inst in res.instances:
- if inst.state == 'terminated' or inst.state == 'shutting-down':
- continue
- instances.append(inst)
-
- return reservations, instances
-
-
-def _set_none_to_blank(dictionary):
- result = dictionary
- for k in result:
- if isinstance(result[k], dict):
- result[k] = _set_none_to_blank(result[k])
- elif not result[k]:
- result[k] = ""
- return result
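-
-# Illustrative call (values invented):
-#   _set_none_to_blank({'foo': None, 'nested': {'bar': None}})
-#   ->  {'foo': '', 'nested': {'bar': ''}}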
-
-
-def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None):
- # TODO: filters do not work with tags that have underscores
- filters = dict()
-
- vpc_subnet_id = module.params.get('vpc_subnet_id')
- vpc_id = None
- if vpc_subnet_id:
- filters.update({"subnet-id": vpc_subnet_id})
- if vpc:
- vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
-
- if vpc_id:
- filters.update({"vpc-id": vpc_id})
-
- if tags is not None:
-
- if isinstance(tags, str):
- try:
- tags = literal_eval(tags)
- except Exception:
- pass
-
- # if not a string type, convert and make sure it's a text string
- if isinstance(tags, int):
- tags = to_text(tags)
-
- # if string, we only care that a tag of that name exists
- if isinstance(tags, str):
- filters.update({"tag-key": tags})
-
- # if list, append each item to filters
- if isinstance(tags, list):
- for x in tags:
- if isinstance(x, dict):
- x = _set_none_to_blank(x)
- filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items()))
- else:
- filters.update({"tag-key": x})
-
- # if dict, add the key and value to the filter
- if isinstance(tags, dict):
- tags = _set_none_to_blank(tags)
- filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items()))
-
- # lets check to see if the filters dict is empty, if so then stop
- if not filters:
- module.fail_json(msg="Filters based on tag is empty => tags: %s" % (tags))
-
- if state:
- # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
- filters.update({'instance-state-name': state})
-
- if zone:
- filters.update({'availability-zone': zone})
-
- if module.params.get('id'):
- filters['client-token'] = module.params['id']
-
- results = ec2.get_all_instances(filters=filters)
-
- return results
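-
-# For illustration (values invented): tags={'Name': 'db', 'env': None} with
-# vpc_subnet_id set builds filters like
-#   {'subnet-id': 'subnet-29e63245', 'vpc-id': 'vpc-123',
-#    'tag:Name': 'db', 'tag:env': ''}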
-
-
-def get_instance_info(inst):
- """
-    Retrieves instance information from a boto instance
-    object and returns it as a dictionary
- """
- instance_info = {'id': inst.id,
- 'ami_launch_index': inst.ami_launch_index,
- 'private_ip': inst.private_ip_address,
- 'private_dns_name': inst.private_dns_name,
- 'public_ip': inst.ip_address,
- 'dns_name': inst.dns_name,
- 'public_dns_name': inst.public_dns_name,
- 'state_code': inst.state_code,
- 'architecture': inst.architecture,
- 'image_id': inst.image_id,
- 'key_name': inst.key_name,
- 'placement': inst.placement,
- 'region': inst.placement[:-1],
- 'kernel': inst.kernel,
- 'ramdisk': inst.ramdisk,
- 'launch_time': inst.launch_time,
- 'instance_type': inst.instance_type,
- 'root_device_type': inst.root_device_type,
- 'root_device_name': inst.root_device_name,
- 'state': inst.state,
- 'hypervisor': inst.hypervisor,
- 'tags': inst.tags,
- 'groups': dict((group.id, group.name) for group in inst.groups),
- }
- try:
- instance_info['virtualization_type'] = getattr(inst, 'virtualization_type')
- except AttributeError:
- instance_info['virtualization_type'] = None
-
- try:
- instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized')
- except AttributeError:
- instance_info['ebs_optimized'] = False
-
- try:
- bdm_dict = {}
- bdm = getattr(inst, 'block_device_mapping')
- for device_name in bdm.keys():
- bdm_dict[device_name] = {
- 'status': bdm[device_name].status,
- 'volume_id': bdm[device_name].volume_id,
- 'delete_on_termination': bdm[device_name].delete_on_termination
- }
- instance_info['block_device_mapping'] = bdm_dict
- except AttributeError:
- instance_info['block_device_mapping'] = False
-
- try:
- instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
- except AttributeError:
- instance_info['tenancy'] = 'default'
-
- return instance_info
-
-
-def boto_supports_associate_public_ip_address(ec2):
- """
- Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification
- class. Added in Boto 2.13.0
-
- ec2: authenticated ec2 connection object
-
- Returns:
-        True if Boto library accepts associate_public_ip_address argument, else False
- """
-
- try:
- network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
- getattr(network_interface, "associate_public_ip_address")
- return True
- except AttributeError:
- return False
-
-
-def boto_supports_profile_name_arg(ec2):
- """
-    Check if Boto library has instance_profile_name argument. instance_profile_name was added in Boto 2.5.0
-
- ec2: authenticated ec2 connection object
-
- Returns:
-        True if Boto library accepts instance_profile_name argument, else False
- """
- run_instances_method = getattr(ec2, 'run_instances')
- return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames
-
-
-def boto_supports_volume_encryption():
- """
- Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
-
- Returns:
-        True if the boto library supports encrypted EBS volumes (added in 2.29.0), else False
- """
- return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
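-
-# Note: LooseVersion compares version components numerically, so
-# LooseVersion('2.29.0') > LooseVersion('2.9.9') even though the raw strings
-# would compare the other way round.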
-
-
-def create_block_device(module, ec2, volume):
-    # Not aware of a way to determine this programmatically
- # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
- MAX_IOPS_TO_SIZE_RATIO = 30
-
- volume_type = volume.get('volume_type')
-
- if 'snapshot' not in volume and 'ephemeral' not in volume:
- if 'volume_size' not in volume:
- module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
- if 'snapshot' in volume:
- if volume_type == 'io1' and 'iops' not in volume:
- module.fail_json(msg='io1 volumes must have an iops value set')
- if 'iops' in volume:
- snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
- size = volume.get('volume_size', snapshot.volume_size)
- if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
- module.fail_json(msg='IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
- if 'ephemeral' in volume:
- if 'snapshot' in volume:
- module.fail_json(msg='Cannot set both ephemeral and snapshot')
- if boto_supports_volume_encryption():
- return BlockDeviceType(snapshot_id=volume.get('snapshot'),
- ephemeral_name=volume.get('ephemeral'),
- size=volume.get('volume_size'),
- volume_type=volume_type,
- delete_on_termination=volume.get('delete_on_termination', False),
- iops=volume.get('iops'),
- encrypted=volume.get('encrypted', None))
- else:
- return BlockDeviceType(snapshot_id=volume.get('snapshot'),
- ephemeral_name=volume.get('ephemeral'),
- size=volume.get('volume_size'),
- volume_type=volume_type,
- delete_on_termination=volume.get('delete_on_termination', False),
- iops=volume.get('iops'))
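-
-# Worked example of the ratio check above (numbers invented): a 100 GiB io1
-# volume may request at most 30 * 100 = 3000 IOPS; iops=3001 would fail with
-# "IOPS must be at most 30 times greater than size".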
-
-
-def boto_supports_param_in_spot_request(ec2, param):
- """
- Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
-
- ec2: authenticated ec2 connection object
-
- Returns:
- True if boto library has the named param as an argument on the request_spot_instances method, else False
- """
- method = getattr(ec2, 'request_spot_instances')
- return param in get_function_code(method).co_varnames
-
-
-def await_spot_requests(module, ec2, spot_requests, count):
- """
- Wait for a group of spot requests to be fulfilled, or fail.
-
- module: Ansible module object
- ec2: authenticated ec2 connection object
- spot_requests: boto.ec2.spotinstancerequest.SpotInstanceRequest object returned by ec2.request_spot_instances
- count: Total number of instances to be created by the spot requests
-
- Returns:
- list of instance ID's created by the spot request(s)
- """
- spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
- wait_complete = time.time() + spot_wait_timeout
-
- spot_req_inst_ids = dict()
- while time.time() < wait_complete:
- reqs = ec2.get_all_spot_instance_requests()
- for sirb in spot_requests:
- if sirb.id in spot_req_inst_ids:
- continue
- for sir in reqs:
- if sir.id != sirb.id:
- continue # this is not our spot instance
- if sir.instance_id is not None:
- spot_req_inst_ids[sirb.id] = sir.instance_id
- elif sir.state == 'open':
- continue # still waiting, nothing to do here
- elif sir.state == 'active':
- continue # Instance is created already, nothing to do here
- elif sir.state == 'failed':
- module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
- sir.id, sir.status.code, sir.fault.code, sir.fault.message))
- elif sir.state == 'cancelled':
- module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
- elif sir.state == 'closed':
- # instance is terminating or marked for termination
- # this may be intentional on the part of the operator,
- # or it may have been terminated by AWS due to capacity,
-                        # price, or group constraints. In this case, we'll fail
- # the module if the reason for the state is anything
- # other than termination by user. Codes are documented at
- # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
- if sir.status.code == 'instance-terminated-by-user':
- # do nothing, since the user likely did this on purpose
- pass
- else:
- spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
- module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))
-
- if len(spot_req_inst_ids) < count:
- time.sleep(5)
- else:
- return list(spot_req_inst_ids.values())
- module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime())
-
-
-def enforce_count(module, ec2, vpc):
-
- exact_count = module.params.get('exact_count')
- count_tag = module.params.get('count_tag')
- zone = module.params.get('zone')
-
- # fail here if the exact count was specified without filtering
-    # on a tag, as this may lead to an undesired removal of instances
- if exact_count and count_tag is None:
- module.fail_json(msg="you must use the 'count_tag' option with exact_count")
-
- reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone)
-
- changed = None
- checkmode = False
- instance_dict_array = []
- changed_instance_ids = None
-
- if len(instances) == exact_count:
- changed = False
- elif len(instances) < exact_count:
- changed = True
- to_create = exact_count - len(instances)
- if not checkmode:
- (instance_dict_array, changed_instance_ids, changed) \
- = create_instances(module, ec2, vpc, override_count=to_create)
-
- for inst in instance_dict_array:
- instances.append(inst)
- elif len(instances) > exact_count:
- changed = True
- to_remove = len(instances) - exact_count
- if not checkmode:
- all_instance_ids = sorted([x.id for x in instances])
- remove_ids = all_instance_ids[0:to_remove]
-
- instances = [x for x in instances if x.id not in remove_ids]
-
- (changed, instance_dict_array, changed_instance_ids) \
- = terminate_instances(module, ec2, remove_ids)
- terminated_list = []
- for inst in instance_dict_array:
- inst['state'] = "terminated"
- terminated_list.append(inst)
- instance_dict_array = terminated_list
-
- # ensure all instances are dictionaries
- all_instances = []
- for inst in instances:
-
- if not isinstance(inst, dict):
- warn_if_public_ip_assignment_changed(module, inst)
- inst = get_instance_info(inst)
- all_instances.append(inst)
-
- return (all_instances, instance_dict_array, changed_instance_ids, changed)
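-
-# Worked example (numbers invented): with exact_count=5 and 3 matching
-# instances already running, enforce_count launches 5 - 3 = 2 more; with 7
-# running, it terminates the 2 instances whose ids sort first.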
-
-
-def create_instances(module, ec2, vpc, override_count=None):
- """
- Creates new instances
-
- module : AnsibleModule object
- ec2: authenticated ec2 connection object
-
- Returns:
- A list of dictionaries with instance information
- about the instances that were launched
- """
-
- key_name = module.params.get('key_name')
- id = module.params.get('id')
- group_name = module.params.get('group')
- group_id = module.params.get('group_id')
- zone = module.params.get('zone')
- instance_type = module.params.get('instance_type')
- tenancy = module.params.get('tenancy')
- spot_price = module.params.get('spot_price')
- spot_type = module.params.get('spot_type')
- image = module.params.get('image')
- if override_count:
- count = override_count
- else:
- count = module.params.get('count')
- monitoring = module.params.get('monitoring')
- kernel = module.params.get('kernel')
- ramdisk = module.params.get('ramdisk')
- wait = module.params.get('wait')
- wait_timeout = int(module.params.get('wait_timeout'))
- spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
- placement_group = module.params.get('placement_group')
- user_data = module.params.get('user_data')
- instance_tags = module.params.get('instance_tags')
- vpc_subnet_id = module.params.get('vpc_subnet_id')
- assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
- private_ip = module.params.get('private_ip')
- instance_profile_name = module.params.get('instance_profile_name')
- volumes = module.params.get('volumes')
- ebs_optimized = module.params.get('ebs_optimized')
- exact_count = module.params.get('exact_count')
- count_tag = module.params.get('count_tag')
- source_dest_check = module.boolean(module.params.get('source_dest_check'))
- termination_protection = module.boolean(module.params.get('termination_protection'))
- network_interfaces = module.params.get('network_interfaces')
- spot_launch_group = module.params.get('spot_launch_group')
- instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')
-
- vpc_id = None
- if vpc_subnet_id:
- if not vpc:
- module.fail_json(msg="region must be specified")
- else:
- vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
- else:
- vpc_id = None
-
- try:
- # Here we try to lookup the group id from the security group name - if group is set.
- if group_name:
- if vpc_id:
- grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
- else:
- grp_details = ec2.get_all_security_groups()
- if isinstance(group_name, string_types):
- group_name = [group_name]
- unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
- if len(unmatched) > 0:
- module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
- group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
- # Now we try to lookup the group id testing if group exists.
- elif group_id:
- # wrap the group_id in a list if it's not one already
- if isinstance(group_id, string_types):
- group_id = [group_id]
- grp_details = ec2.get_all_security_groups(group_ids=group_id)
- group_name = [grp_item.name for grp_item in grp_details]
- except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg=str(e))
-
-    # Look up any instances that match our run id.
-
- running_instances = []
- count_remaining = int(count)
-
- if id is not None:
- filter_dict = {'client-token': id, 'instance-state-name': 'running'}
- previous_reservations = ec2.get_all_instances(None, filter_dict)
- for res in previous_reservations:
- for prev_instance in res.instances:
- running_instances.append(prev_instance)
- count_remaining = count_remaining - len(running_instances)
-
-    # Both min_count and max_count equal the count parameter, so the launch request is explicit: launch exactly that many instances, or fail.
-
- if count_remaining == 0:
- changed = False
- else:
- changed = True
- try:
- params = {'image_id': image,
- 'key_name': key_name,
- 'monitoring_enabled': monitoring,
- 'placement': zone,
- 'instance_type': instance_type,
- 'kernel_id': kernel,
- 'ramdisk_id': ramdisk}
- if user_data is not None:
- params['user_data'] = to_bytes(user_data, errors='surrogate_or_strict')
-
- if ebs_optimized:
- params['ebs_optimized'] = ebs_optimized
-
- # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
- if not spot_price:
- params['tenancy'] = tenancy
-
- if boto_supports_profile_name_arg(ec2):
- params['instance_profile_name'] = instance_profile_name
- else:
- if instance_profile_name is not None:
- module.fail_json(
- msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
-
- if assign_public_ip is not None:
- if not boto_supports_associate_public_ip_address(ec2):
- module.fail_json(
- msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
- elif not vpc_subnet_id:
- module.fail_json(
- msg="assign_public_ip only available with vpc_subnet_id")
-
- else:
- if private_ip:
- interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
- subnet_id=vpc_subnet_id,
- private_ip_address=private_ip,
- groups=group_id,
- associate_public_ip_address=assign_public_ip)
- else:
- interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
- subnet_id=vpc_subnet_id,
- groups=group_id,
- associate_public_ip_address=assign_public_ip)
- interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
- params['network_interfaces'] = interfaces
- else:
- if network_interfaces:
- if isinstance(network_interfaces, string_types):
- network_interfaces = [network_interfaces]
- interfaces = []
- for i, network_interface_id in enumerate(network_interfaces):
- interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
- network_interface_id=network_interface_id,
- device_index=i)
- interfaces.append(interface)
- params['network_interfaces'] = \
- boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
- else:
- params['subnet_id'] = vpc_subnet_id
- if vpc_subnet_id:
- params['security_group_ids'] = group_id
- else:
- params['security_groups'] = group_name
-
- if volumes:
- bdm = BlockDeviceMapping()
- for volume in volumes:
- if 'device_name' not in volume:
- module.fail_json(msg='Device name must be set for volume')
- # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0
- # to be a signal not to create this volume
- if 'volume_size' not in volume or int(volume['volume_size']) > 0:
- bdm[volume['device_name']] = create_block_device(module, ec2, volume)
-
- params['block_device_map'] = bdm
-
- # check to see if we're using spot pricing first before starting instances
- if not spot_price:
- if assign_public_ip is not None and private_ip:
- params.update(
- dict(
- min_count=count_remaining,
- max_count=count_remaining,
- client_token=id,
- placement_group=placement_group,
- )
- )
- else:
- params.update(
- dict(
- min_count=count_remaining,
- max_count=count_remaining,
- client_token=id,
- placement_group=placement_group,
- private_ip_address=private_ip,
- )
- )
-
- # For ordinary (not spot) instances, we can select 'stop'
- # (the default) or 'terminate' here.
- params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
-
- try:
- res = ec2.run_instances(**params)
- except boto.exception.EC2ResponseError as e:
- if (params['instance_initiated_shutdown_behavior'] != 'terminate' and
- "InvalidParameterCombination" == e.error_code):
- params['instance_initiated_shutdown_behavior'] = 'terminate'
- res = ec2.run_instances(**params)
- else:
- raise
-
- instids = [i.id for i in res.instances]
- while True:
- try:
- ec2.get_all_instances(instids)
- break
- except boto.exception.EC2ResponseError as e:
- if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
- # there's a race between start and get an instance
- continue
- else:
- module.fail_json(msg=str(e))
-
- # The instances returned through ec2.run_instances above can be in
- # terminated state due to idempotency. See commit 7f11c3d for a complete
- # explanation.
- terminated_instances = [
- str(instance.id) for instance in res.instances if instance.state == 'terminated'
- ]
- if terminated_instances:
- module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
- "were created previously but have since been terminated - " +
-                                 "use a (possibly different) 'id' parameter")
-
- else:
- if private_ip:
- module.fail_json(
- msg='private_ip only available with on-demand (non-spot) instances')
- if boto_supports_param_in_spot_request(ec2, 'placement_group'):
- params['placement_group'] = placement_group
- elif placement_group:
- module.fail_json(
- msg="placement_group parameter requires Boto version 2.3.0 or higher.")
-
-            # You can't tell spot instances to 'stop'; they will always be
-            # terminated. An explicit 'terminate' is accepted here, while
-            # 'stop' is rejected below.
- if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
- module.fail_json(
- msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
-
- if spot_launch_group and isinstance(spot_launch_group, string_types):
- params['launch_group'] = spot_launch_group
-
- params.update(dict(
- count=count_remaining,
- type=spot_type,
- ))
-
- # Set spot ValidUntil
- # ValidUntil -> (timestamp). The end date of the request, in
-            # UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
- utc_valid_until = (
- datetime.datetime.utcnow()
- + datetime.timedelta(seconds=spot_wait_timeout))
- params['valid_until'] = utc_valid_until.strftime('%Y-%m-%dT%H:%M:%S.000Z')
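-            # e.g. (instant invented) spot_wait_timeout=600 starting at
-            # 2020-03-09T09:40:31Z yields valid_until '2020-03-09T09:50:31.000Z'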
-
- res = ec2.request_spot_instances(spot_price, **params)
-
- # Now we have to do the intermediate waiting
- if wait:
- instids = await_spot_requests(module, ec2, res, count)
- else:
- instids = []
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message))
-
- # wait here until the instances are up
- num_running = 0
- wait_timeout = time.time() + wait_timeout
- res_list = ()
- while wait_timeout > time.time() and num_running < len(instids):
- try:
- res_list = ec2.get_all_instances(instids)
- except boto.exception.BotoServerError as e:
- if e.error_code == 'InvalidInstanceID.NotFound':
- time.sleep(1)
- continue
- else:
- raise
-
- num_running = 0
- for res in res_list:
- num_running += len([i for i in res.instances if i.state == 'running'])
- if len(res_list) <= 0:
- # got a bad response of some sort, possibly due to
- # stale/cached data. Wait a second and then try again
- time.sleep(1)
- continue
- if wait and num_running < len(instids):
- time.sleep(5)
- else:
- break
-
- if wait and wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
-
- # We do this after the loop ends so that we end up with one list
- for res in res_list:
- running_instances.extend(res.instances)
-
-    # Enabled by default by AWS
-    if source_dest_check is False:
-        for inst in running_instances:
-            inst.modify_attribute('sourceDestCheck', False)
-
-    # Disabled by default by AWS
-    if termination_protection is True:
-        for inst in running_instances:
-            inst.modify_attribute('disableApiTermination', True)
-
- # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
- if instance_tags and instids:
- try:
- ec2.create_tags(instids, instance_tags)
- except boto.exception.EC2ResponseError as e:
- module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
-
- instance_dict_array = []
- created_instance_ids = []
- for inst in running_instances:
- inst.update()
- d = get_instance_info(inst)
- created_instance_ids.append(inst.id)
- instance_dict_array.append(d)
-
- return (instance_dict_array, created_instance_ids, changed)
-
-
-def terminate_instances(module, ec2, instance_ids):
- """
- Terminates a list of instances
-
- module: Ansible module object
- ec2: authenticated ec2 connection object
-    instance_ids: a list of instance ids to terminate in the form of
-        [ "i-xxxxxx", ..]
-
- Returns a dictionary of instance information
- about the instances terminated.
-
-    If none of the instances to be terminated are in a
-    running or stopped state, "changed" will be set to False.
-
- """
-
- # Whether to wait for termination to complete before returning
- wait = module.params.get('wait')
- wait_timeout = int(module.params.get('wait_timeout'))
-
- changed = False
- instance_dict_array = []
-
- if not isinstance(instance_ids, list) or len(instance_ids) < 1:
- module.fail_json(msg='instance_ids should be a list of instances, aborting')
-
- terminated_instance_ids = []
- for res in ec2.get_all_instances(instance_ids):
- for inst in res.instances:
- if inst.state == 'running' or inst.state == 'stopped':
- terminated_instance_ids.append(inst.id)
- instance_dict_array.append(get_instance_info(inst))
- try:
- ec2.terminate_instances([inst.id])
- except EC2ResponseError as e:
- module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
- changed = True
-
- # wait here until the instances are 'terminated'
- if wait:
- num_terminated = 0
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
- response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
- filters={'instance-state-name': 'terminated'})
- try:
- num_terminated = sum([len(res.instances) for res in response])
- except Exception as e:
- # got a bad response of some sort, possibly due to
- # stale/cached data. Wait a second and then try again
- time.sleep(1)
- continue
-
- if num_terminated < len(terminated_instance_ids):
- time.sleep(5)
-
- # waiting took too long
- if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
- module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
-        # Let's get the current state of the instances after terminating - issue600
- instance_dict_array = []
- for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}):
- for inst in res.instances:
- instance_dict_array.append(get_instance_info(inst))
-
- return (changed, instance_dict_array, terminated_instance_ids)
-
-
-def startstop_instances(module, ec2, instance_ids, state, instance_tags):
- """
- Starts or stops a list of existing instances
-
- module: Ansible module object
- ec2: authenticated ec2 connection object
-    instance_ids: The list of instance ids to start or stop in the form of
-      [ <inst-id>, ..]
- instance_tags: A dict of tag keys and values in the form of
- {key: value, ... }
- state: Intended state ("running" or "stopped")
-
- Returns a dictionary of instance information
- about the instances started/stopped.
-
- If the instance was not able to change state,
- "changed" will be set to False.
-
- Note that if instance_ids and instance_tags are both non-empty,
- this method will process the intersection of the two
- """
-
- wait = module.params.get('wait')
- wait_timeout = int(module.params.get('wait_timeout'))
- group_id = module.params.get('group_id')
- group_name = module.params.get('group')
- changed = False
- instance_dict_array = []
-
- if not isinstance(instance_ids, list) or len(instance_ids) < 1:
- # Fail unless the user defined instance tags
- if not instance_tags:
- module.fail_json(msg='instance_ids should be a list of instances, aborting')
-
- # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
- # An empty filter does no filtering, so it's safe to pass it to the
- # get_all_instances method even if the user did not specify instance_tags
- filters = {}
- if instance_tags:
- for key, value in instance_tags.items():
- filters["tag:" + key] = value
-
- if module.params.get('id'):
- filters['client-token'] = module.params['id']
-
-    # Check (and eventually change) instance attributes and instance state
- existing_instances_array = []
- for res in ec2.get_all_instances(instance_ids, filters=filters):
- for inst in res.instances:
-
- warn_if_public_ip_assignment_changed(module, inst)
-
- changed = (check_source_dest_attr(module, inst, ec2) or
- check_termination_protection(module, inst) or changed)
-
- # Check security groups and if we're using ec2-vpc; ec2-classic security groups may not be modified
- if inst.vpc_id and group_name:
- grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id})
- if isinstance(group_name, string_types):
- group_name = [group_name]
- unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details)
- if unmatched:
- module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
- group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name]
- elif inst.vpc_id and group_id:
- if isinstance(group_id, string_types):
- group_id = [group_id]
- grp_details = ec2.get_all_security_groups(group_ids=group_id)
- group_ids = [grp_item.id for grp_item in grp_details]
- if inst.vpc_id and (group_name or group_id):
- if set(sg.id for sg in inst.groups) != set(group_ids):
- changed = inst.modify_attribute('groupSet', group_ids)
-
- # Check instance state
- if inst.state != state:
- instance_dict_array.append(get_instance_info(inst))
- try:
- if state == 'running':
- inst.start()
- else:
- inst.stop()
- except EC2ResponseError as e:
- module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
- changed = True
- existing_instances_array.append(inst.id)
-
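-    # Union of ids matched via the tag filters and any ids passed in explicitly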
- instance_ids = list(set(existing_instances_array + (instance_ids or [])))
- # Wait for all the instances to finish starting or stopping
- wait_timeout = time.time() + wait_timeout
- while wait and wait_timeout > time.time():
- instance_dict_array = []
- matched_instances = []
- for res in ec2.get_all_instances(instance_ids):
- for i in res.instances:
- if i.state == state:
- instance_dict_array.append(get_instance_info(i))
- matched_instances.append(i)
- if len(matched_instances) < len(instance_ids):
- time.sleep(5)
- else:
- break
-
- if wait and wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
-
- return (changed, instance_dict_array, instance_ids)
-
-
-def restart_instances(module, ec2, instance_ids, state, instance_tags):
- """
- Restarts a list of existing instances
-
- module: Ansible module object
- ec2: authenticated ec2 connection object
-    instance_ids: The list of instance ids to restart in the form of
-      [ <inst-id>, ..]
- instance_tags: A dict of tag keys and values in the form of
- {key: value, ... }
- state: Intended state ("restarted")
-
- Returns a dictionary of instance information
- about the instances.
-
- If the instance was not able to change state,
- "changed" will be set to False.
-
-    Wait does not apply here, as this is an OS-level operation.
-
- Note that if instance_ids and instance_tags are both non-empty,
- this method will process the intersection of the two.
- """
-
- changed = False
- instance_dict_array = []
-
- if not isinstance(instance_ids, list) or len(instance_ids) < 1:
- # Fail unless the user defined instance tags
- if not instance_tags:
- module.fail_json(msg='instance_ids should be a list of instances, aborting')
-
- # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
- # An empty filter does no filtering, so it's safe to pass it to the
- # get_all_instances method even if the user did not specify instance_tags
- filters = {}
- if instance_tags:
- for key, value in instance_tags.items():
- filters["tag:" + key] = value
- if module.params.get('id'):
- filters['client-token'] = module.params['id']
-
-    # Check (and eventually change) instance attributes and instance state
- for res in ec2.get_all_instances(instance_ids, filters=filters):
- for inst in res.instances:
-
- warn_if_public_ip_assignment_changed(module, inst)
-
- changed = (check_source_dest_attr(module, inst, ec2) or
- check_termination_protection(module, inst) or changed)
-
- # Check instance state
- if inst.state != state:
- instance_dict_array.append(get_instance_info(inst))
- try:
- inst.reboot()
- except EC2ResponseError as e:
- module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
- changed = True
-
- return (changed, instance_dict_array, instance_ids)
-
-
-def check_termination_protection(module, inst):
- """
- Check the instance disableApiTermination attribute.
-
- module: Ansible module object
- inst: EC2 instance object
-
-    returns: True if state changed, None otherwise
- """
-
- termination_protection = module.params.get('termination_protection')
-
- if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
- inst.modify_attribute('disableApiTermination', termination_protection)
- return True
-
-
-def check_source_dest_attr(module, inst, ec2):
- """
- Check the instance sourceDestCheck attribute.
-
- module: Ansible module object
- inst: EC2 instance object
-
-    returns: True if state changed, None otherwise
- """
-
- source_dest_check = module.params.get('source_dest_check')
-
- if source_dest_check is not None:
- try:
- if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
- inst.modify_attribute('sourceDestCheck', source_dest_check)
- return True
- except boto.exception.EC2ResponseError as exc:
- # instances with more than one Elastic Network Interface will
- # fail, because they have the sourceDestCheck attribute defined
- # per-interface
- if exc.code == 'InvalidInstanceID':
- for interface in inst.interfaces:
- if interface.source_dest_check != source_dest_check:
- ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
- return True
- else:
- module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
- exception=traceback.format_exc())
-
-
-def warn_if_public_ip_assignment_changed(module, instance):
- # This is a non-modifiable attribute.
- assign_public_ip = module.params.get('assign_public_ip')
-
- # Check that public ip assignment is the same and warn if not
- public_dns_name = getattr(instance, 'public_dns_name', None)
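-    # This warns in exactly two cases: a public IP was requested but none is
-    # present, or one is present while assign_public_ip is explicitly False.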
- if (assign_public_ip or public_dns_name) and (not public_dns_name or assign_public_ip is False):
- module.warn("Unable to modify public ip assignment to {0} for instance {1}. "
- "Whether or not to assign a public IP is determined during instance creation.".format(assign_public_ip, instance.id))
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- key_name=dict(aliases=['keypair']),
- id=dict(),
- group=dict(type='list', aliases=['groups']),
- group_id=dict(type='list'),
- zone=dict(aliases=['aws_zone', 'ec2_zone']),
- instance_type=dict(aliases=['type']),
- spot_price=dict(),
- spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
- spot_launch_group=dict(),
- image=dict(),
- kernel=dict(),
-            count=dict(type='int', default=1),
- monitoring=dict(type='bool', default=False),
- ramdisk=dict(),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- spot_wait_timeout=dict(type='int', default=600),
- placement_group=dict(),
- user_data=dict(),
- instance_tags=dict(type='dict'),
- vpc_subnet_id=dict(),
- assign_public_ip=dict(type='bool'),
- private_ip=dict(),
- instance_profile_name=dict(),
- instance_ids=dict(type='list', aliases=['instance_id']),
- source_dest_check=dict(type='bool', default=None),
- termination_protection=dict(type='bool', default=None),
- state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
- instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']),
- exact_count=dict(type='int', default=None),
- count_tag=dict(type='raw'),
- volumes=dict(type='list'),
- ebs_optimized=dict(type='bool', default=False),
- tenancy=dict(default='default', choices=['default', 'dedicated']),
- network_interfaces=dict(type='list', aliases=['network_interface'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- mutually_exclusive=[
- # Can be uncommented when we finish the deprecation cycle.
- # ['group', 'group_id'],
- ['exact_count', 'count'],
- ['exact_count', 'state'],
- ['exact_count', 'instance_ids'],
- ['network_interfaces', 'assign_public_ip'],
- ['network_interfaces', 'group'],
- ['network_interfaces', 'group_id'],
- ['network_interfaces', 'private_ip'],
- ['network_interfaces', 'vpc_subnet_id'],
- ],
- )
-
- if module.params.get('group') and module.params.get('group_id'):
- module.deprecate(
- msg='Support for passing both group and group_id has been deprecated. '
-                'Currently group_id is ignored; in the future, passing both will result in an error',
- version='2.14')
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
- if module.params.get('region') or not module.params.get('ec2_url'):
- ec2 = ec2_connect(module)
- elif module.params.get('ec2_url'):
- ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
-
- if 'region' not in aws_connect_kwargs:
- aws_connect_kwargs['region'] = ec2.region
-
- vpc = connect_vpc(**aws_connect_kwargs)
- except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc())
-
- tagged_instances = []
-
- state = module.params['state']
-
- if state == 'absent':
- instance_ids = module.params['instance_ids']
- if not instance_ids:
- module.fail_json(msg='instance_ids list is required for absent state')
-
- (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
-
- elif state in ('running', 'stopped'):
- instance_ids = module.params.get('instance_ids')
- instance_tags = module.params.get('instance_tags')
- if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
-            module.fail_json(msg='instance_ids must be a list of instance ids, or instance_tags a dict of tags, to change instance state: %s' % instance_ids)
-
- (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
-
-    elif state == 'restarted':
- instance_ids = module.params.get('instance_ids')
- instance_tags = module.params.get('instance_tags')
- if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
-            module.fail_json(msg='instance_ids must be a list of instance ids, or instance_tags a dict of tags, to change instance state: %s' % instance_ids)
-
- (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
-
- elif state == 'present':
- # Changed is always set to true when provisioning new instances
- if not module.params.get('image'):
- module.fail_json(msg='image parameter is required for new instance')
-
- if module.params.get('exact_count') is None:
- (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
- else:
- (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
-
- # Always return instances in the same order
- if new_instance_ids:
- new_instance_ids.sort()
- if instance_dict_array:
- instance_dict_array.sort(key=lambda x: x['id'])
- if tagged_instances:
- tagged_instances.sort(key=lambda x: x['id'])
-
- module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_ami.py b/lib/ansible/modules/cloud/amazon/ec2_ami.py
deleted file mode 100644
index ec4e46790c..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_ami.py
+++ /dev/null
@@ -1,738 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_ami
-version_added: "1.3"
-short_description: Create or destroy an image (AMI) in ec2
-description:
- - Registers or deregisters ec2 images.
-options:
- instance_id:
- description:
- - Instance ID to create the AMI from.
- type: str
- name:
- description:
- - The name of the new AMI.
- type: str
- architecture:
- version_added: "2.3"
- description:
-      - The target architecture of the image to register.
- default: "x86_64"
- type: str
- kernel_id:
- version_added: "2.3"
- description:
- - The target kernel id of the image to register.
- type: str
- virtualization_type:
- version_added: "2.3"
- description:
- - The virtualization type of the image to register.
- default: "hvm"
- type: str
- root_device_name:
- version_added: "2.3"
- description:
- - The root device name of the image to register.
- type: str
- wait:
- description:
- - Wait for the AMI to be in state 'available' before returning.
- default: false
- type: bool
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- default: 900
- type: int
- state:
- description:
- - Register or deregister an AMI.
- default: 'present'
- choices: [ "absent", "present" ]
- type: str
- description:
- description:
- - Human-readable string describing the contents and purpose of the AMI.
- type: str
- no_reboot:
- description:
-      - Flag indicating that the bundling process should not attempt to shut down the instance before bundling. If this flag is True, the
-        responsibility of maintaining file system integrity is left to the owner of the instance.
- default: false
- type: bool
- image_id:
- description:
- - Image ID to be deregistered.
- type: str
- device_mapping:
- version_added: "2.0"
- description:
- - List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters).
- type: list
- elements: dict
- suboptions:
- device_name:
- type: str
- description: The device name. For example C(/dev/sda).
- volume_type:
- type: str
- description: The volume type. Defaults to C(gp2) when not set.
- delete_on_termination:
- type: bool
- description: Whether the device should be automatically deleted when the Instance is terminated.
- no_device:
- type: bool
- description: Suppresses the specified device included in the block device mapping of the AMI.
- snapshot_id:
- type: str
- description: The ID of the Snapshot.
- iops:
- type: int
- description: When using an C(io1) I(volume_type) this sets the number of IOPS provisioned for the volume
- encrypted:
- type: bool
- description: Whether the volume should be encrypted.
- volume_size:
- aliases: ['size']
- type: int
- description: The size of the volume (in GiB)
- delete_snapshot:
- description:
- - Delete snapshots when deregistering the AMI.
- default: false
- type: bool
- tags:
- description:
-      - A dictionary of tags to add to the new image; for example, '{"key":"value"}' or '{"key1":"value1","key2":"value2"}'
- version_added: "2.0"
- type: dict
- purge_tags:
- description: Whether to remove existing tags that aren't passed in the C(tags) parameter
- version_added: "2.5"
- default: false
- type: bool
- launch_permissions:
- description:
- - Users and groups that should be able to launch the AMI. Expects dictionary with a key of user_ids and/or group_names. user_ids should
-        be a list of account ids. group_names should be a list of groups; "all" is the only acceptable value currently.
- - You must pass all desired launch permissions if you wish to modify existing launch permissions (passing just groups will remove all users)
- version_added: "2.0"
- type: dict
- image_location:
- description:
- - The s3 location of an image to use for the AMI.
- version_added: "2.5"
- type: str
- enhanced_networking:
- description:
- - A boolean representing whether enhanced networking with ENA is enabled or not.
- version_added: "2.5"
- type: bool
- billing_products:
- description:
- - A list of valid billing codes. To be used with valid accounts by aws marketplace vendors.
- version_added: "2.5"
- type: list
- elements: str
- ramdisk_id:
- description:
- - The ID of the RAM disk.
- version_added: "2.5"
- type: str
- sriov_net_support:
- description:
- - Set to simple to enable enhanced networking with the Intel 82599 Virtual Function interface for the AMI and any instances that you launch from the AMI.
- version_added: "2.5"
- type: str
-author:
- - "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
- - "Constantin Bugneac (@Constantin07) <constantin.bugneac@endava.com>"
- - "Ross Williams (@gunzy83) <gunzy83au@gmail.com>"
- - "Willem van Ketwich (@wilvk) <willvk@gmail.com>"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-# Thank you to iAcquire for sponsoring development of this module.
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Basic AMI Creation
-- ec2_ami:
- instance_id: i-xxxxxx
- wait: yes
- name: newtest
- tags:
- Name: newtest
- Service: TestService
-
-# Basic AMI Creation, without waiting
-- ec2_ami:
- instance_id: i-xxxxxx
- wait: no
- name: newtest
-
-# AMI Registration from EBS Snapshot
-- ec2_ami:
- name: newtest
- state: present
- architecture: x86_64
- virtualization_type: hvm
- root_device_name: /dev/xvda
- device_mapping:
- - device_name: /dev/xvda
- volume_size: 8
- snapshot_id: snap-xxxxxxxx
- delete_on_termination: true
- volume_type: gp2
-
-# AMI Creation, with a custom root-device size and another EBS attached
-- ec2_ami:
- instance_id: i-xxxxxx
- name: newtest
- device_mapping:
- - device_name: /dev/sda1
- size: XXX
- delete_on_termination: true
- volume_type: gp2
- - device_name: /dev/sdb
- size: YYY
- delete_on_termination: false
- volume_type: gp2
-
-# AMI Creation, excluding a volume attached at /dev/sdb
-- ec2_ami:
- instance_id: i-xxxxxx
- name: newtest
- device_mapping:
- - device_name: /dev/sda1
- size: XXX
- delete_on_termination: true
- volume_type: gp2
- - device_name: /dev/sdb
- no_device: yes
-
-# Deregister/Delete AMI (keep associated snapshots)
-- ec2_ami:
- image_id: "{{ instance.image_id }}"
- delete_snapshot: False
- state: absent
-
-# Deregister AMI (delete associated snapshots too)
-- ec2_ami:
- image_id: "{{ instance.image_id }}"
- delete_snapshot: True
- state: absent
-
-# Update AMI Launch Permissions, making it public
-- ec2_ami:
- image_id: "{{ instance.image_id }}"
- state: present
- launch_permissions:
- group_names: ['all']
-
-# Allow AMI to be launched by another account
-- ec2_ami:
- image_id: "{{ instance.image_id }}"
- state: present
- launch_permissions:
- user_ids: ['123456789012']
-'''
-
-RETURN = '''
-architecture:
- description: Architecture of image.
- returned: when AMI is created or already exists
- type: str
- sample: "x86_64"
-block_device_mapping:
- description: Block device mapping associated with image.
- returned: when AMI is created or already exists
- type: dict
- sample: {
- "/dev/sda1": {
- "delete_on_termination": true,
- "encrypted": false,
- "size": 10,
- "snapshot_id": "snap-1a03b80e7",
- "volume_type": "standard"
- }
- }
-creationDate:
- description: Creation date of image.
- returned: when AMI is created or already exists
- type: str
- sample: "2015-10-15T22:43:44.000Z"
-description:
- description: Description of image.
- returned: when AMI is created or already exists
- type: str
- sample: "nat-server"
-hypervisor:
- description: Type of hypervisor.
- returned: when AMI is created or already exists
- type: str
- sample: "xen"
-image_id:
- description: ID of the image.
- returned: when AMI is created or already exists
- type: str
- sample: "ami-1234abcd"
-is_public:
- description: Whether image is public.
- returned: when AMI is created or already exists
- type: bool
- sample: false
-launch_permission:
- description: Permissions allowing other accounts to access the AMI.
- returned: when AMI is created or already exists
- type: list
- sample:
- - group: "all"
-location:
- description: Location of image.
- returned: when AMI is created or already exists
- type: str
- sample: "315210894379/nat-server"
-name:
- description: AMI name of image.
- returned: when AMI is created or already exists
- type: str
- sample: "nat-server"
-ownerId:
- description: Owner of image.
- returned: when AMI is created or already exists
- type: str
- sample: "435210894375"
-platform:
- description: Platform of image.
- returned: when AMI is created or already exists
- type: str
- sample: null
-root_device_name:
- description: Root device name of image.
- returned: when AMI is created or already exists
- type: str
- sample: "/dev/sda1"
-root_device_type:
- description: Root device type of image.
- returned: when AMI is created or already exists
- type: str
- sample: "ebs"
-state:
- description: State of image.
- returned: when AMI is created or already exists
- type: str
- sample: "available"
-tags:
- description: A dictionary of tags assigned to image.
- returned: when AMI is created or already exists
- type: dict
- sample: {
- "Env": "devel",
- "Name": "nat-server"
- }
-virtualization_type:
- description: Image virtualization type.
- returned: when AMI is created or already exists
- type: str
- sample: "hvm"
-snapshots_deleted:
- description: A list of snapshot ids deleted after deregistering image.
- returned: after AMI is deregistered, if I(delete_snapshot=true)
- type: list
- sample: [
- "snap-fbcccb8f",
- "snap-cfe7cdb4"
- ]
-'''
-
-import time
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, compare_aws_tags
-from ansible.module_utils.aws.core import AnsibleAWSModule
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def get_block_device_mapping(image):
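-    # Convert a boto3-style block_device_mappings list into a dict keyed by
-    # device name, e.g. (hypothetical values):
-    # {'/dev/sda1': {'size': 8, 'snapshot_id': 'snap-0abc', 'volume_type': 'gp2',
-    #                'encrypted': False, 'delete_on_termination': True}}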
- bdm_dict = dict()
- if image is not None and image.get('block_device_mappings') is not None:
- bdm = image.get('block_device_mappings')
- for device in bdm:
- device_name = device.get('device_name')
- if 'ebs' in device:
- ebs = device.get("ebs")
- bdm_dict_item = {
- 'size': ebs.get("volume_size"),
- 'snapshot_id': ebs.get("snapshot_id"),
- 'volume_type': ebs.get("volume_type"),
- 'encrypted': ebs.get("encrypted"),
- 'delete_on_termination': ebs.get("delete_on_termination")
- }
- elif 'virtual_name' in device:
- bdm_dict_item = dict(virtual_name=device['virtual_name'])
- bdm_dict[device_name] = bdm_dict_item
- return bdm_dict
-
-
-def get_ami_info(camel_image):
- image = camel_dict_to_snake_dict(camel_image)
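-    # camel_dict_to_snake_dict converts keys such as 'ImageId' and 'OwnerId'
-    # into 'image_id' and 'owner_id'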
- return dict(
- image_id=image.get("image_id"),
- state=image.get("state"),
- architecture=image.get("architecture"),
- block_device_mapping=get_block_device_mapping(image),
- creationDate=image.get("creation_date"),
- description=image.get("description"),
- hypervisor=image.get("hypervisor"),
- is_public=image.get("public"),
- location=image.get("image_location"),
- ownerId=image.get("owner_id"),
- root_device_name=image.get("root_device_name"),
- root_device_type=image.get("root_device_type"),
- virtualization_type=image.get("virtualization_type"),
- name=image.get("name"),
- tags=boto3_tag_list_to_ansible_dict(image.get('tags')),
- platform=image.get("platform"),
- enhanced_networking=image.get("ena_support"),
- image_owner_alias=image.get("image_owner_alias"),
- image_type=image.get("image_type"),
- kernel_id=image.get("kernel_id"),
- product_codes=image.get("product_codes"),
- ramdisk_id=image.get("ramdisk_id"),
- sriov_net_support=image.get("sriov_net_support"),
- state_reason=image.get("state_reason"),
- launch_permissions=image.get('launch_permissions')
- )
-
-
-def create_image(module, connection):
- instance_id = module.params.get('instance_id')
- name = module.params.get('name')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- description = module.params.get('description')
- architecture = module.params.get('architecture')
- kernel_id = module.params.get('kernel_id')
- root_device_name = module.params.get('root_device_name')
- virtualization_type = module.params.get('virtualization_type')
- no_reboot = module.params.get('no_reboot')
- device_mapping = module.params.get('device_mapping')
- tags = module.params.get('tags')
- launch_permissions = module.params.get('launch_permissions')
- image_location = module.params.get('image_location')
- enhanced_networking = module.params.get('enhanced_networking')
- billing_products = module.params.get('billing_products')
- ramdisk_id = module.params.get('ramdisk_id')
- sriov_net_support = module.params.get('sriov_net_support')
-
- try:
- params = {
- 'Name': name,
- 'Description': description
- }
-
- block_device_mapping = None
-
- if device_mapping:
- block_device_mapping = []
- for device in device_mapping:
- device['Ebs'] = {}
- if 'device_name' not in device:
- module.fail_json(msg="Error - Device name must be set for volume.")
- device = rename_item_if_exists(device, 'device_name', 'DeviceName')
- device = rename_item_if_exists(device, 'virtual_name', 'VirtualName')
- device = rename_item_if_exists(device, 'no_device', 'NoDevice')
- device = rename_item_if_exists(device, 'volume_type', 'VolumeType', 'Ebs')
- device = rename_item_if_exists(device, 'snapshot_id', 'SnapshotId', 'Ebs')
- device = rename_item_if_exists(device, 'delete_on_termination', 'DeleteOnTermination', 'Ebs')
- device = rename_item_if_exists(device, 'size', 'VolumeSize', 'Ebs', attribute_type=int)
- device = rename_item_if_exists(device, 'volume_size', 'VolumeSize', 'Ebs', attribute_type=int)
- device = rename_item_if_exists(device, 'iops', 'Iops', 'Ebs')
- device = rename_item_if_exists(device, 'encrypted', 'Encrypted', 'Ebs')
- block_device_mapping.append(device)
- if block_device_mapping:
- params['BlockDeviceMappings'] = block_device_mapping
- if instance_id:
- params['InstanceId'] = instance_id
- params['NoReboot'] = no_reboot
- image_id = connection.create_image(**params).get('ImageId')
- else:
- if architecture:
- params['Architecture'] = architecture
- if virtualization_type:
- params['VirtualizationType'] = virtualization_type
- if image_location:
- params['ImageLocation'] = image_location
- if enhanced_networking:
- params['EnaSupport'] = enhanced_networking
- if billing_products:
- params['BillingProducts'] = billing_products
- if ramdisk_id:
- params['RamdiskId'] = ramdisk_id
- if sriov_net_support:
- params['SriovNetSupport'] = sriov_net_support
- if kernel_id:
- params['KernelId'] = kernel_id
- if root_device_name:
- params['RootDeviceName'] = root_device_name
- image_id = connection.register_image(**params).get('ImageId')
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Error registering image")
-
- if wait:
- waiter = connection.get_waiter('image_available')
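-        # Spread the requested timeout over 30 polls, so that
-        # delay * max_attempts approximates wait_timeout seconds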
- delay = wait_timeout // 30
- max_attempts = 30
- waiter.wait(ImageIds=[image_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts))
-
- if tags:
- try:
- connection.create_tags(Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags))
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Error tagging image")
-
- if launch_permissions:
- try:
- params = dict(Attribute='LaunchPermission', ImageId=image_id, LaunchPermission=dict(Add=list()))
- for group_name in launch_permissions.get('group_names', []):
- params['LaunchPermission']['Add'].append(dict(Group=group_name))
- for user_id in launch_permissions.get('user_ids', []):
- params['LaunchPermission']['Add'].append(dict(UserId=str(user_id)))
- if params['LaunchPermission']['Add']:
- connection.modify_image_attribute(**params)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Error setting launch permissions for image %s" % image_id)
-
- module.exit_json(msg="AMI creation operation complete.", changed=True,
- **get_ami_info(get_image_by_id(module, connection, image_id)))
-
-
-def deregister_image(module, connection):
- image_id = module.params.get('image_id')
- delete_snapshot = module.params.get('delete_snapshot')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- image = get_image_by_id(module, connection, image_id)
-
- if image is None:
- module.exit_json(changed=False)
-
-    # Get all associated snapshot ids before deregistering the image, otherwise this information becomes unavailable.
- snapshots = []
- if 'BlockDeviceMappings' in image:
- for mapping in image.get('BlockDeviceMappings'):
- snapshot_id = mapping.get('Ebs', {}).get('SnapshotId')
- if snapshot_id is not None:
- snapshots.append(snapshot_id)
-
-    # Deregistering an already deregistered image does not raise an exception; it just returns an object without image attributes.
- if 'ImageId' in image:
- try:
- connection.deregister_image(ImageId=image_id)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Error deregistering image")
- else:
- module.exit_json(msg="Image %s has already been deregistered." % image_id, changed=False)
-
- image = get_image_by_id(module, connection, image_id)
- wait_timeout = time.time() + wait_timeout
-
- while wait and wait_timeout > time.time() and image is not None:
- image = get_image_by_id(module, connection, image_id)
- time.sleep(3)
-
- if wait and wait_timeout <= time.time():
- module.fail_json(msg="Timed out waiting for image to be deregistered.")
-
- exit_params = {'msg': "AMI deregister operation complete.", 'changed': True}
-
- if delete_snapshot:
- try:
- for snapshot_id in snapshots:
- connection.delete_snapshot(SnapshotId=snapshot_id)
-        except botocore.exceptions.ClientError as e:
-            # Don't error out if the snapshot was already deleted as part of
-            # deregister_image, but fail on any other error
-            if e.response['Error']['Code'] != 'InvalidSnapshot.NotFound':
-                module.fail_json_aws(e, msg="Failed to delete snapshot.")
- exit_params['snapshots_deleted'] = snapshots
-
- module.exit_json(**exit_params)
-
-
-def update_image(module, connection, image_id):
- launch_permissions = module.params.get('launch_permissions')
- image = get_image_by_id(module, connection, image_id)
- if image is None:
- module.fail_json(msg="Image %s does not exist" % image_id, changed=False)
- changed = False
-
- if launch_permissions is not None:
- current_permissions = image['LaunchPermissions']
-
- current_users = set(permission['UserId'] for permission in current_permissions if 'UserId' in permission)
- desired_users = set(str(user_id) for user_id in launch_permissions.get('user_ids', []))
- current_groups = set(permission['Group'] for permission in current_permissions if 'Group' in permission)
- desired_groups = set(launch_permissions.get('group_names', []))
-
- to_add_users = desired_users - current_users
- to_remove_users = current_users - desired_users
- to_add_groups = desired_groups - current_groups
- to_remove_groups = current_groups - desired_groups
-
- to_add = [dict(Group=group) for group in to_add_groups] + [dict(UserId=user_id) for user_id in to_add_users]
- to_remove = [dict(Group=group) for group in to_remove_groups] + [dict(UserId=user_id) for user_id in to_remove_users]
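-        # e.g. (hypothetical ids) current_users={'111111111111'} and
-        # desired_users={'222222222222'} yield to_add=[{'UserId': '222222222222'}]
-        # and to_remove=[{'UserId': '111111111111'}]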
-
- if to_add or to_remove:
- try:
- connection.modify_image_attribute(ImageId=image_id, Attribute='launchPermission',
- LaunchPermission=dict(Add=to_add, Remove=to_remove))
- changed = True
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Error updating launch permissions of image %s" % image_id)
-
- desired_tags = module.params.get('tags')
- if desired_tags is not None:
- current_tags = boto3_tag_list_to_ansible_dict(image.get('Tags'))
- tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, purge_tags=module.params.get('purge_tags'))
-
- if tags_to_remove:
- try:
- connection.delete_tags(Resources=[image_id], Tags=[dict(Key=tagkey) for tagkey in tags_to_remove])
- changed = True
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Error updating tags")
-
- if tags_to_add:
- try:
- connection.create_tags(Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags_to_add))
- changed = True
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Error updating tags")
-
- description = module.params.get('description')
- if description and description != image['Description']:
- try:
-            connection.modify_image_attribute(Attribute='Description', ImageId=image_id, Description=dict(Value=description))
- changed = True
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Error setting description for image %s" % image_id)
-
- if changed:
- module.exit_json(msg="AMI updated.", changed=True,
- **get_ami_info(get_image_by_id(module, connection, image_id)))
- else:
- module.exit_json(msg="AMI not updated.", changed=False,
- **get_ami_info(get_image_by_id(module, connection, image_id)))
-
-
-def get_image_by_id(module, connection, image_id):
- try:
- try:
- images_response = connection.describe_images(ImageIds=[image_id])
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Error retrieving image %s" % image_id)
- images = images_response.get('Images')
- no_images = len(images)
- if no_images == 0:
- return None
- if no_images == 1:
- result = images[0]
- try:
- result['LaunchPermissions'] = connection.describe_image_attribute(Attribute='launchPermission', ImageId=image_id)['LaunchPermissions']
- result['ProductCodes'] = connection.describe_image_attribute(Attribute='productCodes', ImageId=image_id)['ProductCodes']
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] != 'InvalidAMIID.Unavailable':
- module.fail_json_aws(e, msg="Error retrieving image attributes for image %s" % image_id)
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Error retrieving image attributes for image %s" % image_id)
- return result
- module.fail_json(msg="Invalid number of instances (%s) found for image_id: %s." % (str(len(images)), image_id))
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Error retrieving image by image_id")
-
-
-def rename_item_if_exists(dict_object, attribute, new_attribute, child_node=None, attribute_type=None):
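-    """
-    Rename dict_object[attribute] to new_attribute, optionally casting the
-    value with attribute_type and nesting it under child_node.
-
-    For example (hypothetical input):
-    rename_item_if_exists({'Ebs': {}, 'size': '10'}, 'size', 'VolumeSize', 'Ebs', int)
-    returns {'Ebs': {'VolumeSize': 10}}
-    """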
- new_item = dict_object.get(attribute)
- if new_item is not None:
- if attribute_type is not None:
- new_item = attribute_type(new_item)
- if child_node is None:
- dict_object[new_attribute] = new_item
- else:
- dict_object[child_node][new_attribute] = new_item
- dict_object.pop(attribute)
- return dict_object
-
-
-def main():
- argument_spec = dict(
- instance_id=dict(),
- image_id=dict(),
- architecture=dict(default='x86_64'),
- kernel_id=dict(),
- virtualization_type=dict(default='hvm'),
- root_device_name=dict(),
- delete_snapshot=dict(default=False, type='bool'),
- name=dict(),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(default=900, type='int'),
- description=dict(default=''),
- no_reboot=dict(default=False, type='bool'),
- state=dict(default='present', choices=['present', 'absent']),
- device_mapping=dict(type='list'),
- tags=dict(type='dict'),
- launch_permissions=dict(type='dict'),
- image_location=dict(),
- enhanced_networking=dict(type='bool'),
- billing_products=dict(type='list'),
- ramdisk_id=dict(),
- sriov_net_support=dict(),
- purge_tags=dict(type='bool', default=False)
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- required_if=[
- ['state', 'absent', ['image_id']],
- ]
- )
-
- # Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by
- # the required_if for state=absent, so check manually instead
- if not any([module.params['image_id'], module.params['name']]):
- module.fail_json(msg="one of the following is required: name, image_id")
-
- connection = module.client('ec2')
-
- if module.params.get('state') == 'absent':
- deregister_image(module, connection)
- elif module.params.get('state') == 'present':
- if module.params.get('image_id'):
- update_image(module, connection, module.params.get('image_id'))
- if not module.params.get('instance_id') and not module.params.get('device_mapping'):
- module.fail_json(msg="The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image.")
- create_image(module, connection)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_ami_info.py b/lib/ansible/modules/cloud/amazon/ec2_ami_info.py
deleted file mode 100644
index 41e1aa83f9..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_ami_info.py
+++ /dev/null
@@ -1,281 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_ami_info
-version_added: '2.5'
-short_description: Gather information about ec2 AMIs
-description:
- - Gather information about ec2 AMIs
- - This module was called C(ec2_ami_facts) before Ansible 2.9. The usage did not change.
-author:
- - Prasad Katti (@prasadkatti)
-requirements: [ boto3 ]
-options:
- image_ids:
- description: One or more image IDs.
- aliases: [image_id]
- type: list
- elements: str
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters.
- - Filter names and values are case sensitive.
- type: dict
- owners:
- description:
- - Filter the images by the owner. Valid options are an AWS account ID, self,
- or an AWS owner alias ( amazon | aws-marketplace | microsoft ).
- aliases: [owner]
- type: list
- elements: str
- executable_users:
- description:
- - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs).
- aliases: [executable_user]
- type: list
- elements: str
- describe_image_attributes:
- description:
- - Describe attributes (like launchPermission) of the images found.
- default: no
- type: bool
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: gather information about an AMI using ami-id
- ec2_ami_info:
- image_ids: ami-5b488823
-
-- name: gather information about all AMIs with tag key Name and value webapp
- ec2_ami_info:
- filters:
- "tag:Name": webapp
-
-- name: gather information about an AMI with 'AMI Name' equal to foobar
- ec2_ami_info:
- filters:
- name: foobar
-
-- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477)
- ec2_ami_info:
- owners: 099720109477
- filters:
- name: "ubuntu/images/ubuntu-zesty-17.04-*"
-'''
-
-RETURN = '''
-images:
- description: A list of images.
- returned: always
- type: list
- elements: dict
- contains:
- architecture:
- description: The architecture of the image.
- returned: always
- type: str
- sample: x86_64
- block_device_mappings:
- description: Any block device mapping entries.
- returned: always
- type: list
- elements: dict
- contains:
- device_name:
- description: The device name exposed to the instance.
- returned: always
- type: str
- sample: /dev/sda1
- ebs:
- description: EBS volumes
- returned: always
- type: complex
- creation_date:
- description: The date and time the image was created.
- returned: always
- type: str
- sample: '2017-10-16T19:22:13.000Z'
- description:
- description: The description of the AMI.
- returned: always
- type: str
- sample: ''
- ena_support:
- description: Whether enhanced networking with ENA is enabled.
- returned: always
- type: bool
- sample: true
- hypervisor:
- description: The hypervisor type of the image.
- returned: always
- type: str
- sample: xen
- image_id:
- description: The ID of the AMI.
- returned: always
- type: str
- sample: ami-5b466623
- image_location:
- description: The location of the AMI.
- returned: always
- type: str
- sample: 408466080000/Webapp
- image_type:
- description: The type of image.
- returned: always
- type: str
- sample: machine
- launch_permissions:
-      description: A list of AWS accounts that may launch the AMI.
- returned: When image is owned by calling account and I(describe_image_attributes) is yes.
- type: list
- elements: dict
- contains:
- group:
- description: A value of 'all' means the AMI is public.
- type: str
- user_id:
- description: An AWS account ID with permissions to launch the AMI.
- type: str
- sample: [{"group": "all"}, {"user_id": "408466080000"}]
- name:
- description: The name of the AMI that was provided during image creation.
- returned: always
- type: str
- sample: Webapp
- owner_id:
- description: The AWS account ID of the image owner.
- returned: always
- type: str
- sample: '408466080000'
- public:
- description: Whether the image has public launch permissions.
- returned: always
- type: bool
- sample: true
- root_device_name:
- description: The device name of the root device.
- returned: always
- type: str
- sample: /dev/sda1
- root_device_type:
- description: The type of root device used by the AMI.
- returned: always
- type: str
- sample: ebs
- sriov_net_support:
- description: Whether enhanced networking is enabled.
- returned: always
- type: str
- sample: simple
- state:
- description: The current state of the AMI.
- returned: always
- type: str
- sample: available
- tags:
- description: Any tags assigned to the image.
- returned: always
- type: dict
- virtualization_type:
- description: The type of virtualization of the AMI.
- returned: always
- type: str
- sample: hvm
-'''
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
-
-
-def list_ec2_images(ec2_client, module):
-
- image_ids = module.params.get("image_ids")
- owners = module.params.get("owners")
- executable_users = module.params.get("executable_users")
- filters = module.params.get("filters")
- owner_param = []
-
- # describe_images is *very* slow if you pass the `Owners`
- # param (unless it's self), for some reason.
- # Converting the owners to filters and removing from the
- # owners param greatly speeds things up.
- # Implementation based on aioue's suggestion in #24886
- for owner in owners:
- if owner.isdigit():
- if 'owner-id' not in filters:
- filters['owner-id'] = list()
- filters['owner-id'].append(owner)
- elif owner == 'self':
- # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
- owner_param.append(owner)
- else:
- if 'owner-alias' not in filters:
- filters['owner-alias'] = list()
- filters['owner-alias'].append(owner)
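-    # e.g. owners=['099720109477', 'self', 'amazon'] leaves owner_param=['self']
-    # and adds the filters owner-id=['099720109477'] and owner-alias=['amazon']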
-
- filters = ansible_dict_to_boto3_filter_list(filters)
-
- try:
- images = ec2_client.describe_images(ImageIds=image_ids, Filters=filters, Owners=owner_param, ExecutableUsers=executable_users)
- images = [camel_dict_to_snake_dict(image) for image in images["Images"]]
- except (ClientError, BotoCoreError) as err:
- module.fail_json_aws(err, msg="error describing images")
- for image in images:
- try:
- image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))
- if module.params.get("describe_image_attributes"):
- launch_permissions = ec2_client.describe_image_attribute(Attribute='launchPermission', ImageId=image['image_id'])['LaunchPermissions']
- image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]
- except (ClientError, BotoCoreError) as err:
- # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures
- pass
-
-    images.sort(key=lambda e: e.get('creation_date', ''))  # creation_date may be missing on some images
- module.exit_json(images=images)
-
-
-def main():
-
- argument_spec = dict(
- image_ids=dict(default=[], type='list', aliases=['image_id']),
- filters=dict(default={}, type='dict'),
- owners=dict(default=[], type='list', aliases=['owner']),
- executable_users=dict(default=[], type='list', aliases=['executable_user']),
- describe_image_attributes=dict(default=False, type='bool')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._module._name == 'ec2_ami_facts':
- module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'", version='2.13')
-
- ec2_client = module.client('ec2')
-
- list_ec2_images(ec2_client, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_elb_lb.py b/lib/ansible/modules/cloud/amazon/ec2_elb_lb.py
deleted file mode 100644
index 0e7ed5456e..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_elb_lb.py
+++ /dev/null
@@ -1,1365 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: ec2_elb_lb
-description:
- - Returns information about the load balancer.
-  - Will be marked changed only if the state of the load balancer changed.
-short_description: Creates, updates or destroys an Amazon ELB.
-version_added: "1.5"
-author:
- - "Jim Dalton (@jsdalton)"
-options:
- state:
- description:
- - Create or destroy the ELB
- type: str
- choices: [ absent, present ]
- required: true
- name:
- description:
- - The name of the ELB
- type: str
- required: true
- listeners:
- description:
- - List of ports/protocols for this ELB to listen on (see example)
- type: list
- purge_listeners:
- description:
- - Purge existing listeners on ELB that are not found in listeners
- type: bool
- default: yes
- instance_ids:
- description:
- - List of instance ids to attach to this ELB
- type: list
- version_added: "2.1"
- purge_instance_ids:
- description:
- - Purge existing instance ids on ELB that are not found in instance_ids
- type: bool
- default: no
- version_added: "2.1"
- zones:
- description:
- - List of availability zones to enable on this ELB
- type: list
- purge_zones:
- description:
- - Purge existing availability zones on ELB that are not found in zones
- type: bool
- default: no
- security_group_ids:
- description:
- - A list of security groups to apply to the elb
- type: list
- version_added: "1.6"
- security_group_names:
- description:
- - A list of security group names to apply to the elb
- type: list
- version_added: "2.0"
- health_check:
- description:
- - An associative array of health check configuration settings (see example)
- type: dict
- access_logs:
- description:
- - An associative array of access logs configuration settings (see example)
- type: dict
- version_added: "2.0"
- subnets:
- description:
- - A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
- type: list
- version_added: "1.7"
- purge_subnets:
- description:
-      - Purge existing subnets on ELB that are not found in subnets
- type: bool
- default: no
- version_added: "1.7"
- scheme:
- description:
- - The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
-        If you choose to update your scheme with a different value, the ELB will be destroyed and
-        recreated. To update the scheme you must use the wait option.
- type: str
- choices: ["internal", "internet-facing"]
- default: 'internet-facing'
- version_added: "1.7"
- validate_certs:
- description:
- - When set to C(no), SSL certificates will not be validated for boto versions >= 2.6.0.
- type: bool
- default: yes
- version_added: "1.5"
- connection_draining_timeout:
- description:
- - Wait a specified timeout allowing connections to drain before terminating an instance
- type: int
- version_added: "1.8"
- idle_timeout:
- description:
- - ELB connections from clients and to servers are timed out after this amount of time
- type: int
- version_added: "2.0"
- cross_az_load_balancing:
- description:
- - Distribute load across all configured Availability Zones
- type: bool
- default: no
- version_added: "1.8"
- stickiness:
- description:
-      - An associative array of stickiness policy settings. Policy will be applied to all listeners (see example).
- type: dict
- version_added: "2.0"
- wait:
- description:
- - When specified, Ansible will check the status of the load balancer to ensure it has been successfully
- removed from AWS.
- type: bool
- default: no
- version_added: "2.1"
- wait_timeout:
- description:
- - Used in conjunction with wait. Number of seconds to wait for the elb to be terminated.
- A maximum of 600 seconds (10 minutes) is allowed.
- type: int
- default: 60
- version_added: "2.1"
- tags:
- description:
- - An associative array of tags. To delete all tags, supply an empty dict.
- type: dict
- version_added: "2.1"
-
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-EXAMPLES = """
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
-
-# Basic provisioning example (non-VPC)
-
-- local_action:
- module: ec2_elb_lb
- name: "test-please-delete"
- state: present
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http # options are http, https, ssl, tcp
- load_balancer_port: 80
- instance_port: 80
- proxy_protocol: True
- - protocol: https
- load_balancer_port: 443
- instance_protocol: http # optional, defaults to value of protocol setting
- instance_port: 80
- # ssl certificate required for https or ssl
- ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
-
-# Internal ELB example
-
-- local_action:
- module: ec2_elb_lb
- name: "test-vpc"
- scheme: internal
- state: present
- instance_ids:
- - i-abcd1234
- purge_instance_ids: true
- subnets:
- - subnet-abcd1234
- - subnet-1a2b3c4d
- listeners:
- - protocol: http # options are http, https, ssl, tcp
- load_balancer_port: 80
- instance_port: 80
-
-# Configure a health check and the access logs
-- local_action:
- module: ec2_elb_lb
- name: "test-please-delete"
- state: present
- zones:
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- health_check:
- ping_protocol: http # options are http, https, ssl, tcp
- ping_port: 80
- ping_path: "/index.html" # not required for tcp or ssl
- response_timeout: 5 # seconds
- interval: 30 # seconds
- unhealthy_threshold: 2
- healthy_threshold: 10
- access_logs:
- interval: 5 # minutes (defaults to 60)
- s3_location: "my-bucket" # This value is required if access_logs is set
- s3_prefix: "logs"
-
-# Ensure ELB is gone
-- local_action:
- module: ec2_elb_lb
- name: "test-please-delete"
- state: absent
-
-# Ensure ELB is gone and wait for check (for default timeout)
-- local_action:
- module: ec2_elb_lb
- name: "test-please-delete"
- state: absent
- wait: yes
-
-# Ensure ELB is gone and wait for check with timeout value
-- local_action:
- module: ec2_elb_lb
- name: "test-please-delete"
- state: absent
- wait: yes
- wait_timeout: 600
-
-# Normally, this module will purge any listeners that exist on the ELB
-# but aren't specified in the listeners parameter. If purge_listeners is
-# false it leaves them alone
-- local_action:
- module: ec2_elb_lb
- name: "test-please-delete"
- state: present
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- purge_listeners: no
-
-# Normally, this module will leave availability zones that are enabled
-# on the ELB alone. If purge_zones is true, then any extraneous zones
-# will be removed
-- local_action:
- module: ec2_elb_lb
- name: "test-please-delete"
- state: present
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- purge_zones: yes
-
-# Creates a ELB and assigns a list of subnets to it.
-- local_action:
- module: ec2_elb_lb
- state: present
- name: 'New ELB'
- security_group_ids: 'sg-123456, sg-67890'
- region: us-west-2
- subnets: 'subnet-123456,subnet-67890'
- purge_subnets: yes
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
-
-# Create an ELB with connection draining, increased idle timeout and cross availability
-# zone load balancing
-- local_action:
- module: ec2_elb_lb
- name: "New ELB"
- state: present
- connection_draining_timeout: 60
- idle_timeout: 300
- cross_az_load_balancing: "yes"
- region: us-east-1
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
-
-# Create an ELB with load balancer stickiness enabled
-- local_action:
- module: ec2_elb_lb
- name: "New ELB"
- state: present
- region: us-east-1
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- stickiness:
- type: loadbalancer
- enabled: yes
- expiration: 300
-
-# Create an ELB with application stickiness enabled
-- local_action:
- module: ec2_elb_lb
- name: "New ELB"
- state: present
- region: us-east-1
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- stickiness:
- type: application
- enabled: yes
- cookie: SESSIONID
-
-# Create an ELB and add tags
-- local_action:
- module: ec2_elb_lb
- name: "New ELB"
- state: present
- region: us-east-1
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- tags:
- Name: "New ELB"
- stack: "production"
- client: "Bob"
-
-# Delete all tags from an ELB
-- local_action:
- module: ec2_elb_lb
- name: "New ELB"
- state: present
- region: us-east-1
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- tags: {}
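-
-# Create an ELB with proxy protocol enabled on the backend port. Setting
-# proxy_protocol on a listener installs the ProxyProtocol-policy for that
-# instance port; the ELB name here is only illustrative.
-- local_action:
- module: ec2_elb_lb
- name: "proxy-protocol-elb"
- state: present
- region: us-east-1
- zones:
- - us-east-1a
- listeners:
- - protocol: tcp
- load_balancer_port: 25
- instance_port: 25
- proxy_protocol: True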
-"""
-
-import random
-import time
-import traceback
-
-try:
- import boto
- import boto.ec2.elb
- import boto.ec2.elb.attributes
- import boto.vpc
- from boto.ec2.elb.healthcheck import HealthCheck
- from boto.ec2.tag import Tag
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import ec2_argument_spec, connect_to_aws, AnsibleAWSError, get_aws_connection_info
-from ansible.module_utils.six import string_types
-from ansible.module_utils._text import to_native
-
-
-def _throttleable_operation(max_retries):
- def _operation_wrapper(op):
- def _do_op(*args, **kwargs):
- retry = 0
- while True:
- try:
- return op(*args, **kwargs)
- except boto.exception.BotoServerError as e:
- if retry < max_retries and e.code in \
- ("Throttling", "RequestLimitExceeded"):
- retry = retry + 1
- time.sleep(min(random.random() * (2 ** retry), 300))
- continue
- else:
- raise
- return _do_op
- return _operation_wrapper
-
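-# Usage sketch for the decorator above (the wrapped function is hypothetical):
-# each Throttling/RequestLimitExceeded error sleeps for a random interval of
-# up to 2**retry seconds (capped at 300) before retrying, at most max_retries
-# times.
-#
-# @_throttleable_operation(5)
-# def _describe_lbs(conn, name):
-# return conn.get_all_load_balancers(load_balancer_names=[name])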
-
-def _get_vpc_connection(module, region, aws_connect_params):
- try:
- return connect_to_aws(boto.vpc, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- module.fail_json(msg=str(e))
-
-
-_THROTTLING_RETRIES = 5
-
-
-class ElbManager(object):
- """Handles ELB creation and destruction"""
-
- def __init__(self, module, name, listeners=None, purge_listeners=None,
- zones=None, purge_zones=None, security_group_ids=None,
- health_check=None, subnets=None, purge_subnets=None,
- scheme="internet-facing", connection_draining_timeout=None,
- idle_timeout=None,
- cross_az_load_balancing=None, access_logs=None,
- stickiness=None, wait=None, wait_timeout=None, tags=None,
- region=None,
- instance_ids=None, purge_instance_ids=None, **aws_connect_params):
-
- self.module = module
- self.name = name
- self.listeners = listeners
- self.purge_listeners = purge_listeners
- self.instance_ids = instance_ids
- self.purge_instance_ids = purge_instance_ids
- self.zones = zones
- self.purge_zones = purge_zones
- self.security_group_ids = security_group_ids
- self.health_check = health_check
- self.subnets = subnets
- self.purge_subnets = purge_subnets
- self.scheme = scheme
- self.connection_draining_timeout = connection_draining_timeout
- self.idle_timeout = idle_timeout
- self.cross_az_load_balancing = cross_az_load_balancing
- self.access_logs = access_logs
- self.stickiness = stickiness
- self.wait = wait
- self.wait_timeout = wait_timeout
- self.tags = tags
-
- self.aws_connect_params = aws_connect_params
- self.region = region
-
- self.changed = False
- self.status = 'gone'
- self.elb_conn = self._get_elb_connection()
-
- try:
- self.elb = self._get_elb()
- except boto.exception.BotoServerError as e:
- module.fail_json(msg='unable to get all load balancers: %s' % e.message, exception=traceback.format_exc())
-
- self.ec2_conn = self._get_ec2_connection()
-
- @_throttleable_operation(_THROTTLING_RETRIES)
- def ensure_ok(self):
- """Create the ELB"""
- if not self.elb:
- # Zones and listeners will be added at creation
- self._create_elb()
- else:
- if self._get_scheme():
- # the only way to change the scheme is by recreating the resource
- self.ensure_gone()
- self._create_elb()
- else:
- self._set_zones()
- self._set_security_groups()
- self._set_elb_listeners()
- self._set_subnets()
- self._set_health_check()
- # boto has introduced support for some ELB attributes in
- # different versions, so we check first before trying to
- # set them to avoid errors
- if self._check_attribute_support('connection_draining'):
- self._set_connection_draining_timeout()
- if self._check_attribute_support('connecting_settings'):
- self._set_idle_timeout()
- if self._check_attribute_support('cross_zone_load_balancing'):
- self._set_cross_az_load_balancing()
- if self._check_attribute_support('access_log'):
- self._set_access_log()
- # add sticky options
- self.select_stickiness_policy()
-
- # ensure backend server policies are correct
- self._set_backend_policies()
- # set/remove instance ids
- self._set_instance_ids()
-
- self._set_tags()
-
- def ensure_gone(self):
- """Destroy the ELB"""
- if self.elb:
- self._delete_elb()
- if self.wait:
- elb_removed = self._wait_for_elb_removed()
- # Unfortunately, even though the ELB itself is removed quickly, the
- # interfaces take longer, so dependent security groups cannot be
- # deleted until the interface has registered as removed.
- elb_interface_removed = self._wait_for_elb_interface_removed()
- if not (elb_removed and elb_interface_removed):
- self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
-
- def get_info(self):
- try:
- check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
- except Exception:
- check_elb = None
-
- if not check_elb:
- info = {
- 'name': self.name,
- 'status': self.status,
- 'region': self.region
- }
- else:
- try:
- lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
- except Exception:
- lb_cookie_policy = None
- try:
- app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
- except Exception:
- app_cookie_policy = None
-
- info = {
- 'name': check_elb.name,
- 'dns_name': check_elb.dns_name,
- 'zones': check_elb.availability_zones,
- 'security_group_ids': check_elb.security_groups,
- 'status': self.status,
- 'subnets': self.subnets,
- 'scheme': check_elb.scheme,
- 'hosted_zone_name': check_elb.canonical_hosted_zone_name,
- 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
- 'lb_cookie_policy': lb_cookie_policy,
- 'app_cookie_policy': app_cookie_policy,
- 'proxy_policy': self._get_proxy_protocol_policy(),
- 'backends': self._get_backend_policies(),
- 'instances': [instance.id for instance in check_elb.instances],
- 'out_of_service_count': 0,
- 'in_service_count': 0,
- 'unknown_instance_state_count': 0,
- 'region': self.region
- }
-
- # status of instances behind the ELB
- if info['instances']:
- info['instance_health'] = [dict(
- instance_id=instance_state.instance_id,
- reason_code=instance_state.reason_code,
- state=instance_state.state
- ) for instance_state in self.elb_conn.describe_instance_health(self.name)]
- else:
- info['instance_health'] = []
-
- # instance state counts: InService or OutOfService
- if info['instance_health']:
- for instance_state in info['instance_health']:
- if instance_state['state'] == "InService":
- info['in_service_count'] += 1
- elif instance_state['state'] == "OutOfService":
- info['out_of_service_count'] += 1
- else:
- info['unknown_instance_state_count'] += 1
-
- if check_elb.health_check:
- info['health_check'] = {
- 'target': check_elb.health_check.target,
- 'interval': check_elb.health_check.interval,
- 'timeout': check_elb.health_check.timeout,
- 'healthy_threshold': check_elb.health_check.healthy_threshold,
- 'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
- }
-
- if check_elb.listeners:
- info['listeners'] = [self._api_listener_as_tuple(l)
- for l in check_elb.listeners]
- elif self.status == 'created':
- # When creating a new ELB, listeners don't show in the
- # immediately returned result, so just include the
- # ones that were added
- info['listeners'] = [self._listener_as_tuple(l)
- for l in self.listeners]
- else:
- info['listeners'] = []
-
- if self._check_attribute_support('connection_draining'):
- info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout)
-
- if self._check_attribute_support('connecting_settings'):
- info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
-
- if self._check_attribute_support('cross_zone_load_balancing'):
- is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
- if is_cross_az_lb_enabled:
- info['cross_az_load_balancing'] = 'yes'
- else:
- info['cross_az_load_balancing'] = 'no'
-
- # return stickiness info?
-
- info['tags'] = self.tags
-
- return info
-
- @_throttleable_operation(_THROTTLING_RETRIES)
- def _wait_for_elb_removed(self):
- polling_increment_secs = 15
- max_retries = (self.wait_timeout // polling_increment_secs)
- status_achieved = False
-
- for x in range(0, max_retries):
- try:
- self.elb_conn.get_all_lb_attributes(self.name)
- except (boto.exception.BotoServerError, Exception) as e:
- if "LoadBalancerNotFound" in e.code:
- status_achieved = True
- break
- else:
- time.sleep(polling_increment_secs)
-
- return status_achieved
-
- @_throttleable_operation(_THROTTLING_RETRIES)
- def _wait_for_elb_interface_removed(self):
- polling_increment_secs = 15
- max_retries = (self.wait_timeout // polling_increment_secs)
- status_achieved = False
-
- elb_interfaces = self.ec2_conn.get_all_network_interfaces(
- filters={'attachment.instance-owner-id': 'amazon-elb',
- 'description': 'ELB {0}'.format(self.name)})
-
- for x in range(0, max_retries):
- for interface in elb_interfaces:
- try:
- result = self.ec2_conn.get_all_network_interfaces(interface.id)
- if result == []:
- status_achieved = True
- break
- else:
- time.sleep(polling_increment_secs)
- except (boto.exception.BotoServerError, Exception) as e:
- if 'InvalidNetworkInterfaceID' in e.code:
- status_achieved = True
- break
- else:
- self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- return status_achieved
-
- @_throttleable_operation(_THROTTLING_RETRIES)
- def _get_elb(self):
- elbs = self.elb_conn.get_all_load_balancers()
- for elb in elbs:
- if self.name == elb.name:
- self.status = 'ok'
- return elb
-
- def _get_elb_connection(self):
- try:
- return connect_to_aws(boto.ec2.elb, self.region,
- **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- self.module.fail_json(msg=str(e))
-
- def _get_ec2_connection(self):
- try:
- return connect_to_aws(boto.ec2, self.region,
- **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, Exception) as e:
- self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- @_throttleable_operation(_THROTTLING_RETRIES)
- def _delete_elb(self):
- # True if succeeds, exception raised if not
- result = self.elb_conn.delete_load_balancer(name=self.name)
- if result:
- self.changed = True
- self.status = 'deleted'
-
- def _create_elb(self):
- listeners = [self._listener_as_tuple(l) for l in self.listeners]
- self.elb = self.elb_conn.create_load_balancer(name=self.name,
- zones=self.zones,
- security_groups=self.security_group_ids,
- complex_listeners=listeners,
- subnets=self.subnets,
- scheme=self.scheme)
- if self.elb:
- # HACK: Work around a boto bug in which the listeners attribute is
- # always set to the listeners argument to create_load_balancer, and
- # not the complex_listeners
- # We're not doing a self.elb = self._get_elb here because there
- # might be eventual consistency issues and it doesn't necessarily
- # make sense to wait until the ELB gets returned from the EC2 API.
- # This is necessary in the event we hit the throttling errors and
- # need to retry ensure_ok
- # See https://github.com/boto/boto/issues/3526
- self.elb.listeners = self.listeners
- self.changed = True
- self.status = 'created'
-
- def _create_elb_listeners(self, listeners):
- """Takes a list of listener tuples and creates them"""
- # True if succeeds, exception raised if not
- self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
- complex_listeners=listeners)
-
- def _delete_elb_listeners(self, listeners):
- """Takes a list of listener tuples and deletes them from the elb"""
- ports = [l[0] for l in listeners]
-
- # True if succeeds, exception raised if not
- self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
- ports)
-
- def _set_elb_listeners(self):
- """
- Creates listeners specified by self.listeners; overwrites existing
- listeners on these ports; removes extraneous listeners
- """
- listeners_to_add = []
- listeners_to_remove = []
- listeners_to_keep = []
-
- # Check for any listeners we need to create or overwrite
- for listener in self.listeners:
- listener_as_tuple = self._listener_as_tuple(listener)
-
- # First we loop through existing listeners to see if one is
- # already specified for this port
- existing_listener_found = None
- for existing_listener in self.elb.listeners:
- # Since ELB allows only one listener on each incoming port, a
- # single match on the incoming port is all we're looking for
- if existing_listener[0] == int(listener['load_balancer_port']):
- existing_listener_found = self._api_listener_as_tuple(existing_listener)
- break
-
- if existing_listener_found:
- # Does it match exactly?
- if listener_as_tuple != existing_listener_found:
- # The ports are the same but something else is different,
- # so we'll remove the existing one and add the new one
- listeners_to_remove.append(existing_listener_found)
- listeners_to_add.append(listener_as_tuple)
- else:
- # We already have this listener, so we're going to keep it
- listeners_to_keep.append(existing_listener_found)
- else:
- # We didn't find an existing listener, so just add the new one
- listeners_to_add.append(listener_as_tuple)
-
- # Check for any extraneous listeners we need to remove, if desired
- if self.purge_listeners:
- for existing_listener in self.elb.listeners:
- existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
- if existing_listener_tuple in listeners_to_remove:
- # Already queued for removal
- continue
- if existing_listener_tuple in listeners_to_keep:
- # Keep this one around
- continue
- # Since we're not already removing it and we don't need to keep
- # it, let's get rid of it
- listeners_to_remove.append(existing_listener_tuple)
-
- if listeners_to_remove:
- self._delete_elb_listeners(listeners_to_remove)
-
- if listeners_to_add:
- self._create_elb_listeners(listeners_to_add)
-
- def _api_listener_as_tuple(self, listener):
- """Adds ssl_certificate_id to ELB API tuple if present"""
- base_tuple = listener.get_complex_tuple()
- if listener.ssl_certificate_id and len(base_tuple) < 5:
- return base_tuple + (listener.ssl_certificate_id,)
- return base_tuple
-
- def _listener_as_tuple(self, listener):
- """Formats a listener as a 4- or 5-tuple, in the order expected by the
- ELB API"""
- # N.B. the string manipulations on the protocols below (str(), upper())
- # are to ensure the format matches the output from the ELB API
- listener_list = [
- int(listener['load_balancer_port']),
- int(listener['instance_port']),
- str(listener['protocol'].upper()),
- ]
-
- # Instance protocol is not required by ELB API; it defaults to match
- # load balancer protocol. We'll mimic that behavior here
- if 'instance_protocol' in listener:
- listener_list.append(str(listener['instance_protocol'].upper()))
- else:
- listener_list.append(str(listener['protocol'].upper()))
-
- if 'ssl_certificate_id' in listener:
- listener_list.append(str(listener['ssl_certificate_id']))
-
- return tuple(listener_list)
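-
- # For example, {'load_balancer_port': 80, 'instance_port': 8080,
- # 'protocol': 'http'} becomes (80, 8080, 'HTTP', 'HTTP'); an
- # ssl_certificate_id key would extend it to a 5-tuple ending with the ARN.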
-
- def _enable_zones(self, zones):
- try:
- self.elb.enable_zones(zones)
- except boto.exception.BotoServerError as e:
- self.module.fail_json(msg='unable to enable zones: %s' % e.message, exception=traceback.format_exc())
-
- self.changed = True
-
- def _disable_zones(self, zones):
- try:
- self.elb.disable_zones(zones)
- except boto.exception.BotoServerError as e:
- self.module.fail_json(msg='unable to disable zones: %s' % e.message, exception=traceback.format_exc())
- self.changed = True
-
- def _attach_subnets(self, subnets):
- self.elb_conn.attach_lb_to_subnets(self.name, subnets)
- self.changed = True
-
- def _detach_subnets(self, subnets):
- self.elb_conn.detach_lb_from_subnets(self.name, subnets)
- self.changed = True
-
- def _set_subnets(self):
- """Determine which subnets need to be attached or detached on the ELB"""
- if self.subnets:
- if self.purge_subnets:
- subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
- subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
- else:
- subnets_to_detach = None
- subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
-
- if subnets_to_attach:
- self._attach_subnets(subnets_to_attach)
- if subnets_to_detach:
- self._detach_subnets(subnets_to_detach)
-
- def _get_scheme(self):
- """Determine if the current scheme is different than the scheme of the ELB"""
- if self.scheme:
- if self.elb.scheme != self.scheme:
- if not self.wait:
- self.module.fail_json(msg="Unable to modify scheme without using the wait option")
- return True
- return False
-
- def _set_zones(self):
- """Determine which zones need to be enabled or disabled on the ELB"""
- if self.zones:
- if self.purge_zones:
- zones_to_disable = list(set(self.elb.availability_zones) -
- set(self.zones))
- zones_to_enable = list(set(self.zones) -
- set(self.elb.availability_zones))
- else:
- zones_to_disable = None
- zones_to_enable = list(set(self.zones) -
- set(self.elb.availability_zones))
- if zones_to_enable:
- self._enable_zones(zones_to_enable)
- # N.B. This must come second, so that we never momentarily disable all zones
- if zones_to_disable:
- self._disable_zones(zones_to_disable)
-
- def _set_security_groups(self):
- if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
- self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
- self.changed = True
-
- def _set_health_check(self):
- """Set health check values on ELB as needed"""
- if self.health_check:
- # This just makes it easier to compare each of the attributes
- # and look for changes. Keys are attributes of the current
- # health_check; values are desired values of new health_check
- health_check_config = {
- "target": self._get_health_check_target(),
- "timeout": self.health_check['response_timeout'],
- "interval": self.health_check['interval'],
- "unhealthy_threshold": self.health_check['unhealthy_threshold'],
- "healthy_threshold": self.health_check['healthy_threshold'],
- }
-
- update_health_check = False
-
- # The health_check attribute is *not* set on newly created
- # ELBs! So we have to create our own.
- if not self.elb.health_check:
- self.elb.health_check = HealthCheck()
-
- for attr, desired_value in health_check_config.items():
- if getattr(self.elb.health_check, attr) != desired_value:
- setattr(self.elb.health_check, attr, desired_value)
- update_health_check = True
-
- if update_health_check:
- self.elb.configure_health_check(self.elb.health_check)
- self.changed = True
-
- def _check_attribute_support(self, attr):
- return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
-
- def _set_cross_az_load_balancing(self):
- attributes = self.elb.get_attributes()
- if self.cross_az_load_balancing:
- if not attributes.cross_zone_load_balancing.enabled:
- self.changed = True
- attributes.cross_zone_load_balancing.enabled = True
- else:
- if attributes.cross_zone_load_balancing.enabled:
- self.changed = True
- attributes.cross_zone_load_balancing.enabled = False
- self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
- attributes.cross_zone_load_balancing.enabled)
-
- def _set_access_log(self):
- attributes = self.elb.get_attributes()
- if self.access_logs:
- if 's3_location' not in self.access_logs:
- self.module.fail_json(msg='s3_location information required')
-
- access_logs_config = {
- "enabled": True,
- "s3_bucket_name": self.access_logs['s3_location'],
- "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
- "emit_interval": self.access_logs.get('interval', 60),
- }
-
- update_access_logs_config = False
- for attr, desired_value in access_logs_config.items():
- if getattr(attributes.access_log, attr) != desired_value:
- setattr(attributes.access_log, attr, desired_value)
- update_access_logs_config = True
- if update_access_logs_config:
- self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
- self.changed = True
- elif attributes.access_log.enabled:
- attributes.access_log.enabled = False
- self.changed = True
- self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
-
- def _set_connection_draining_timeout(self):
- attributes = self.elb.get_attributes()
- if self.connection_draining_timeout is not None:
- if not attributes.connection_draining.enabled or \
- attributes.connection_draining.timeout != self.connection_draining_timeout:
- self.changed = True
- attributes.connection_draining.enabled = True
- attributes.connection_draining.timeout = self.connection_draining_timeout
- self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
- else:
- if attributes.connection_draining.enabled:
- self.changed = True
- attributes.connection_draining.enabled = False
- self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
-
- def _set_idle_timeout(self):
- attributes = self.elb.get_attributes()
- if self.idle_timeout is not None:
- if attributes.connecting_settings.idle_timeout != self.idle_timeout:
- self.changed = True
- attributes.connecting_settings.idle_timeout = self.idle_timeout
- self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
-
- def _policy_name(self, policy_type):
- return 'ec2-elb-lb-{0}'.format(to_native(policy_type, errors='surrogate_or_strict'))
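-
- # e.g. _policy_name('LBCookieStickinessPolicyType') returns
- # 'ec2-elb-lb-LBCookieStickinessPolicyType'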
-
- def _create_policy(self, policy_param, policy_meth, policy):
- getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
-
- def _delete_policy(self, elb_name, policy):
- self.elb_conn.delete_lb_policy(elb_name, policy)
-
- def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
- self._delete_policy(self.elb.name, policy)
- self._create_policy(policy_param, policy_meth, policy)
-
- def _set_listener_policy(self, listeners_dict, policy=None):
- policy = [] if policy is None else policy
-
- for listener_port in listeners_dict:
- if listeners_dict[listener_port].startswith('HTTP'):
- self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
-
- def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
- for p in getattr(elb_info.policies, policy_attrs['attr']):
- if str(p.__dict__['policy_name']) == str(policy[0]):
- if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
- self._set_listener_policy(listeners_dict)
- self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
- self.changed = True
- break
- else:
- self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
- self.changed = True
-
- self._set_listener_policy(listeners_dict, policy)
-
- def select_stickiness_policy(self):
- if self.stickiness:
-
- if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
- self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
-
- elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
- d = {}
- for listener in elb_info.listeners:
- d[listener[0]] = listener[2]
- listeners_dict = d
-
- if self.stickiness['type'] == 'loadbalancer':
- policy = []
- policy_type = 'LBCookieStickinessPolicyType'
-
- if self.module.boolean(self.stickiness['enabled']):
-
- if 'expiration' not in self.stickiness:
- self.module.fail_json(msg='expiration must be set when type is loadbalancer')
-
- try:
- expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None
- except ValueError:
- self.module.fail_json(msg='expiration must be set to an integer')
-
- policy_attrs = {
- 'type': policy_type,
- 'attr': 'lb_cookie_stickiness_policies',
- 'method': 'create_lb_cookie_stickiness_policy',
- 'dict_key': 'cookie_expiration_period',
- 'param_value': expiration
- }
- policy.append(self._policy_name(policy_attrs['type']))
-
- self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
- elif not self.module.boolean(self.stickiness['enabled']):
- if len(elb_info.policies.lb_cookie_stickiness_policies):
- if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
- self.changed = True
- else:
- self.changed = False
- self._set_listener_policy(listeners_dict)
- self._delete_policy(self.elb.name, self._policy_name(policy_type))
-
- elif self.stickiness['type'] == 'application':
- policy = []
- policy_type = 'AppCookieStickinessPolicyType'
- if self.module.boolean(self.stickiness['enabled']):
-
- if 'cookie' not in self.stickiness:
- self.module.fail_json(msg='cookie must be set when type is application')
-
- policy_attrs = {
- 'type': policy_type,
- 'attr': 'app_cookie_stickiness_policies',
- 'method': 'create_app_cookie_stickiness_policy',
- 'dict_key': 'cookie_name',
- 'param_value': self.stickiness['cookie']
- }
- policy.append(self._policy_name(policy_attrs['type']))
- self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
- elif not self.module.boolean(self.stickiness['enabled']):
- if len(elb_info.policies.app_cookie_stickiness_policies):
- if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
- self.changed = True
- self._set_listener_policy(listeners_dict)
- self._delete_policy(self.elb.name, self._policy_name(policy_type))
-
- else:
- self._set_listener_policy(listeners_dict)
-
- def _get_backend_policies(self):
- """Get a list of backend policies"""
- policies = []
- if self.elb.backends is not None:
- for backend in self.elb.backends:
- if backend.policies is not None:
- for policy in backend.policies:
- policies.append(str(backend.instance_port) + ':' + policy.policy_name)
-
- return policies
-
- def _set_backend_policies(self):
- """Sets policies for all backends"""
- ensure_proxy_protocol = False
- replace = []
- backend_policies = self._get_backend_policies()
-
- # Find out what needs to be changed
- for listener in self.listeners:
- want = False
-
- if 'proxy_protocol' in listener and listener['proxy_protocol']:
- ensure_proxy_protocol = True
- want = True
-
- if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
- if not want:
- replace.append({'port': listener['instance_port'], 'policies': []})
- elif want:
- replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
-
- # enable or disable proxy protocol
- if ensure_proxy_protocol:
- self._set_proxy_protocol_policy()
-
- # Make the backend policies so
- for item in replace:
- self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
- self.changed = True
-
- def _get_proxy_protocol_policy(self):
- """Find out if the elb has a proxy protocol enabled"""
- if self.elb.policies is not None and self.elb.policies.other_policies is not None:
- for policy in self.elb.policies.other_policies:
- if policy.policy_name == 'ProxyProtocol-policy':
- return policy.policy_name
-
- return None
-
- def _set_proxy_protocol_policy(self):
- """Install a proxy protocol policy if needed"""
- proxy_policy = self._get_proxy_protocol_policy()
-
- if proxy_policy is None:
- self.elb_conn.create_lb_policy(
- self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
- )
- self.changed = True
-
- # TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
-
- def _diff_list(self, a, b):
- """Find the entries in list a that are not in list b"""
- b = set(b)
- return [aa for aa in a if aa not in b]
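-
- # e.g. _diff_list(['i-1234', 'i-5678'], ['i-5678']) returns ['i-1234']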
-
- def _get_instance_ids(self):
- """Get the current list of instance ids installed in the elb"""
- instances = []
- if self.elb.instances is not None:
- for instance in self.elb.instances:
- instances.append(instance.id)
-
- return instances
-
- def _set_instance_ids(self):
- """Register or deregister instances with the load balancer"""
- assert_instances = self.instance_ids or []
-
- has_instances = self._get_instance_ids()
-
- add_instances = self._diff_list(assert_instances, has_instances)
- if add_instances:
- self.elb_conn.register_instances(self.elb.name, add_instances)
- self.changed = True
-
- if self.purge_instance_ids:
- remove_instances = self._diff_list(has_instances, assert_instances)
- if remove_instances:
- self.elb_conn.deregister_instances(self.elb.name, remove_instances)
- self.changed = True
-
- def _set_tags(self):
- """Add/Delete tags"""
- if self.tags is None:
- return
-
- params = {'LoadBalancerNames.member.1': self.name}
-
- tagdict = dict()
-
- # get the current list of tags from the ELB, if ELB exists
- if self.elb:
- current_tags = self.elb_conn.get_list('DescribeTags', params,
- [('member', Tag)])
- tagdict = dict((tag.Key, tag.Value) for tag in current_tags
- if hasattr(tag, 'Key'))
-
- # Add missing tags
- dictact = dict(set(self.tags.items()) - set(tagdict.items()))
- if dictact:
- for i, key in enumerate(dictact):
- params['Tags.member.%d.Key' % (i + 1)] = key
- params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
-
- self.elb_conn.make_request('AddTags', params)
- self.changed = True
-
- # Remove extra tags
- dictact = dict(set(tagdict.items()) - set(self.tags.items()))
- if dictact:
- for i, key in enumerate(dictact):
- params['Tags.member.%d.Key' % (i + 1)] = key
-
- self.elb_conn.make_request('RemoveTags', params)
- self.changed = True
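-
- # As an illustration, adding the single tag {'stack': 'production'} to an
- # untagged ELB sends an AddTags request with params shaped like
- # {'LoadBalancerNames.member.1': self.name,
- # 'Tags.member.1.Key': 'stack', 'Tags.member.1.Value': 'production'}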
-
- def _get_health_check_target(self):
- """Compose target string from healthcheck parameters"""
- protocol = self.health_check['ping_protocol'].upper()
- path = ""
-
- if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
- path = self.health_check['ping_path']
-
- return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
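-
- # For example, ping_protocol=http, ping_port=80 and ping_path=/index.html
- # compose "HTTP:80/index.html", while ping_protocol=tcp and ping_port=25
- # compose "TCP:25" (no path is used for TCP or SSL)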
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state={'required': True, 'choices': ['present', 'absent']},
- name={'required': True},
- listeners={'default': None, 'required': False, 'type': 'list'},
- purge_listeners={'default': True, 'required': False, 'type': 'bool'},
- instance_ids={'default': None, 'required': False, 'type': 'list'},
- purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
- zones={'default': None, 'required': False, 'type': 'list'},
- purge_zones={'default': False, 'required': False, 'type': 'bool'},
- security_group_ids={'default': None, 'required': False, 'type': 'list'},
- security_group_names={'default': None, 'required': False, 'type': 'list'},
- health_check={'default': None, 'required': False, 'type': 'dict'},
- subnets={'default': None, 'required': False, 'type': 'list'},
- purge_subnets={'default': False, 'required': False, 'type': 'bool'},
- scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']},
- connection_draining_timeout={'default': None, 'required': False, 'type': 'int'},
- idle_timeout={'default': None, 'type': 'int', 'required': False},
- cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False},
- stickiness={'default': None, 'required': False, 'type': 'dict'},
- access_logs={'default': None, 'required': False, 'type': 'dict'},
- wait={'default': False, 'type': 'bool', 'required': False},
- wait_timeout={'default': 60, 'type': 'int', 'required': False},
- tags={'default': None, 'required': False, 'type': 'dict'}
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- mutually_exclusive=[['security_group_ids', 'security_group_names']]
- )
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
- if not region:
- module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
-
- name = module.params['name']
- state = module.params['state']
- listeners = module.params['listeners']
- purge_listeners = module.params['purge_listeners']
- instance_ids = module.params['instance_ids']
- purge_instance_ids = module.params['purge_instance_ids']
- zones = module.params['zones']
- purge_zones = module.params['purge_zones']
- security_group_ids = module.params['security_group_ids']
- security_group_names = module.params['security_group_names']
- health_check = module.params['health_check']
- access_logs = module.params['access_logs']
- subnets = module.params['subnets']
- purge_subnets = module.params['purge_subnets']
- scheme = module.params['scheme']
- connection_draining_timeout = module.params['connection_draining_timeout']
- idle_timeout = module.params['idle_timeout']
- cross_az_load_balancing = module.params['cross_az_load_balancing']
- stickiness = module.params['stickiness']
- wait = module.params['wait']
- wait_timeout = module.params['wait_timeout']
- tags = module.params['tags']
-
- if state == 'present' and not listeners:
- module.fail_json(msg="At least one listener is required for ELB creation")
-
- if state == 'present' and not (zones or subnets):
- module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
-
- if wait_timeout > 600:
- module.fail_json(msg='wait_timeout maximum is 600 seconds')
-
- if security_group_names:
- security_group_ids = []
- try:
- ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
- if subnets: # We have at least one subnet, ergo this is a VPC
- vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
- vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
- filters = {'vpc_id': vpc_id}
- else:
- filters = None
- grp_details = ec2.get_all_security_groups(filters=filters)
-
- for group_name in security_group_names:
- if isinstance(group_name, string_types):
- group_name = [group_name]
-
- group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
- security_group_ids.extend(group_id)
- except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg=str(e))
-
- elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
- purge_zones, security_group_ids, health_check,
- subnets, purge_subnets, scheme,
- connection_draining_timeout, idle_timeout,
- cross_az_load_balancing,
- access_logs, stickiness, wait, wait_timeout, tags,
- region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
- **aws_connect_params)
-
- # check for unsupported attributes for this version of boto
- if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
- module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
-
- if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
- module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
-
- if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
- module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
-
- if state == 'present':
- elb_man.ensure_ok()
- elif state == 'absent':
- elb_man.ensure_gone()
-
- ansible_facts = {'ec2_elb': 'info'}
- ec2_facts_result = dict(changed=elb_man.changed,
- elb=elb_man.get_info(),
- ansible_facts=ansible_facts)
-
- module.exit_json(**ec2_facts_result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_eni.py b/lib/ansible/modules/cloud/amazon/ec2_eni.py
deleted file mode 100644
index 8b6dbd1c32..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_eni.py
+++ /dev/null
@@ -1,633 +0,0 @@
-#!/usr/bin/python
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_eni
-short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
-description:
- - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private IP address is
- provided, the existing ENI (if any) will be modified. The I(attached) parameter controls the attachment status
- of the network interface.
-version_added: "2.0"
-author: "Rob White (@wimnat)"
-options:
- eni_id:
- description:
- - The ID of the ENI (to modify).
- - If I(eni_id=None) and I(state=present), a new eni will be created.
- type: str
- instance_id:
- description:
- - Instance ID that you wish to attach ENI to.
- - Since version 2.2, use the I(attached) parameter to attach or detach an ENI. Prior to 2.2, to detach an ENI from an instance, use C(None).
- type: str
- private_ip_address:
- description:
- - Private IP address.
- type: str
- subnet_id:
- description:
- - ID of subnet in which to create the ENI.
- type: str
- description:
- description:
- - Optional description of the ENI.
- type: str
- security_groups:
- description:
- - List of security groups associated with the interface. Only used when I(state=present).
- - Since version 2.2, you can specify security groups by ID, by name, or a combination of both. Prior to 2.2, you could specify only by ID.
- type: list
- elements: str
- state:
- description:
- - Create or delete ENI.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- device_index:
- description:
- - The index of the device for the network interface attachment on the instance.
- default: 0
- type: int
- attached:
- description:
- - Specifies whether the network interface should be attached to or detached from the instance. If omitted, the
- attachment status won't change.
- version_added: 2.2
- type: bool
- force_detach:
- description:
- - Force detachment of the interface. This applies either when explicitly detaching the interface by setting I(instance_id=None)
- or when deleting an interface with I(state=absent).
- default: false
- type: bool
- delete_on_termination:
- description:
- - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the
- interface is being modified, not on creation.
- required: false
- type: bool
- source_dest_check:
- description:
- - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled.
- You can only specify this flag when the interface is being modified, not on creation.
- required: false
- type: bool
- secondary_private_ip_addresses:
- description:
- - A list of IP addresses to assign as secondary IP addresses to the network interface.
- This option is mutually exclusive with I(secondary_private_ip_address_count).
- required: false
- version_added: 2.2
- type: list
- elements: str
- purge_secondary_private_ip_addresses:
- description:
- - To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.
- - Set I(secondary_private_ip_addresses=[]) to purge all secondary addresses.
- default: false
- type: bool
- version_added: 2.5
- secondary_private_ip_address_count:
- description:
- - The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive with I(secondary_private_ip_addresses).
- required: false
- version_added: 2.2
- type: int
- allow_reassignment:
- description:
- - Indicates whether to allow an IP address that is already assigned to another network interface or instance
- to be reassigned to the specified network interface.
- required: false
- default: false
- type: bool
- version_added: 2.7
-extends_documentation_fragment:
- - aws
- - ec2
-notes:
- - This module identifies an ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id),
- or a combination of I(instance_id) and I(device_index). Any of these options will let you specify a particular ENI.
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create an ENI. As no security group is defined, ENI will be created in default security group
-- ec2_eni:
- private_ip_address: 172.31.0.20
- subnet_id: subnet-xxxxxxxx
- state: present
-
-# Create an ENI and attach it to an instance
-- ec2_eni:
- instance_id: i-xxxxxxx
- device_index: 1
- private_ip_address: 172.31.0.20
- subnet_id: subnet-xxxxxxxx
- state: present
-
-# Create an ENI with two secondary addresses
-- ec2_eni:
- subnet_id: subnet-xxxxxxxx
- state: present
- secondary_private_ip_address_count: 2
-
-# Assign a secondary IP address to an existing ENI
-# This will purge any existing IPs
-- ec2_eni:
- subnet_id: subnet-xxxxxxxx
- eni_id: eni-yyyyyyyy
- state: present
- secondary_private_ip_addresses:
- - 172.16.1.1
-
-# Remove any secondary IP addresses from an existing ENI
-- ec2_eni:
- subnet_id: subnet-xxxxxxxx
- eni_id: eni-yyyyyyyy
- state: present
- secondary_private_ip_address_count: 0
-
-# Destroy an ENI, detaching it from any instance if necessary
-- ec2_eni:
- eni_id: eni-xxxxxxx
- force_detach: true
- state: absent
-
-# Update an ENI
-- ec2_eni:
- eni_id: eni-xxxxxxx
- description: "My new description"
- state: present
-
-# Update an ENI identifying it by private_ip_address and subnet_id
-- ec2_eni:
- subnet_id: subnet-xxxxxxx
- private_ip_address: 172.16.1.1
- description: "My new description"
-
-# Detach an ENI from an instance
-- ec2_eni:
- eni_id: eni-xxxxxxx
- instance_id: None
- state: present
-
-### Delete an interface on termination
-# First create the interface
-- ec2_eni:
- instance_id: i-xxxxxxx
- device_index: 1
- private_ip_address: 172.31.0.20
- subnet_id: subnet-xxxxxxxx
- state: present
- register: eni
-
-# Modify the interface to enable the delete_on_termination flag
-- ec2_eni:
- eni_id: "{{ eni.interface.id }}"
- delete_on_termination: true
-
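-# Detach an ENI from its instance with the attached parameter (2.2+),
-# keeping the interface itself
-- ec2_eni:
- eni_id: eni-xxxxxxx
- attached: false
- state: present
-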
-'''
-
-
-RETURN = '''
-interface:
- description: Network interface attributes
- returned: when state != absent
- type: complex
- contains:
- description:
- description: interface description
- type: str
- sample: Firewall network interface
- groups:
- description: list of security groups
- type: list
- elements: dict
- sample: [ { "sg-f8a8a9da": "default" } ]
- id:
- description: network interface id
- type: str
- sample: "eni-1d889198"
- mac_address:
- description: interface's physical address
- type: str
- sample: "00:00:5E:00:53:23"
- owner_id:
- description: aws account id
- type: str
- sample: 812381371
- private_ip_address:
- description: primary ip address of this interface
- type: str
- sample: 10.20.30.40
- private_ip_addresses:
- description: list of all private ip addresses associated to this interface
- type: list
- elements: dict
- sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
- source_dest_check:
- description: value of source/dest check flag
- type: bool
- sample: True
- status:
- description: network interface status
- type: str
- sample: "pending"
- subnet_id:
- description: the VPC subnet to which the interface is bound
- type: str
- sample: subnet-b0a0393c
- vpc_id:
- description: the VPC to which this network interface is bound
- type: str
- sample: vpc-9a9a9da
-
-'''
-
-import time
-import re
-
-try:
- import boto.ec2
- import boto.vpc
- from boto.exception import BotoServerError
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
- ec2_argument_spec, get_aws_connection_info,
- get_ec2_security_group_ids_from_names)
-
-
-def get_eni_info(interface):
-
- # Private addresses
- private_addresses = []
- for ip in interface.private_ip_addresses:
- private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
-
- interface_info = {'id': interface.id,
- 'subnet_id': interface.subnet_id,
- 'vpc_id': interface.vpc_id,
- 'description': interface.description,
- 'owner_id': interface.owner_id,
- 'status': interface.status,
- 'mac_address': interface.mac_address,
- 'private_ip_address': interface.private_ip_address,
- 'source_dest_check': interface.source_dest_check,
- 'groups': dict((group.id, group.name) for group in interface.groups),
- 'private_ip_addresses': private_addresses
- }
-
- if interface.attachment is not None:
- interface_info['attachment'] = {'attachment_id': interface.attachment.id,
- 'instance_id': interface.attachment.instance_id,
- 'device_index': interface.attachment.device_index,
- 'status': interface.attachment.status,
- 'attach_time': interface.attachment.attach_time,
- 'delete_on_termination': interface.attachment.delete_on_termination,
- }
-
- return interface_info
-
-
-def wait_for_eni(eni, status):
-
- while True:
- time.sleep(3)
- eni.update()
- # If the status is detached, we just need the attachment to disappear
- if eni.attachment is None:
- if status == "detached":
- break
- else:
- if status == "attached" and eni.attachment.status == "attached":
- break
-
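-# For example, wait_for_eni(eni, "attached") re-polls eni.update() every
-# three seconds until the attachment reports "attached"; passing "detached"
-# instead waits for the attachment to disappear entirely.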
-
-def create_eni(connection, vpc_id, module):
-
- instance_id = module.params.get("instance_id")
- attached = module.params.get("attached")
- if instance_id == 'None':
- instance_id = None
- device_index = module.params.get("device_index")
- subnet_id = module.params.get('subnet_id')
- private_ip_address = module.params.get('private_ip_address')
- description = module.params.get('description')
- security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection, vpc_id=vpc_id, boto3=False)
- secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
- secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
- changed = False
-
- try:
- eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
- if attached and instance_id is not None:
- try:
- eni.attach(instance_id, device_index)
- except BotoServerError:
- eni.delete()
- raise
- # Wait to allow creation / attachment to finish
- wait_for_eni(eni, "attached")
- eni.update()
-
- if secondary_private_ip_address_count is not None:
- try:
- connection.assign_private_ip_addresses(network_interface_id=eni.id, secondary_private_ip_address_count=secondary_private_ip_address_count)
- except BotoServerError:
- eni.delete()
- raise
-
- if secondary_private_ip_addresses is not None:
- try:
- connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses)
- except BotoServerError:
- eni.delete()
- raise
-
- changed = True
-
- except BotoServerError as e:
- module.fail_json(msg=e.message)
-
- module.exit_json(changed=changed, interface=get_eni_info(eni))
-
-
-def modify_eni(connection, vpc_id, module, eni):
-
- instance_id = module.params.get("instance_id")
- attached = module.params.get("attached")
- do_detach = module.params.get('state') == 'detached'
- device_index = module.params.get("device_index")
- description = module.params.get('description')
- security_groups = module.params.get('security_groups')
- force_detach = module.params.get("force_detach")
- source_dest_check = module.params.get("source_dest_check")
- delete_on_termination = module.params.get("delete_on_termination")
- secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
- purge_secondary_private_ip_addresses = module.params.get("purge_secondary_private_ip_addresses")
- secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
- allow_reassignment = module.params.get("allow_reassignment")
- changed = False
-
- try:
- if description is not None:
- if eni.description != description:
- connection.modify_network_interface_attribute(eni.id, "description", description)
- changed = True
- if len(security_groups) > 0:
- groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False)
- if sorted(get_sec_group_list(eni.groups)) != sorted(groups):
- connection.modify_network_interface_attribute(eni.id, "groupSet", groups)
- changed = True
- if source_dest_check is not None:
- if eni.source_dest_check != source_dest_check:
- connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
- changed = True
- if delete_on_termination is not None and eni.attachment is not None:
- if eni.attachment.delete_on_termination is not delete_on_termination:
- connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
- changed = True
-
- current_secondary_addresses = [i.private_ip_address for i in eni.private_ip_addresses if not i.primary]
- if secondary_private_ip_addresses is not None:
- secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))
- if secondary_addresses_to_remove and purge_secondary_private_ip_addresses:
- connection.unassign_private_ip_addresses(network_interface_id=eni.id,
- private_ip_addresses=list(set(current_secondary_addresses) -
- set(secondary_private_ip_addresses)),
- dry_run=False)
- changed = True
-
- secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses))
- if secondary_addresses_to_add:
- connection.assign_private_ip_addresses(network_interface_id=eni.id,
- private_ip_addresses=secondary_addresses_to_add,
- secondary_private_ip_address_count=None,
- allow_reassignment=allow_reassignment, dry_run=False)
- changed = True
- if secondary_private_ip_address_count is not None:
- current_secondary_address_count = len(current_secondary_addresses)
-
- if secondary_private_ip_address_count > current_secondary_address_count:
- connection.assign_private_ip_addresses(network_interface_id=eni.id,
- private_ip_addresses=None,
- secondary_private_ip_address_count=(secondary_private_ip_address_count -
- current_secondary_address_count),
- allow_reassignment=allow_reassignment, dry_run=False)
- changed = True
- elif secondary_private_ip_address_count < current_secondary_address_count:
- # How many of these addresses do we want to remove
- secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
- connection.unassign_private_ip_addresses(network_interface_id=eni.id,
- private_ip_addresses=current_secondary_addresses[:secondary_addresses_to_remove_count],
- dry_run=False)
- # removing addresses is a change, just like the other branches
- changed = True
-
- if attached is True:
- if eni.attachment and eni.attachment.instance_id != instance_id:
- detach_eni(eni, module)
- eni.attach(instance_id, device_index)
- wait_for_eni(eni, "attached")
- changed = True
- if eni.attachment is None:
- eni.attach(instance_id, device_index)
- wait_for_eni(eni, "attached")
- changed = True
- elif attached is False:
- detach_eni(eni, module)
-
- except BotoServerError as e:
- module.fail_json(msg=e.message)
-
- eni.update()
- module.exit_json(changed=changed, interface=get_eni_info(eni))
-
-
-def delete_eni(connection, module):
-
- eni_id = module.params.get("eni_id")
- force_detach = module.params.get("force_detach")
-
- try:
- eni_result_set = connection.get_all_network_interfaces(eni_id)
- eni = eni_result_set[0]
-
- if force_detach is True:
- if eni.attachment is not None:
- eni.detach(force_detach)
- # Wait to allow detachment to finish
- wait_for_eni(eni, "detached")
- eni.update()
- eni.delete()
- changed = True
- else:
- eni.delete()
- changed = True
-
- module.exit_json(changed=changed)
- except BotoServerError as e:
- regex = re.compile('The networkInterface ID \'.*\' does not exist')
- if regex.search(e.message) is not None:
- module.exit_json(changed=False)
- else:
- module.fail_json(msg=e.message)
-
-
-def detach_eni(eni, module):
-
- attached = module.params.get("attached")
-
- force_detach = module.params.get("force_detach")
- if eni.attachment is not None:
- eni.detach(force_detach)
- wait_for_eni(eni, "detached")
- if attached:
- return
- eni.update()
- module.exit_json(changed=True, interface=get_eni_info(eni))
- else:
- module.exit_json(changed=False, interface=get_eni_info(eni))
-
-
-def uniquely_find_eni(connection, module):
-
- eni_id = module.params.get("eni_id")
- private_ip_address = module.params.get('private_ip_address')
- subnet_id = module.params.get('subnet_id')
- instance_id = module.params.get('instance_id')
- device_index = module.params.get('device_index')
- attached = module.params.get('attached')
-
- try:
- filters = {}
-
- # proceed only if we're unambiguously specifying an ENI
- if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None):
- return None
-
- if private_ip_address and subnet_id:
- filters['private-ip-address'] = private_ip_address
- filters['subnet-id'] = subnet_id
-
- if not attached and instance_id and device_index:
- filters['attachment.instance-id'] = instance_id
- filters['attachment.device-index'] = device_index
-
- if eni_id is None and len(filters) == 0:
- return None
-
- eni_result = connection.get_all_network_interfaces(eni_id, filters=filters)
- if len(eni_result) == 1:
- return eni_result[0]
- else:
- return None
-
- except BotoServerError as e:
- module.fail_json(msg=e.message)
-
- return None
-
-
-def get_sec_group_list(groups):
-
- # Build list of remote security groups
- remote_security_groups = []
- for group in groups:
- remote_security_groups.append(group.id.encode())
-
- return remote_security_groups
-
-
-def _get_vpc_id(connection, module, subnet_id):
-
- try:
- return connection.get_all_subnets(subnet_ids=[subnet_id])[0].vpc_id
- except BotoServerError as e:
- module.fail_json(msg=e.message)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- eni_id=dict(default=None, type='str'),
- instance_id=dict(default=None, type='str'),
- private_ip_address=dict(type='str'),
- subnet_id=dict(type='str'),
- description=dict(type='str'),
- security_groups=dict(default=[], type='list'),
- device_index=dict(default=0, type='int'),
- state=dict(default='present', choices=['present', 'absent']),
- force_detach=dict(default='no', type='bool'),
- source_dest_check=dict(default=None, type='bool'),
- delete_on_termination=dict(default=None, type='bool'),
- secondary_private_ip_addresses=dict(default=None, type='list'),
- purge_secondary_private_ip_addresses=dict(default=False, type='bool'),
- secondary_private_ip_address_count=dict(default=None, type='int'),
- allow_reassignment=dict(default=False, type='bool'),
- attached=dict(default=None, type='bool')
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive=[
- ['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
- ],
- required_if=([
- ('state', 'absent', ['eni_id']),
- ('attached', True, ['instance_id']),
- ('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses'])
- ])
- )
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-
- if region:
- try:
- connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
- vpc_connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- module.fail_json(msg=str(e))
- else:
- module.fail_json(msg="region must be specified")
-
- state = module.params.get("state")
-
- if state == 'present':
- eni = uniquely_find_eni(connection, module)
- if eni is None:
- subnet_id = module.params.get("subnet_id")
- if subnet_id is None:
- module.fail_json(msg="subnet_id is required when creating a new ENI")
-
- vpc_id = _get_vpc_id(vpc_connection, module, subnet_id)
- create_eni(connection, vpc_id, module)
- else:
- vpc_id = eni.vpc_id
- modify_eni(connection, vpc_id, module, eni)
-
- elif state == 'absent':
- delete_eni(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_eni_info.py b/lib/ansible/modules/cloud/amazon/ec2_eni_info.py
deleted file mode 100644
index 99922a84d1..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_eni_info.py
+++ /dev/null
@@ -1,275 +0,0 @@
-#!/usr/bin/python
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_eni_info
-short_description: Gather information about ec2 ENI interfaces in AWS
-description:
- - Gather information about ec2 ENI interfaces in AWS.
- - This module was called C(ec2_eni_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.0"
-author: "Rob White (@wimnat)"
-requirements: [ boto3 ]
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
- type: dict
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all ENIs
-- ec2_eni_info:
-
-# Gather information about a particular ENI
-- ec2_eni_info:
- filters:
- network-interface-id: eni-xxxxxxx
-
-'''
-
-RETURN = '''
-network_interfaces:
- description: List of matching elastic network interfaces
- returned: always
- type: complex
- contains:
- association:
- description: Info of associated elastic IP (EIP)
- returned: always, empty dict if no association exists
- type: dict
- sample: {
- allocation_id: "eipalloc-5sdf123",
- association_id: "eipassoc-8sdf123",
- ip_owner_id: "4415120123456",
- public_dns_name: "ec2-52-1-0-63.compute-1.amazonaws.com",
- public_ip: "52.1.0.63"
- }
- attachment:
- description: Info about attached ec2 instance
- returned: always, empty dict if ENI is not attached
- type: dict
- sample: {
- attach_time: "2017-08-05T15:25:47+00:00",
- attachment_id: "eni-attach-149d21234",
- delete_on_termination: false,
- device_index: 1,
- instance_id: "i-15b8d3cadbafa1234",
- instance_owner_id: "4415120123456",
- status: "attached"
- }
- availability_zone:
- description: Availability zone of ENI
- returned: always
- type: str
- sample: "us-east-1b"
- description:
- description: Description text for ENI
- returned: always
- type: str
- sample: "My favourite network interface"
- groups:
- description: List of attached security groups
- returned: always
- type: list
- sample: [
- {
- group_id: "sg-26d0f1234",
- group_name: "my_ec2_security_group"
- }
- ]
- id:
- description: The id of the ENI (alias for network_interface_id)
- returned: always
- type: str
- sample: "eni-392fsdf"
- interface_type:
- description: Type of the network interface
- returned: always
- type: str
- sample: "interface"
- ipv6_addresses:
- description: List of IPv6 addresses for this interface
- returned: always
- type: list
- sample: []
- mac_address:
- description: MAC address of the network interface
- returned: always
- type: str
- sample: "0a:f8:10:2f:ab:a1"
- network_interface_id:
- description: The id of the ENI
- returned: always
- type: str
- sample: "eni-392fsdf"
- owner_id:
- description: AWS account id of the owner of the ENI
- returned: always
- type: str
- sample: "4415120123456"
- private_dns_name:
- description: Private DNS name for the ENI
- returned: always
- type: str
- sample: "ip-172-16-1-180.ec2.internal"
- private_ip_address:
- description: Private IP address for the ENI
- returned: always
- type: str
- sample: "172.16.1.180"
- private_ip_addresses:
- description: List of private IP addresses attached to the ENI
- returned: always
- type: list
- sample: []
- requester_id:
- description: The ID of the entity that launched the ENI
- returned: always
- type: str
- sample: "AIDAIONYVJQNIAZFT3ABC"
- requester_managed:
- description: Indicates whether the network interface is being managed by an AWS service.
- returned: always
- type: bool
- sample: false
- source_dest_check:
- description: Indicates whether the network interface performs source/destination checking.
- returned: always
- type: bool
- sample: false
- status:
- description: Indicates if the network interface is attached to an instance or not
- returned: always
- type: str
- sample: "in-use"
- subnet_id:
- description: Subnet ID the ENI is in
- returned: always
- type: str
- sample: "subnet-7bbf01234"
- tag_set:
- description: Dictionary of tags added to the ENI
- returned: always
- type: dict
- sample: {}
- vpc_id:
- description: ID of the VPC the network interface is part of
- returned: always
- type: str
- sample: "vpc-b3f1f123"
-'''
-
-try:
- from botocore.exceptions import ClientError, NoCredentialsError
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_conn
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
-from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
-
-
-def list_eni(connection, module):
-
- if module.params.get("filters") is None:
- filters = []
- else:
- filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
-
- try:
- network_interfaces_result = connection.describe_network_interfaces(Filters=filters)['NetworkInterfaces']
- except (ClientError, NoCredentialsError) as e:
- module.fail_json(msg=to_native(e))
-
- # Modify boto3 tags list to be ansible friendly dict and then camel_case
- camel_network_interfaces = []
- for network_interface in network_interfaces_result:
- network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet'])
- # Added id to interface info to be compatible with return values of ec2_eni module:
- network_interface['Id'] = network_interface['NetworkInterfaceId']
- camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface))
-
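- # For illustration (hypothetical values), a raw boto3 entry such as
- #   {'NetworkInterfaceId': 'eni-xxxxxxx', 'TagSet': [{'Key': 'Name', 'Value': 'web'}]}
- # comes out of the loop above as
- #   {'network_interface_id': 'eni-xxxxxxx', 'id': 'eni-xxxxxxx', 'tag_set': {'Name': 'web'}}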
- module.exit_json(network_interfaces=camel_network_interfaces)
-
-
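-# Note: this helper is not called by main() below; it appears to be left over
-# from the boto2 implementation of this module.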
-def get_eni_info(interface):
-
- # Private addresses
- private_addresses = []
- for ip in interface.private_ip_addresses:
- private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
-
- interface_info = {'id': interface.id,
- 'subnet_id': interface.subnet_id,
- 'vpc_id': interface.vpc_id,
- 'description': interface.description,
- 'owner_id': interface.owner_id,
- 'status': interface.status,
- 'mac_address': interface.mac_address,
- 'private_ip_address': interface.private_ip_address,
- 'source_dest_check': interface.source_dest_check,
- 'groups': dict((group.id, group.name) for group in interface.groups),
- 'private_ip_addresses': private_addresses
- }
-
- if hasattr(interface, 'publicDnsName'):
- interface_info['association'] = {'public_ip_address': interface.publicIp,
- 'public_dns_name': interface.publicDnsName,
- 'ip_owner_id': interface.ipOwnerId
- }
-
- if interface.attachment is not None:
- interface_info['attachment'] = {'attachment_id': interface.attachment.id,
- 'instance_id': interface.attachment.instance_id,
- 'device_index': interface.attachment.device_index,
- 'status': interface.attachment.status,
- 'attach_time': interface.attachment.attach_time,
- 'delete_on_termination': interface.attachment.delete_on_termination,
- }
-
- return interface_info
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- filters=dict(default=None, type='dict')
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec)
- if module._name == 'ec2_eni_facts':
- module.deprecate("The 'ec2_eni_facts' module has been renamed to 'ec2_eni_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-
- connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
-
- list_eni(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_group.py b/lib/ansible/modules/cloud/amazon/ec2_group.py
deleted file mode 100644
index bc416f66b5..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_group.py
+++ /dev/null
@@ -1,1345 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-DOCUMENTATION = '''
----
-module: ec2_group
-author: "Andrew de Quincey (@adq)"
-version_added: "1.3"
-requirements: [ boto3 ]
-short_description: maintain an ec2 VPC security group.
-description:
- - Maintains ec2 security groups. This module has a dependency on boto3.
-options:
- name:
- description:
- - Name of the security group.
- - Exactly one of I(name) or I(group_id) is required.
- - Required if I(state=present).
- required: false
- type: str
- group_id:
- description:
- - Id of group to delete (works only with absent).
- - Exactly one of I(name) or I(group_id) is required.
- required: false
- version_added: "2.4"
- type: str
- description:
- description:
- - Description of the security group. Required when C(state) is C(present).
- required: false
- type: str
- vpc_id:
- description:
- - ID of the VPC to create the group in.
- required: false
- type: str
- rules:
- description:
- - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
- no inbound rules will be enabled. A rule may reference the group's own name in I(group_name),
- which allows idempotent loopback additions (e.g. allowing the group to access itself).
- Support for lists of rule sources was added in version 2.4; this allows defining multiple
- sources per source type, as well as multiple source types per rule. Prior to 2.4 only an
- individual source was allowed. Support for rule descriptions was added in version 2.5.
- required: false
- type: list
- elements: dict
- suboptions:
- cidr_ip:
- type: str
- description:
- - The IPv4 CIDR range traffic is coming from.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- cidr_ipv6:
- type: str
- description:
- - The IPv6 CIDR range traffic is coming from.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- ip_prefix:
- type: str
- description:
- - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
- that traffic is coming from.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_id:
- type: str
- description:
- - The ID of the Security Group that traffic is coming from.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_name:
- type: str
- description:
- - Name of the Security Group that traffic is coming from.
- - If the Security Group doesn't exist a new Security Group will be
- created with I(group_desc) as the description.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_desc:
- type: str
- description:
- - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
- created with I(group_desc) as the description.
- proto:
- type: str
- description:
- - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
- from_port:
- type: int
- description: The start of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
- to_port:
- type: int
- description: The end of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
- rule_desc:
- type: str
- description: A description for the rule.
- rules_egress:
- description:
- - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
- a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
- Support for lists of egress rule sources was added in version 2.4; support for rule descriptions
- was added in version 2.5.
- required: false
- version_added: "1.6"
- type: list
- elements: dict
- suboptions:
- cidr_ip:
- type: str
- description:
- - The IPv4 CIDR range traffic is going to.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- cidr_ipv6:
- type: str
- description:
- - The IPv6 CIDR range traffic is going to.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- ip_prefix:
- type: str
- description:
- - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
- that traffic is going to.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_id:
- type: str
- description:
- - The ID of the Security Group that traffic is going to.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_name:
- type: str
- description:
- - Name of the Security Group that traffic is going to.
- - If the Security Group doesn't exist a new Security Group will be
- created with I(group_desc) as the description.
- - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
- and I(group_name).
- group_desc:
- type: str
- description:
- - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
- created with I(group_desc) as the description.
- proto:
- type: str
- description:
- - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
- from_port:
- type: int
- description: The start of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
- to_port:
- type: int
- description: The end of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
- rule_desc:
- type: str
- description: A description for the rule.
- state:
- version_added: "1.4"
- description:
- - Create or delete a security group.
- required: false
- default: 'present'
- choices: [ "present", "absent" ]
- aliases: []
- type: str
- purge_rules:
- version_added: "1.8"
- description:
- - Purge existing rules on security group that are not found in rules.
- required: false
- default: 'true'
- aliases: []
- type: bool
- purge_rules_egress:
- version_added: "1.8"
- description:
- - Purge existing rules_egress on security group that are not found in rules_egress.
- required: false
- default: 'true'
- aliases: []
- type: bool
- tags:
- version_added: "2.4"
- description:
- - A dictionary of one or more tags to assign to the security group.
- required: false
- type: dict
- aliases: ['resource_tags']
- purge_tags:
- version_added: "2.4"
- description:
- - If yes, existing tags will be purged from the resource to match exactly what is defined by the
- I(tags) parameter. If the I(tags) parameter is not set then tags will not be modified.
- required: false
- default: yes
- type: bool
-
-extends_documentation_fragment:
- - aws
- - ec2
-
-notes:
- - If a rule declares a group_name and that group doesn't exist, it will be
- automatically created. In that case, group_desc should be provided as well.
- The module will refuse to create a depended-on group without a description.
- - Preview diff mode support was added in version 2.7.
-'''
-
-EXAMPLES = '''
-- name: example using security group rule descriptions
- ec2_group:
- name: "{{ name }}"
- description: sg with rule descriptions
- vpc_id: vpc-xxxxxxxx
- profile: "{{ aws_profile }}"
- region: us-east-1
- rules:
- - proto: tcp
- ports:
- - 80
- cidr_ip: 0.0.0.0/0
- rule_desc: allow all on port 80
-
-- name: example ec2 group
- ec2_group:
- name: example
- description: an example EC2 group
- vpc_id: 12345
- region: eu-west-1
- aws_secret_key: SECRET
- aws_access_key: ACCESS
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 10.0.0.0/8
- - proto: tcp
- from_port: 443
- to_port: 443
- # this should only be needed for EC2 Classic security group rules
- # because in a VPC an ELB will use a user-account security group
- group_id: amazon-elb/sg-87654321/amazon-elb-sg
- - proto: tcp
- from_port: 3306
- to_port: 3306
- group_id: 123412341234/sg-87654321/exact-name-of-sg
- - proto: udp
- from_port: 10050
- to_port: 10050
- cidr_ip: 10.0.0.0/8
- - proto: udp
- from_port: 10051
- to_port: 10051
- group_id: sg-12345678
- - proto: icmp
- from_port: 8 # icmp type, -1 = any type
- to_port: -1 # icmp subtype, -1 = any subtype
- cidr_ip: 10.0.0.0/8
- - proto: all
- # the containing group name may be specified here
- group_name: example
- - proto: all
- # in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6),
- # traffic on all ports is allowed, regardless of any ports you specify
- from_port: 10050 # this value is ignored
- to_port: 10050 # this value is ignored
- cidr_ip: 10.0.0.0/8
-
- rules_egress:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- cidr_ipv6: 64:ff9b::/96
- group_name: example-other
- # description to use if example-other needs to be created
- group_desc: other example EC2 group
-
-- name: example2 ec2 group
- ec2_group:
- name: example2
- description: an example2 EC2 group
- vpc_id: 12345
- region: eu-west-1
- rules:
- # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
- - proto: tcp
- ports: 22
- group_name: example-vpn
- - proto: tcp
- ports:
- - 80
- - 443
- - 8080-8099
- cidr_ip: 0.0.0.0/0
- # Rule sources list support was added in version 2.4. This allows defining multiple sources per source type as well as multiple source types per rule.
- - proto: tcp
- ports:
- - 6379
- - 26379
- group_name:
- - example-vpn
- - example-redis
- - proto: tcp
- ports: 5665
- group_name: example-vpn
- cidr_ip:
- - 172.16.1.0/24
- - 172.16.17.0/24
- cidr_ipv6:
- - 2607:F8B0::/32
- - 64:ff9b::/96
- group_id:
- - sg-edcd9784
- diff: True
-
-- name: "Delete group by its id"
- ec2_group:
- region: eu-west-1
- group_id: sg-33b4ee5b
- state: absent
-'''
-
-RETURN = '''
-group_name:
- description: Security group name
- sample: My Security Group
- type: str
- returned: on create/update
-group_id:
- description: Security group id
- sample: sg-abcd1234
- type: str
- returned: on create/update
-description:
- description: Description of security group
- sample: My Security Group
- type: str
- returned: on create/update
-tags:
- description: Tags associated with the security group
- sample:
- Name: My Security Group
- Purpose: protecting stuff
- type: dict
- returned: on create/update
-vpc_id:
- description: ID of VPC to which the security group belongs
- sample: vpc-abcd1234
- type: str
- returned: on create/update
-ip_permissions:
- description: Inbound rules associated with the security group.
- sample:
- - from_port: 8182
- ip_protocol: tcp
- ip_ranges:
- - cidr_ip: "1.1.1.1/32"
- ipv6_ranges: []
- prefix_list_ids: []
- to_port: 8182
- user_id_group_pairs: []
- type: list
- returned: on create/update
-ip_permissions_egress:
- description: Outbound rules associated with the security group.
- sample:
- - ip_protocol: -1
- ip_ranges:
- - cidr_ip: "0.0.0.0/0"
- ipv6_ranges: []
- prefix_list_ids: []
- user_id_group_pairs: []
- type: list
- returned: on create/update
-owner_id:
- description: AWS Account ID of the security group
- sample: 123456789012
- type: int
- returned: on create/update
-'''
-
-import json
-import re
-import itertools
-from copy import deepcopy
-from time import sleep
-from collections import namedtuple
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.aws.iam import get_aws_account_id
-from ansible.module_utils.aws.waiters import get_waiter
-from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
-from ansible.module_utils.common.network import to_ipv6_subnet, to_subnet
-from ansible.module_utils.compat.ipaddress import ip_network, IPv6Network
-from ansible.module_utils._text import to_text
-from ansible.module_utils.six import string_types
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description'])
-valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix'])
-current_account_id = None
-
-
-def rule_cmp(a, b):
- """Compare rules without descriptions"""
- for prop in ['port_range', 'protocol', 'target', 'target_type']:
- if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol):
- # equal protocols can interchange `(-1, -1)` and `(None, None)`
- if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)):
- continue
- elif getattr(a, prop) != getattr(b, prop):
- return False
- elif getattr(a, prop) != getattr(b, prop):
- return False
- return True
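-
-# For example (hypothetical values), two rules that differ only in their
-# description and in the (-1, -1) vs (None, None) spelling of "all ports"
-# compare as equal:
-#
-#   a = Rule((-1, -1), 'tcp', '10.0.0.0/8', 'ipv4', None)
-#   b = Rule((None, None), 'tcp', '10.0.0.0/8', 'ipv4', 'ssh')
-#   rule_cmp(a, b)  # -> True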
-
-
-def rules_to_permissions(rules):
- return [to_permission(rule) for rule in rules]
-
-
-def to_permission(rule):
- # take a Rule, output the serialized grant
- perm = {
- 'IpProtocol': rule.protocol,
- }
- perm['FromPort'], perm['ToPort'] = rule.port_range
- if rule.target_type == 'ipv4':
- perm['IpRanges'] = [{
- 'CidrIp': rule.target,
- }]
- if rule.description:
- perm['IpRanges'][0]['Description'] = rule.description
- elif rule.target_type == 'ipv6':
- perm['Ipv6Ranges'] = [{
- 'CidrIpv6': rule.target,
- }]
- if rule.description:
- perm['Ipv6Ranges'][0]['Description'] = rule.description
- elif rule.target_type == 'group':
- if isinstance(rule.target, tuple):
- pair = {}
- if rule.target[0]:
- pair['UserId'] = rule.target[0]
- # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
- if rule.target[1]:
- pair['GroupId'] = rule.target[1]
- elif rule.target[2]:
- pair['GroupName'] = rule.target[2]
- perm['UserIdGroupPairs'] = [pair]
- else:
- perm['UserIdGroupPairs'] = [{
- 'GroupId': rule.target
- }]
- if rule.description:
- perm['UserIdGroupPairs'][0]['Description'] = rule.description
- elif rule.target_type == 'ip_prefix':
- perm['PrefixListIds'] = [{
- 'PrefixListId': rule.target,
- }]
- if rule.description:
- perm['PrefixListIds'][0]['Description'] = rule.description
- elif rule.target_type not in valid_targets:
- raise ValueError('Invalid target type for rule {0}'.format(rule))
- return fix_port_and_protocol(perm)
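-
-# For example (hypothetical values), an ipv4 Rule serializes to a boto3
-# IpPermission dict:
-#
-#   to_permission(Rule((80, 80), 'tcp', '10.0.0.0/8', 'ipv4', None))
-#   # -> {'IpProtocol': 'tcp', 'FromPort': 80, 'ToPort': 80,
-#   #     'IpRanges': [{'CidrIp': '10.0.0.0/8'}]}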
-
-
-def rule_from_group_permission(perm):
- def ports_from_permission(p):
- if 'FromPort' not in p and 'ToPort' not in p:
- return (None, None)
- return (int(p['FromPort']), int(p['ToPort']))
-
- # outputs a rule tuple
- for target_key, target_subkey, target_type in [
- ('IpRanges', 'CidrIp', 'ipv4'),
- ('Ipv6Ranges', 'CidrIpv6', 'ipv6'),
- ('PrefixListIds', 'PrefixListId', 'ip_prefix'),
- ]:
- if target_key not in perm:
- continue
- for r in perm[target_key]:
- # there may be several IP ranges here, which is ok
- yield Rule(
- ports_from_permission(perm),
- to_text(perm['IpProtocol']),
- r[target_subkey],
- target_type,
- r.get('Description')
- )
- if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']:
- for pair in perm['UserIdGroupPairs']:
- target = (
- pair.get('UserId', None),
- pair.get('GroupId', None),
- pair.get('GroupName', None),
- )
- if pair.get('UserId', '').startswith('amazon-'):
- # amazon-elb and amazon-prefix rules don't need
- # group-id specified, so remove it when querying
- # from permission
- target = (
- target[0],
- None,
- target[2],
- )
- elif 'VpcPeeringConnectionId' in pair or pair['UserId'] != current_account_id:
- target = (
- pair.get('UserId', None),
- pair.get('GroupId', None),
- pair.get('GroupName', None),
- )
-
- yield Rule(
- ports_from_permission(perm),
- to_text(perm['IpProtocol']),
- target,
- 'group',
- pair.get('Description')
- )
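-
-# For example (hypothetical values), one boto3 permission fans out into one
-# Rule per source:
-#
-#   perm = {'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22,
-#           'IpRanges': [{'CidrIp': '10.0.0.0/8'}, {'CidrIp': '192.168.0.0/16'}]}
-#   list(rule_from_group_permission(perm))
-#   # -> [Rule((22, 22), 'tcp', '10.0.0.0/8', 'ipv4', None),
-#   #     Rule((22, 22), 'tcp', '192.168.0.0/16', 'ipv4', None)]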
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['InvalidGroup.NotFound'])
-def get_security_groups_with_backoff(connection, **kwargs):
- return connection.describe_security_groups(**kwargs)
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def sg_exists_with_backoff(connection, **kwargs):
- try:
- return connection.describe_security_groups(**kwargs)
- except is_boto3_error_code('InvalidGroup.NotFound'):
- return {'SecurityGroups': []}
-
-
-def deduplicate_rules_args(rules):
- """Returns unique rules"""
- if rules is None:
- return None
- return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
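-
-# For example (hypothetical values), byte-identical rule dicts collapse to one:
-#
-#   deduplicate_rules_args([{'proto': 'tcp', 'ports': 22, 'cidr_ip': '10.0.0.0/8'},
-#                           {'proto': 'tcp', 'ports': 22, 'cidr_ip': '10.0.0.0/8'}])
-#   # -> [{'proto': 'tcp', 'ports': 22, 'cidr_ip': '10.0.0.0/8'}]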
-
-
-def validate_rule(module, rule):
- VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', 'ip_prefix',
- 'group_id', 'group_name', 'group_desc',
- 'proto', 'from_port', 'to_port', 'rule_desc')
- if not isinstance(rule, dict):
- module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
- for k in rule:
- if k not in VALID_PARAMS:
- module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule))
-
- if 'group_id' in rule and 'cidr_ip' in rule:
- module.fail_json(msg='Specify group_id OR cidr_ip, not both')
- elif 'group_name' in rule and 'cidr_ip' in rule:
- module.fail_json(msg='Specify group_name OR cidr_ip, not both')
- elif 'group_id' in rule and 'cidr_ipv6' in rule:
- module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
- elif 'group_name' in rule and 'cidr_ipv6' in rule:
- module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
- elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
- module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
- elif 'group_id' in rule and 'group_name' in rule:
- module.fail_json(msg='Specify group_id OR group_name, not both')
-
-
-def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
- """
- Returns tuple of (target_type, target, group_created) after validating rule params.
-
- rule: Dict describing a rule.
- name: Name of the security group being managed.
- groups: Dict of all available security groups.
-
- AWS accepts an ip range or a security group as the target of a rule. This
- function validates the rule specification and returns either a non-None
- group_id or a non-None ip range.
- """
- FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
- group_id = None
- group_name = None
- target_group_created = False
-
- validate_rule(module, rule)
- if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
- # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
- owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
- group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
- groups[group_id] = group_instance
- groups[group_name] = group_instance
- # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
- if group_id and group_name:
- group_name = None
- return 'group', (owner_id, group_id, group_name), False
- elif 'group_id' in rule:
- return 'group', rule['group_id'], False
- elif 'group_name' in rule:
- group_name = rule['group_name']
- if group_name == name:
- group_id = group['GroupId']
- groups[group_id] = group
- groups[group_name] = group
- elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
- # both are VPC groups, this is ok
- group_id = groups[group_name]['GroupId']
- elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
- # both are EC2 classic, this is ok
- group_id = groups[group_name]['GroupId']
- else:
- auto_group = None
- filters = {'group-name': group_name}
- if vpc_id:
- filters['vpc-id'] = vpc_id
- # if we got here, either the target group does not exist, or there
- # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
- # is bad, so we have to create a new SG because no compatible group
- # exists
- if not rule.get('group_desc', '').strip():
- # retry describing the group once
- try:
- auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
- except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError):
- module.fail_json(msg="group %s will be automatically created by rule %s but "
- "no description was provided" % (group_name, rule))
- except ClientError as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e)
- elif not module.check_mode:
- params = dict(GroupName=group_name, Description=rule['group_desc'])
- if vpc_id:
- params['VpcId'] = vpc_id
- try:
- auto_group = client.create_security_group(**params)
- get_waiter(
- client, 'security_group_exists',
- ).wait(
- GroupIds=[auto_group['GroupId']],
- )
- except is_boto3_error_code('InvalidGroup.Duplicate'):
- # The group exists, but didn't show up in any of our describe-security-groups calls
- # Try searching on a filter for the name, and allow a retry window for AWS to update
- # the model on their end.
- try:
- auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
- except IndexError as e:
- module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
- except ClientError as e:
- module.fail_json_aws(
- e,
- msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
- if auto_group is not None:
- group_id = auto_group['GroupId']
- groups[group_id] = auto_group
- groups[group_name] = auto_group
- target_group_created = True
- return 'group', group_id, target_group_created
- elif 'cidr_ip' in rule:
- return 'ipv4', validate_ip(module, rule['cidr_ip']), False
- elif 'cidr_ipv6' in rule:
- return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False
- elif 'ip_prefix' in rule:
- return 'ip_prefix', rule['ip_prefix'], False
-
- module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule)
-
-
-def ports_expand(ports):
- # takes a list of ports and returns a list of (port_from, port_to)
- ports_expanded = []
- for port in ports:
- if not isinstance(port, string_types):
- ports_expanded.append((port,) * 2)
- elif '-' in port:
- ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1)))
- else:
- ports_expanded.append((int(port.strip()),) * 2)
-
- return ports_expanded
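-
-# For example (hypothetical values), mixed port specs become (from, to) tuples:
-#
-#   ports_expand([22, '80', '8080-8099'])
-#   # -> [(22, 22), (80, 80), (8080, 8099)]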
-
-
-def rule_expand_ports(rule):
- # takes a rule dict and returns a list of expanded rule dicts
- if 'ports' not in rule:
- if isinstance(rule.get('from_port'), string_types):
- rule['from_port'] = int(rule.get('from_port'))
- if isinstance(rule.get('to_port'), string_types):
- rule['to_port'] = int(rule.get('to_port'))
- return [rule]
-
- ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
-
- rule_expanded = []
- for from_to in ports_expand(ports):
- temp_rule = rule.copy()
- del temp_rule['ports']
- temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to)
- rule_expanded.append(temp_rule)
-
- return rule_expanded
-
-
-def rules_expand_ports(rules):
- # takes a list of rules and expands it based on 'ports'
- if not rules:
- return rules
-
- return [rule for rule_complex in rules
- for rule in rule_expand_ports(rule_complex)]
-
-
-def rule_expand_source(rule, source_type):
- # takes a rule dict and returns a list of expanded rule dicts for specified source_type
- sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
- source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
-
- rule_expanded = []
- for source in sources:
- temp_rule = rule.copy()
- for s in source_types_all:
- temp_rule.pop(s, None)
- temp_rule[source_type] = source
- rule_expanded.append(temp_rule)
-
- return rule_expanded
-
-
-def rule_expand_sources(rule):
- # takes a rule dict and returns a list of expanded rule dicts
- source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule)
-
- return [r for stype in source_types
- for r in rule_expand_source(rule, stype)]
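-
-# For example (hypothetical values), a rule listing two CIDR sources expands
-# into two single-source rules:
-#
-#   rule_expand_sources({'proto': 'tcp', 'ports': 443,
-#                        'cidr_ip': ['10.0.0.0/8', '172.16.0.0/12']})
-#   # -> [{'proto': 'tcp', 'ports': 443, 'cidr_ip': '10.0.0.0/8'},
-#   #     {'proto': 'tcp', 'ports': 443, 'cidr_ip': '172.16.0.0/12'}]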
-
-
-def rules_expand_sources(rules):
- # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
- if not rules:
- return rules
-
- return [rule for rule_complex in rules
- for rule in rule_expand_sources(rule_complex)]
-
-
-def update_rules_description(module, client, rule_type, group_id, ip_permissions):
- if module.check_mode:
- return
- try:
- if rule_type == "in":
- client.update_security_group_rule_descriptions_ingress(GroupId=group_id, IpPermissions=ip_permissions)
- if rule_type == "out":
- client.update_security_group_rule_descriptions_egress(GroupId=group_id, IpPermissions=ip_permissions)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id)
-
-
-def fix_port_and_protocol(permission):
- for key in ('FromPort', 'ToPort'):
- if key in permission:
- if permission[key] is None:
- del permission[key]
- else:
- permission[key] = int(permission[key])
-
- permission['IpProtocol'] = to_text(permission['IpProtocol'])
-
- return permission
-
-
-def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id):
- if revoke_ingress:
- revoke(client, module, revoke_ingress, group_id, 'in')
- if revoke_egress:
- revoke(client, module, revoke_egress, group_id, 'out')
- return bool(revoke_ingress or revoke_egress)
-
-
-def revoke(client, module, ip_permissions, group_id, rule_type):
- if not module.check_mode:
- try:
- if rule_type == 'in':
- client.revoke_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
- elif rule_type == 'out':
- client.revoke_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
- except (BotoCoreError, ClientError) as e:
- rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
- module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions))
-
-
-def add_new_permissions(client, module, new_ingress, new_egress, group_id):
- if new_ingress:
- authorize(client, module, new_ingress, group_id, 'in')
- if new_egress:
- authorize(client, module, new_egress, group_id, 'out')
- return bool(new_ingress or new_egress)
-
-
-def authorize(client, module, ip_permissions, group_id, rule_type):
- if not module.check_mode:
- try:
- if rule_type == 'in':
- client.authorize_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
- elif rule_type == 'out':
- client.authorize_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
- except (BotoCoreError, ClientError) as e:
- rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
- module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions))
-
-
-def validate_ip(module, cidr_ip):
- split_addr = cidr_ip.split('/')
- if len(split_addr) == 2:
- # this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set
- # Get the network bits if IPv4, and validate if IPv6.
- try:
- ip = to_subnet(split_addr[0], split_addr[1])
- if ip != cidr_ip:
- module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
- "check the network mask and make sure that only network bits are set: {1}.".format(
- cidr_ip, ip))
- except ValueError:
- # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here
- try:
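- # the isinstance() result is discarded on purpose: the real check is that
- # ip_network() raises ValueError for anything that is not a clean IPv6 network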
- isinstance(ip_network(to_text(cidr_ip)), IPv6Network)
- ip = cidr_ip
- except ValueError:
- # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError
- # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits
- ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1]
- if ip6 != cidr_ip:
- module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, "
- "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6))
- return ip6
- return ip
- return cidr_ip
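-
-# For example (hypothetical values), host bits are masked off with a warning:
-#
-#   validate_ip(module, '10.0.0.5/8')  # warns, returns '10.0.0.0/8'
-#   validate_ip(module, '10.0.0.0/8')  # returns '10.0.0.0/8' unchanged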
-
-
-def update_tags(client, module, group_id, current_tags, tags, purge_tags):
- tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
-
- if not module.check_mode:
- if tags_to_delete:
- try:
- client.delete_tags(Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))
-
- # Add/update tags
- if tags_need_modify:
- try:
- client.create_tags(Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify))
-
- return bool(tags_need_modify or tags_to_delete)
-
-
-def update_rule_descriptions(module, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
- changed = False
- client = module.client('ec2')
- ingress_needs_desc_update = []
- egress_needs_desc_update = []
-
- for present_rule in present_egress:
- needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
- for r in needs_update:
- named_tuple_egress_list.remove(r)
- egress_needs_desc_update.extend(needs_update)
- for present_rule in present_ingress:
- needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
- for r in needs_update:
- named_tuple_ingress_list.remove(r)
- ingress_needs_desc_update.extend(needs_update)
-
- if ingress_needs_desc_update:
- update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update))
- changed |= True
- if egress_needs_desc_update:
- update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update))
- changed |= True
- return changed
-
-
-def create_security_group(client, module, name, description, vpc_id):
- if not module.check_mode:
- params = dict(GroupName=name, Description=description)
- if vpc_id:
- params['VpcId'] = vpc_id
- try:
- group = client.create_security_group(**params)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to create security group")
- # When a group is created, an egress rule allowing all
- # traffic to 0.0.0.0/0 is added automatically, but it is not
- # reflected in the object returned by the AWS API call,
- # so we re-read the group to get an updated object.
- # Amazon sometimes takes a couple of seconds to update the
- # security group, so wait until it exists.
- while True:
- sleep(3)
- group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
- if group.get('VpcId') and not group.get('IpPermissionsEgress'):
- pass
- else:
- break
- return group
- return None
-
-
-def wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
- group_id = group['GroupId']
- tries = 6
-
- def await_rules(group, desired_rules, purge, rule_key):
- for i in range(tries):
- current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
- if purge and len(current_rules ^ set(desired_rules)) == 0:
- return group
- elif purge:
- conflicts = current_rules ^ set(desired_rules)
- # For cases where set comparison is equivalent, but invalid port/proto exist
- for a, b in itertools.combinations(conflicts, 2):
- if rule_cmp(a, b):
- conflicts.discard(a)
- conflicts.discard(b)
- if not len(conflicts):
- return group
- elif current_rules.issuperset(desired_rules) and not purge:
- return group
- sleep(10)
- group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
- module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
- return group
-
- group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
- if 'VpcId' in group and module.params.get('rules_egress') is not None:
- group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
- return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
-
-
-def group_exists(client, module, vpc_id, group_id, name):
- params = {'Filters': []}
- if group_id:
- params['GroupIds'] = [group_id]
- if name:
- # Add name to filters rather than params['GroupNames']
- # because params['GroupNames'] only checks the default vpc if no vpc is provided
- params['Filters'].append({'Name': 'group-name', 'Values': [name]})
- if vpc_id:
- params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
- # Don't filter by description to maintain backwards compatibility
-
- try:
- security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
- all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
- except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Error in describe_security_groups")
-
- if security_groups:
- groups = dict((group['GroupId'], group) for group in all_groups)
- groups.update(dict((group['GroupName'], group) for group in all_groups))
- if vpc_id:
- vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id)
- groups.update(vpc_wins)
- # maintain backwards compatibility by using the last matching group
- return security_groups[-1], groups
- return None, {}
-
-
-def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress):
- if not hasattr(client, "update_security_group_rule_descriptions_egress"):
- all_rules = (rules or []) + (rules_egress or [])
- if any('rule_desc' in rule for rule in all_rules):
- module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.")
-
-
-def get_diff_final_resource(client, module, security_group):
- def get_account_id(security_group, module):
- try:
- owner_id = security_group.get('owner_id', module.client('sts').get_caller_identity()['Account'])
- except (BotoCoreError, ClientError) as e:
- owner_id = "Unable to determine owner_id: {0}".format(to_text(e))
- return owner_id
-
- def get_final_tags(security_group_tags, specified_tags, purge_tags):
- if specified_tags is None:
- return security_group_tags
- tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags)
- end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete)
- end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete))
- end_result_tags.update(tags_need_modify)
- return end_result_tags
-
- def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules):
- if specified_rules is None:
- return security_group_rules
- if purge_rules:
- final_rules = []
- else:
- final_rules = list(security_group_rules)
- specified_rules = flatten_nested_targets(module, deepcopy(specified_rules))
- for rule in specified_rules:
- format_rule = {
- 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'),
- 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': []
- }
- if rule.get('proto', 'tcp') in ('all', '-1', -1):
- format_rule['ip_protocol'] = '-1'
- format_rule.pop('from_port')
- format_rule.pop('to_port')
- elif rule.get('ports'):
- if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)):
- rule['ports'] = [rule['ports']]
- for port in rule.get('ports'):
- if isinstance(port, string_types) and '-' in port:
- format_rule['from_port'], format_rule['to_port'] = port.split('-')
- else:
- format_rule['from_port'] = format_rule['to_port'] = port
- elif rule.get('from_port') or rule.get('to_port'):
- format_rule['from_port'] = rule.get('from_port', rule.get('to_port'))
- format_rule['to_port'] = rule.get('to_port', rule.get('from_port'))
- for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'):
- if rule.get(source_type):
- rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type)
- if rule.get('rule_desc'):
- format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}]
- else:
- if not isinstance(rule[source_type], list):
- rule[source_type] = [rule[source_type]]
- format_rule[rule_key] = [{source_type: target} for target in rule[source_type]]
- if rule.get('group_id') or rule.get('group_name'):
- rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0])
- format_rule['user_id_group_pairs'] = [{
- 'description': rule_sg.get('description', rule_sg.get('group_desc')),
- 'group_id': rule_sg.get('group_id', rule.get('group_id')),
- 'group_name': rule_sg.get('group_name', rule.get('group_name')),
- 'peering_status': rule_sg.get('peering_status'),
- 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)),
- 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']),
- 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id')
- }]
- for k, v in list(format_rule['user_id_group_pairs'][0].items()):
- if v is None:
- format_rule['user_id_group_pairs'][0].pop(k)
- final_rules.append(format_rule)
- # Order final rules consistently
- final_rules.sort(key=get_ip_permissions_sort_key)
- return final_rules
- security_group_ingress = security_group.get('ip_permissions', [])
- specified_ingress = module.params['rules']
- purge_ingress = module.params['purge_rules']
- security_group_egress = security_group.get('ip_permissions_egress', [])
- specified_egress = module.params['rules_egress']
- purge_egress = module.params['purge_rules_egress']
- return {
- 'description': module.params['description'],
- 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'),
- 'group_name': security_group.get('group_name', module.params['name']),
- 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress),
- 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress),
- 'owner_id': get_account_id(security_group, module),
- 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']),
- 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])}
-
-
-def flatten_nested_targets(module, rules):
- def _flatten(targets):
- for target in targets:
- if isinstance(target, list):
- for t in _flatten(target):
- yield t
- elif isinstance(target, string_types):
- yield target
-
- if rules is not None:
- for rule in rules:
- target_list_type = None
- if isinstance(rule.get('cidr_ip'), list):
- target_list_type = 'cidr_ip'
- elif isinstance(rule.get('cidr_ipv6'), list):
- target_list_type = 'cidr_ipv6'
- if target_list_type is not None:
- rule[target_list_type] = list(_flatten(rule[target_list_type]))
- return rules
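-
-# For example (hypothetical values), nested source lists are flattened in place:
-#
-#   flatten_nested_targets(module, [{'proto': 'tcp', 'ports': 22,
-#                                    'cidr_ip': ['10.0.0.0/8', ['172.16.0.0/12']]}])
-#   # -> [{'proto': 'tcp', 'ports': 22, 'cidr_ip': ['10.0.0.0/8', '172.16.0.0/12']}]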
-
-
-def get_rule_sort_key(dicts):
- if dicts.get('cidr_ip'):
- return dicts.get('cidr_ip')
- elif dicts.get('cidr_ipv6'):
- return dicts.get('cidr_ipv6')
- elif dicts.get('prefix_list_id'):
- return dicts.get('prefix_list_id')
- elif dicts.get('group_id'):
- return dicts.get('group_id')
- return None
-
-
-def get_ip_permissions_sort_key(rule):
- if rule.get('ip_ranges'):
- rule.get('ip_ranges').sort(key=get_rule_sort_key)
- return rule.get('ip_ranges')[0]['cidr_ip']
- elif rule.get('ipv6_ranges'):
- rule.get('ipv6_ranges').sort(key=get_rule_sort_key)
- return rule.get('ipv6_ranges')[0]['cidr_ipv6']
- elif rule.get('prefix_list_ids'):
- rule.get('prefix_list_ids').sort(key=get_rule_sort_key)
- return rule.get('prefix_list_ids')[0]['prefix_list_id']
- elif rule.get('user_id_group_pairs'):
- rule.get('user_id_group_pairs').sort(key=get_rule_sort_key)
- return rule.get('user_id_group_pairs')[0]['group_id']
- return None
-
-
-def main():
- argument_spec = dict(
- name=dict(),
- group_id=dict(),
- description=dict(),
- vpc_id=dict(),
- rules=dict(type='list'),
- rules_egress=dict(type='list'),
- state=dict(default='present', type='str', choices=['present', 'absent']),
- purge_rules=dict(default=True, required=False, type='bool'),
- purge_rules_egress=dict(default=True, required=False, type='bool'),
- tags=dict(required=False, type='dict', aliases=['resource_tags']),
- purge_tags=dict(default=True, required=False, type='bool')
- )
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- required_one_of=[['name', 'group_id']],
- required_if=[['state', 'present', ['name']]],
- )
-
- name = module.params['name']
- group_id = module.params['group_id']
- description = module.params['description']
- vpc_id = module.params['vpc_id']
- rules = flatten_nested_targets(module, deepcopy(module.params['rules']))
- rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress']))
- rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules)))
- rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress)))
- state = module.params.get('state')
- purge_rules = module.params['purge_rules']
- purge_rules_egress = module.params['purge_rules_egress']
- tags = module.params['tags']
- purge_tags = module.params['purge_tags']
-
- if state == 'present' and not description:
- module.fail_json(msg='Must provide description when state is present.')
-
- changed = False
- client = module.client('ec2')
-
- verify_rules_with_descriptions_permitted(client, module, rules, rules_egress)
- group, groups = group_exists(client, module, vpc_id, group_id, name)
- group_created_new = not bool(group)
-
- global current_account_id
- current_account_id = get_aws_account_id(module)
-
- before = {}
- after = {}
-
- # Ensure requested group is absent
- if state == 'absent':
- if group:
- # found a match, delete it
- before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
- before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
- try:
- if not module.check_mode:
- client.delete_security_group(GroupId=group['GroupId'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group)
- else:
- group = None
- changed = True
- else:
- # no match found, no changes required
- pass
-
- # Ensure requested group is present
- elif state == 'present':
- if group:
- # existing group
- before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
- before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
- if group['Description'] != description:
- module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
- "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
- else:
- # no match found, create it
- group = create_security_group(client, module, name, description, vpc_id)
- changed = True
-
- if tags is not None and group is not None:
- current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
- changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)
-
- if group:
- named_tuple_ingress_list = []
- named_tuple_egress_list = []
- current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
- current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])
-
- for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
- (rules_egress, 'out', named_tuple_egress_list)]:
- if new_rules is None:
- continue
- for rule in new_rules:
- target_type, target, target_group_created = get_target_from_rule(
- module, client, rule, name, group, groups, vpc_id)
- changed |= target_group_created
-
- if rule.get('proto', 'tcp') in ('all', '-1', -1):
- rule['proto'] = '-1'
- rule['from_port'] = None
- rule['to_port'] = None
- try:
- int(rule.get('proto', 'tcp'))
- rule['proto'] = to_text(rule.get('proto', 'tcp'))
- rule['from_port'] = None
- rule['to_port'] = None
- except ValueError:
- # rule does not use numeric protocol spec
- pass
-
- named_tuple_rule_list.append(
- Rule(
- port_range=(rule['from_port'], rule['to_port']),
- protocol=to_text(rule.get('proto', 'tcp')),
- target=target, target_type=target_type,
- description=rule.get('rule_desc'),
- )
- )
-
- # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
- new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))]
- new_egress_permissions = [to_permission(r) for r in (set(named_tuple_egress_list) - set(current_egress))]
-
- if module.params.get('rules_egress') is None and 'VpcId' in group:
- # when no egress rules are specified and we're in a VPC,
- # we add in a default allow all out rule, which was the
- # default behavior before egress rules were added
- rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
- if rule in current_egress:
- named_tuple_egress_list.append(rule)
- if rule not in current_egress:
- current_egress.append(rule)
-
- # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
- present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
- present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))
-
- if purge_rules:
- revoke_ingress = []
- for p in present_ingress:
- if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]):
- revoke_ingress.append(to_permission(p))
- else:
- revoke_ingress = []
- if purge_rules_egress and module.params.get('rules_egress') is not None:
- if module.params.get('rules_egress') == []:
- revoke_egress = [
- to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
- if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
- ]
- else:
- revoke_egress = []
- for p in present_egress:
- if not any([rule_cmp(p, b) for b in named_tuple_egress_list]):
- revoke_egress.append(to_permission(p))
- else:
- revoke_egress = []
-
- # named_tuple_ingress_list and named_tuple_egress_list got updated by
- # method update_rule_descriptions, deep copy these two lists to new
- # variables for the record of the 'desired' ingress and egress sg permissions
- desired_ingress = deepcopy(named_tuple_ingress_list)
- desired_egress = deepcopy(named_tuple_egress_list)
-
- changed |= update_rule_descriptions(module, group['GroupId'], present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list)
-
- # Revoke old rules
- changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])
- rule_msg = 'Revoking {0}, and egress {1}'.format(revoke_ingress, revoke_egress)
-
- new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
- new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
- # Authorize new rules
- changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])
-
- if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
- # A new group with no rules provided is already being awaited.
- # When it is created we wait for the default egress rule to be added by AWS
- security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
- elif changed and not module.check_mode:
- # keep pulling until current security group rules match the desired ingress and egress rules
- security_group = wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
- else:
- security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
- security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
- security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))
-
- else:
- security_group = {'group_id': None}
-
- if module._diff:
- if module.params['state'] == 'present':
- after = get_diff_final_resource(client, module, security_group)
- if before.get('ip_permissions'):
- before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
-
- security_group['diff'] = [{'before': before, 'after': after}]
-
- module.exit_json(changed=changed, **security_group)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_group_info.py b/lib/ansible/modules/cloud/amazon/ec2_group_info.py
deleted file mode 100644
index 733f7d0622..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_group_info.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_group_info
-short_description: Gather information about ec2 security groups in AWS.
-description:
- - Gather information about ec2 security groups in AWS.
- - This module was called C(ec2_group_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.3"
-requirements: [ boto3 ]
-author:
-- Henrique Rodrigues (@Sodki)
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
- U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for
- possible filters. Filter names and values are case sensitive. You can also use underscores (_)
- instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
- required: false
- default: {}
- type: dict
-notes:
- - By default, the module will return all security groups. To limit results use the appropriate filters.
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all security groups
-- ec2_group_info:
-
-# Gather information about all security groups in a specific VPC
-- ec2_group_info:
- filters:
- vpc-id: vpc-12345678
-
-# Gather information about a security group
-- ec2_group_info:
- filters:
- group-name: example-1
-
-# Gather information about a security group by id
-- ec2_group_info:
- filters:
- group-id: sg-12345678
-
-# Gather information about a security group with multiple filters, also mixing the use of underscores as filter keys
-- ec2_group_info:
- filters:
- group_id: sg-12345678
- vpc-id: vpc-12345678
-
-# Gather information about various security groups
-- ec2_group_info:
- filters:
- group-name:
- - example-1
- - example-2
- - example-3
-
-# Gather information about any security group with a tag key Name and value Example.
-# The quotes around 'tag:Name' are important because of the colon in the key
-- ec2_group_info:
- filters:
- "tag:Name": Example
-'''
-
-RETURN = '''
-security_groups:
- description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
- type: list
- returned: always
- sample:
-'''
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict)
-
-
-def main():
- argument_spec = dict(
- filters=dict(default={}, type='dict')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._name == 'ec2_group_facts':
- module.deprecate("The 'ec2_group_facts' module has been renamed to 'ec2_group_info'", version='2.13')
-
- connection = module.client('ec2')
-
- # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
- sanitized_filters = module.params.get("filters")
- for key in list(sanitized_filters):
- if not key.startswith("tag:"):
- sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)
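-    # e.g. {'group_id': 'sg-1', 'vpc-id': 'vpc-1'} becomes {'group-id': 'sg-1', 'vpc-id': 'vpc-1'};
-    # keys starting with 'tag:' are left alone, since tag names may legitimately contain underscores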
-
- try:
- security_groups = connection.describe_security_groups(
- Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
- )
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to describe security groups')
-
- snaked_security_groups = []
- for security_group in security_groups['SecurityGroups']:
- # Modify boto3 tags list to be ansible friendly dict
- # but don't camel case tags
- security_group = camel_dict_to_snake_dict(security_group)
- security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', {}), tag_name_key_name='key', tag_value_key_name='value')
- snaked_security_groups.append(security_group)
-
- module.exit_json(security_groups=snaked_security_groups)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_key.py b/lib/ansible/modules/cloud/amazon/ec2_key.py
deleted file mode 100644
index de67af8bc0..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_key.py
+++ /dev/null
@@ -1,271 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_key
-version_added: "1.5"
-short_description: create or delete an ec2 key pair
-description:
- - create or delete an ec2 key pair.
-options:
- name:
- description:
- - Name of the key pair.
- required: true
- type: str
- key_material:
- description:
- - Public key material.
- required: false
- type: str
- force:
- description:
- - Force overwrite of already existing key pair if key has changed.
- required: false
- default: true
- type: bool
- version_added: "2.3"
- state:
- description:
- - create or delete keypair
- required: false
- choices: [ present, absent ]
- default: 'present'
- type: str
- wait:
- description:
- - This option has no effect since version 2.5 and will be removed in 2.14.
- version_added: "1.6"
- type: bool
- wait_timeout:
- description:
- - This option has no effect since version 2.5 and will be removed in 2.14.
- version_added: "1.6"
- type: int
- required: false
-
-extends_documentation_fragment:
- - aws
- - ec2
-requirements: [ boto3 ]
-author:
- - "Vincent Viallet (@zbal)"
- - "Prasad Katti (@prasadkatti)"
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: create a new ec2 key pair, returns generated private key
- ec2_key:
- name: my_keypair
-
-- name: create key pair using provided key_material
- ec2_key:
- name: my_keypair
- key_material: 'ssh-rsa AAAAxyz...== me@example.com'
-
-- name: create key pair using key_material obtained using 'file' lookup plugin
- ec2_key:
- name: my_keypair
- key_material: "{{ lookup('file', '/path/to/public_key/id_rsa.pub') }}"
-
-# try creating a key pair with the name of an already existing keypair
-# but don't overwrite it even if the key is different (force=false)
-- name: try creating a key pair with name of an already existing keypair
- ec2_key:
- name: my_existing_keypair
- key_material: 'ssh-rsa AAAAxyz...== me@example.com'
- force: false
-
-- name: remove key pair by name
- ec2_key:
- name: my_keypair
- state: absent
-'''
-
-RETURN = '''
-changed:
- description: whether a keypair was created/deleted
- returned: always
- type: bool
- sample: true
-msg:
- description: short message describing the action taken
- returned: always
- type: str
- sample: key pair created
-key:
- description: details of the keypair (this is set to null when state is absent)
- returned: always
- type: complex
- contains:
- fingerprint:
- description: fingerprint of the key
- returned: when state is present
- type: str
- sample: 'b0:22:49:61:d9:44:9d:0c:7e:ac:8a:32:93:21:6c:e8:fb:59:62:43'
- name:
- description: name of the keypair
- returned: when state is present
- type: str
- sample: my_keypair
- private_key:
- description: private key of a newly created keypair
- returned: when a new keypair is created by AWS (key_material is not provided)
- type: str
- sample: '-----BEGIN RSA PRIVATE KEY-----
- MIIEowIBAAKC...
- -----END RSA PRIVATE KEY-----'
-'''
-
-import uuid
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils._text import to_bytes
-
-try:
- from botocore.exceptions import ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def extract_key_data(key):
-
- data = {
- 'name': key['KeyName'],
- 'fingerprint': key['KeyFingerprint']
- }
- if 'KeyMaterial' in key:
- data['private_key'] = key['KeyMaterial']
- return data
-
-
-def get_key_fingerprint(module, ec2_client, key_material):
- '''
- EC2's fingerprints are non-trivial to generate, so push this key
- to a temporary name and make ec2 calculate the fingerprint for us.
- http://blog.jbrowne.com/?p=23
- https://forums.aws.amazon.com/thread.jspa?messageID=352828
- '''
-
- # find an unused name
- name_in_use = True
- while name_in_use:
- random_name = "ansible-" + str(uuid.uuid4())
- name_in_use = find_key_pair(module, ec2_client, random_name)
-
- temp_key = import_key_pair(module, ec2_client, random_name, key_material)
- delete_key_pair(module, ec2_client, random_name, finish_task=False)
- return temp_key['KeyFingerprint']
-
-
-def find_key_pair(module, ec2_client, name):
-
- try:
- key = ec2_client.describe_key_pairs(KeyNames=[name])['KeyPairs'][0]
- except ClientError as err:
- if err.response['Error']['Code'] == "InvalidKeyPair.NotFound":
- return None
- module.fail_json_aws(err, msg="error finding keypair")
- except IndexError:
- key = None
- return key
-
-
-def create_key_pair(module, ec2_client, name, key_material, force):
-
- key = find_key_pair(module, ec2_client, name)
- if key:
- if key_material and force:
- if not module.check_mode:
- new_fingerprint = get_key_fingerprint(module, ec2_client, key_material)
- if key['KeyFingerprint'] != new_fingerprint:
- delete_key_pair(module, ec2_client, name, finish_task=False)
- key = import_key_pair(module, ec2_client, name, key_material)
- key_data = extract_key_data(key)
- module.exit_json(changed=True, key=key_data, msg="key pair updated")
- else:
- # Assume a change will be made in check mode since a comparison can't be done
- module.exit_json(changed=True, key=extract_key_data(key), msg="key pair updated")
- key_data = extract_key_data(key)
- module.exit_json(changed=False, key=key_data, msg="key pair already exists")
- else:
- # key doesn't exist, create it now
- key_data = None
- if not module.check_mode:
- if key_material:
- key = import_key_pair(module, ec2_client, name, key_material)
- else:
- try:
- key = ec2_client.create_key_pair(KeyName=name)
- except ClientError as err:
- module.fail_json_aws(err, msg="error creating key")
- key_data = extract_key_data(key)
- module.exit_json(changed=True, key=key_data, msg="key pair created")
-
-
-def import_key_pair(module, ec2_client, name, key_material):
-
- try:
- key = ec2_client.import_key_pair(KeyName=name, PublicKeyMaterial=to_bytes(key_material))
- except ClientError as err:
- module.fail_json_aws(err, msg="error importing key")
- return key
-
-
-def delete_key_pair(module, ec2_client, name, finish_task=True):
-
- key = find_key_pair(module, ec2_client, name)
- if key:
- if not module.check_mode:
- try:
- ec2_client.delete_key_pair(KeyName=name)
- except ClientError as err:
- module.fail_json_aws(err, msg="error deleting key")
- if not finish_task:
- return
- module.exit_json(changed=True, key=None, msg="key deleted")
- module.exit_json(key=None, msg="key did not exist")
-
-
-def main():
-
- argument_spec = dict(
- name=dict(required=True),
- key_material=dict(),
- force=dict(type='bool', default=True),
- state=dict(default='present', choices=['present', 'absent']),
- wait=dict(type='bool', removed_in_version='2.14'),
- wait_timeout=dict(type='int', removed_in_version='2.14')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
- ec2_client = module.client('ec2')
-
- name = module.params['name']
- state = module.params.get('state')
- key_material = module.params.get('key_material')
- force = module.params.get('force')
-
- if state == 'absent':
- delete_key_pair(module, ec2_client, name)
- elif state == 'present':
- create_key_pair(module, ec2_client, name, key_material, force)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_metadata_facts.py b/lib/ansible/modules/cloud/amazon/ec2_metadata_facts.py
deleted file mode 100644
index 2eee43811b..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_metadata_facts.py
+++ /dev/null
@@ -1,564 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_metadata_facts
-short_description: Gathers facts (instance metadata) about remote hosts within ec2
-version_added: "1.0"
-author:
- - Silviu Dicu (@silviud)
- - Vinay Dandekar (@roadmapper)
-description:
- - This module fetches data from the instance metadata endpoint in ec2 as per
- U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html).
- - The module must be called from within the EC2 instance itself.
-notes:
- - Parameters to filter on ec2_metadata_facts may be added later.
-'''
-
-EXAMPLES = '''
-# Gather EC2 metadata facts
-- ec2_metadata_facts:
-
-- debug:
- msg: "This instance is a t1.micro"
- when: ansible_ec2_instance_type == "t1.micro"
-'''
-
-RETURN = '''
-ansible_facts:
- description: Dictionary of new facts representing discovered properties of the EC2 instance.
- returned: changed
- type: complex
- contains:
- ansible_ec2_ami_id:
- description: The AMI ID used to launch the instance.
- type: str
- sample: "ami-XXXXXXXX"
- ansible_ec2_ami_launch_index:
- description:
- - If you started more than one instance at the same time, this value indicates the order in which the instance was launched.
- - The value of the first instance launched is 0.
- type: str
- sample: "0"
- ansible_ec2_ami_manifest_path:
- description:
- - The path to the AMI manifest file in Amazon S3.
- - If you used an Amazon EBS-backed AMI to launch the instance, the returned result is unknown.
- type: str
- sample: "(unknown)"
- ansible_ec2_ancestor_ami_ids:
- description:
- - The AMI IDs of any instances that were rebundled to create this AMI.
- - This value will only exist if the AMI manifest file contained an ancestor-amis key.
- type: str
- sample: "(unknown)"
- ansible_ec2_block_device_mapping_ami:
- description: The virtual device that contains the root/boot file system.
- type: str
- sample: "/dev/sda1"
- ansible_ec2_block_device_mapping_ebsN:
- description:
- - The virtual devices associated with Amazon EBS volumes, if any are present.
- - Amazon EBS volumes are only available in metadata if they were present at launch time or when the instance was last started.
- - The N indicates the index of the Amazon EBS volume (such as ebs1 or ebs2).
- type: str
- sample: "/dev/xvdb"
- ansible_ec2_block_device_mapping_ephemeralN:
- description: The virtual devices associated with ephemeral devices, if any are present. The N indicates the index of the ephemeral volume.
- type: str
- sample: "/dev/xvdc"
- ansible_ec2_block_device_mapping_root:
- description:
- - The virtual devices or partitions associated with the root devices, or partitions on the virtual device,
-        where the root (/ or C:) file system is associated with the given instance.
- type: str
- sample: "/dev/sda1"
- ansible_ec2_block_device_mapping_swap:
- description: The virtual devices associated with swap. Not always present.
- type: str
- sample: "/dev/sda2"
- ansible_ec2_fws_instance_monitoring:
- description: "Value showing whether the customer has enabled detailed one-minute monitoring in CloudWatch."
- type: str
- sample: "enabled"
- ansible_ec2_hostname:
- description:
- - The private IPv4 DNS hostname of the instance.
- - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
- type: str
- sample: "ip-10-0-0-1.ec2.internal"
- ansible_ec2_iam_info:
- description:
- - If there is an IAM role associated with the instance, contains information about the last time the instance profile was updated,
- including the instance's LastUpdated date, InstanceProfileArn, and InstanceProfileId. Otherwise, not present.
- type: complex
- sample: ""
- contains:
- LastUpdated:
- description: The last time which InstanceProfile is associated with the Instance changed.
- type: str
- InstanceProfileArn:
- description: The ARN of the InstanceProfile associated with the Instance.
- type: str
- InstanceProfileId:
- description: The Id of the InstanceProfile associated with the Instance.
- type: str
- ansible_ec2_iam_info_instanceprofilearn:
- description: The IAM instance profile ARN.
- type: str
- sample: "arn:aws:iam::<account id>:instance-profile/<role name>"
- ansible_ec2_iam_info_instanceprofileid:
- description: IAM instance profile ID.
- type: str
- sample: ""
- ansible_ec2_iam_info_lastupdated:
- description: IAM info last updated time.
- type: str
- sample: "2017-05-12T02:42:27Z"
- ansible_ec2_iam_instance_profile_role:
- description: IAM instance role.
- type: str
- sample: "role_name"
- ansible_ec2_iam_security_credentials_<role name>:
- description:
- - If there is an IAM role associated with the instance, role-name is the name of the role,
- and role-name contains the temporary security credentials associated with the role. Otherwise, not present.
- type: str
- sample: ""
- ansible_ec2_iam_security_credentials_<role name>_accesskeyid:
- description: IAM role access key ID.
- type: str
- sample: ""
- ansible_ec2_iam_security_credentials_<role name>_code:
- description: IAM code.
- type: str
- sample: "Success"
- ansible_ec2_iam_security_credentials_<role name>_expiration:
- description: IAM role credentials expiration time.
- type: str
- sample: "2017-05-12T09:11:41Z"
- ansible_ec2_iam_security_credentials_<role name>_lastupdated:
- description: IAM role last updated time.
- type: str
- sample: "2017-05-12T02:40:44Z"
- ansible_ec2_iam_security_credentials_<role name>_secretaccesskey:
- description: IAM role secret access key.
- type: str
- sample: ""
- ansible_ec2_iam_security_credentials_<role name>_token:
- description: IAM role token.
- type: str
- sample: ""
- ansible_ec2_iam_security_credentials_<role name>_type:
- description: IAM role type.
- type: str
- sample: "AWS-HMAC"
- ansible_ec2_instance_action:
- description: Notifies the instance that it should reboot in preparation for bundling.
- type: str
- sample: "none"
- ansible_ec2_instance_id:
- description: The ID of this instance.
- type: str
- sample: "i-XXXXXXXXXXXXXXXXX"
- ansible_ec2_instance_identity_document:
- description: JSON containing instance attributes, such as instance-id, private IP address, etc.
- type: str
- sample: ""
- ansible_ec2_instance_identity_document_accountid:
- description: ""
- type: str
- sample: "012345678901"
- ansible_ec2_instance_identity_document_architecture:
- description: Instance system architecture.
- type: str
- sample: "x86_64"
- ansible_ec2_instance_identity_document_availabilityzone:
- description: The Availability Zone in which the instance launched.
- type: str
- sample: "us-east-1a"
- ansible_ec2_instance_identity_document_billingproducts:
- description: Billing products for this instance.
- type: str
- sample: ""
- ansible_ec2_instance_identity_document_devpayproductcodes:
- description: Product codes for the launched AMI.
- type: str
- sample: ""
- ansible_ec2_instance_identity_document_imageid:
- description: The AMI ID used to launch the instance.
- type: str
- sample: "ami-01234567"
- ansible_ec2_instance_identity_document_instanceid:
- description: The ID of this instance.
- type: str
- sample: "i-0123456789abcdef0"
- ansible_ec2_instance_identity_document_instancetype:
- description: The type of instance.
- type: str
- sample: "m4.large"
- ansible_ec2_instance_identity_document_kernelid:
- description: The ID of the kernel launched with this instance, if applicable.
- type: str
- sample: ""
- ansible_ec2_instance_identity_document_pendingtime:
- description: The instance pending time.
- type: str
- sample: "2017-05-11T20:51:20Z"
- ansible_ec2_instance_identity_document_privateip:
- description:
- - The private IPv4 address of the instance.
- - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
- type: str
- sample: "10.0.0.1"
- ansible_ec2_instance_identity_document_ramdiskid:
- description: The ID of the RAM disk specified at launch time, if applicable.
- type: str
- sample: ""
- ansible_ec2_instance_identity_document_region:
- description: The Region in which the instance launched.
- type: str
- sample: "us-east-1"
- ansible_ec2_instance_identity_document_version:
- description: Identity document version.
- type: str
- sample: "2010-08-31"
- ansible_ec2_instance_identity_pkcs7:
- description: Used to verify the document's authenticity and content against the signature.
- type: str
- sample: ""
- ansible_ec2_instance_identity_rsa2048:
- description: Used to verify the document's authenticity and content against the signature.
- type: str
- sample: ""
- ansible_ec2_instance_identity_signature:
- description: Data that can be used by other parties to verify its origin and authenticity.
- type: str
- sample: ""
- ansible_ec2_instance_type:
- description: The type of instance.
- type: str
- sample: "m4.large"
- ansible_ec2_local_hostname:
- description:
- - The private IPv4 DNS hostname of the instance.
- - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
- type: str
- sample: "ip-10-0-0-1.ec2.internal"
- ansible_ec2_local_ipv4:
- description:
- - The private IPv4 address of the instance.
- - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
- type: str
- sample: "10.0.0.1"
- ansible_ec2_mac:
- description:
- - The instance's media access control (MAC) address.
- - In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
- type: str
- sample: "00:11:22:33:44:55"
- ansible_ec2_metrics_vhostmd:
- description: Metrics.
- type: str
- sample: ""
- ansible_ec2_network_interfaces_macs_<mac address>_device_number:
- description:
- - The unique device number associated with that interface. The device number corresponds to the device name;
- for example, a device-number of 2 is for the eth2 device.
- - This category corresponds to the DeviceIndex and device-index fields that are used by the Amazon EC2 API and the EC2 commands for the AWS CLI.
- type: str
- sample: "0"
- ansible_ec2_network_interfaces_macs_<mac address>_interface_id:
- description: The elastic network interface ID.
- type: str
- sample: "eni-12345678"
- ansible_ec2_network_interfaces_macs_<mac address>_ipv4_associations_<ip address>:
- description: The private IPv4 addresses that are associated with each public-ip address and assigned to that interface.
- type: str
- sample: ""
- ansible_ec2_network_interfaces_macs_<mac address>_ipv6s:
- description: The IPv6 addresses associated with the interface. Returned only for instances launched into a VPC.
- type: str
- sample: ""
- ansible_ec2_network_interfaces_macs_<mac address>_local_hostname:
- description: The interface's local hostname.
- type: str
- sample: ""
- ansible_ec2_network_interfaces_macs_<mac address>_local_ipv4s:
- description: The private IPv4 addresses associated with the interface.
- type: str
- sample: ""
- ansible_ec2_network_interfaces_macs_<mac address>_mac:
- description: The instance's MAC address.
- type: str
- sample: "00:11:22:33:44:55"
- ansible_ec2_network_interfaces_macs_<mac address>_owner_id:
- description:
- - The ID of the owner of the network interface.
- - In multiple-interface environments, an interface can be attached by a third party, such as Elastic Load Balancing.
- - Traffic on an interface is always billed to the interface owner.
- type: str
- sample: "01234567890"
- ansible_ec2_network_interfaces_macs_<mac address>_public_hostname:
- description:
- - The interface's public DNS (IPv4). If the instance is in a VPC,
- this category is only returned if the enableDnsHostnames attribute is set to true.
- type: str
- sample: "ec2-1-2-3-4.compute-1.amazonaws.com"
- ansible_ec2_network_interfaces_macs_<mac address>_public_ipv4s:
- description: The Elastic IP addresses associated with the interface. There may be multiple IPv4 addresses on an instance.
- type: str
- sample: "1.2.3.4"
- ansible_ec2_network_interfaces_macs_<mac address>_security_group_ids:
- description: The IDs of the security groups to which the network interface belongs. Returned only for instances launched into a VPC.
- type: str
- sample: "sg-01234567,sg-01234568"
- ansible_ec2_network_interfaces_macs_<mac address>_security_groups:
- description: Security groups to which the network interface belongs. Returned only for instances launched into a VPC.
- type: str
- sample: "secgroup1,secgroup2"
- ansible_ec2_network_interfaces_macs_<mac address>_subnet_id:
- description: The ID of the subnet in which the interface resides. Returned only for instances launched into a VPC.
- type: str
- sample: "subnet-01234567"
- ansible_ec2_network_interfaces_macs_<mac address>_subnet_ipv4_cidr_block:
- description: The IPv4 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC.
- type: str
- sample: "10.0.1.0/24"
- ansible_ec2_network_interfaces_macs_<mac address>_subnet_ipv6_cidr_blocks:
-      description: The IPv6 CIDR blocks of the subnet in which the interface resides. Returned only for instances launched into a VPC.
- type: str
- sample: ""
- ansible_ec2_network_interfaces_macs_<mac address>_vpc_id:
- description: The ID of the VPC in which the interface resides. Returned only for instances launched into a VPC.
- type: str
- sample: "vpc-0123456"
- ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv4_cidr_block:
- description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
- type: str
- sample: "10.0.0.0/16"
- ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv4_cidr_blocks:
-      description: The IPv4 CIDR blocks of the VPC in which the interface resides. Returned only for instances launched into a VPC.
- type: str
- sample: "10.0.0.0/16"
- ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv6_cidr_blocks:
-      description: The IPv6 CIDR blocks of the VPC in which the interface resides. Returned only for instances launched into a VPC.
- type: str
- sample: ""
- ansible_ec2_placement_availability_zone:
- description: The Availability Zone in which the instance launched.
- type: str
- sample: "us-east-1a"
- ansible_ec2_placement_region:
- description: The Region in which the instance launched.
- type: str
- sample: "us-east-1"
- ansible_ec2_product_codes:
- description: Product codes associated with the instance, if any.
- type: str
- sample: "aw0evgkw8e5c1q413zgy5pjce"
- ansible_ec2_profile:
- description: EC2 instance hardware profile.
- type: str
- sample: "default-hvm"
- ansible_ec2_public_hostname:
- description:
- - The instance's public DNS. If the instance is in a VPC, this category is only returned if the enableDnsHostnames attribute is set to true.
- type: str
- sample: "ec2-1-2-3-4.compute-1.amazonaws.com"
- ansible_ec2_public_ipv4:
- description: The public IPv4 address. If an Elastic IP address is associated with the instance, the value returned is the Elastic IP address.
- type: str
- sample: "1.2.3.4"
- ansible_ec2_public_key:
- description: Public key. Only available if supplied at instance launch time.
- type: str
- sample: ""
- ansible_ec2_ramdisk_id:
- description: The ID of the RAM disk specified at launch time, if applicable.
- type: str
- sample: ""
- ansible_ec2_reservation_id:
- description: The ID of the reservation.
- type: str
- sample: "r-0123456789abcdef0"
- ansible_ec2_security_groups:
- description:
- - The names of the security groups applied to the instance. After launch, you can only change the security groups of instances running in a VPC.
- - Such changes are reflected here and in network/interfaces/macs/mac/security-groups.
- type: str
- sample: "securitygroup1,securitygroup2"
- ansible_ec2_services_domain:
- description: The domain for AWS resources for the region; for example, amazonaws.com for us-east-1.
- type: str
- sample: "amazonaws.com"
- ansible_ec2_services_partition:
- description:
- - The partition that the resource is in. For standard AWS regions, the partition is aws.
- - If you have resources in other partitions, the partition is aws-partitionname.
- - For example, the partition for resources in the China (Beijing) region is aws-cn.
- type: str
- sample: "aws"
- ansible_ec2_spot_termination_time:
- description:
- - The approximate time, in UTC, that the operating system for your Spot instance will receive the shutdown signal.
- - This item is present and contains a time value only if the Spot instance has been marked for termination by Amazon EC2.
- - The termination-time item is not set to a time if you terminated the Spot instance yourself.
- type: str
- sample: "2015-01-05T18:02:00Z"
- ansible_ec2_user_data:
- description: The instance user data.
- type: str
- sample: "#!/bin/bash"
-'''
-
-import json
-import re
-import socket
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_text
-from ansible.module_utils.urls import fetch_url
-from ansible.module_utils.six.moves.urllib.parse import quote
-
-socket.setdefaulttimeout(5)
-
-
-class Ec2Metadata(object):
- ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/'
- ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'
- ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/'
- ec2_dynamicdata_uri = 'http://169.254.169.254/latest/dynamic/'
-
- def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None, ec2_dynamicdata_uri=None):
- self.module = module
- self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri
- self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri
- self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri
- self.uri_dynamic = ec2_dynamicdata_uri or self.ec2_dynamicdata_uri
- self._data = {}
- self._prefix = 'ansible_ec2_%s'
-
- def _fetch(self, url):
- encoded_url = quote(url, safe='%/:=&?~#+!$,;\'@()*[]')
- response, info = fetch_url(self.module, encoded_url, force=True)
-
- if info.get('status') not in (200, 404):
-            # the request went bad; wait briefly, warn, and retry once before failing hard
-            time.sleep(3)
-            self.module.warn('Retrying query to metadata service. First attempt failed: {0}'.format(info['msg']))
- response, info = fetch_url(self.module, encoded_url, force=True)
- if info.get('status') not in (200, 404):
- # fail out now
- self.module.fail_json(msg='Failed to retrieve metadata from AWS: {0}'.format(info['msg']), response=info)
- if response:
- data = response.read()
- else:
- data = None
- return to_text(data)
-
- def _mangle_fields(self, fields, uri, filter_patterns=None):
- filter_patterns = ['public-keys-0'] if filter_patterns is None else filter_patterns
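-        # Illustrative example: a fetched key ending in 'placement/availability-zone'
-        # becomes 'ansible_ec2_placement-availability-zone'; fix_invalid_varnames()
-        # later rewrites the dashes (and any colons) to underscores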
-
- new_fields = {}
- for key, value in fields.items():
- split_fields = key[len(uri):].split('/')
- # Parse out the IAM role name (which is _not_ the same as the instance profile name)
- if len(split_fields) == 3 and split_fields[0:2] == ['iam', 'security-credentials'] and ':' not in split_fields[2]:
- new_fields[self._prefix % "iam-instance-profile-role"] = split_fields[2]
- if len(split_fields) > 1 and split_fields[1]:
- new_key = "-".join(split_fields)
- new_fields[self._prefix % new_key] = value
- else:
- new_key = "".join(split_fields)
- new_fields[self._prefix % new_key] = value
- for pattern in filter_patterns:
- for key in dict(new_fields):
- match = re.search(pattern, key)
- if match:
- new_fields.pop(key)
- return new_fields
-
- def fetch(self, uri, recurse=True):
- raw_subfields = self._fetch(uri)
- if not raw_subfields:
- return
- subfields = raw_subfields.split('\n')
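-        # entries ending in '/' are subtrees to recurse into; everything else is fetched as a leaf value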
- for field in subfields:
- if field.endswith('/') and recurse:
- self.fetch(uri + field)
- if uri.endswith('/'):
- new_uri = uri + field
- else:
- new_uri = uri + '/' + field
- if new_uri not in self._data and not new_uri.endswith('/'):
- content = self._fetch(new_uri)
- if field == 'security-groups' or field == 'security-group-ids':
- sg_fields = ",".join(content.split('\n'))
- self._data['%s' % (new_uri)] = sg_fields
- else:
- try:
-                    json_data = json.loads(content)  # renamed so the dict builtin is not shadowed
-                    self._data['%s' % (new_uri)] = content
-                    for (key, value) in json_data.items():
- self._data['%s:%s' % (new_uri, key.lower())] = value
- except Exception:
- self._data['%s' % (new_uri)] = content # not a stringified JSON string
-
- def fix_invalid_varnames(self, data):
- """Change ':'' and '-' to '_' to ensure valid template variable names"""
- new_data = data.copy()
- for key, value in data.items():
- if ':' in key or '-' in key:
- newkey = re.sub(':|-', '_', key)
- new_data[newkey] = value
- del new_data[key]
-
- return new_data
-
- def run(self):
- self.fetch(self.uri_meta) # populate _data with metadata
- data = self._mangle_fields(self._data, self.uri_meta)
- data[self._prefix % 'user-data'] = self._fetch(self.uri_user)
- data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh)
-
- self._data = {} # clear out metadata in _data
- self.fetch(self.uri_dynamic) # populate _data with dynamic data
- dyndata = self._mangle_fields(self._data, self.uri_dynamic)
- data.update(dyndata)
- data = self.fix_invalid_varnames(data)
-
- # Maintain old key for backwards compatibility
- if 'ansible_ec2_instance_identity_document_region' in data:
- data['ansible_ec2_placement_region'] = data['ansible_ec2_instance_identity_document_region']
- return data
-
-
-def main():
- module = AnsibleModule(
- argument_spec={},
- supports_check_mode=True,
- )
-
- ec2_metadata_facts = Ec2Metadata(module).run()
- ec2_metadata_facts_result = dict(changed=False, ansible_facts=ec2_metadata_facts)
-
- module.exit_json(**ec2_metadata_facts_result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_snapshot.py b/lib/ansible/modules/cloud/amazon/ec2_snapshot.py
deleted file mode 100644
index d3c76ee831..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_snapshot.py
+++ /dev/null
@@ -1,336 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_snapshot
-short_description: Creates a snapshot from an existing volume
-description:
- - Creates an EC2 snapshot from an existing EBS volume.
-version_added: "1.5"
-options:
- volume_id:
- description:
- - Volume from which to take the snapshot.
- required: false
- type: str
- description:
- description:
- - Description to be applied to the snapshot.
- required: false
- type: str
- instance_id:
- description:
- - Instance that has the required volume to snapshot mounted.
- required: false
- type: str
- device_name:
- description:
- - Device name of a mounted volume to be snapshotted.
- required: false
- type: str
- snapshot_tags:
- description:
- - A dictionary of tags to add to the snapshot.
- type: dict
- required: false
- version_added: "1.6"
- wait:
- description:
- - Wait for the snapshot to be ready.
- type: bool
- required: false
- default: yes
- version_added: "1.5.1"
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- - Specify 0 to wait forever.
- required: false
- default: 0
- version_added: "1.5.1"
- type: int
- state:
- description:
-      - Whether to create or delete a snapshot.
- required: false
- default: present
- choices: ['absent', 'present']
- version_added: "1.9"
- type: str
- snapshot_id:
- description:
- - Snapshot id to remove.
- required: false
- version_added: "1.9"
- type: str
- last_snapshot_min_age:
- description:
-      - If the volume's most recent snapshot was started less than I(last_snapshot_min_age) minutes ago, a new snapshot will not be created.
- required: false
- default: 0
- version_added: "2.0"
- type: int
-
-author: "Will Thames (@willthames)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Simple snapshot of volume using volume_id
-- ec2_snapshot:
- volume_id: vol-abcdef12
- description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
-
-# Snapshot of volume mounted on device_name attached to instance_id
-- ec2_snapshot:
- instance_id: i-12345678
- device_name: /dev/sdb1
- description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
-
-# Snapshot of volume with tagging
-- ec2_snapshot:
- instance_id: i-12345678
- device_name: /dev/sdb1
- snapshot_tags:
- frequency: hourly
- source: /data
-
-# Remove a snapshot
-- local_action:
- module: ec2_snapshot
- snapshot_id: snap-abcd1234
- state: absent
-
-# Create a snapshot only if the most recent one is older than 1 hour
-- local_action:
- module: ec2_snapshot
- volume_id: vol-abcdef12
- last_snapshot_min_age: 60
-'''
-
-RETURN = '''
-snapshot_id:
- description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
- type: str
- returned: always
- sample: snap-01234567
-tags:
- description: Any tags assigned to the snapshot.
- type: dict
- returned: always
- sample: "{ 'Name': 'instance-name' }"
-volume_id:
- description: The ID of the volume that was used to create the snapshot.
- type: str
- returned: always
- sample: vol-01234567
-volume_size:
- description: The size of the volume, in GiB.
- type: int
- returned: always
- sample: 8
-'''
-
-import time
-import datetime
-
-try:
- import boto.exception
-except ImportError:
- pass # Taken care of by ec2.HAS_BOTO
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
-
-
-# Find the most recent snapshot
-def _get_snapshot_starttime(snap):
- return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.%fZ')
-
-
-def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
- """
- Gets the most recently created snapshot and optionally filters the result
- if the snapshot is too old
- :param snapshots: list of snapshots to search
- :param max_snapshot_age_secs: filter the result if its older than this
- :param now: simulate time -- used for unit testing
- :return:
- """
- if len(snapshots) == 0:
- return None
-
- if not now:
- now = datetime.datetime.utcnow()
-
- youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)
-
-    # See if the snapshot is younger than the given max age
- snapshot_start = datetime.datetime.strptime(youngest_snapshot.start_time, '%Y-%m-%dT%H:%M:%S.%fZ')
- snapshot_age = now - snapshot_start
-
- if max_snapshot_age_secs is not None:
- if snapshot_age.total_seconds() > max_snapshot_age_secs:
- return None
-
- return youngest_snapshot
-
-
-def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
- """
- Wait for the snapshot to be created
- :param snapshot:
- :param wait_timeout_secs: fail this step after this many seconds
- :param sleep_func:
- :return:
- """
- time_waited = 0
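-    # poll every 3 seconds until the snapshot reports 'completed' or the timeout elapses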
- snapshot.update()
- while snapshot.status != 'completed':
- sleep_func(3)
- snapshot.update()
- time_waited += 3
- if wait_timeout_secs and time_waited > wait_timeout_secs:
- return False
- return True
-
-
-def create_snapshot(module, ec2, state=None, description=None, wait=None,
- wait_timeout=None, volume_id=None, instance_id=None,
- snapshot_id=None, device_name=None, snapshot_tags=None,
- last_snapshot_min_age=None):
- snapshot = None
- changed = False
-
- required = [volume_id, snapshot_id, instance_id]
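-    # exactly one of volume_id, snapshot_id and instance_id may be set; all others must be None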
- if required.count(None) != len(required) - 1: # only 1 must be set
- module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
- if instance_id and not device_name or device_name and not instance_id:
- module.fail_json(msg='Instance ID and device name must both be specified')
-
- if instance_id:
- try:
- volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
-
- if not volumes:
- module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
-
- volume_id = volumes[0].id
-
- if state == 'absent':
- if not snapshot_id:
- module.fail_json(msg='snapshot_id must be set when state is absent')
- try:
- ec2.delete_snapshot(snapshot_id)
- except boto.exception.BotoServerError as e:
- # exception is raised if snapshot does not exist
- if e.error_code == 'InvalidSnapshot.NotFound':
- module.exit_json(changed=False)
- else:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
-
- # successful delete
- module.exit_json(changed=True)
-
- if last_snapshot_min_age > 0:
- try:
- current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
-
- last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
- snapshot = _get_most_recent_snapshot(current_snapshots,
- max_snapshot_age_secs=last_snapshot_min_age)
- try:
- # Create a new snapshot if we didn't find an existing one to use
- if snapshot is None:
- snapshot = ec2.create_snapshot(volume_id, description=description)
- changed = True
- if wait:
- if not _create_with_wait(snapshot, wait_timeout):
- module.fail_json(msg='Timed out while creating snapshot.')
- if snapshot_tags:
- for k, v in snapshot_tags.items():
- snapshot.add_tag(k, v)
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
-
- module.exit_json(changed=changed,
- snapshot_id=snapshot.id,
- volume_id=snapshot.volume_id,
- volume_size=snapshot.volume_size,
- tags=snapshot.tags.copy())
-
-
-def create_snapshot_ansible_module():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- volume_id=dict(),
- description=dict(),
- instance_id=dict(),
- snapshot_id=dict(),
- device_name=dict(),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=0),
- last_snapshot_min_age=dict(type='int', default=0),
- snapshot_tags=dict(type='dict', default=dict()),
- state=dict(choices=['absent', 'present'], default='present'),
- )
- )
- module = AnsibleModule(argument_spec=argument_spec)
- return module
-
-
-def main():
- module = create_snapshot_ansible_module()
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- volume_id = module.params.get('volume_id')
- snapshot_id = module.params.get('snapshot_id')
- description = module.params.get('description')
- instance_id = module.params.get('instance_id')
- device_name = module.params.get('device_name')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- last_snapshot_min_age = module.params.get('last_snapshot_min_age')
- snapshot_tags = module.params.get('snapshot_tags')
- state = module.params.get('state')
-
- ec2 = ec2_connect(module)
-
- create_snapshot(
- module=module,
- state=state,
- description=description,
- wait=wait,
- wait_timeout=wait_timeout,
- ec2=ec2,
- volume_id=volume_id,
- instance_id=instance_id,
- snapshot_id=snapshot_id,
- device_name=device_name,
- snapshot_tags=snapshot_tags,
- last_snapshot_min_age=last_snapshot_min_age
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_snapshot_info.py b/lib/ansible/modules/cloud/amazon/ec2_snapshot_info.py
deleted file mode 100644
index b48ad4efa9..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_snapshot_info.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_snapshot_info
-short_description: Gather information about ec2 volume snapshots in AWS
-description:
- - Gather information about ec2 volume snapshots in AWS.
- - This module was called C(ec2_snapshot_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.1"
-requirements: [ boto3 ]
-author: "Rob White (@wimnat)"
-options:
- snapshot_ids:
- description:
- - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.
- required: false
- default: []
- type: list
- elements: str
- owner_ids:
- description:
- - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have
- access are returned.
- required: false
- default: []
- type: list
- elements: str
- restorable_by_user_ids:
- description:
- - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are
- returned.
- required: false
- default: []
- type: list
- elements: str
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
- U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter
- names and values are case sensitive.
- required: false
- type: dict
- default: {}
-notes:
- - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by
- the account use the filter 'owner-id'.
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all snapshots, including public ones
-- ec2_snapshot_info:
-
-# Gather information about all snapshots owned by the account 0123456789
-- ec2_snapshot_info:
- filters:
- owner-id: 0123456789
-
-# Or alternatively...
-- ec2_snapshot_info:
- owner_ids:
- - 0123456789
-
-# Gather information about a particular snapshot using ID
-- ec2_snapshot_info:
- filters:
- snapshot-id: snap-00112233
-
-# Or alternatively...
-- ec2_snapshot_info:
- snapshot_ids:
- - snap-00112233
-
-# Gather information about any snapshot with a tag key Name and value Example
-- ec2_snapshot_info:
- filters:
- "tag:Name": Example
-
-# Gather information about any snapshot with an error status
-- ec2_snapshot_info:
- filters:
- status: error
-
-'''
-
-RETURN = '''
-snapshot_id:
- description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
- type: str
- returned: always
- sample: snap-01234567
-volume_id:
- description: The ID of the volume that was used to create the snapshot.
- type: str
- returned: always
- sample: vol-01234567
-state:
- description: The snapshot state (completed, pending or error).
- type: str
- returned: always
- sample: completed
-state_message:
- description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper
-  AWS Key Management Service (AWS KMS) permissions are not obtained), this field displays error state details to help you diagnose why the
- error occurred.
- type: str
- returned: always
- sample:
-start_time:
- description: The time stamp when the snapshot was initiated.
- type: str
- returned: always
- sample: "2015-02-12T02:14:02+00:00"
-progress:
- description: The progress of the snapshot, as a percentage.
- type: str
- returned: always
- sample: "100%"
-owner_id:
- description: The AWS account ID of the EBS snapshot owner.
- type: str
- returned: always
- sample: "099720109477"
-description:
- description: The description for the snapshot.
- type: str
- returned: always
- sample: "My important backup"
-volume_size:
- description: The size of the volume, in GiB.
- type: int
- returned: always
- sample: 8
-owner_alias:
- description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.
- type: str
- returned: always
- sample: "033440102211"
-tags:
- description: Any tags assigned to the snapshot.
- type: dict
- returned: always
- sample: "{ 'my_tag_key': 'my_tag_value' }"
-encrypted:
- description: Indicates whether the snapshot is encrypted.
- type: bool
- returned: always
- sample: "True"
-kms_key_id:
- description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \
- protect the volume encryption key for the parent volume.
- type: str
- returned: always
- sample: "74c9742a-a1b2-45cb-b3fe-abcdef123456"
-data_encryption_key_id:
- description: The data encryption key identifier for the snapshot. This value is a unique identifier that \
- corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.
- type: str
- returned: always
- sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456"
-
-'''
-
-try:
- import boto3
- from botocore.exceptions import ClientError
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
- boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
- ec2_argument_spec, get_aws_connection_info)
-
-
-def list_ec2_snapshots(connection, module):
-
- snapshot_ids = module.params.get("snapshot_ids")
- owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")]
- restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")]
- filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
-
- try:
- snapshots = connection.describe_snapshots(SnapshotIds=snapshot_ids, OwnerIds=owner_ids, RestorableByUserIds=restorable_by_user_ids, Filters=filters)
- except ClientError as e:
- if e.response['Error']['Code'] == "InvalidSnapshot.NotFound":
- if len(snapshot_ids) > 1:
- module.warn("Some of your snapshots may exist, but %s" % str(e))
- snapshots = {'Snapshots': []}
- else:
- module.fail_json(msg="Failed to describe snapshots: %s" % str(e))
-
- # Turn the boto3 result in to ansible_friendly_snaked_names
- snaked_snapshots = []
- for snapshot in snapshots['Snapshots']:
- snaked_snapshots.append(camel_dict_to_snake_dict(snapshot))
-
- # Turn the boto3 result in to ansible friendly tag dictionary
- for snapshot in snaked_snapshots:
- if 'tags' in snapshot:
- snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'], 'key', 'value')
-
- module.exit_json(snapshots=snaked_snapshots)
-
-
-def main():
-
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- snapshot_ids=dict(default=[], type='list'),
- owner_ids=dict(default=[], type='list'),
- restorable_by_user_ids=dict(default=[], type='list'),
- filters=dict(default={}, type='dict')
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive=[
- ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
- ]
- )
- if module._name == 'ec2_snapshot_facts':
- module.deprecate("The 'ec2_snapshot_facts' module has been renamed to 'ec2_snapshot_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-
- if region:
- connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
- else:
- module.fail_json(msg="region must be specified")
-
- list_ec2_snapshots(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_tag.py b/lib/ansible/modules/cloud/amazon/ec2_tag.py
deleted file mode 100644
index 9e10b5eda6..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_tag.py
+++ /dev/null
@@ -1,201 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_tag
-short_description: create and remove tags on ec2 resources
-description:
- - Creates, modifies and removes tags for any EC2 resource.
- - Resources are referenced by their resource id (for example, an instance being i-XXXXXXX, a VPC being vpc-XXXXXXX).
- - This module is designed to be used with complex args (tags), see the examples.
-version_added: "1.3"
-requirements: [ "boto3", "botocore" ]
-options:
- resource:
- description:
- - The EC2 resource id.
- required: true
- type: str
- state:
- description:
- - Whether the tags should be present or absent on the resource.
- - The use of I(state=list) to interrogate the tags of an instance has been
-      deprecated and will be removed in Ansible 2.14. The 'list'
- functionality has been moved to a dedicated module M(ec2_tag_info).
- default: present
- choices: ['present', 'absent', 'list']
- type: str
- tags:
- description:
- - A dictionary of tags to add or remove from the resource.
- - If the value provided for a key is not set and I(state=absent), the tag will be removed regardless of its current value.
- - Required when I(state=present) or I(state=absent).
- type: dict
- purge_tags:
- description:
- - Whether unspecified tags should be removed from the resource.
- - Note that when combined with I(state=absent), specified tags with non-matching values are not purged.
- type: bool
- default: false
- version_added: '2.7'
-
-author:
- - Lester Wade (@lwade)
- - Paul Arthur (@flowerysong)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Ensure tags are present on a resource
- ec2_tag:
- region: eu-west-1
- resource: vol-XXXXXX
- state: present
- tags:
- Name: ubervol
- env: prod
-
-- name: Ensure all volumes are tagged
- ec2_tag:
- region: eu-west-1
- resource: '{{ item.id }}'
- state: present
- tags:
- Name: dbserver
- Env: production
- loop: '{{ ec2_vol.volumes }}'
-
-- name: Remove the Env tag
- ec2_tag:
- region: eu-west-1
- resource: i-xxxxxxxxxxxxxxxxx
- tags:
- Env:
- state: absent
-
-- name: Remove the Env tag if it's currently 'development'
- ec2_tag:
- region: eu-west-1
- resource: i-xxxxxxxxxxxxxxxxx
- tags:
- Env: development
- state: absent
-
-- name: Remove all tags except for Name from an instance
- ec2_tag:
- region: eu-west-1
- resource: i-xxxxxxxxxxxxxxxxx
- tags:
- Name: ''
- state: absent
- purge_tags: true
-'''
-
-RETURN = '''
-tags:
- description: A dict containing the tags on the resource
- returned: always
- type: dict
-added_tags:
- description: A dict of tags that were added to the resource
- returned: If tags were added
- type: dict
-removed_tags:
- description: A dict of tags that were removed from the resource
- returned: If tags were removed
- type: dict
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except Exception:
- pass # Handled by AnsibleAWSModule
-
-
-def get_tags(ec2, module, resource):
- filters = [{'Name': 'resource-id', 'Values': [resource]}]
- try:
- return boto3_tag_list_to_ansible_dict(ec2.describe_tags(Filters=filters)['Tags'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))
-
-
-def main():
- argument_spec = dict(
- resource=dict(required=True),
- tags=dict(type='dict'),
- purge_tags=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent', 'list']),
- )
- required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]
-
- module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
-
- resource = module.params['resource']
- tags = module.params['tags']
- state = module.params['state']
- purge_tags = module.params['purge_tags']
-
- result = {'changed': False}
-
- ec2 = module.client('ec2')
-
- current_tags = get_tags(ec2, module, resource)
-
- if state == 'list':
- module.deprecate(
- 'Using the "list" state has been deprecated. Please use the ec2_tag_info module instead', version='2.14')
- module.exit_json(changed=False, tags=current_tags)
-
- add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
-
- remove_tags = {}
- if state == 'absent':
- for key in tags:
- if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
- remove_tags[key] = current_tags[key]
-
- for key in remove:
- remove_tags[key] = current_tags[key]
-
- if remove_tags:
- result['changed'] = True
- result['removed_tags'] = remove_tags
- if not module.check_mode:
- try:
- ec2.delete_tags(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(remove_tags))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
-
- if state == 'present' and add_tags:
- result['changed'] = True
- result['added_tags'] = add_tags
- current_tags.update(add_tags)
- if not module.check_mode:
- try:
- ec2.create_tags(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(add_tags))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
-
- result['tags'] = get_tags(ec2, module, resource)
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_tag_info.py b/lib/ansible/modules/cloud/amazon/ec2_tag_info.py
deleted file mode 100644
index 6e607763b8..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_tag_info.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_tag_info
-short_description: list tags on ec2 resources
-description:
- - Lists tags for any EC2 resource.
- - Resources are referenced by their resource id (e.g. an instance being i-XXXXXXX, a vpc being vpc-XXXXXX).
- - Resource tags can be managed using the M(ec2_tag) module.
-version_added: "2.10"
-requirements: [ "boto3", "botocore" ]
-options:
- resource:
- description:
- - The EC2 resource id (for example i-XXXXXX or vpc-XXXXXX).
- required: true
- type: str
-
-author:
- - Mark Chappell (@tremble)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Retrieve all tags on an instance
- ec2_tag_info:
- region: eu-west-1
- resource: i-xxxxxxxxxxxxxxxxx
- register: instance_tags
-
-- name: Retrieve all tags on a VPC
- ec2_tag_info:
- region: eu-west-1
- resource: vpc-xxxxxxxxxxxxxxxxx
- register: vpc_tags
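-
-# The registered result exposes the returned 'tags' dict, so it can be used
-# directly in later tasks (illustrative follow-up to the first example).
-- name: Show the Name tag of the instance queried above
- debug:
- msg: "{{ instance_tags.tags.Name | default('unset') }}"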
-'''
-
-RETURN = '''
-tags:
- description: A dict containing the tags on the resource
- returned: always
- type: dict
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, AWSRetry
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-
-@AWSRetry.jittered_backoff()
-def get_tags(ec2, module, resource):
- filters = [{'Name': 'resource-id', 'Values': [resource]}]
- return boto3_tag_list_to_ansible_dict(ec2.describe_tags(Filters=filters)['Tags'])
-
-
-def main():
- argument_spec = dict(
- resource=dict(required=True),
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- resource = module.params['resource']
- ec2 = module.client('ec2')
-
- try:
- current_tags = get_tags(ec2, module, resource)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))
-
- module.exit_json(changed=False, tags=current_tags)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vol.py b/lib/ansible/modules/cloud/amazon/ec2_vol.py
deleted file mode 100644
index aeff60d06e..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vol.py
+++ /dev/null
@@ -1,632 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vol
-short_description: Create and attach a volume, return volume id and device map
-description:
- - Creates an EBS volume and optionally attaches it to an instance.
- - If both I(instance) and I(name) are given and the instance has a device at the device name, then no volume is created and no attachment is made.
- - This module has a dependency on python-boto.
-version_added: "1.1"
-options:
- instance:
- description:
- - Instance ID if you wish to attach the volume. Since 1.9 you can set it to None to detach.
- type: str
- name:
- description:
- - Volume Name tag if you wish to attach an existing volume (requires instance).
- version_added: "1.6"
- type: str
- id:
- description:
- - Volume id if you wish to attach an existing volume (requires instance) or to remove an existing volume.
- version_added: "1.6"
- type: str
- volume_size:
- description:
- - Size of volume (in GiB) to create.
- type: int
- volume_type:
- description:
- - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS), st1 (Throughput Optimized HDD), sc1 (Cold HDD).
- "Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility.
- default: standard
- version_added: "1.9"
- choices: ['standard', 'gp2', 'io1', 'st1', 'sc1']
- type: str
- iops:
- description:
- - The provisioned IOPS you want to associate with this volume (integer).
- - By default AWS will set this to 100.
- version_added: "1.3"
- type: int
- encrypted:
- description:
- - Enable encryption at rest for this volume.
- default: false
- type: bool
- version_added: "1.8"
- kms_key_id:
- description:
- - Specify the id of the KMS key to use.
- version_added: "2.3"
- type: str
- device_name:
- description:
- - Device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
- type: str
- delete_on_termination:
- description:
- - When set to C(true), the volume will be deleted upon instance termination.
- type: bool
- default: false
- version_added: "2.1"
- zone:
- description:
- - Zone in which to create the volume, if unset uses the zone the instance is in (if set).
- aliases: ['availability_zone', 'aws_zone', 'ec2_zone']
- type: str
- snapshot:
- description:
- - Snapshot ID on which to base the volume.
- version_added: "1.5"
- type: str
- validate_certs:
- description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
- type: bool
- default: true
- version_added: "1.5"
- state:
- description:
- - Whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
- default: present
- choices: ['absent', 'present', 'list']
- version_added: "1.6"
- type: str
- tags:
- description:
- - tag:value pairs to add to the volume after creation.
- default: {}
- version_added: "2.3"
- type: dict
-author: "Lester Wade (@lwade)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Simple attachment action
-- ec2_vol:
- instance: XXXXXX
- volume_size: 5
- device_name: sdd
-
-# Example using custom iops params
-- ec2_vol:
- instance: XXXXXX
- volume_size: 5
- iops: 100
- device_name: sdd
-
-# Example using snapshot id
-- ec2_vol:
- instance: XXXXXX
- snapshot: "{{ snapshot }}"
-
-# Playbook example combined with instance launch
-- ec2:
- keypair: "{{ keypair }}"
- image: "{{ image }}"
- wait: yes
- count: 3
- register: ec2
-- ec2_vol:
- instance: "{{ item.id }}"
- volume_size: 5
- loop: "{{ ec2.instances }}"
- register: ec2_vol
-
-# Example: Launch an instance and then add a volume if not already attached
-# * Volume will be created with the given name if not already created.
-# * Nothing will happen if the volume is already attached.
-# * Requires Ansible 2.0
-
-- ec2:
- keypair: "{{ keypair }}"
- image: "{{ image }}"
- zone: YYYYYY
- id: my_instance
- wait: yes
- count: 1
- register: ec2
-
-- ec2_vol:
- instance: "{{ item.id }}"
- name: my_existing_volume_Name_tag
- device_name: /dev/xvdf
- loop: "{{ ec2.instances }}"
- register: ec2_vol
-
-# Remove a volume
-- ec2_vol:
- id: vol-XXXXXXXX
- state: absent
-
-# Detach a volume (since 1.9)
-- ec2_vol:
- id: vol-XXXXXXXX
- instance: None
-
-# List volumes for an instance
-- ec2_vol:
- instance: i-XXXXXX
- state: list
-
-# Create new volume using SSD storage
-- ec2_vol:
- instance: XXXXXX
- volume_size: 50
- volume_type: gp2
- device_name: /dev/xvdf
-
-# Attach an existing volume to instance. The volume will be deleted upon instance termination.
-- ec2_vol:
- instance: XXXXXX
- id: XXXXXX
- device_name: /dev/sdf
- delete_on_termination: yes
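-
-# Create a new encrypted volume with a specific KMS key (illustrative; the
-# instance and key ids are placeholders, and kms_key_id requires boto >= 2.39.0).
-- ec2_vol:
- instance: XXXXXX
- volume_size: 20
- encrypted: yes
- kms_key_id: arn:aws:kms:us-east-1:123456789012:key/abcd1234-ef56-ab12-cd34-ef56abcd1234
- device_name: /dev/sdg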
-'''
-
-RETURN = '''
-device:
- description: device name of attached volume
- returned: when success
- type: str
- sample: "/def/sdf"
-volume_id:
- description: the id of the volume
- returned: when success
- type: str
- sample: "vol-35b333d9"
-volume_type:
- description: the volume type
- returned: when success
- type: str
- sample: "standard"
-volume:
- description: a dictionary containing detailed attributes of the volume
- returned: when success
- type: dict
- sample: {
- "attachment_set": {
- "attach_time": "2015-10-23T00:22:29.000Z",
- "deleteOnTermination": "false",
- "device": "/dev/sdf",
- "instance_id": "i-8356263c",
- "status": "attached"
- },
- "create_time": "2015-10-21T14:36:08.870Z",
- "encrypted": false,
- "id": "vol-35b333d9",
- "iops": null,
- "size": 1,
- "snapshot_id": "",
- "status": "in-use",
- "tags": {
- "env": "dev"
- },
- "type": "standard",
- "zone": "us-east-1b"
- }
-'''
-
-import time
-
-from distutils.version import LooseVersion
-
-try:
- import boto
- import boto.ec2
- import boto.exception
- from boto.exception import BotoServerError
- from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
-except ImportError:
- pass # Taken care of by ec2.HAS_BOTO
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (HAS_BOTO, AnsibleAWSError, connect_to_aws, ec2_argument_spec,
- get_aws_connection_info)
-
-
-def get_volume(module, ec2):
- name = module.params.get('name')
- id = module.params.get('id')
- zone = module.params.get('zone')
- filters = {}
- volume_ids = None
-
- # If no name or id supplied, just try volume creation based on module parameters
- if id is None and name is None:
- return None
-
- if zone:
- filters['availability_zone'] = zone
- if name:
- filters['tag:Name'] = name
- if id:
- volume_ids = [id]
- try:
- vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
-
- if not vols:
- if id:
- msg = "Could not find the volume with id: %s" % id
- if name:
- msg += (" and name: %s" % name)
- module.fail_json(msg=msg)
- else:
- return None
-
- if len(vols) > 1:
- module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
- return vols[0]
-
-
-def get_volumes(module, ec2):
-
- instance = module.params.get('instance')
-
- try:
- if not instance:
- vols = ec2.get_all_volumes()
- else:
- vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
- return vols
-
-
-def delete_volume(module, ec2):
- volume_id = module.params['id']
- try:
- ec2.delete_volume(volume_id)
- module.exit_json(changed=True)
- except boto.exception.EC2ResponseError as ec2_error:
- if ec2_error.code == 'InvalidVolume.NotFound':
- module.exit_json(changed=False)
- module.fail_json(msg=ec2_error.message)
-
-
-def boto_supports_volume_encryption():
- """
- Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
-
- Returns:
- True if the installed boto version is at least 2.29.0 (the release that added EBS volume encryption support), else False
- """
- return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
-
-
-def boto_supports_kms_key_id():
- """
- Check if Boto library supports kms_key_ids (added in 2.39.0)
-
- Returns:
- True if the boto version is equal to or higher than the version needed, else False
- """
- return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0')
-
-
-def create_volume(module, ec2, zone):
- changed = False
- name = module.params.get('name')
- iops = module.params.get('iops')
- encrypted = module.params.get('encrypted')
- kms_key_id = module.params.get('kms_key_id')
- volume_size = module.params.get('volume_size')
- volume_type = module.params.get('volume_type')
- snapshot = module.params.get('snapshot')
- tags = module.params.get('tags')
- # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
- if iops:
- volume_type = 'io1'
-
- volume = get_volume(module, ec2)
- if volume is None:
- try:
- if boto_supports_volume_encryption():
- if kms_key_id is not None:
- volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted, kms_key_id)
- else:
- volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
- changed = True
- else:
- volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
- changed = True
-
- while volume.status != 'available':
- time.sleep(3)
- volume.update()
-
- if name:
- tags["Name"] = name
- if tags:
- ec2.create_tags([volume.id], tags)
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
-
- return volume, changed
-
-
-def attach_volume(module, ec2, volume, instance):
-
- device_name = module.params.get('device_name')
- delete_on_termination = module.params.get('delete_on_termination')
- changed = False
-
- # If device_name isn't set, make a choice based on best practices here:
- # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
-
- # In future this needs to be more dynamic but combining block device mapping best practices
- # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
-
- # Use password data attribute to tell whether the instance is Windows or Linux
- if device_name is None:
- try:
- if not ec2.get_password_data(instance.id):
- device_name = '/dev/sdf'
- else:
- device_name = '/dev/xvdf'
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
-
- if volume.attachment_state() is not None:
- adata = volume.attach_data
- if adata.instance_id != instance.id:
- module.fail_json(msg="Volume %s is already attached to another instance: %s"
- % (volume.id, adata.instance_id))
- else:
- # Volume is already attached to right instance
- changed = modify_dot_attribute(module, ec2, instance, device_name)
- else:
- try:
- volume.attach(instance.id, device_name)
- while volume.attachment_state() != 'attached':
- time.sleep(3)
- volume.update()
- changed = True
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
-
- modify_dot_attribute(module, ec2, instance, device_name)
-
- return volume, changed
-
-
-def modify_dot_attribute(module, ec2, instance, device_name):
- """ Modify delete_on_termination attribute """
-
- delete_on_termination = module.params.get('delete_on_termination')
- changed = False
-
- try:
- instance.update()
- dot = instance.block_device_mapping[device_name].delete_on_termination
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
-
- if delete_on_termination != dot:
- try:
- bdt = BlockDeviceType(delete_on_termination=delete_on_termination)
- bdm = BlockDeviceMapping()
- bdm[device_name] = bdt
-
- ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm)
-
- while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination:
- time.sleep(3)
- instance.update()
- changed = True
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
-
- return changed
-
-
-def detach_volume(module, ec2, volume):
-
- changed = False
-
- if volume.attachment_state() is not None:
- adata = volume.attach_data
- volume.detach()
- while volume.attachment_state() is not None:
- time.sleep(3)
- volume.update()
- changed = True
-
- return volume, changed
-
-
-def get_volume_info(volume, state):
-
- # If we're just listing volumes then do nothing, else get the latest update for the volume
- if state != 'list':
- volume.update()
-
- volume_info = {}
- attachment = volume.attach_data
-
- volume_info = {
- 'create_time': volume.create_time,
- 'encrypted': volume.encrypted,
- 'id': volume.id,
- 'iops': volume.iops,
- 'size': volume.size,
- 'snapshot_id': volume.snapshot_id,
- 'status': volume.status,
- 'type': volume.type,
- 'zone': volume.zone,
- 'attachment_set': {
- 'attach_time': attachment.attach_time,
- 'device': attachment.device,
- 'instance_id': attachment.instance_id,
- 'status': attachment.status
- },
- 'tags': volume.tags
- }
- if hasattr(attachment, 'deleteOnTermination'):
- volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination
-
- return volume_info
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- instance=dict(),
- id=dict(),
- name=dict(),
- volume_size=dict(type='int'),
- volume_type=dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
- iops=dict(type='int'),
- encrypted=dict(type='bool', default=False),
- kms_key_id=dict(),
- device_name=dict(),
- delete_on_termination=dict(type='bool', default=False),
- zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
- snapshot=dict(),
- state=dict(choices=['absent', 'present', 'list'], default='present'),
- tags=dict(type='dict', default={})
- )
- )
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- id = module.params.get('id')
- name = module.params.get('name')
- instance = module.params.get('instance')
- volume_size = module.params.get('volume_size')
- encrypted = module.params.get('encrypted')
- kms_key_id = module.params.get('kms_key_id')
- device_name = module.params.get('device_name')
- zone = module.params.get('zone')
- snapshot = module.params.get('snapshot')
- state = module.params.get('state')
- tags = module.params.get('tags')
-
- # Ensure we have the zone or can get the zone
- if instance is None and zone is None and state == 'present':
- module.fail_json(msg="You must specify either instance or zone")
-
- # Set volume detach flag
- if instance == 'None' or instance == '':
- instance = None
- detach_vol_flag = True
- else:
- detach_vol_flag = False
-
- # Set changed flag
- changed = False
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-
- if region:
- try:
- ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- module.fail_json(msg=str(e))
- else:
- module.fail_json(msg="region must be specified")
-
- if state == 'list':
- returned_volumes = []
- vols = get_volumes(module, ec2)
-
- for v in vols:
- returned_volumes.append(get_volume_info(v, state))
-
- module.exit_json(changed=False, volumes=returned_volumes)
-
- if encrypted and not boto_supports_volume_encryption():
- module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
-
- if kms_key_id is not None and not boto_supports_kms_key_id():
- module.fail_json(msg="You must use boto >= v2.39.0 to use kms_key_id")
-
- # Here we need to get the zone info for the instance. This covers situation where
- # instance is specified but zone isn't.
- # Useful for playbooks chaining instance launch with volume create + attach and where the
- # zone doesn't matter to the user.
- inst = None
- if instance:
- try:
- reservation = ec2.get_all_instances(instance_ids=instance)
- except BotoServerError as e:
- module.fail_json(msg=e.message)
- inst = reservation[0].instances[0]
- zone = inst.placement
-
- # Check if there is a volume already mounted there.
- if device_name:
- if device_name in inst.block_device_mapping:
- module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
- volume_id=inst.block_device_mapping[device_name].volume_id,
- device=device_name,
- changed=False)
-
- # Delaying the checks until after the instance check allows us to get volume ids for existing volumes
- # without needing to pass an unused volume_size
- if not volume_size and not (id or name or snapshot):
- module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
-
- if volume_size and id:
- module.fail_json(msg="Cannot specify volume_size together with id")
-
- if state == 'present':
- volume, changed = create_volume(module, ec2, zone)
- if detach_vol_flag:
- volume, changed = detach_volume(module, ec2, volume)
- elif inst is not None:
- volume, changed = attach_volume(module, ec2, volume, inst)
-
- # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
- volume_info = get_volume_info(volume, state)
-
- # deleteOnTermination is not correctly reflected on attachment
- if module.params.get('delete_on_termination'):
- for attempt in range(0, 8):
- if volume_info['attachment_set'].get('deleteOnTermination') == 'true':
- break
- time.sleep(5)
- volume = ec2.get_all_volumes(volume_ids=volume.id)[0]
- volume_info = get_volume_info(volume, state)
- module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'],
- volume_id=volume_info['id'], volume_type=volume_info['type'])
- elif state == 'absent':
- delete_volume(module, ec2)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vol_info.py b/lib/ansible/modules/cloud/amazon/ec2_vol_info.py
deleted file mode 100644
index 0989f9bb6b..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vol_info.py
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vol_info
-short_description: Gather information about ec2 volumes in AWS
-description:
- - Gather information about ec2 volumes in AWS.
- - This module was called C(ec2_vol_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.1"
-requirements: [ boto3 ]
-author: "Rob White (@wimnat)"
-options:
- filters:
- type: dict
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all volumes
-- ec2_vol_info:
-
-# Gather information about a particular volume using volume ID
-- ec2_vol_info:
- filters:
- volume-id: vol-00112233
-
-# Gather information about any volume with a tag key Name and value Example
-- ec2_vol_info:
- filters:
- "tag:Name": Example
-
-# Gather information about any volume that is attached
-- ec2_vol_info:
- filters:
- attachment.status: attached
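-
-# Gather information about volumes in a single availability zone; filter names
-# follow the DescribeVolumes API (illustrative).
-- ec2_vol_info:
- filters:
- availability-zone: us-east-1a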
-
-'''
-
-# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
-# fix this
-RETURN = '''# '''
-
-
-try:
- from botocore.exceptions import ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import AWSRetry
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
-
-
-def get_volume_info(volume, region):
-
- attachment = volume["attachments"]
-
- volume_info = {
- 'create_time': volume["create_time"],
- 'id': volume["volume_id"],
- 'encrypted': volume["encrypted"],
- 'iops': volume["iops"] if "iops" in volume else None,
- 'size': volume["size"],
- 'snapshot_id': volume["snapshot_id"],
- 'status': volume["state"],
- 'type': volume["volume_type"],
- 'zone': volume["availability_zone"],
- 'region': region,
- 'attachment_set': {
- 'attach_time': attachment[0]["attach_time"] if len(attachment) > 0 else None,
- 'device': attachment[0]["device"] if len(attachment) > 0 else None,
- 'instance_id': attachment[0]["instance_id"] if len(attachment) > 0 else None,
- 'status': attachment[0]["state"] if len(attachment) > 0 else None,
- 'delete_on_termination': attachment[0]["delete_on_termination"] if len(attachment) > 0 else None
- },
- 'tags': boto3_tag_list_to_ansible_dict(volume['tags']) if "tags" in volume else None
- }
-
- return volume_info
-
-
-@AWSRetry.jittered_backoff()
-def describe_volumes_with_backoff(connection, filters):
- paginator = connection.get_paginator('describe_volumes')
- return paginator.paginate(Filters=filters).build_full_result()
-
-
-def list_ec2_volumes(connection, module):
-
- # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
- sanitized_filters = module.params.get("filters")
- for key in list(sanitized_filters):
- if not key.startswith("tag:"):
- sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)
- volume_dict_array = []
-
- try:
- all_volumes = describe_volumes_with_backoff(connection, ansible_dict_to_boto3_filter_list(sanitized_filters))
- except ClientError as e:
- module.fail_json_aws(e, msg="Failed to describe volumes.")
-
- for volume in all_volumes["Volumes"]:
- volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags'])
- volume_dict_array.append(get_volume_info(volume, module.region))
- module.exit_json(volumes=volume_dict_array)
-
-
-def main():
- argument_spec = dict(filters=dict(default={}, type='dict'))
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._name == 'ec2_vol_facts':
- module.deprecate("The 'ec2_vol_facts' module has been renamed to 'ec2_vol_info'", version='2.13')
-
- connection = module.client('ec2')
-
- list_ec2_volumes(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py
deleted file mode 100644
index d111bed0f0..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py
+++ /dev/null
@@ -1,414 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: ec2_vpc_dhcp_option
-short_description: Manages DHCP Options, and can ensure the DHCP options for the given VPC match what's
- requested
-description:
- - This module removes or creates DHCP option sets, and can associate them with a VPC.
- Optionally, a new DHCP Options set can be created that converges a VPC's existing
- DHCP option set with values provided.
- When dhcp_options_id is provided, the module will
- 1. remove it (when state='absent'),
- 2. ensure tags are applied (when state='present' and tags are provided), or
- 3. attach it to a VPC (when state='present' and a vpc_id is provided).
- If any of the optional values are missing, they are treated as a no-op
- (i.e., they inherit what already exists for the VPC).
- To remove existing options while inheriting, supply an empty value
- (e.g. set ntp_servers to [] if you want to remove them from the VPC's options).
- Most of the options should be self-explanatory.
-author: "Joel Thompson (@joelthompson)"
-version_added: 2.1
-options:
- domain_name:
- description:
- - The domain name to set in the DHCP option sets
- type: str
- dns_servers:
- description:
- - A list of hosts to set the DNS servers for the VPC to. (Should be a
- list of IP addresses rather than host names.)
- type: list
- elements: str
- ntp_servers:
- description:
- - List of hosts to advertise as NTP servers for the VPC.
- type: list
- elements: str
- netbios_name_servers:
- description:
- - List of hosts to advertise as NetBIOS servers.
- type: list
- elements: str
- netbios_node_type:
- description:
- - NetBIOS node type to advertise in the DHCP options.
- The AWS recommendation is to use 2 (when using netbios name services)
- U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
- type: int
- vpc_id:
- description:
- - VPC ID to associate with the requested DHCP option set.
- If no vpc id is provided, and no matching option set is found then a new
- DHCP option set is created.
- type: str
- delete_old:
- description:
- - Whether to delete the old VPC DHCP option set when associating a new one.
- This is primarily useful for debugging/development purposes when you
- want to quickly roll back to the old option set. Note that this setting
- will be ignored, and the old DHCP option set will be preserved, if it
- is in use by any other VPC. (Otherwise, AWS will return an error.)
- type: bool
- default: 'yes'
- inherit_existing:
- description:
- - For any DHCP options not specified in these parameters, whether to
- inherit them from the options set already applied to vpc_id, or to
- reset them to be empty.
- type: bool
- default: 'no'
- tags:
- description:
- - Tags to be applied to a VPC options set if a new one is created, or
- if the resource_id is provided. (options must match)
- aliases: [ 'resource_tags']
- version_added: "2.1"
- type: dict
- dhcp_options_id:
- description:
- - The resource_id of an existing DHCP options set.
- If this is specified, then it will override other settings, except tags
- (which will be updated to match)
- version_added: "2.1"
- type: str
- state:
- description:
- - create/assign or remove the DHCP options.
- If state is set to absent, then a DHCP options set matched either
- by id, or by tags and options, will be removed if possible.
- default: present
- choices: [ 'absent', 'present' ]
- version_added: "2.1"
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - boto
-"""
-
-RETURN = """
-new_options:
- description: The DHCP options created, associated or found
- returned: when appropriate
- type: dict
- sample:
- domain-name-servers:
- - 10.0.0.1
- - 10.0.1.1
- netbios-name-servers:
- - 10.0.0.1
- - 10.0.1.1
- netbios-node-type: 2
- domain-name: "my.example.com"
-dhcp_options_id:
- description: The aws resource id of the primary DHCP options set created, found or removed
- type: str
- returned: when available
-changed:
- description: Whether the dhcp options were changed
- type: bool
- returned: always
-"""
-
-EXAMPLES = """
-# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
-# DHCP option set that may have been attached to that VPC.
-- ec2_vpc_dhcp_option:
- domain_name: "foo.example.com"
- region: us-east-1
- dns_servers:
- - 10.0.0.1
- - 10.0.1.1
- ntp_servers:
- - 10.0.0.2
- - 10.0.1.2
- netbios_name_servers:
- - 10.0.0.1
- - 10.0.1.1
- netbios_node_type: 2
- vpc_id: vpc-123456
- delete_old: True
- inherit_existing: False
-
-
-# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
-# keep any other existing settings. Also, keep the old DHCP option set around.
-- ec2_vpc_dhcp_option:
- region: us-east-1
- dns_servers:
- - "{{groups['dns-primary']}}"
- - "{{groups['dns-secondary']}}"
- vpc_id: vpc-123456
- inherit_existing: True
- delete_old: False
-
-
-## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
-## but do not assign to a VPC
-- ec2_vpc_dhcp_option:
- region: us-east-1
- dns_servers:
- - 4.4.4.4
- - 8.8.8.8
- tags:
- Name: google servers
- Environment: Test
-
-## Delete a DHCP options set that matches the tags and options specified
-- ec2_vpc_dhcp_option:
- region: us-east-1
- dns_servers:
- - 4.4.4.4
- - 8.8.8.8
- tags:
- Name: google servers
- Environment: Test
- state: absent
-
-## Associate a DHCP options set with a VPC by ID
-- ec2_vpc_dhcp_option:
- region: us-east-1
- dhcp_options_id: dopt-12345678
- vpc_id: vpc-123456
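-
-## Remove a DHCP options set by id (illustrative; the id is a placeholder).
-## The set is only deleted if it is no longer associated with any VPC.
-- ec2_vpc_dhcp_option:
- region: us-east-1
- dhcp_options_id: dopt-12345678
- state: absent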
-
-"""
-
-import collections
-import traceback
-from time import sleep, time
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
-
-if HAS_BOTO:
- import boto.vpc
- import boto.ec2
- from boto.exception import EC2ResponseError
-
-
-def get_resource_tags(vpc_conn, resource_id):
- return dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
-
-
-def retry_not_found(to_call, *args, **kwargs):
- start_time = time()
- while time() < start_time + 300:
- try:
- return to_call(*args, **kwargs)
- except EC2ResponseError as e:
- if e.error_code in ['InvalidDhcpOptionID.NotFound', 'InvalidDhcpOptionsID.NotFound']:
- sleep(3)
- continue
- raise e
-
-
-def ensure_tags(module, vpc_conn, resource_id, tags, add_only, check_mode):
- try:
- cur_tags = get_resource_tags(vpc_conn, resource_id)
- if tags == cur_tags:
- return {'changed': False, 'tags': cur_tags}
-
- to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
- if to_delete and not add_only:
- retry_not_found(vpc_conn.delete_tags, resource_id, to_delete, dry_run=check_mode)
-
- to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
- if to_add:
- retry_not_found(vpc_conn.create_tags, resource_id, to_add, dry_run=check_mode)
-
- latest_tags = get_resource_tags(vpc_conn, resource_id)
- return {'changed': True, 'tags': latest_tags}
- except EC2ResponseError as e:
- module.fail_json(msg="Failed to modify tags: %s" % e.message, exception=traceback.format_exc())
-
-
-def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
- """
- Returns the DHCP options object currently associated with the requested VPC ID using the VPC
- connection variable.
- """
- vpcs = vpc_conn.get_all_vpcs(vpc_ids=[vpc_id])
- if len(vpcs) != 1 or vpcs[0].dhcp_options_id == "default":
- return None
- dhcp_options = vpc_conn.get_all_dhcp_options(dhcp_options_ids=[vpcs[0].dhcp_options_id])
- if len(dhcp_options) != 1:
- return None
- return dhcp_options[0]
-
-
-def match_dhcp_options(vpc_conn, tags=None, options=None):
- """
- Finds a DHCP Options object that optionally matches the tags and options provided
- """
- dhcp_options = vpc_conn.get_all_dhcp_options()
- for dopts in dhcp_options:
- if (not tags) or get_resource_tags(vpc_conn, dopts.id) == tags:
- if (not options) or dopts.options == options:
- return(True, dopts)
- return(False, None)
-
-
-def remove_dhcp_options_by_id(vpc_conn, dhcp_options_id):
- associations = vpc_conn.get_all_vpcs(filters={'dhcpOptionsId': dhcp_options_id})
- if len(associations) > 0:
- return False
- else:
- vpc_conn.delete_dhcp_options(dhcp_options_id)
- return True
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- dhcp_options_id=dict(type='str', default=None),
- domain_name=dict(type='str', default=None),
- dns_servers=dict(type='list', default=None),
- ntp_servers=dict(type='list', default=None),
- netbios_name_servers=dict(type='list', default=None),
- netbios_node_type=dict(type='int', default=None),
- vpc_id=dict(type='str', default=None),
- delete_old=dict(type='bool', default=True),
- inherit_existing=dict(type='bool', default=False),
- tags=dict(type='dict', default=None, aliases=['resource_tags']),
- state=dict(type='str', default='present', choices=['present', 'absent'])
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
-
- params = module.params
- found = False
- changed = False
- new_options = collections.defaultdict(lambda: None)
-
- if not HAS_BOTO:
- module.fail_json(msg='boto is required for this module')
-
- region, ec2_url, boto_params = get_aws_connection_info(module)
- connection = connect_to_aws(boto.vpc, region, **boto_params)
-
- existing_options = None
-
- # First check if we were given a dhcp_options_id
- if not params['dhcp_options_id']:
- # No, so create new_options from the parameters
- if params['dns_servers'] is not None:
- new_options['domain-name-servers'] = params['dns_servers']
- if params['netbios_name_servers'] is not None:
- new_options['netbios-name-servers'] = params['netbios_name_servers']
- if params['ntp_servers'] is not None:
- new_options['ntp-servers'] = params['ntp_servers']
- if params['domain_name'] is not None:
- # needs to be a list for comparison with boto objects later
- new_options['domain-name'] = [params['domain_name']]
- if params['netbios_node_type'] is not None:
- # needs to be a list for comparison with boto objects later
- new_options['netbios-node-type'] = [str(params['netbios_node_type'])]
- # If we were given a vpc_id then we need to look at the options on that
- if params['vpc_id']:
- existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id'])
- # if we've been asked to inherit existing options, do that now
- if params['inherit_existing']:
- if existing_options:
- for option in ['domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
- if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]):
- new_options[option] = existing_options.options.get(option)
-
- # Do the vpc's dhcp options already match what we're asked for? if so we are done
- if existing_options and new_options == existing_options.options:
- module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=existing_options.id)
-
- # If no vpc_id was given, or the options don't match then look for an existing set using tags
- found, dhcp_option = match_dhcp_options(connection, params['tags'], new_options)
-
- # Now let's cover the case where there are existing options that we were told about by id
- # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
- else:
- supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id': params['dhcp_options_id']})
- if len(supplied_options) != 1:
- if params['state'] != 'absent':
- module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")
- else:
- found = True
- dhcp_option = supplied_options[0]
- if params['state'] != 'absent' and params['tags']:
- ensure_tags(module, connection, dhcp_option.id, params['tags'], False, module.check_mode)
-
- # Now we have the dhcp options set, let's do the necessary
-
- # if we found options we were asked to remove then try to do so
- if params['state'] == 'absent':
- if not module.check_mode:
- if found:
- changed = remove_dhcp_options_by_id(connection, dhcp_option.id)
- module.exit_json(changed=changed, new_options={})
-
- # otherwise if we haven't found the required options we have something to do
- elif not module.check_mode and not found:
-
- # create some dhcp options if we weren't able to use existing ones
- if not found:
- # Convert netbios-node-type and domain-name back to strings
- if new_options['netbios-node-type']:
- new_options['netbios-node-type'] = new_options['netbios-node-type'][0]
- if new_options['domain-name']:
- new_options['domain-name'] = new_options['domain-name'][0]
-
- # create the new dhcp options set requested
- dhcp_option = connection.create_dhcp_options(
- new_options['domain-name'],
- new_options['domain-name-servers'],
- new_options['ntp-servers'],
- new_options['netbios-name-servers'],
- new_options['netbios-node-type'])
-
- # wait for dhcp option to be accessible
- found_dhcp_opt = False
- try:
- found_dhcp_opt = retry_not_found(connection.get_all_dhcp_options, dhcp_options_ids=[dhcp_option.id])
- except EC2ResponseError as e:
- module.fail_json(msg="Failed to describe DHCP options", exception=traceback.format_exc)
- if not found_dhcp_opt:
- module.fail_json(msg="Failed to wait for {0} to be available.".format(dhcp_option.id))
-
- changed = True
- if params['tags']:
- ensure_tags(module, connection, dhcp_option.id, params['tags'], False, module.check_mode)
-
- # If we were given a vpc_id, then attach the options we now have to that before we finish
- if params['vpc_id'] and not module.check_mode:
- changed = True
- connection.associate_dhcp_options(dhcp_option.id, params['vpc_id'])
- # and remove old ones if that was requested
- if params['delete_old'] and existing_options:
- remove_dhcp_options_by_id(connection, existing_options.id)
-
- module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id)
-
-
-if __name__ == "__main__":
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option_info.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option_info.py
deleted file mode 100644
index ac909fb1a9..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option_info.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_dhcp_option_info
-short_description: Gather information about dhcp options sets in AWS
-description:
- - Gather information about dhcp options sets in AWS
- - This module was called C(ec2_vpc_dhcp_option_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.2"
-requirements: [ boto3 ]
-author: "Nick Aslanidis (@naslanidis)"
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
- type: dict
- dhcp_options_ids:
- description:
- - Get details of specific DHCP Option IDs.
- aliases: ['DhcpOptionIds']
- type: list
- elements: str
- dry_run:
- description:
- - Checks whether you have the required permissions to view the DHCP
- Options.
- aliases: ['DryRun']
- version_added: "2.4"
- type: bool
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Gather information about all DHCP Option sets for an account or profile
- ec2_vpc_dhcp_option_info:
- region: ap-southeast-2
- profile: production
- register: dhcp_info
-
-- name: Gather information about a filtered list of DHCP Option sets
- ec2_vpc_dhcp_option_info:
- region: ap-southeast-2
- profile: production
- filters:
- "tag:Name": "abc-123"
- register: dhcp_info
-
-- name: Gather information about a specific DHCP Option set by DhcpOptionId
- ec2_vpc_dhcp_option_info:
- region: ap-southeast-2
- profile: production
- dhcp_options_ids:
- - dopt-123fece2
- register: dhcp_info
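-
-# Each returned item is snake_cased, so the configurations can be read from
-# the registered result (illustrative follow-up).
-- name: Show the DHCP configurations of the first option set found
- debug:
- msg: "{{ dhcp_info.dhcp_options[0].dhcp_configurations }}"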
-
-'''
-
-RETURN = '''
-dhcp_options:
- description: The dhcp option sets for the account
- returned: always
- type: list
-
-changed:
- description: True if listing the dhcp options succeeds
- type: bool
- returned: always
-'''
-
-import traceback
-
-try:
- import botocore
-except ImportError:
- pass # caught by imported HAS_BOTO3
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (ec2_argument_spec, boto3_conn, HAS_BOTO3,
- ansible_dict_to_boto3_filter_list, get_aws_connection_info,
- camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict)
-
-
-def get_dhcp_options_info(dhcp_option):
- dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
- 'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
- 'Tags': boto3_tag_list_to_ansible_dict(dhcp_option.get('Tags', [{'Value': '', 'Key': 'Name'}]))}
- return dhcp_option_info
-
-
-def list_dhcp_options(client, module):
- params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters')))
-
- if module.params.get("dry_run"):
- params['DryRun'] = True
-
- if module.params.get("dhcp_options_ids"):
- params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids")
-
- try:
- all_dhcp_options = client.describe_dhcp_options(**params)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=str(e), exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- return [camel_dict_to_snake_dict(get_dhcp_options_info(option))
- for option in all_dhcp_options['DhcpOptions']]
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- filters=dict(type='dict', default={}),
- dry_run=dict(type='bool', default=False, aliases=['DryRun']),
- dhcp_options_ids=dict(type='list', aliases=['DhcpOptionIds'])
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
- if module._name == 'ec2_vpc_dhcp_option_facts':
- module.deprecate("The 'ec2_vpc_dhcp_option_facts' module has been renamed to 'ec2_vpc_dhcp_option_info'", version='2.13')
-
- # Validate Requirements
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 and botocore are required.')
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg="Can't authorize connection - " + str(e))
-
- # call your function here
- results = list_dhcp_options(connection, module)
-
- module.exit_json(dhcp_options=results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_net.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_net.py
deleted file mode 100644
index 30e4b1e94c..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_net.py
+++ /dev/null
@@ -1,524 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_net
-short_description: Configure AWS virtual private clouds
-description:
- - Create, modify, and terminate AWS virtual private clouds.
-version_added: "2.0"
-author:
- - Jonathan Davila (@defionscode)
- - Sloane Hertel (@s-hertel)
-options:
- name:
- description:
- - The name to give your VPC. This is used in combination with C(cidr_block) to determine if a VPC already exists.
- required: yes
- type: str
- cidr_block:
- description:
- - The primary CIDR of the VPC. After 2.5 a list of CIDRs can be provided. The first in the list will be used as the primary CIDR
- and is used in conjunction with the C(name) to ensure idempotence.
- required: yes
- type: list
- elements: str
- ipv6_cidr:
- description:
- - Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses,
- or the size of the CIDR block.
- default: False
- type: bool
- version_added: '2.10'
- purge_cidrs:
- description:
- - Remove CIDRs that are associated with the VPC and are not specified in C(cidr_block).
- default: no
- type: bool
- version_added: '2.5'
- tenancy:
- description:
- - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
- default: default
- choices: [ 'default', 'dedicated' ]
- type: str
- dns_support:
- description:
- - Whether to enable AWS DNS support.
- default: yes
- type: bool
- dns_hostnames:
- description:
- - Whether to enable AWS hostname support.
- default: yes
- type: bool
- dhcp_opts_id:
- description:
- - The id of the DHCP options to use for this VPC.
- type: str
- tags:
- description:
- - The tags you want attached to the VPC. This is independent of the name value; note that if you pass a 'Name' key it will override the Name of
- the VPC if it differs.
- aliases: [ 'resource_tags' ]
- type: dict
- state:
- description:
- - The state of the VPC. Either absent or present.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- multi_ok:
- description:
- - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want
- duplicate VPCs created.
- type: bool
- default: false
-requirements:
- - boto3
- - botocore
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: create a VPC with dedicated tenancy and a couple of tags
- ec2_vpc_net:
- name: Module_dev2
- cidr_block: 10.10.0.0/16
- region: us-east-1
- tags:
- module: ec2_vpc_net
- this: works
- tenancy: dedicated
-
-- name: create a VPC with dedicated tenancy and request an IPv6 CIDR
- ec2_vpc_net:
- name: Module_dev2
- cidr_block: 10.10.0.0/16
- ipv6_cidr: True
- region: us-east-1
- tenancy: dedicated
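-
-# The name and primary CIDR that created a VPC also identify it for removal
-# (illustrative).
-- name: remove the VPC again
- ec2_vpc_net:
- name: Module_dev2
- cidr_block: 10.10.0.0/16
- region: us-east-1
- state: absent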
-'''
-
-RETURN = '''
-vpc:
- description: info about the VPC that was created or deleted
- returned: always
- type: complex
- contains:
- cidr_block:
- description: The CIDR of the VPC
- returned: always
- type: str
- sample: 10.0.0.0/16
- cidr_block_association_set:
- description: IPv4 CIDR blocks associated with the VPC
- returned: success
- type: list
- sample:
- "cidr_block_association_set": [
- {
- "association_id": "vpc-cidr-assoc-97aeeefd",
- "cidr_block": "20.0.0.0/24",
- "cidr_block_state": {
- "state": "associated"
- }
- }
- ]
- classic_link_enabled:
- description: indicates whether ClassicLink is enabled
- returned: always
- type: bool
- sample: false
- dhcp_options_id:
- description: the id of the DHCP options associated with this VPC
- returned: always
- type: str
- sample: dopt-0fb8bd6b
- id:
- description: VPC resource id
- returned: always
- type: str
- sample: vpc-c2e00da5
- instance_tenancy:
- description: indicates whether VPC uses default or dedicated tenancy
- returned: always
- type: str
- sample: default
- ipv6_cidr_block_association_set:
- description: IPv6 CIDR blocks associated with the VPC
- returned: success
- type: list
- sample:
- "ipv6_cidr_block_association_set": [
- {
- "association_id": "vpc-cidr-assoc-97aeeefd",
- "ipv6_cidr_block": "2001:db8::/56",
- "ipv6_cidr_block_state": {
- "state": "associated"
- }
- }
- ]
- is_default:
- description: indicates whether this is the default VPC
- returned: always
- type: bool
- sample: false
- state:
- description: state of the VPC
- returned: always
- type: str
- sample: available
- tags:
- description: tags attached to the VPC, includes name
- returned: always
- type: complex
- contains:
- Name:
- description: name tag for the VPC
- returned: always
- type: str
- sample: pk_vpc4
-'''
-
-try:
- import botocore
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-from time import sleep, time
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict, compare_aws_tags,
- ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict)
-from ansible.module_utils.six import string_types
-from ansible.module_utils._text import to_native
-from ansible.module_utils.network.common.utils import to_subnet
-
-
-def vpc_exists(module, vpc, name, cidr_block, multi):
- """Returns None or a vpc object depending on the existence of a VPC. When supplied
- with a CIDR, it will check for matching tags to determine if it is a match
- otherwise it will assume the VPC does not exist and thus return None.
- """
- try:
- matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': cidr_block}])['Vpcs']
- # If an exact match using the list of CIDRs isn't found, check for a match with the first CIDR, as documented for C(cidr_block)
- if not matching_vpcs:
- matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': [cidr_block[0]]}])['Vpcs']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to describe VPCs")
-
- if multi:
- return None
- elif len(matching_vpcs) == 1:
- return matching_vpcs[0]['VpcId']
- elif len(matching_vpcs) > 1:
- module.fail_json(msg='Currently there are %d VPCs that have the same name and '
- 'CIDR block you specified. If you would like to create '
- 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
- return None
-
-
-@AWSRetry.backoff(delay=3, tries=8, catch_extra_error_codes=['InvalidVpcID.NotFound'])
-def get_classic_link_with_backoff(connection, vpc_id):
- try:
- return connection.describe_vpc_classic_link(VpcIds=[vpc_id])['Vpcs'][0].get('ClassicLinkEnabled')
- except botocore.exceptions.ClientError as e:
- if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
- return False
- else:
- raise
-
-
-def get_vpc(module, connection, vpc_id):
- # wait for vpc to be available
- try:
- connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id))
-
- try:
- vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to describe VPCs")
- try:
- vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to describe VPCs")
-
- return vpc_obj
-
-
-def update_vpc_tags(connection, module, vpc_id, tags, name):
- if tags is None:
- tags = dict()
-
- tags.update({'Name': name})
- tags = dict((k, to_native(v)) for k, v in tags.items())
- try:
- current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}])['Tags'])
- tags_to_update, dummy = compare_aws_tags(current_tags, tags, False)
- if tags_to_update:
- if not module.check_mode:
- tags = ansible_dict_to_boto3_tag_list(tags_to_update)
- vpc_obj = connection.create_tags(Resources=[vpc_id], Tags=tags, aws_retry=True)
-
- # Wait for tags to be updated
- expected_tags = boto3_tag_list_to_ansible_dict(tags)
- filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()]
- connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters)
-
- return True
- else:
- return False
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to update tags")
-
-
-def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
- if vpc_obj['DhcpOptionsId'] != dhcp_id:
- if not module.check_mode:
- try:
- connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id))
-
- try:
- # Wait for DhcpOptionsId to be updated
- filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}]
- connection.get_waiter('vpc_available').wait(VpcIds=[vpc_obj['VpcId']], Filters=filters)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg="Failed to wait for DhcpOptionsId to be updated")
-
- return True
- else:
- return False
-
-
-def create_vpc(connection, module, cidr_block, tenancy):
- try:
- if not module.check_mode:
- vpc_obj = connection.create_vpc(CidrBlock=cidr_block, InstanceTenancy=tenancy)
- else:
- module.exit_json(changed=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Failed to create the VPC")
-
- # wait for vpc to exist
- try:
- connection.get_waiter('vpc_exists').wait(VpcIds=[vpc_obj['Vpc']['VpcId']])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be created.".format(vpc_obj['Vpc']['VpcId']))
-
- return vpc_obj['Vpc']['VpcId']
-
-
-def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
- start_time = time()
- updated = False
- while time() < start_time + 300:
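-        # describe_vpc_attribute returns the value under a CamelCased key,
-        # e.g. attribute 'enableDnsSupport' comes back as 'EnableDnsSupport'.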
- current_value = connection.describe_vpc_attribute(
- Attribute=attribute,
- VpcId=vpc_id
- )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value']
- if current_value != expected_value:
- sleep(3)
- else:
- updated = True
- break
- if not updated:
- module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
-
-
-def get_cidr_network_bits(module, cidr_block):
- fixed_cidrs = []
- for cidr in cidr_block:
- split_addr = cidr.split('/')
- if len(split_addr) == 2:
- # this_ip is a IPv4 CIDR that may or may not have host bits set
- # Get the network bits.
- valid_cidr = to_subnet(split_addr[0], split_addr[1])
- if cidr != valid_cidr:
- module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
- "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr))
- fixed_cidrs.append(valid_cidr)
- else:
- # let AWS handle invalid CIDRs
- fixed_cidrs.append(cidr)
- return fixed_cidrs
-
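-# For illustration (hypothetical values): get_cidr_network_bits(module, ['10.0.0.5/24'])
-# warns that host bits are set and returns ['10.0.0.0/24'], the network address that
-# to_subnet() derives from the address/prefix pair.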
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- cidr_block=dict(type='list', required=True),
- ipv6_cidr=dict(type='bool', default=False),
- tenancy=dict(choices=['default', 'dedicated'], default='default'),
- dns_support=dict(type='bool', default=True),
- dns_hostnames=dict(type='bool', default=True),
- dhcp_opts_id=dict(),
- tags=dict(type='dict', aliases=['resource_tags']),
- state=dict(choices=['present', 'absent'], default='present'),
- multi_ok=dict(type='bool', default=False),
- purge_cidrs=dict(type='bool', default=False),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- name = module.params.get('name')
- cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
- ipv6_cidr = module.params.get('ipv6_cidr')
- purge_cidrs = module.params.get('purge_cidrs')
- tenancy = module.params.get('tenancy')
- dns_support = module.params.get('dns_support')
- dns_hostnames = module.params.get('dns_hostnames')
- dhcp_id = module.params.get('dhcp_opts_id')
- tags = module.params.get('tags')
- state = module.params.get('state')
- multi = module.params.get('multi_ok')
-
- changed = False
-
- connection = module.client(
- 'ec2',
- retry_decorator=AWSRetry.jittered_backoff(
- retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
- )
- )
-
- if dns_hostnames and not dns_support:
- module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
-
- if state == 'present':
-
- # Check if VPC exists
- vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
-
- if vpc_id is None:
- vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
- changed = True
-
- vpc_obj = get_vpc(module, connection, vpc_id)
-
- associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
- if cidr['CidrBlockState']['State'] != 'disassociated')
- to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
- to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in cidr_block]
- expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add
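-    # For illustration (hypothetical values): with cidr_block=['10.0.0.0/16', '10.1.0.0/16'] and
-    # associated_cidrs={'10.0.0.0/16': 'vpc-cidr-assoc-aaa', '10.2.0.0/16': 'vpc-cidr-assoc-bbb'},
-    # to_add=['10.1.0.0/16'], to_remove=['vpc-cidr-assoc-bbb'] (acted on only when purge_cidrs=true)
-    # and expected_cidrs=['10.0.0.0/16', '10.1.0.0/16'].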
-
- if len(cidr_block) > 1:
- for cidr in to_add:
- changed = True
- try:
- connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
- if ipv6_cidr:
- if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
- module.warn("Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}".format(
- vpc_id,
- vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
- else:
- try:
- connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
-
- if purge_cidrs:
- for association_id in to_remove:
- changed = True
- try:
- connection.disassociate_vpc_cidr_block(AssociationId=association_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
- "are associated with the CIDR block before you can disassociate it.".format(association_id))
-
- if dhcp_id is not None:
- try:
- if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Failed to update DHCP options")
-
- if tags is not None or name is not None:
- try:
- if update_vpc_tags(connection, module, vpc_id, tags, name):
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to update tags")
-
- current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
- current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
- if current_dns_enabled != dns_support:
- changed = True
- if not module.check_mode:
- try:
- connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Failed to update enabled dns support attribute")
- if current_dns_hostnames != dns_hostnames:
- changed = True
- if not module.check_mode:
- try:
- connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames})
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")
-
- # wait for associated cidrs to match
- if to_add or to_remove:
- try:
- connection.get_waiter('vpc_available').wait(
- VpcIds=[vpc_id],
- Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Failed to wait for CIDRs to update")
-
- # try to wait for enableDnsSupport and enableDnsHostnames to match
- wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
- wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)
-
- final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
- final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', []))
- final_state['id'] = final_state.pop('vpc_id')
-
- module.exit_json(changed=changed, vpc=final_state)
-
- elif state == 'absent':
-
- # Check if VPC exists
- vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
-
- if vpc_id is not None:
- try:
- if not module.check_mode:
- connection.delete_vpc(VpcId=vpc_id)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
- "and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id))
-
- module.exit_json(changed=changed, vpc={})
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_net_info.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_net_info.py
deleted file mode 100644
index 96c4f46155..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_net_info.py
+++ /dev/null
@@ -1,306 +0,0 @@
-#!/usr/bin/python
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_net_info
-short_description: Gather information about ec2 VPCs in AWS
-description:
- - Gather information about ec2 VPCs in AWS
- - This module was called C(ec2_vpc_net_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.1"
-author: "Rob White (@wimnat)"
-requirements:
- - boto3
- - botocore
-options:
- vpc_ids:
- description:
- - A list of VPC IDs that exist in your account.
- version_added: "2.5"
- type: list
- elements: str
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
- type: dict
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all VPCs
-- ec2_vpc_net_info:
-
-# Gather information about a particular VPC using VPC ID
-- ec2_vpc_net_info:
- vpc_ids: vpc-00112233
-
-# Gather information about any VPC with a tag key Name and value Example
-- ec2_vpc_net_info:
- filters:
- "tag:Name": Example
-
-'''
-
-RETURN = '''
-vpcs:
- description: Returns an array of complex objects as described below.
- returned: success
- type: complex
- contains:
- id:
- description: The ID of the VPC (for backwards compatibility).
- returned: always
- type: str
- vpc_id:
-      description: The ID of the VPC.
- returned: always
- type: str
- state:
- description: The state of the VPC.
- returned: always
- type: str
- tags:
- description: A dict of tags associated with the VPC.
- returned: always
- type: dict
- instance_tenancy:
- description: The instance tenancy setting for the VPC.
- returned: always
- type: str
- is_default:
- description: True if this is the default VPC for account.
- returned: always
- type: bool
- cidr_block:
- description: The IPv4 CIDR block assigned to the VPC.
- returned: always
- type: str
- classic_link_dns_supported:
- description: True/False depending on attribute setting for classic link DNS support.
- returned: always
- type: bool
- classic_link_enabled:
- description: True/False depending on if classic link support is enabled.
- returned: always
- type: bool
- enable_dns_hostnames:
- description: True/False depending on attribute setting for DNS hostnames support.
- returned: always
- type: bool
- enable_dns_support:
- description: True/False depending on attribute setting for DNS support.
- returned: always
- type: bool
- cidr_block_association_set:
- description: An array of IPv4 cidr block association set information.
- returned: always
- type: complex
- contains:
- association_id:
- description: The association ID
- returned: always
- type: str
- cidr_block:
- description: The IPv4 CIDR block that is associated with the VPC.
- returned: always
- type: str
- cidr_block_state:
- description: A hash/dict that contains a single item. The state of the cidr block association.
- returned: always
- type: dict
- contains:
- state:
- description: The CIDR block association state.
- returned: always
- type: str
- ipv6_cidr_block_association_set:
- description: An array of IPv6 cidr block association set information.
- returned: always
- type: complex
- contains:
- association_id:
- description: The association ID
- returned: always
- type: str
- ipv6_cidr_block:
- description: The IPv6 CIDR block that is associated with the VPC.
- returned: always
- type: str
- ipv6_cidr_block_state:
- description: A hash/dict that contains a single item. The state of the cidr block association.
- returned: always
- type: dict
- contains:
- state:
- description: The CIDR block association state.
- returned: always
- type: str
-'''
-
-import traceback
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (
- boto3_conn,
- ec2_argument_spec,
- get_aws_connection_info,
- AWSRetry,
- HAS_BOTO3,
- boto3_tag_list_to_ansible_dict,
- camel_dict_to_snake_dict,
- ansible_dict_to_boto3_filter_list
-)
-
-try:
- import botocore
-except ImportError:
- pass # caught by imported HAS_BOTO3
-
-
-@AWSRetry.exponential_backoff()
-def describe_vpc_attr_with_backoff(connection, vpc_id, vpc_attribute):
- """
- Describe VPC Attributes with AWSRetry backoff throttling support.
-
- connection : boto3 client connection object
- vpc_id : The VPC ID to pull attribute value from
- vpc_attribute : The VPC attribute to get the value from - valid options = enableDnsSupport or enableDnsHostnames
- """
-
- return connection.describe_vpc_attribute(VpcId=vpc_id, Attribute=vpc_attribute)
-
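-# For illustration (hypothetical ID): describe_vpc_attr_with_backoff(connection,
-# 'vpc-0123456789abcdef0', 'enableDnsSupport') returns a response whose
-# 'EnableDnsSupport' key holds a dict like {'Value': True}.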
-
-def describe_vpcs(connection, module):
- """
- Describe VPCs.
-
- connection : boto3 client connection object
- module : AnsibleModule object
- """
- # collect parameters
- filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
- vpc_ids = module.params.get('vpc_ids')
-
- # init empty list for return vars
- vpc_info = list()
- vpc_list = list()
-
- # Get the basic VPC info
- try:
- response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to describe VPCs {0}: {1}".format(vpc_ids, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to describe VPCs {0}: {1}".format(vpc_ids, to_native(e)),
- exception=traceback.format_exc())
-
- # Loop through results and create a list of VPC IDs
- for vpc in response['Vpcs']:
- vpc_list.append(vpc['VpcId'])
-
-    # We can get these results in bulk, but it still takes two separate calls to the API
- try:
- cl_enabled = connection.describe_vpc_classic_link(VpcIds=vpc_list)
- except botocore.exceptions.ClientError as e:
- if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
- cl_enabled = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkEnabled': False} for vpc_id in vpc_list]}
- else:
- module.fail_json(msg="Unable to describe if ClassicLink is enabled: {0}".format(to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to describe if ClassicLink is enabled: {0}".format(to_native(e)),
- exception=traceback.format_exc())
-
- try:
- cl_dns_support = connection.describe_vpc_classic_link_dns_support(VpcIds=vpc_list)
- except botocore.exceptions.ClientError as e:
- if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
- cl_dns_support = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkDnsSupported': False} for vpc_id in vpc_list]}
- else:
- module.fail_json(msg="Unable to describe if ClassicLinkDns is supported: {0}".format(to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to describe if ClassicLinkDns is supported: {0}".format(to_native(e)),
- exception=traceback.format_exc())
-
- # Loop through the results and add the other VPC attributes we gathered
- for vpc in response['Vpcs']:
- error_message = "Unable to describe VPC attribute {0}: {1}"
- # We have to make two separate calls per VPC to get these attributes.
- try:
- dns_support = describe_vpc_attr_with_backoff(connection, vpc['VpcId'], 'enableDnsSupport')
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=error_message.format('enableDnsSupport', to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg=error_message.format('enableDnsSupport', to_native(e)),
- exception=traceback.format_exc())
- try:
- dns_hostnames = describe_vpc_attr_with_backoff(connection, vpc['VpcId'], 'enableDnsHostnames')
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=error_message.format('enableDnsHostnames', to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg=error_message.format('enableDnsHostnames', to_native(e)),
- exception=traceback.format_exc())
-
- # loop through the ClassicLink Enabled results and add the value for the correct VPC
- for item in cl_enabled['Vpcs']:
- if vpc['VpcId'] == item['VpcId']:
- vpc['ClassicLinkEnabled'] = item['ClassicLinkEnabled']
-
- # loop through the ClassicLink DNS support results and add the value for the correct VPC
- for item in cl_dns_support['Vpcs']:
- if vpc['VpcId'] == item['VpcId']:
- vpc['ClassicLinkDnsSupported'] = item['ClassicLinkDnsSupported']
-
- # add the two DNS attributes
- vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value')
- vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value')
- # for backwards compatibility
- vpc['id'] = vpc['VpcId']
- vpc_info.append(camel_dict_to_snake_dict(vpc))
- # convert tag list to ansible dict
- vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', []))
-
- module.exit_json(vpcs=vpc_info)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- vpc_ids=dict(type='list', default=[]),
- filters=dict(type='dict', default={})
- ))
-
- module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._name == 'ec2_vpc_net_facts':
- module.deprecate("The 'ec2_vpc_net_facts' module has been renamed to 'ec2_vpc_net_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 and botocore are required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
- connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
-
- describe_vpcs(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py
deleted file mode 100644
index 5085e99b79..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py
+++ /dev/null
@@ -1,604 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_subnet
-short_description: Manage subnets in AWS virtual private clouds
-description:
- - Manage subnets in AWS virtual private clouds.
-version_added: "2.0"
-author:
-- Robert Estelle (@erydo)
-- Brad Davidson (@brandond)
-requirements: [ boto3 ]
-options:
- az:
- description:
- - "The availability zone for the subnet."
- type: str
- cidr:
- description:
- - "The CIDR block for the subnet. E.g. 192.0.2.0/24."
- type: str
- required: true
- ipv6_cidr:
- description:
- - "The IPv6 CIDR block for the subnet. The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range."
- - "Required if I(assign_instances_ipv6=true)"
- version_added: "2.5"
- type: str
- tags:
- description:
- - "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
- aliases: [ 'resource_tags' ]
- type: dict
- state:
- description:
- - "Create or remove the subnet."
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- vpc_id:
- description:
- - "VPC ID of the VPC in which to create or delete the subnet."
- required: true
- type: str
- map_public:
- description:
- - "Specify C(yes) to indicate that instances launched into the subnet should be assigned public IP address by default."
- type: bool
- default: 'no'
- version_added: "2.4"
- assign_instances_ipv6:
- description:
- - "Specify C(yes) to indicate that instances launched into the subnet should be automatically assigned an IPv6 address."
- type: bool
- default: false
- version_added: "2.5"
- wait:
- description:
- - "When I(wait=true) and I(state=present), module will wait for subnet to be in available state before continuing."
- type: bool
- default: true
- version_added: "2.5"
- wait_timeout:
- description:
- - "Number of seconds to wait for subnet to become available I(wait=True)."
- default: 300
- version_added: "2.5"
- type: int
- purge_tags:
- description:
- - Whether or not to remove tags that do not appear in the I(tags) list.
- type: bool
- default: true
- version_added: "2.5"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create subnet for database servers
- ec2_vpc_subnet:
- state: present
- vpc_id: vpc-123456
- cidr: 10.0.1.16/28
- tags:
- Name: Database Subnet
- register: database_subnet
-
-- name: Remove subnet for database servers
- ec2_vpc_subnet:
- state: absent
- vpc_id: vpc-123456
- cidr: 10.0.1.16/28
-
-- name: Create subnet with IPv6 block assigned
- ec2_vpc_subnet:
- state: present
- vpc_id: vpc-123456
- cidr: 10.1.100.0/24
- ipv6_cidr: 2001:db8:0:102::/64
-
-- name: Remove IPv6 block assigned to subnet
- ec2_vpc_subnet:
- state: present
- vpc_id: vpc-123456
- cidr: 10.1.100.0/24
- ipv6_cidr: ''
-'''
-
-RETURN = '''
-subnet:
- description: Dictionary of subnet values
- returned: I(state=present)
- type: complex
- contains:
- id:
- description: Subnet resource id
- returned: I(state=present)
- type: str
- sample: subnet-b883b2c4
- cidr_block:
- description: The IPv4 CIDR of the Subnet
- returned: I(state=present)
- type: str
- sample: "10.0.0.0/16"
- ipv6_cidr_block:
- description: The IPv6 CIDR block actively associated with the Subnet
- returned: I(state=present)
- type: str
- sample: "2001:db8:0:102::/64"
- availability_zone:
- description: Availability zone of the Subnet
- returned: I(state=present)
- type: str
- sample: us-east-1a
- state:
- description: state of the Subnet
- returned: I(state=present)
- type: str
- sample: available
- tags:
- description: tags attached to the Subnet, includes name
- returned: I(state=present)
- type: dict
- sample: {"Name": "My Subnet", "env": "staging"}
- map_public_ip_on_launch:
- description: whether public IP is auto-assigned to new instances
- returned: I(state=present)
- type: bool
- sample: false
- assign_ipv6_address_on_creation:
- description: whether IPv6 address is auto-assigned to new instances
- returned: I(state=present)
- type: bool
- sample: false
- vpc_id:
- description: the id of the VPC where this Subnet exists
- returned: I(state=present)
- type: str
- sample: vpc-67236184
- available_ip_address_count:
- description: number of available IPv4 addresses
- returned: I(state=present)
- type: str
- sample: 251
- default_for_az:
- description: indicates whether this is the default Subnet for this Availability Zone
- returned: I(state=present)
- type: bool
- sample: false
- ipv6_association_id:
- description: The IPv6 association ID for the currently associated CIDR
- returned: I(state=present)
- type: str
- sample: subnet-cidr-assoc-b85c74d2
- ipv6_cidr_block_association_set:
- description: An array of IPv6 cidr block association set information.
- returned: I(state=present)
- type: complex
- contains:
- association_id:
- description: The association ID
- returned: always
- type: str
- ipv6_cidr_block:
- description: The IPv6 CIDR block that is associated with the subnet.
- returned: always
- type: str
- ipv6_cidr_block_state:
- description: A hash/dict that contains a single item. The state of the cidr block association.
- returned: always
- type: dict
- contains:
- state:
- description: The CIDR block association state.
- returned: always
- type: str
-'''
-
-
-import time
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.waiters import get_waiter
-from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, ansible_dict_to_boto3_tag_list,
- camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags, AWSRetry)
-
-
-def get_subnet_info(subnet):
- if 'Subnets' in subnet:
- return [get_subnet_info(s) for s in subnet['Subnets']]
- elif 'Subnet' in subnet:
- subnet = camel_dict_to_snake_dict(subnet['Subnet'])
- else:
- subnet = camel_dict_to_snake_dict(subnet)
-
- if 'tags' in subnet:
- subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags'])
- else:
- subnet['tags'] = dict()
-
- if 'subnet_id' in subnet:
- subnet['id'] = subnet['subnet_id']
- del subnet['subnet_id']
-
- subnet['ipv6_cidr_block'] = ''
- subnet['ipv6_association_id'] = ''
- ipv6set = subnet.get('ipv6_cidr_block_association_set')
- if ipv6set:
- for item in ipv6set:
- if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'):
- subnet['ipv6_cidr_block'] = item['ipv6_cidr_block']
- subnet['ipv6_association_id'] = item['association_id']
-
- return subnet
-
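-# For illustration (hypothetical values): a subnet whose IPv6 association is in the
-# 'associated' state is flattened to ipv6_cidr_block='2001:db8:0:102::/64' and
-# ipv6_association_id='subnet-cidr-assoc-0abc'; without one, both default to ''.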
-
-@AWSRetry.exponential_backoff()
-def describe_subnets_with_backoff(client, **params):
- return client.describe_subnets(**params)
-
-
-def waiter_params(module, params, start_time):
- if not module.botocore_at_least("1.7.0"):
- remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time())
- params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5}
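-        # For illustration (hypothetical values): with wait_timeout=300 and 60s already
-        # elapsed, remaining_wait_timeout is 240, so the waiter polls every 5s up to 48 times.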
- return params
-
-
-def handle_waiter(conn, module, waiter_name, params, start_time):
- try:
- get_waiter(conn, waiter_name).wait(
- **waiter_params(module, params, start_time)
- )
- except botocore.exceptions.WaiterError as e:
- module.fail_json_aws(e, "Failed to wait for updates to complete")
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "An exception happened while trying to wait for updates")
-
-
-def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, az=None, start_time=None):
- wait = module.params['wait']
- wait_timeout = module.params['wait_timeout']
-
- params = dict(VpcId=vpc_id,
- CidrBlock=cidr)
-
- if ipv6_cidr:
- params['Ipv6CidrBlock'] = ipv6_cidr
-
- if az:
- params['AvailabilityZone'] = az
-
- try:
- subnet = get_subnet_info(conn.create_subnet(**params))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create subnet")
-
-    # Sometimes AWS takes its time to create a subnet, so using the new
-    # subnet's id for follow-up calls such as create_tags can raise an
-    # exception.
- if wait and subnet.get('state') != 'available':
- handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
- try:
- conn.get_waiter('subnet_available').wait(
- **waiter_params(module, {'SubnetIds': [subnet['id']]}, start_time)
- )
- subnet['state'] = 'available'
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Create subnet action timed out waiting for subnet to become available")
-
- return subnet
-
-
-def ensure_tags(conn, module, subnet, tags, purge_tags, start_time):
- changed = False
-
- filters = ansible_dict_to_boto3_filter_list({'resource-id': subnet['id'], 'resource-type': 'subnet'})
- try:
- cur_tags = conn.describe_tags(Filters=filters)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't describe tags")
-
- to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
-
- if to_update:
- try:
- if not module.check_mode:
- AWSRetry.exponential_backoff(
- catch_extra_error_codes=['InvalidSubnetID.NotFound']
- )(conn.create_tags)(
- Resources=[subnet['id']],
- Tags=ansible_dict_to_boto3_tag_list(to_update)
- )
-
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create tags")
-
- if to_delete:
- try:
- if not module.check_mode:
- tags_list = []
- for key in to_delete:
- tags_list.append({'Key': key})
-
- AWSRetry.exponential_backoff(
- catch_extra_error_codes=['InvalidSubnetID.NotFound']
- )(conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list)
-
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete tags")
-
- if module.params['wait'] and not module.check_mode:
- # Wait for tags to be updated
- filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()]
- handle_waiter(conn, module, 'subnet_exists',
- {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
-
- return changed
-
-
-def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time):
- if check_mode:
- return
- try:
- conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public})
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
-
-
-def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time):
- if check_mode:
- return
- try:
- conn.modify_subnet_attribute(SubnetId=subnet['id'], AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6})
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
-
-
-def disassociate_ipv6_cidr(conn, module, subnet, start_time):
- if subnet.get('assign_ipv6_address_on_creation'):
- ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time)
-
- try:
- conn.disassociate_subnet_cidr_block(AssociationId=subnet['ipv6_association_id'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}"
- .format(subnet['ipv6_association_id'], subnet['id']))
-
- # Wait for cidr block to be disassociated
- if module.params['wait']:
- filters = ansible_dict_to_boto3_filter_list(
- {'ipv6-cidr-block-association.state': ['disassociated'],
- 'vpc-id': subnet['vpc_id']}
- )
- handle_waiter(conn, module, 'subnet_exists',
- {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
-
-
-def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time):
- wait = module.params['wait']
- changed = False
-
- if subnet['ipv6_association_id'] and not ipv6_cidr:
- if not check_mode:
- disassociate_ipv6_cidr(conn, module, subnet, start_time)
- changed = True
-
- if ipv6_cidr:
- filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr,
- 'vpc-id': subnet['vpc_id']})
-
- try:
- check_subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get subnet info")
-
- if check_subnets and check_subnets[0]['ipv6_cidr_block']:
- module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr))
-
- if subnet['ipv6_association_id']:
- if not check_mode:
- disassociate_ipv6_cidr(conn, module, subnet, start_time)
- changed = True
-
-        associate_resp = {}  # stays empty in check mode so the lookup below is safe
-        try:
- if not check_mode:
- associate_resp = conn.associate_subnet_cidr_block(SubnetId=subnet['id'], Ipv6CidrBlock=ipv6_cidr)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id']))
- else:
- if not check_mode and wait:
- filters = ansible_dict_to_boto3_filter_list(
- {'ipv6-cidr-block-association.state': ['associated'],
- 'vpc-id': subnet['vpc_id']}
- )
- handle_waiter(conn, module, 'subnet_exists',
- {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
-
- if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'):
- subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId']
- subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
- if subnet['ipv6_cidr_block_association_set']:
- subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])
- else:
- subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']))
-
- return changed
-
-
-def get_matching_subnet(conn, module, vpc_id, cidr):
- filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr})
- try:
- subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get matching subnet")
-
- if subnets:
- return subnets[0]
-
- return None
-
-
-def ensure_subnet_present(conn, module):
- subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
- changed = False
-
- # Initialize start so max time does not exceed the specified wait_timeout for multiple operations
- start_time = time.time()
-
- if subnet is None:
- if not module.check_mode:
- subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'],
- ipv6_cidr=module.params['ipv6_cidr'], az=module.params['az'], start_time=start_time)
- changed = True
- # Subnet will be None when check_mode is true
- if subnet is None:
- return {
- 'changed': changed,
- 'subnet': {}
- }
- if module.params['wait']:
- handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
-
- if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'):
- if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time):
- changed = True
-
- if module.params['map_public'] != subnet['map_public_ip_on_launch']:
- ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time)
- changed = True
-
- if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'):
- ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time)
- changed = True
-
- if module.params['tags'] != subnet['tags']:
- stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items())
- if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time):
- changed = True
-
- subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
- if not module.check_mode and module.params['wait']:
- # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation
- # so we only wait for those if necessary just before returning the subnet
- subnet = ensure_final_subnet(conn, module, subnet, start_time)
-
- return {
- 'changed': changed,
- 'subnet': subnet
- }
-
-
-def ensure_final_subnet(conn, module, subnet, start_time):
- for rewait in range(0, 30):
- map_public_correct = False
- assign_ipv6_correct = False
-
- if module.params['map_public'] == subnet['map_public_ip_on_launch']:
- map_public_correct = True
- else:
- if module.params['map_public']:
- handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time)
- else:
- handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time)
-
- if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'):
- assign_ipv6_correct = True
- else:
- if module.params['assign_instances_ipv6']:
- handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
- else:
- handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
-
- if map_public_correct and assign_ipv6_correct:
- break
-
- time.sleep(5)
- subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
-
- return subnet
-
-
-def ensure_subnet_absent(conn, module):
- subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
- if subnet is None:
- return {'changed': False}
-
- try:
- if not module.check_mode:
- conn.delete_subnet(SubnetId=subnet['id'])
- if module.params['wait']:
- handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time())
- return {'changed': True}
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete subnet")
-
-
-def main():
- argument_spec = dict(
- az=dict(default=None, required=False),
- cidr=dict(required=True),
- ipv6_cidr=dict(default='', required=False),
- state=dict(default='present', choices=['present', 'absent']),
- tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
- vpc_id=dict(required=True),
- map_public=dict(default=False, required=False, type='bool'),
- assign_instances_ipv6=dict(default=False, required=False, type='bool'),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=300, required=False),
- purge_tags=dict(default=True, type='bool')
- )
-
- required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])]
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
-
- if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'):
- module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string")
-
- if not module.botocore_at_least("1.7.0"):
- module.warn("botocore >= 1.7.0 is required to use wait_timeout for custom wait times")
-
- connection = module.client('ec2')
-
- state = module.params.get('state')
-
- try:
- if state == 'present':
- result = ensure_subnet_present(connection, module)
- elif state == 'absent':
- result = ensure_subnet_absent(connection, module)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_info.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_info.py
deleted file mode 100644
index d582e31f3d..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_info.py
+++ /dev/null
@@ -1,250 +0,0 @@
-#!/usr/bin/python
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_subnet_info
-short_description: Gather information about ec2 VPC subnets in AWS
-description:
- - Gather information about ec2 VPC subnets in AWS
- - This module was called C(ec2_vpc_subnet_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.1"
-author: "Rob White (@wimnat)"
-requirements:
- - boto3
- - botocore
-options:
- subnet_ids:
- description:
- - A list of subnet IDs to gather information for.
- version_added: "2.5"
- aliases: ['subnet_id']
- type: list
- elements: str
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
- type: dict
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all VPC subnets
-- ec2_vpc_subnet_info:
-
-# Gather information about a particular VPC subnet using ID
-- ec2_vpc_subnet_info:
- subnet_ids: subnet-00112233
-
-# Gather information about any VPC subnet with a tag key Name and value Example
-- ec2_vpc_subnet_info:
- filters:
- "tag:Name": Example
-
-# Gather information about any VPC subnet within VPC with ID vpc-abcdef00
-- ec2_vpc_subnet_info:
- filters:
- vpc-id: vpc-abcdef00
-
-# Gather information about a set of VPC subnets, publicA, publicB and publicC within a
-# VPC with ID vpc-abcdef00, then use the jinja map and flatten filters to return
-# the subnet_ids as a list.
-
-- ec2_vpc_subnet_info:
- filters:
- vpc-id: vpc-abcdef00
- "tag:Name": "{{ item }}"
- loop:
- - publicA
- - publicB
- - publicC
- register: subnet_info
-
-- set_fact:
- subnet_ids: "{{ subnet_info.subnets|map(attribute='id')|list }}"
-'''
-
-RETURN = '''
-subnets:
- description: Returns an array of complex objects as described below.
- returned: success
- type: complex
- contains:
- subnet_id:
- description: The ID of the Subnet.
- returned: always
- type: str
- id:
- description: The ID of the Subnet (for backwards compatibility).
- returned: always
- type: str
- vpc_id:
-      description: The ID of the VPC.
- returned: always
- type: str
- state:
- description: The state of the subnet.
- returned: always
- type: str
- tags:
- description: A dict of tags associated with the Subnet.
- returned: always
- type: dict
- map_public_ip_on_launch:
- description: True/False depending on attribute setting for public IP mapping.
- returned: always
- type: bool
- default_for_az:
- description: True if this is the default subnet for AZ.
- returned: always
- type: bool
- cidr_block:
- description: The IPv4 CIDR block assigned to the subnet.
- returned: always
- type: str
- available_ip_address_count:
- description: Count of available IPs in subnet.
- returned: always
- type: str
- availability_zone:
- description: The availability zone where the subnet exists.
- returned: always
- type: str
- assign_ipv6_address_on_creation:
- description: True/False depending on attribute setting for IPv6 address assignment.
- returned: always
- type: bool
- ipv6_cidr_block_association_set:
- description: An array of IPv6 cidr block association set information.
- returned: always
- type: complex
- contains:
- association_id:
- description: The association ID
- returned: always
- type: str
- ipv6_cidr_block:
- description: The IPv6 CIDR block that is associated with the subnet.
- returned: always
- type: str
- ipv6_cidr_block_state:
- description: A hash/dict that contains a single item. The state of the cidr block association.
- returned: always
- type: dict
- contains:
- state:
- description: The CIDR block association state.
- returned: always
- type: str
-'''
-
-import traceback
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (
- boto3_conn,
- ec2_argument_spec,
- get_aws_connection_info,
- AWSRetry,
- HAS_BOTO3,
- boto3_tag_list_to_ansible_dict,
- camel_dict_to_snake_dict,
- ansible_dict_to_boto3_filter_list
-)
-from ansible.module_utils._text import to_native
-
-try:
- import botocore
-except ImportError:
- pass # caught by imported HAS_BOTO3
-
-
-@AWSRetry.exponential_backoff()
-def describe_subnets_with_backoff(connection, subnet_ids, filters):
- """
- Describe Subnets with AWSRetry backoff throttling support.
-
- connection : boto3 client connection object
- subnet_ids : list of subnet ids for which to gather information
- filters : additional filters to apply to request
- """
- return connection.describe_subnets(SubnetIds=subnet_ids, Filters=filters)
-
-
-def describe_subnets(connection, module):
- """
- Describe Subnets.
-
- module : AnsibleModule object
- connection : boto3 client connection object
- """
- # collect parameters
- filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
- subnet_ids = module.params.get('subnet_ids')
-
- if subnet_ids is None:
- # Set subnet_ids to empty list if it is None
- subnet_ids = []
-
- # init empty list for return vars
- subnet_info = list()
-
- # Get the basic VPC info
- try:
- response = describe_subnets_with_backoff(connection, subnet_ids, filters)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
- for subnet in response['Subnets']:
- # for backwards compatibility
- subnet['id'] = subnet['SubnetId']
- subnet_info.append(camel_dict_to_snake_dict(subnet))
- # convert tag list to ansible dict
- subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', []))
-
- module.exit_json(subnets=subnet_info)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- subnet_ids=dict(type='list', default=[], aliases=['subnet_id']),
- filters=dict(type='dict', default={})
- ))
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
- if module._name == 'ec2_vpc_subnet_facts':
- module.deprecate("The 'ec2_vpc_subnet_facts' module has been renamed to 'ec2_vpc_subnet_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-
- if region:
- try:
- connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
- except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
-            # NoCredentialsError and ProfileNotFound are BotoCoreErrors with no .response
-            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- else:
- module.fail_json(msg="Region must be specified")
-
- describe_subnets(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/s3_bucket.py b/lib/ansible/modules/cloud/amazon/s3_bucket.py
deleted file mode 100644
index e0439d0531..0000000000
--- a/lib/ansible/modules/cloud/amazon/s3_bucket.py
+++ /dev/null
@@ -1,767 +0,0 @@
-#!/usr/bin/python
-#
-# This is a free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This Ansible library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this library. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: s3_bucket
-short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
-description:
- - Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
-version_added: "2.0"
-requirements: [ boto3 ]
-author: "Rob White (@wimnat)"
-options:
- force:
- description:
- - When trying to delete a bucket, delete all keys (including versions and delete markers)
- in the bucket first (an s3 bucket must be empty for a successful deletion)
- type: bool
- default: 'no'
- name:
- description:
- - Name of the s3 bucket
- required: true
- type: str
- policy:
- description:
- - The JSON policy as a string.
- type: json
- s3_url:
- description:
- - S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and fakes3 etc.
- - Assumes AWS if not specified.
-      - For Walrus, use the FQDN of the endpoint without scheme or path.
- aliases: [ S3_URL ]
- type: str
- ceph:
- description:
- - Enable API compatibility with Ceph. It takes into account the S3 API subset working
- with Ceph in order to provide the same module behaviour where possible.
- type: bool
- version_added: "2.2"
- requester_pays:
- description:
- - With Requester Pays buckets, the requester instead of the bucket owner pays the cost
- of the request and the data download from the bucket.
- type: bool
- default: False
- state:
- description:
- - Create or remove the s3 bucket
- required: false
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- tags:
- description:
- - tags dict to apply to bucket
- type: dict
- purge_tags:
- description:
- - whether to remove tags that aren't present in the C(tags) parameter
- type: bool
- default: True
- version_added: "2.9"
- versioning:
- description:
- - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
- type: bool
- encryption:
- description:
- - Describes the default server-side encryption to apply to new objects in the bucket.
- In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly.
- choices: [ 'none', 'AES256', 'aws:kms' ]
- version_added: "2.9"
- type: str
- encryption_key_id:
- description: KMS master key ID to use for the default encryption. This parameter is allowed if encryption is aws:kms. If
- not specified then it will default to the AWS provided KMS key.
- version_added: "2.9"
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-notes:
-    - If the endpoint does not implement the C(requestPayment), C(policy), C(tagging)
-      or C(versioning) operations/API, the module does not fail as long as the related
-      parameters are left at their defaults, i.e. I(requester_pays) is C(False) and
-      I(policy), I(tags), and I(versioning) are C(None).
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create a simple s3 bucket
-- s3_bucket:
- name: mys3bucket
- state: present
-
-# Create a simple s3 bucket on Ceph Rados Gateway
-- s3_bucket:
- name: mys3bucket
- s3_url: http://your-ceph-rados-gateway-server.xxx
- ceph: true
-
-# Remove an s3 bucket and any keys it contains
-- s3_bucket:
- name: mys3bucket
- state: absent
- force: yes
-
-# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
-- s3_bucket:
- name: mys3bucket
- policy: "{{ lookup('file','policy.json') }}"
- requester_pays: yes
- versioning: yes
- tags:
- example: tag1
- another: tag2
-
-# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
-- s3_bucket:
- name: mydobucket
- s3_url: 'https://nyc3.digitaloceanspaces.com'
-
-# Create a bucket with AES256 encryption
-- s3_bucket:
- name: mys3bucket
- state: present
- encryption: "AES256"
-
-# Create a bucket with aws:kms encryption, KMS key
-- s3_bucket:
- name: mys3bucket
- state: present
- encryption: "aws:kms"
- encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example"
-
-# Create a bucket with aws:kms encryption, default key
-- s3_bucket:
- name: mys3bucket
- state: present
- encryption: "aws:kms"
-'''
-
-import json
-import os
-import time
-
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ansible.module_utils.six import string_types
-from ansible.module_utils.basic import to_text
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import compare_policies, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
-from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def create_or_update_bucket(s3_client, module, location):
-
- policy = module.params.get("policy")
- name = module.params.get("name")
- requester_pays = module.params.get("requester_pays")
- tags = module.params.get("tags")
- purge_tags = module.params.get("purge_tags")
- versioning = module.params.get("versioning")
- encryption = module.params.get("encryption")
- encryption_key_id = module.params.get("encryption_key_id")
- changed = False
- result = {}
-
- try:
- bucket_is_present = bucket_exists(s3_client, name)
- except EndpointConnectionError as e:
- module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to check bucket presence")
-
- if not bucket_is_present:
- try:
- bucket_changed = create_bucket(s3_client, name, location)
- s3_client.get_waiter('bucket_exists').wait(Bucket=name)
- changed = changed or bucket_changed
- except WaiterError as e:
- module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed while creating bucket")
-
- # Versioning
- try:
- versioning_status = get_bucket_versioning(s3_client, name)
- except BotoCoreError as exp:
- module.fail_json_aws(exp, msg="Failed to get bucket versioning")
- except ClientError as exp:
- if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None:
- module.fail_json_aws(exp, msg="Failed to get bucket versioning")
- else:
- if versioning is not None:
- required_versioning = None
- if versioning and versioning_status.get('Status') != "Enabled":
- required_versioning = 'Enabled'
- elif not versioning and versioning_status.get('Status') == "Enabled":
- required_versioning = 'Suspended'
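-            # S3 versioning can never return to 'Disabled' once enabled, so turning
-            # versioning off on an 'Enabled' bucket maps to 'Suspended'.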
-
- if required_versioning:
- try:
- put_bucket_versioning(s3_client, name, required_versioning)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to update bucket versioning")
-
- versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
-
- # This output format is there to ensure compatibility with previous versions of the module
- result['versioning'] = {
- 'Versioning': versioning_status.get('Status', 'Disabled'),
- 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
- }
-
- # Requester pays
- try:
- requester_pays_status = get_bucket_request_payment(s3_client, name)
- except BotoCoreError as exp:
- module.fail_json_aws(exp, msg="Failed to get bucket request payment")
- except ClientError as exp:
- if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or requester_pays:
- module.fail_json_aws(exp, msg="Failed to get bucket request payment")
- else:
- if requester_pays:
- payer = 'Requester' if requester_pays else 'BucketOwner'
- if requester_pays_status != payer:
- put_bucket_request_payment(s3_client, name, payer)
- requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
- if requester_pays_status is None:
-                    # We have seen quite often that the put request is not taken
-                    # into account, so we retry one more time
- put_bucket_request_payment(s3_client, name, payer)
- requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
- changed = True
-
- result['requester_pays'] = requester_pays
-
- # Policy
- try:
- current_policy = get_bucket_policy(s3_client, name)
- except BotoCoreError as exp:
- module.fail_json_aws(exp, msg="Failed to get bucket policy")
- except ClientError as exp:
- if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None:
- module.fail_json_aws(exp, msg="Failed to get bucket policy")
- else:
- if policy is not None:
- if isinstance(policy, string_types):
- policy = json.loads(policy)
-
- if not policy and current_policy:
- try:
- delete_bucket_policy(s3_client, name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to delete bucket policy")
- current_policy = wait_policy_is_applied(module, s3_client, name, policy)
- changed = True
- elif compare_policies(current_policy, policy):
- try:
- put_bucket_policy(s3_client, name, policy)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to update bucket policy")
- current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
- if current_policy is None:
- # As with request payment, the put request quite often fails to take
- # effect immediately, so we retry one more time
- put_bucket_policy(s3_client, name, policy)
- current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
- changed = True
-
- result['policy'] = current_policy
-
- # Tags
- try:
- current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
- except BotoCoreError as exp:
- module.fail_json_aws(exp, msg="Failed to get bucket tags")
- except ClientError as exp:
- if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or tags is not None:
- module.fail_json_aws(exp, msg="Failed to get bucket tags")
- else:
- if tags is not None:
- # Tags are always returned as text
- tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
- if not purge_tags:
- # Ensure existing tags that aren't updated by desired tags remain
- current_copy = current_tags_dict.copy()
- current_copy.update(tags)
- tags = current_copy
- if current_tags_dict != tags:
- if tags:
- try:
- put_bucket_tagging(s3_client, name, tags)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to update bucket tags")
- else:
- if purge_tags:
- try:
- delete_bucket_tagging(s3_client, name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to delete bucket tags")
- current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
- changed = True
-
- result['tags'] = current_tags_dict
-
- # Encryption
- try:
- current_encryption = get_bucket_encryption(s3_client, name)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to get bucket encryption")
-
- if encryption is not None:
- current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
- current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
- if encryption == 'none' and current_encryption_algorithm is not None:
- try:
- delete_bucket_encryption(s3_client, name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to delete bucket encryption")
- current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
- changed = True
- elif encryption != 'none' and (encryption != current_encryption_algorithm or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id)):
- expected_encryption = {'SSEAlgorithm': encryption}
- if encryption == 'aws:kms' and encryption_key_id is not None:
- expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
- current_encryption = put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption)
- changed = True
-
- result['encryption'] = current_encryption
-
- module.exit_json(changed=changed, name=name, **result)
-
-
-def bucket_exists(s3_client, bucket_name):
- # head_bucket appeared to be really inconsistent, so we use list_buckets instead,
- # and loop over all the buckets, even if we know it's less performant :(
- all_buckets = s3_client.list_buckets()['Buckets']
- return any(bucket['Name'] == bucket_name for bucket in all_buckets)
-
-
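-# Note: AWSRetry.exponential_backoff retries the wrapped boto3 call when AWS
-# returns a throttling error (or one of the catch_extra_error_codes), sleeping
-# with exponentially increasing delays capped at max_delay seconds.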
-@AWSRetry.exponential_backoff(max_delay=120)
-def create_bucket(s3_client, bucket_name, location):
- try:
- configuration = {}
- if location not in ('us-east-1', None):
- configuration['LocationConstraint'] = location
- if len(configuration) > 0:
- s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
- else:
- s3_client.create_bucket(Bucket=bucket_name)
- return True
- except ClientError as e:
- error_code = e.response['Error']['Code']
- if error_code == 'BucketAlreadyOwnedByYou':
- # We should never get here since we check bucket presence before calling the
- # create_or_update_bucket method. However, the AWS API sometimes fails to report
- # bucket presence, so we catch this exception
- return False
- else:
- raise e
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def put_bucket_tagging(s3_client, bucket_name, tags):
- s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def put_bucket_policy(s3_client, bucket_name, policy):
- s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def delete_bucket_policy(s3_client, bucket_name):
- s3_client.delete_bucket_policy(Bucket=bucket_name)
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def get_bucket_policy(s3_client, bucket_name):
- try:
- current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchBucketPolicy':
- current_policy = None
- else:
- raise e
- return current_policy
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def put_bucket_request_payment(s3_client, bucket_name, payer):
- s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def get_bucket_request_payment(s3_client, bucket_name):
- return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def get_bucket_versioning(s3_client, bucket_name):
- return s3_client.get_bucket_versioning(Bucket=bucket_name)
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def put_bucket_versioning(s3_client, bucket_name, required_versioning):
- s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def get_bucket_encryption(s3_client, bucket_name):
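- # botocore releases older than 1.7.41 do not expose get_bucket_encryption on
- # the client, so treat them the same as "no default encryption configured"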
- if not hasattr(s3_client, "get_bucket_encryption"):
- return None
-
- try:
- result = s3_client.get_bucket_encryption(Bucket=bucket_name)
- return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
- except ClientError as e:
- if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
- return None
- else:
- raise e
- except (IndexError, KeyError):
- return None
-
-
-def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption):
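- # A successful put_bucket_encryption call is not always immediately visible to
- # a subsequent read, so retry the put until the read-back matches, up to
- # max_retries attempts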
- max_retries = 3
- for retries in range(1, max_retries + 1):
- try:
- put_bucket_encryption(s3_client, name, expected_encryption)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to set bucket encryption")
- current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption,
- should_fail=(retries == max_retries), retries=5)
- if current_encryption == expected_encryption:
- return current_encryption
-
- # We should never get here: current_encryption can only still differ from
- # expected_encryption on the final retry, and in that case
- # wait_encryption_is_applied has already called module.fail_json and failed out.
- module.fail_json(msg='Failed to apply bucket encryption',
- current=current_encryption, expected=expected_encryption, retries=retries)
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def put_bucket_encryption(s3_client, bucket_name, encryption):
- server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
- s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def delete_bucket_tagging(s3_client, bucket_name):
- s3_client.delete_bucket_tagging(Bucket=bucket_name)
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
-def delete_bucket_encryption(s3_client, bucket_name):
- s3_client.delete_bucket_encryption(Bucket=bucket_name)
-
-
-@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=['OperationAborted'])
-def delete_bucket(s3_client, bucket_name):
- try:
- s3_client.delete_bucket(Bucket=bucket_name)
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchBucket':
- # This means the bucket was already in a deleting state when we checked its
- # existence, so we just ignore the error
- pass
- else:
- raise e
-
-
-def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
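- # Poll about once every 5 seconds, for up to a minute, until the live policy
- # matches the requested one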
- for dummy in range(0, 12):
- try:
- current_policy = get_bucket_policy(s3_client, bucket_name)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to get bucket policy")
-
- if compare_policies(current_policy, expected_policy):
- time.sleep(5)
- else:
- return current_policy
- if should_fail:
- module.fail_json(msg="Bucket policy failed to apply in the expected time",
- requested_policy=expected_policy, live_policy=current_policy)
- else:
- return None
-
-
-def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
- for dummy in range(0, 12):
- try:
- requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get bucket request payment")
- if requester_pays_status != expected_payer:
- time.sleep(5)
- else:
- return requester_pays_status
- if should_fail:
- module.fail_json(msg="Bucket request payment failed to apply in the expected time",
- requested_status=expected_payer, live_status=requester_pays_status)
- else:
- return None
-
-
-def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
- for dummy in range(0, retries):
- try:
- encryption = get_bucket_encryption(s3_client, bucket_name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
- if encryption != expected_encryption:
- time.sleep(5)
- else:
- return encryption
-
- if should_fail:
- module.fail_json(msg="Bucket encryption failed to apply in the expected time",
- requested_encryption=expected_encryption, live_encryption=encryption)
-
- return encryption
-
-
-def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
- for dummy in range(0, 24):
- try:
- versioning_status = get_bucket_versioning(s3_client, bucket_name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
- if versioning_status.get('Status') != required_versioning:
- time.sleep(8)
- else:
- return versioning_status
- module.fail_json(msg="Bucket versioning failed to apply in the expected time",
- requested_versioning=required_versioning, live_versioning=versioning_status)
-
-
-def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
- for dummy in range(0, 12):
- try:
- current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to get bucket policy")
- if current_tags_dict != expected_tags_dict:
- time.sleep(5)
- else:
- return current_tags_dict
- module.fail_json(msg="Bucket tags failed to apply in the expected time",
- requested_tags=expected_tags_dict, live_tags=current_tags_dict)
-
-
-def get_current_bucket_tags_dict(s3_client, bucket_name):
- try:
- current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchTagSet':
- return {}
- raise e
-
- return boto3_tag_list_to_ansible_dict(current_tags)
-
-
-def paginated_list(s3_client, **pagination_params):
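- # Yield object keys one page at a time; list_objects_v2 returns at most 1000
- # keys per page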
- pg = s3_client.get_paginator('list_objects_v2')
- for page in pg.paginate(**pagination_params):
- yield [data['Key'] for data in page.get('Contents', [])]
-
-
-def paginated_versions_list(s3_client, **pagination_params):
- try:
- pg = s3_client.get_paginator('list_object_versions')
- for page in pg.paginate(**pagination_params):
- # We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion
- yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
- except is_boto3_error_code('NoSuchBucket'):
- yield []
-
-
-def destroy_bucket(s3_client, module):
-
- force = module.params.get("force")
- name = module.params.get("name")
- try:
- bucket_is_present = bucket_exists(s3_client, name)
- except EndpointConnectionError as e:
- module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to check bucket presence")
-
- if not bucket_is_present:
- module.exit_json(changed=False)
-
- if force:
- # if there are contents then we need to delete them (including versions) before we can delete the bucket
- try:
- for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
- formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
- for fk in formatted_keys:
- # remove VersionId from cases where they are `None` so that
- # unversioned objects are deleted using `DeleteObject`
- # rather than `DeleteObjectVersion`, improving backwards
- # compatibility with older IAM policies.
- if not fk.get('VersionId'):
- fk.pop('VersionId')
-
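- # each page from the paginator holds at most 1000 keys, which is also
- # the per-request limit of delete_objects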
- if formatted_keys:
- resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
- if resp.get('Errors'):
- module.fail_json(
- msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
- ', '.join([k['Key'] for k in resp['Errors']])
- ),
- errors=resp['Errors'], response=resp
- )
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed while deleting bucket")
-
- try:
- delete_bucket(s3_client, name)
- s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
- except WaiterError as e:
- module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to delete bucket")
-
- module.exit_json(changed=True)
-
-
-def is_fakes3(s3_url):
- """ Return True if s3_url has scheme fakes3:// """
- if s3_url is not None:
- return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
- else:
- return False
-
-
-def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
- if s3_url and ceph: # TODO - test this
- ceph = urlparse(s3_url)
- params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
- elif is_fakes3(s3_url):
- fakes3 = urlparse(s3_url)
- port = fakes3.port
- if fakes3.scheme == 'fakes3s':
- protocol = "https"
- if port is None:
- port = 443
- else:
- protocol = "http"
- if port is None:
- port = 80
- params = dict(module=module, conn_type='client', resource='s3', region=location,
- endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
- use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
- else:
- params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
- return boto3_conn(**params)
-
-
-def main():
-
- argument_spec = dict(
- force=dict(default=False, type='bool'),
- policy=dict(type='json'),
- name=dict(required=True),
- requester_pays=dict(default=False, type='bool'),
- s3_url=dict(aliases=['S3_URL']),
- state=dict(default='present', choices=['present', 'absent']),
- tags=dict(type='dict'),
- purge_tags=dict(type='bool', default=True),
- versioning=dict(type='bool'),
- ceph=dict(default=False, type='bool'),
- encryption=dict(choices=['none', 'AES256', 'aws:kms']),
- encryption_key_id=dict()
- )
-
- required_by = dict(
- encryption_key_id=('encryption',),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec, required_by=required_by
- )
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
-
- if region in ('us-east-1', '', None):
- # default to US Standard region
- location = 'us-east-1'
- else:
- # Boto uses symbolic names for locations but region strings will
- # actually work fine for everything except us-east-1 (US Standard)
- location = region
-
- s3_url = module.params.get('s3_url')
- ceph = module.params.get('ceph')
-
- # allow eucarc environment variables to be used if ansible vars aren't set
- if not s3_url and 'S3_URL' in os.environ:
- s3_url = os.environ['S3_URL']
-
- if ceph and not s3_url:
- module.fail_json(msg='ceph flavour requires s3_url')
-
- # Look at s3_url and tweak connection settings
- # if connecting to Ceph RGW, Walrus or fakes3
- if s3_url:
- for key in ['validate_certs', 'security_token', 'profile_name']:
- aws_connect_kwargs.pop(key, None)
- s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)
-
- if s3_client is None: # this should never happen
- module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')
-
- state = module.params.get("state")
- encryption = module.params.get("encryption")
- encryption_key_id = module.params.get("encryption_key_id")
-
- if not hasattr(s3_client, "get_bucket_encryption"):
- if encryption is not None:
- module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")
-
- # Parameter validation
- if encryption_key_id is not None and encryption != 'aws:kms':
- module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")
-
- if state == 'present':
- create_or_update_bucket(s3_client, module, location)
- elif state == 'absent':
- destroy_bucket(s3_client, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/plugins/action/aws_s3.py b/lib/ansible/plugins/action/aws_s3.py
deleted file mode 100644
index a454922a10..0000000000
--- a/lib/ansible/plugins/action/aws_s3.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2018, Will Thames <will@thames.id.au>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-
-from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound
-from ansible.module_utils._text import to_text
-from ansible.plugins.action import ActionBase
-from ansible.utils.vars import merge_hash
-
-
-class ActionModule(ActionBase):
-
- TRANSFERS_FILES = True
-
- def run(self, tmp=None, task_vars=None):
- ''' handler for aws_s3 operations '''
- self._supports_async = True
-
- if task_vars is None:
- task_vars = dict()
-
- result = super(ActionModule, self).run(tmp, task_vars)
- del tmp # tmp no longer has any effect
-
- source = self._task.args.get('src', None)
-
- try:
- new_module_args = self._task.args.copy()
- if source:
- source = os.path.expanduser(source)
-
- # For backward compatibility check if the file exists on the remote; it should take precedence
- if not self._remote_file_exists(source):
- try:
- source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False)
- new_module_args['src'] = source
- except AnsibleFileNotFound as e:
- # module handles error message for nonexistent files
- new_module_args['src'] = source
- except AnsibleError as e:
- raise AnsibleActionFail(to_text(e))
-
- wrap_async = self._task.async_val and not self._connection.has_native_async
- # execute the aws_s3 module with the updated args
- result = merge_hash(result, self._execute_module(module_args=new_module_args, task_vars=task_vars, wrap_async=wrap_async))
-
- if not wrap_async:
- # remove a temporary path we created
- self._remove_tmp_path(self._connection._shell.tmpdir)
-
- except AnsibleAction as e:
- result.update(e.result)
- return result
diff --git a/lib/ansible/plugins/callback/aws_resource_actions.py b/lib/ansible/plugins/callback/aws_resource_actions.py
deleted file mode 100644
index f871fe5479..0000000000
--- a/lib/ansible/plugins/callback/aws_resource_actions.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# (C) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- callback: aws_resource_actions
- type: aggregate
- short_description: summarizes all "resource:actions" completed
- version_added: "2.8"
- description:
- - Ansible callback plugin for collecting the AWS actions completed by all boto3 modules using
- AnsibleAWSModule in a playbook. Botocore endpoint logs need to be enabled for those modules, which can
- be done easily by setting debug_botocore_endpoint_logs to True for group/aws using module_defaults.
- requirements:
- - whitelisting in configuration - see examples section below for details.
-'''
-
-EXAMPLES = '''
-example: >
- To enable, add this to your ansible.cfg file in the defaults block
- [defaults]
- callback_whitelist = aws_resource_actions
-sample output: >
-#
-# AWS ACTIONS: ['s3:PutBucketAcl', 's3:HeadObject', 's3:DeleteObject', 's3:PutObjectAcl', 's3:CreateMultipartUpload',
-# 's3:DeleteBucket', 's3:GetObject', 's3:DeleteObjects', 's3:CreateBucket', 's3:CompleteMultipartUpload',
-# 's3:ListObjectsV2', 's3:HeadBucket', 's3:UploadPart', 's3:PutObject']
-#
-sample output: >
-#
-# AWS ACTIONS: ['ec2:DescribeVpcAttribute', 'ec2:DescribeVpcClassicLink', 'ec2:ModifyVpcAttribute', 'ec2:CreateTags',
-# 'sts:GetCallerIdentity', 'ec2:DescribeSecurityGroups', 'ec2:DescribeTags', 'ec2:DescribeVpcs', 'ec2:CreateVpc']
-#
-'''
-
-from ansible.plugins.callback import CallbackBase
-from ansible.module_utils._text import to_native
-
-
-class CallbackModule(CallbackBase):
- CALLBACK_VERSION = 2.8
- CALLBACK_TYPE = 'aggregate'
- CALLBACK_NAME = 'aws_resource_actions'
- CALLBACK_NEEDS_WHITELIST = True
-
- def __init__(self):
- self.aws_resource_actions = []
- super(CallbackModule, self).__init__()
-
- def extend_aws_resource_actions(self, result):
- if result.get('resource_actions'):
- self.aws_resource_actions.extend(result['resource_actions'])
-
- def runner_on_ok(self, host, res):
- self.extend_aws_resource_actions(res)
-
- def runner_on_failed(self, host, res, ignore_errors=False):
- self.extend_aws_resource_actions(res)
-
- def v2_runner_item_on_ok(self, result):
- self.extend_aws_resource_actions(result._result)
-
- def v2_runner_item_on_failed(self, result):
- self.extend_aws_resource_actions(result._result)
-
- def playbook_on_stats(self, stats):
- if self.aws_resource_actions:
- self.aws_resource_actions = sorted(list(to_native(action) for action in set(self.aws_resource_actions)))
- self._display.display("AWS ACTIONS: {0}".format(self.aws_resource_actions))
diff --git a/lib/ansible/plugins/doc_fragments/aws.py b/lib/ansible/plugins/doc_fragments/aws.py
deleted file mode 100644
index 668955196f..0000000000
--- a/lib/ansible/plugins/doc_fragments/aws.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2014, Will Thames <will@thames.id.au>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-class ModuleDocFragment(object):
-
- # AWS only documentation fragment
- DOCUMENTATION = r'''
-options:
- debug_botocore_endpoint_logs:
- description:
- - Use a botocore.endpoint logger to parse the unique (rather than total) "resource:action" API calls made during a task, outputting
- the set to the resource_actions key in the task results. Use the aws_resource_actions callback to output the total list made during
- a playbook. The ANSIBLE_DEBUG_BOTOCORE_LOGS environment variable may also be used.
- type: bool
- default: 'no'
- version_added: "2.8"
- ec2_url:
- description:
- - URL to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints).
- Ignored for modules where region is required. Must be specified for all other modules if region is not used.
- If not set then the value of the EC2_URL environment variable, if any, is used.
- type: str
- aws_secret_key:
- description:
- - AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used.
- type: str
- aliases: [ ec2_secret_key, secret_key ]
- aws_access_key:
- description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used.
- type: str
- aliases: [ ec2_access_key, access_key ]
- security_token:
- description:
- - AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used.
- type: str
- aliases: [ access_token ]
- version_added: "1.6"
- validate_certs:
- description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
- type: bool
- default: yes
- version_added: "1.5"
- profile:
- description:
- - Uses a boto profile. Only works with boto >= 2.24.0.
- type: str
- version_added: "1.6"
- aws_config:
- description:
- - A dictionary to modify the botocore configuration.
- - Parameters can be found at U(https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config).
- - Only the 'user_agent' key is used for boto modules. See U(http://boto.cloudhackers.com/en/latest/boto_config_tut.html#boto) for more boto configuration.
- type: dict
- version_added: "2.10"
-requirements:
- - python >= 2.6
- - boto
-notes:
- - If parameters are not set within the module, the following
- environment variables can be used in decreasing order of precedence
- C(AWS_URL) or C(EC2_URL),
- C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
- C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
- C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
- C(AWS_REGION) or C(EC2_REGION)
- - Ansible uses the boto configuration file (typically ~/.boto) if no
- credentials are provided. See https://boto.readthedocs.io/en/latest/boto_config_tut.html
- - C(AWS_REGION) or C(EC2_REGION) can typically be used to specify the
- AWS region, when required, but this can also be configured in the boto config file
-'''
diff --git a/lib/ansible/plugins/doc_fragments/aws_credentials.py b/lib/ansible/plugins/doc_fragments/aws_credentials.py
deleted file mode 100644
index ef37ca1932..0000000000
--- a/lib/ansible/plugins/doc_fragments/aws_credentials.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-class ModuleDocFragment(object):
-
- # Plugin options for AWS credentials
- DOCUMENTATION = r'''
-options:
- aws_profile:
- description: The AWS profile
- type: str
- aliases: [ boto_profile ]
- env:
- - name: AWS_DEFAULT_PROFILE
- - name: AWS_PROFILE
- aws_access_key:
- description: The AWS access key to use.
- type: str
- aliases: [ aws_access_key_id ]
- env:
- - name: EC2_ACCESS_KEY
- - name: AWS_ACCESS_KEY
- - name: AWS_ACCESS_KEY_ID
- aws_secret_key:
- description: The AWS secret key that corresponds to the access key.
- type: str
- aliases: [ aws_secret_access_key ]
- env:
- - name: EC2_SECRET_KEY
- - name: AWS_SECRET_KEY
- - name: AWS_SECRET_ACCESS_KEY
- aws_security_token:
- description: The AWS security token if using temporary access and secret keys.
- type: str
- env:
- - name: EC2_SECURITY_TOKEN
- - name: AWS_SESSION_TOKEN
- - name: AWS_SECURITY_TOKEN
-'''
diff --git a/lib/ansible/plugins/doc_fragments/aws_region.py b/lib/ansible/plugins/doc_fragments/aws_region.py
deleted file mode 100644
index e214d78a2e..0000000000
--- a/lib/ansible/plugins/doc_fragments/aws_region.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-class ModuleDocFragment(object):
-
- # Plugin option for AWS region
- DOCUMENTATION = r'''
-options:
- region:
- description: The region for which to create the connection.
- type: str
- env:
- - name: EC2_REGION
- - name: AWS_REGION
-'''
diff --git a/lib/ansible/plugins/doc_fragments/ec2.py b/lib/ansible/plugins/doc_fragments/ec2.py
deleted file mode 100644
index 0ec0cf7a8d..0000000000
--- a/lib/ansible/plugins/doc_fragments/ec2.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Ansible, Inc
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-class ModuleDocFragment(object):
-
- # EC2 only documentation fragment
- DOCUMENTATION = r'''
-options:
- region:
- description:
- - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
- See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
- type: str
- aliases: [ aws_region, ec2_region ]
-'''
diff --git a/lib/ansible/plugins/inventory/aws_ec2.py b/lib/ansible/plugins/inventory/aws_ec2.py
deleted file mode 100644
index 5f75795616..0000000000
--- a/lib/ansible/plugins/inventory/aws_ec2.py
+++ /dev/null
@@ -1,659 +0,0 @@
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: aws_ec2
- plugin_type: inventory
- short_description: EC2 inventory source
- requirements:
- - boto3
- - botocore
- extends_documentation_fragment:
- - inventory_cache
- - constructed
- - aws_credentials
- description:
- - Get inventory hosts from Amazon Web Services EC2.
- - Uses a YAML configuration file that ends with C(aws_ec2.(yml|yaml)).
- notes:
- - If no credentials are provided and the control node has an associated IAM instance profile then the
- role will be used for authentication.
- author:
- - Sloane Hertel (@s-hertel)
- options:
- plugin:
- description: Token that ensures this is a source file for the plugin.
- required: True
- choices: ['aws_ec2']
- iam_role_arn:
- description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
- credentials with enough privilege to perform the AssumeRole action.
- version_added: '2.9'
- regions:
- description:
- - A list of regions in which to describe EC2 instances.
- - If empty (the default) this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
- type: list
- default: []
- hostnames:
- description:
- - A list in order of precedence for hostname variables.
- - You can use the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
- - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
- type: list
- default: []
- filters:
- description:
- - A dictionary of filter value pairs.
- - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
- type: dict
- default: {}
- include_extra_api_calls:
- description:
- - Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
- - Spot instances may be persistent and instances may have associated events.
- type: bool
- default: False
- version_added: '2.8'
- strict_permissions:
- description:
- - By default if a 403 (Forbidden) error code is encountered this plugin will fail.
- - You can set this option to False in the inventory config file, which will allow 403 errors to be gracefully skipped.
- type: bool
- default: True
- use_contrib_script_compatible_sanitization:
- description:
- - By default this plugin uses a general group name sanitization to create safe and usable group names for use in Ansible.
- This option allows you to override that, in an effort to ease migration from the old inventory script, and
- matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
- To replicate behavior of ``replace_dash_in_groups = True`` with constructed groups,
- you will need to replace hyphens with underscores via the regex_replace filter for those entries.
- - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
- otherwise the core engine will just use the standard sanitization on top.
- - This is not the default, as such names can break certain functionality: group names end up being used as
- Python identifiers, and not all of their characters are valid in one.
- type: bool
- default: False
- version_added: '2.8'
-'''
-
-EXAMPLES = '''
-# Minimal example using environment vars or instance role credentials
-# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
-plugin: aws_ec2
-regions:
- - us-east-1
-
-# Example using filters, ignoring permission errors, and specifying the hostname precedence
-plugin: aws_ec2
-boto_profile: aws_profile
-# Populate inventory with instances in these regions
-regions:
- - us-east-1
- - us-east-2
-filters:
- # All instances with their `Environment` tag set to `dev`
- tag:Environment: dev
- # All dev and QA hosts
- tag:Environment:
- - dev
- - qa
- instance.group-id: sg-xxxxxxxx
-# Ignores 403 errors rather than failing
-strict_permissions: False
-# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
-# inventory_hostname use compose (see example below).
-hostnames:
- - tag:Name=Tag1,Name=Tag2 # Return specific hosts only
- - tag:CustomDNSName
- - dns-name
- - private-ip-address
-
-# Example using constructed features to create groups and set ansible_host
-plugin: aws_ec2
-regions:
- - us-east-1
- - us-west-1
-# keyed_groups may be used to create custom groups
-strict: False
-keyed_groups:
- # Add e.g. x86_64 hosts to an arch_x86_64 group
- - prefix: arch
- key: 'architecture'
- # Add hosts to tag_Name_Value groups for each Name/Value tag pair
- - prefix: tag
- key: tags
- # Add hosts to e.g. instance_type_z3_tiny
- - prefix: instance_type
- key: instance_type
- # Create security_groups_sg_abcd1234 group for each SG
- - key: 'security_groups|json_query("[].group_id")'
- prefix: 'security_groups'
- # Create a group for each value of the Application tag
- - key: tags.Application
- separator: ''
- # Create a group per region e.g. aws_region_us_east_2
- - key: placement.region
- prefix: aws_region
- # Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
- - key: tags['Role']
- prefix: foo
- parent_group: "project"
-# Set individual variables with compose
-compose:
- # Use the private IP address to connect to the host
- # (note: this does not modify inventory_hostname, which is set via I(hostnames))
- ansible_host: private_ip_address
-'''
-
-import re
-
-from ansible.errors import AnsibleError
-from ansible.module_utils._text import to_native, to_text
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.utils.display import Display
-
-try:
- import boto3
- import botocore
-except ImportError:
- raise AnsibleError('The ec2 dynamic inventory plugin requires boto3 and botocore.')
-
-display = Display()
-
-# Each mapping gives, for a filter name, the chain of keys used to extract the
-# corresponding value from the instance data returned by boto3's EC2
-# describe_instances method.
-
-instance_meta_filter_to_boto_attr = {
- 'group-id': ('Groups', 'GroupId'),
- 'group-name': ('Groups', 'GroupName'),
- 'network-interface.attachment.instance-owner-id': ('OwnerId',),
- 'owner-id': ('OwnerId',),
- 'requester-id': ('RequesterId',),
- 'reservation-id': ('ReservationId',),
-}
-
-instance_data_filter_to_boto_attr = {
- 'affinity': ('Placement', 'Affinity'),
- 'architecture': ('Architecture',),
- 'availability-zone': ('Placement', 'AvailabilityZone'),
- 'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
- 'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
- 'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
- 'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
- 'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
- 'client-token': ('ClientToken',),
- 'dns-name': ('PublicDnsName',),
- 'host-id': ('Placement', 'HostId'),
- 'hypervisor': ('Hypervisor',),
- 'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
- 'image-id': ('ImageId',),
- 'instance-id': ('InstanceId',),
- 'instance-lifecycle': ('InstanceLifecycle',),
- 'instance-state-code': ('State', 'Code'),
- 'instance-state-name': ('State', 'Name'),
- 'instance-type': ('InstanceType',),
- 'instance.group-id': ('SecurityGroups', 'GroupId'),
- 'instance.group-name': ('SecurityGroups', 'GroupName'),
- 'ip-address': ('PublicIpAddress',),
- 'kernel-id': ('KernelId',),
- 'key-name': ('KeyName',),
- 'launch-index': ('AmiLaunchIndex',),
- 'launch-time': ('LaunchTime',),
- 'monitoring-state': ('Monitoring', 'State'),
- 'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
- 'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
- 'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
- 'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
- 'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
- 'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
- 'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
- 'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
- 'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
- 'network-interface.attachment.instance-id': ('InstanceId',),
- 'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
- 'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
- 'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
- 'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
- 'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
- 'network-interface.description': ('NetworkInterfaces', 'Description'),
- 'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
- 'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
- 'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
- 'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
- 'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
- 'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
- 'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
- # 'network-interface.requester-id': (),
- 'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
- 'network-interface.status': ('NetworkInterfaces', 'Status'),
- 'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
- 'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
- 'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
- 'placement-group-name': ('Placement', 'GroupName'),
- 'platform': ('Platform',),
- 'private-dns-name': ('PrivateDnsName',),
- 'private-ip-address': ('PrivateIpAddress',),
- 'product-code': ('ProductCodes', 'ProductCodeId'),
- 'product-code.type': ('ProductCodes', 'ProductCodeType'),
- 'ramdisk-id': ('RamdiskId',),
- 'reason': ('StateTransitionReason',),
- 'root-device-name': ('RootDeviceName',),
- 'root-device-type': ('RootDeviceType',),
- 'source-dest-check': ('SourceDestCheck',),
- 'spot-instance-request-id': ('SpotInstanceRequestId',),
- 'state-reason-code': ('StateReason', 'Code'),
- 'state-reason-message': ('StateReason', 'Message'),
- 'subnet-id': ('SubnetId',),
- 'tag': ('Tags',),
- 'tag-key': ('Tags',),
- 'tag-value': ('Tags',),
- 'tenancy': ('Placement', 'Tenancy'),
- 'virtualization-type': ('VirtualizationType',),
- 'vpc-id': ('VpcId',),
-}
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
-
- NAME = 'aws_ec2'
-
- def __init__(self):
- super(InventoryModule, self).__init__()
-
- self.group_prefix = 'aws_ec2_'
-
- # credentials
- self.boto_profile = None
- self.aws_secret_access_key = None
- self.aws_access_key_id = None
- self.aws_security_token = None
- self.iam_role_arn = None
-
- def _compile_values(self, obj, attr):
- '''
- :param obj: A list or dict of instance attributes
- :param attr: A key
- :return The value(s) found via the attr
- '''
- if obj is None:
- return
-
- temp_obj = []
-
- if isinstance(obj, (list, tuple)):
- for each in obj:
- value = self._compile_values(each, attr)
- if value:
- temp_obj.append(value)
- else:
- temp_obj = obj.get(attr)
-
- has_indexes = isinstance(temp_obj, (list, tuple))
- if has_indexes and len(temp_obj) == 1:
- return temp_obj[0]
-
- return temp_obj
-
- def _get_boto_attr_chain(self, filter_name, instance):
- '''
- :param filter_name: The filter
- :param instance: instance dict returned by boto3 ec2 describe_instances()
- '''
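- # e.g. the filter 'ip-address' maps to ('PublicIpAddress',), so this chain
- # resolves to instance['PublicIpAddress']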
- allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
- if filter_name not in allowed_filters:
- raise AnsibleError("Invalid filter '%s' provided; filter must be one of %s." % (filter_name,
- allowed_filters))
- if filter_name in instance_data_filter_to_boto_attr:
- boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
- else:
- boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
-
- instance_value = instance
- for attribute in boto_attr_list:
- instance_value = self._compile_values(instance_value, attribute)
- return instance_value
-
- def _get_credentials(self):
- '''
- :return A dictionary of boto client credentials
- '''
- boto_params = {}
- for credential in (('aws_access_key_id', self.aws_access_key_id),
- ('aws_secret_access_key', self.aws_secret_access_key),
- ('aws_session_token', self.aws_security_token)):
- if credential[1]:
- boto_params[credential[0]] = credential[1]
-
- return boto_params
-
- def _get_connection(self, credentials, region='us-east-1'):
- try:
- connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- if self.boto_profile:
- try:
- connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- else:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- return connection
-
- def _boto3_assume_role(self, credentials, region):
- """
- Assume an IAM role passed by iam_role_arn parameter
-
- :return: a dict containing the credentials of the assumed role
- """
-
- iam_role_arn = self.iam_role_arn
-
- try:
- sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
- sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
- return dict(
- aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
- aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
- aws_session_token=sts_session['Credentials']['SessionToken']
- )
- except botocore.exceptions.ClientError as e:
- raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
-
- def _boto3_conn(self, regions):
- '''
- :param regions: A list of regions to create a boto3 client
-
- Generator that yields a boto3 client and the region
- '''
-
- credentials = self._get_credentials()
- iam_role_arn = self.iam_role_arn
-
- if not regions:
- try:
- # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
- client = self._get_connection(credentials)
- resp = client.describe_regions()
- regions = [x['RegionName'] for x in resp.get('Regions', [])]
- except botocore.exceptions.NoRegionError:
- # above seems to fail depending on boto3 version, ignore and let's try something else
- pass
-
- # fallback to local list hardcoded in boto3 if still no regions
- if not regions:
- session = boto3.Session()
- regions = session.get_available_regions('ec2')
-
- # I give up, now you MUST give me regions
- if not regions:
- raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')
-
- for region in regions:
- connection = self._get_connection(credentials, region)
- try:
- if iam_role_arn is not None:
- assumed_credentials = self._boto3_assume_role(credentials, region)
- else:
- assumed_credentials = credentials
- connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- if self.boto_profile:
- try:
- connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- else:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- yield connection, region
-
- def _get_instances_by_region(self, regions, filters, strict_permissions):
- '''
- :param regions: a list of regions in which to describe instances
- :param filters: a list of boto3 filter dictionaries
- :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
- :return A list of instance dictionaries
- '''
- all_instances = []
-
- for connection, region in self._boto3_conn(regions):
- try:
- # By default find non-terminated/terminating instances
- if not any([f['Name'] == 'instance-state-name' for f in filters]):
- filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
- paginator = connection.get_paginator('describe_instances')
- reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
- instances = []
- for r in reservations:
- new_instances = r['Instances']
- for instance in new_instances:
- instance.update(self._get_reservation_details(r))
- if self.get_option('include_extra_api_calls'):
- instance.update(self._get_event_set_and_persistence(connection, instance['InstanceId'], instance.get('SpotInstanceRequestId')))
- instances.extend(new_instances)
- except botocore.exceptions.ClientError as e:
- if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
- instances = []
- else:
- raise AnsibleError("Failed to describe instances: %s" % to_native(e))
- except botocore.exceptions.BotoCoreError as e:
- raise AnsibleError("Failed to describe instances: %s" % to_native(e))
-
- all_instances.extend(instances)
-
- return sorted(all_instances, key=lambda x: x['InstanceId'])
-
- def _get_reservation_details(self, reservation):
- return {
- 'OwnerId': reservation['OwnerId'],
- 'RequesterId': reservation.get('RequesterId', ''),
- 'ReservationId': reservation['ReservationId']
- }
-
- def _get_event_set_and_persistence(self, connection, instance_id, spot_instance):
- host_vars = {'Events': '', 'Persistent': False}
- try:
- kwargs = {'InstanceIds': [instance_id]}
- host_vars['Events'] = connection.describe_instance_status(**kwargs)['InstanceStatuses'][0].get('Events', '')
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- if not self.get_option('strict_permissions'):
- pass
- else:
- raise AnsibleError("Failed to describe instance status: %s" % to_native(e))
- if spot_instance:
- try:
- kwargs = {'SpotInstanceRequestIds': [spot_instance]}
- host_vars['Persistent'] = bool(
- connection.describe_spot_instance_requests(**kwargs)['SpotInstanceRequests'][0].get('Type') == 'persistent'
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- if not self.get_option('strict_permissions'):
- pass
- else:
- raise AnsibleError("Failed to describe spot instance requests: %s" % to_native(e))
- return host_vars
-
- def _get_tag_hostname(self, preference, instance):
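- # e.g. the preference 'tag:Name=web' yields the hostname 'Name_web' when the
- # instance's Name tag is 'web', while 'tag:Name' yields the tag's value itself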
- tag_hostnames = preference.split('tag:', 1)[1]
- if ',' in tag_hostnames:
- tag_hostnames = tag_hostnames.split(',')
- else:
- tag_hostnames = [tag_hostnames]
- tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
- for v in tag_hostnames:
- if '=' in v:
- tag_name, tag_value = v.split('=')
- if tags.get(tag_name) == tag_value:
- return to_text(tag_name) + "_" + to_text(tag_value)
- else:
- tag_value = tags.get(v)
- if tag_value:
- return to_text(tag_value)
- return None
-
- def _get_hostname(self, instance, hostnames):
- '''
- :param instance: an instance dict returned by boto3 ec2 describe_instances()
- :param hostnames: a list of hostname destination variables in order of preference
- :return the preferred identifier for the host
- '''
- if not hostnames:
- hostnames = ['dns-name', 'private-dns-name']
-
- hostname = None
- for preference in hostnames:
- if 'tag' in preference:
- if not preference.startswith('tag:'):
- raise AnsibleError("To name a host by tags name_value, use 'tag:name=value'.")
- hostname = self._get_tag_hostname(preference, instance)
- else:
- hostname = self._get_boto_attr_chain(preference, instance)
- if hostname:
- break
- if hostname:
- if ':' in to_text(hostname):
- return self._sanitize_group_name((to_text(hostname)))
- else:
- return to_text(hostname)
-
- def _query(self, regions, filters, strict_permissions):
- '''
- :param regions: a list of regions to query
- :param filters: a list of boto3 filter dictionaries
- :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
- '''
- return {'aws_ec2': self._get_instances_by_region(regions, filters, strict_permissions)}
-
- def _populate(self, groups, hostnames):
- for group in groups:
- group = self.inventory.add_group(group)
- self._add_hosts(hosts=groups[group], group=group, hostnames=hostnames)
- self.inventory.add_child('all', group)
-
- def _add_hosts(self, hosts, group, hostnames):
- '''
- :param hosts: a list of hosts to be added to a group
- :param group: the name of the group to which the hosts belong
- :param hostnames: a list of hostname destination variables in order of preference
- '''
- for host in hosts:
- hostname = self._get_hostname(host, hostnames)
-
- host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
- host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
-
- # Allow easier grouping by region
- host['placement']['region'] = host['placement']['availability_zone'][:-1]
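- # (e.g. availability zone 'us-east-1a' becomes region 'us-east-1')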
-
- if not hostname:
- continue
- self.inventory.add_host(hostname, group=group)
- for hostvar, hostval in host.items():
- self.inventory.set_variable(hostname, hostvar, hostval)
-
- # Use constructed if applicable
-
- strict = self.get_option('strict')
-
- # Composed variables
- self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
-
- # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
- self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
-
- # Create groups based on variable values and add the corresponding hosts to it
- self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
-
- def _set_credentials(self):
- '''
- Set the boto credential attributes from the plugin options or the botocore session
- '''
-
- self.boto_profile = self.get_option('aws_profile')
- self.aws_access_key_id = self.get_option('aws_access_key')
- self.aws_secret_access_key = self.get_option('aws_secret_key')
- self.aws_security_token = self.get_option('aws_security_token')
- self.iam_role_arn = self.get_option('iam_role_arn')
-
- if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
- session = botocore.session.get_session()
- try:
- credentials = session.get_credentials().get_frozen_credentials()
- except AttributeError:
- pass
- else:
- self.aws_access_key_id = credentials.access_key
- self.aws_secret_access_key = credentials.secret_key
- self.aws_security_token = credentials.token
-
- if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
- raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
- "inventory configuration file or set them as environment variables.")
-
- def verify_file(self, path):
- '''
- :param path: the path to the inventory config file
- :return True if the path points at a usable aws_ec2 inventory config file
- '''
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
- return True
- display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
- return False
-
- def parse(self, inventory, loader, path, cache=True):
-
- super(InventoryModule, self).parse(inventory, loader, path)
-
- self._read_config_data(path)
-
- if self.get_option('use_contrib_script_compatible_sanitization'):
- self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
-
- self._set_credentials()
-
- # get user specifications
- regions = self.get_option('regions')
- filters = ansible_dict_to_boto3_filter_list(self.get_option('filters'))
- hostnames = self.get_option('hostnames')
- strict_permissions = self.get_option('strict_permissions')
-
- cache_key = self.get_cache_key(path)
- # false when refresh_cache or --flush-cache is used
- if cache:
- # get the user-specified directive
- cache = self.get_option('cache')
-
- # Generate inventory
- cache_needs_update = False
- if cache:
- try:
- results = self._cache[cache_key]
- except KeyError:
- # if cache expires or cache file doesn't exist
- cache_needs_update = True
-
- if not cache or cache_needs_update:
- results = self._query(regions, filters, strict_permissions)
-
- self._populate(results, hostnames)
-
- # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
- # when the user is using caching, update the cached inventory
- if cache_needs_update or (not cache and self.get_option('cache')):
- self._cache[cache_key] = results
-
- @staticmethod
- def _legacy_script_compatible_group_sanitization(name):
-
-        # Note: while this mirrors what the contrib script used to do, it has many issues with unicode and usability in Python
- regex = re.compile(r"[^A-Za-z0-9\_\-]")
-
- return regex.sub('_', name)
diff --git a/lib/ansible/plugins/inventory/aws_rds.py b/lib/ansible/plugins/inventory/aws_rds.py
deleted file mode 100644
index 6b89032473..0000000000
--- a/lib/ansible/plugins/inventory/aws_rds.py
+++ /dev/null
@@ -1,326 +0,0 @@
-# Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: aws_rds
- plugin_type: inventory
- short_description: rds instance source
- description:
- - Get instances and clusters from Amazon Web Services RDS.
- - Uses a YAML configuration file that ends with aws_rds.(yml|yaml).
- options:
- regions:
- description: A list of regions in which to describe RDS instances and clusters. Available regions are listed here
- U(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)
- default: []
- filters:
- description: A dictionary of filter value pairs. Available filters are listed here
- U(https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-instances.html#options). If you filter by
- db-cluster-id and I(include_clusters) is True it will apply to clusters as well.
- default: {}
- strict_permissions:
-      description: By default, if an AccessDenied exception is encountered this plugin will fail. You can set strict_permissions to
-          False in the inventory config file, which allows restricted resources to be skipped gracefully instead.
- type: bool
- default: True
- include_clusters:
- description: Whether or not to query for Aurora clusters as well as instances
- type: bool
- default: False
- statuses:
- description: A list of desired states for instances/clusters to be added to inventory. Set to ['all'] as a shorthand to find everything.
- type: list
- default:
- - creating
- - available
- extends_documentation_fragment:
- - inventory_cache
- - constructed
- - aws_credentials
- requirements:
- - boto3
- - botocore
- author: Sloane Hertel (@s-hertel)
-'''
-
-EXAMPLES = '''
-plugin: aws_rds
-regions:
- - us-east-1
- - ca-central-1
-keyed_groups:
- - key: 'db_parameter_groups|json_query("[].db_parameter_group_name")'
- prefix: rds_parameter_group
- - key: engine
- prefix: rds
- - key: tags
- - key: region
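-
-# A further illustrative config (values are placeholders): restrict the
-# inventory to available Aurora clusters and instances for one cluster id.
-plugin: aws_rds
-regions:
-  - us-east-1
-include_clusters: True
-filters:
-  db-cluster-id: example-cluster
-statuses:
-  - available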
-'''
-
-from ansible.errors import AnsibleError
-from ansible.module_utils._text import to_native
-from ansible.module_utils.aws.core import is_boto3_error_code
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-
-try:
- import boto3
- import botocore
-except ImportError:
- raise AnsibleError('The RDS dynamic inventory plugin requires boto3 and botocore.')
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
-
- NAME = 'aws_rds'
-
- def __init__(self):
- super(InventoryModule, self).__init__()
- self.credentials = {}
- self.boto_profile = None
-
- def _boto3_conn(self, regions):
- '''
- :param regions: A list of regions to create a boto3 client
-
- Generator that yields a boto3 client and the region
- '''
- for region in regions:
- try:
- connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **self.credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- if self.boto_profile:
- try:
- connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- else:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- yield connection, region
-
- def _get_hosts_by_region(self, connection, filters, strict):
-
- def _add_tags_for_hosts(connection, hosts, strict):
- for host in hosts:
- if 'DBInstanceArn' in host:
- resource_arn = host['DBInstanceArn']
- else:
- resource_arn = host['DBClusterArn']
-
- try:
- tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList']
- except is_boto3_error_code('AccessDenied') as e:
- if not strict:
- tags = []
- else:
- raise e
- host['Tags'] = tags
-
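-        # wrapper defers the actual API call: it normalises the response to a
-        # list of instances or clusters, attaches tags, and maps AccessDenied
-        # to an empty result unless strict permissions are requested.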
- def wrapper(f, *args, **kwargs):
- try:
- results = f(*args, **kwargs)
- if 'DBInstances' in results:
- results = results['DBInstances']
- else:
- results = results['DBClusters']
- _add_tags_for_hosts(connection, results, strict)
- except is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except
- if not strict:
- results = []
- else:
- raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
- raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
- return results
- return wrapper
-
- def _get_all_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False):
- '''
- :param regions: a list of regions in which to describe hosts
- :param instance_filters: a list of boto3 filter dictionaries
- :param cluster_filters: a list of boto3 filter dictionaries
- :param strict: a boolean determining whether to fail or ignore 403 error codes
-        :param statuses: a list of statuses that the returned hosts should match
-        :param gather_clusters: whether to describe Aurora DB clusters as well as instances
-        :return A list of host dictionaries
- '''
- all_instances = []
- all_clusters = []
- for connection, region in self._boto3_conn(regions):
- paginator = connection.get_paginator('describe_db_instances')
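-            # _get_hosts_by_region returns a wrapper; hand it the bound
-            # build_full_result callable so error handling and tagging are
-            # applied uniformly to the paginated call.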
- all_instances.extend(
- self._get_hosts_by_region(connection, instance_filters, strict)
- (paginator.paginate(Filters=instance_filters).build_full_result)
- )
- if gather_clusters:
- all_clusters.extend(
- self._get_hosts_by_region(connection, cluster_filters, strict)
- (connection.describe_db_clusters, **{'Filters': cluster_filters})
- )
- sorted_hosts = list(
- sorted(all_instances, key=lambda x: x['DBInstanceIdentifier']) +
- sorted(all_clusters, key=lambda x: x['DBClusterIdentifier'])
- )
- return self.find_hosts_with_valid_statuses(sorted_hosts, statuses)
-
- def find_hosts_with_valid_statuses(self, hosts, statuses):
- if 'all' in statuses:
- return hosts
- valid_hosts = []
- for host in hosts:
- if host.get('DBInstanceStatus') in statuses:
- valid_hosts.append(host)
- elif host.get('Status') in statuses:
- valid_hosts.append(host)
- return valid_hosts
-
- def _populate(self, hosts):
- group = 'aws_rds'
- self.inventory.add_group(group)
- if hosts:
- self._add_hosts(hosts=hosts, group=group)
- self.inventory.add_child('all', group)
-
- def _populate_from_source(self, source_data):
- hostvars = source_data.pop('_meta', {}).get('hostvars', {})
- for group in source_data:
- if group == 'all':
- continue
- else:
- self.inventory.add_group(group)
- hosts = source_data[group].get('hosts', [])
- for host in hosts:
- self._populate_host_vars([host], hostvars.get(host, {}), group)
- self.inventory.add_child('all', group)
-
- def _get_hostname(self, host):
- if host.get('DBInstanceIdentifier'):
- return host['DBInstanceIdentifier']
- else:
- return host['DBClusterIdentifier']
-
- def _format_inventory(self, hosts):
- results = {'_meta': {'hostvars': {}}}
- group = 'aws_rds'
- results[group] = {'hosts': []}
- for host in hosts:
- hostname = self._get_hostname(host)
- results[group]['hosts'].append(hostname)
- h = self.inventory.get_host(hostname)
- results['_meta']['hostvars'][h.name] = h.vars
- return results
-
- def _add_hosts(self, hosts, group):
- '''
- :param hosts: a list of hosts to be added to a group
- :param group: the name of the group to which the hosts belong
- '''
- for host in hosts:
- hostname = self._get_hostname(host)
- host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
- host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
-
- # Allow easier grouping by region
- if 'availability_zone' in host:
- host['region'] = host['availability_zone'][:-1]
- elif 'availability_zones' in host:
- host['region'] = host['availability_zones'][0][:-1]
-
- self.inventory.add_host(hostname, group=group)
- for hostvar, hostval in host.items():
- self.inventory.set_variable(hostname, hostvar, hostval)
-
- # Use constructed if applicable
- strict = self.get_option('strict')
- # Composed variables
- self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
- # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
- self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
- # Create groups based on variable values and add the corresponding hosts to it
- self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
-
- def _set_credentials(self):
- '''
-        Set credentials from the inventory configuration, falling back to
-        botocore session credentials when none are given explicitly.
- '''
- self.boto_profile = self.get_option('aws_profile')
- aws_access_key_id = self.get_option('aws_access_key')
- aws_secret_access_key = self.get_option('aws_secret_key')
- aws_security_token = self.get_option('aws_security_token')
-
- if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):
- session = botocore.session.get_session()
- if session.get_credentials() is not None:
- aws_access_key_id = session.get_credentials().access_key
- aws_secret_access_key = session.get_credentials().secret_key
- aws_security_token = session.get_credentials().token
-
- if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):
- raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
- "inventory configuration file or set them as environment variables.")
-
- if aws_access_key_id:
- self.credentials['aws_access_key_id'] = aws_access_key_id
- if aws_secret_access_key:
- self.credentials['aws_secret_access_key'] = aws_secret_access_key
- if aws_security_token:
- self.credentials['aws_session_token'] = aws_security_token
-
- def verify_file(self, path):
- '''
-        :param path: the path to the inventory config file
-        :return True if the filename ends with 'aws_rds.yml' or 'aws_rds.yaml', else False
- '''
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('aws_rds.yml', 'aws_rds.yaml')):
- return True
- return False
-
- def parse(self, inventory, loader, path, cache=True):
- super(InventoryModule, self).parse(inventory, loader, path)
-
- config_data = self._read_config_data(path)
- self._set_credentials()
-
- # get user specifications
- regions = self.get_option('regions')
- filters = self.get_option('filters')
- strict_permissions = self.get_option('strict_permissions')
- statuses = self.get_option('statuses')
- include_clusters = self.get_option('include_clusters')
- instance_filters = ansible_dict_to_boto3_filter_list(filters)
- cluster_filters = []
- if 'db-cluster-id' in filters and include_clusters:
- cluster_filters = ansible_dict_to_boto3_filter_list({'db-cluster-id': filters['db-cluster-id']})
-
- cache_key = self.get_cache_key(path)
- # false when refresh_cache or --flush-cache is used
- if cache:
- # get the user-specified directive
- cache = self.get_option('cache')
-
- # Generate inventory
- formatted_inventory = {}
- cache_needs_update = False
- if cache:
- try:
- results = self._cache[cache_key]
- except KeyError:
- # if cache expires or cache file doesn't exist
- cache_needs_update = True
- else:
- self._populate_from_source(results)
-
- if not cache or cache_needs_update:
- results = self._get_all_hosts(regions, instance_filters, cluster_filters, strict_permissions, statuses, include_clusters)
- self._populate(results)
- formatted_inventory = self._format_inventory(results)
-
- # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
- # when the user is using caching, update the cached inventory
- if cache_needs_update or (not cache and self.get_option('cache')):
- self._cache[cache_key] = formatted_inventory
diff --git a/lib/ansible/plugins/lookup/aws_account_attribute.py b/lib/ansible/plugins/lookup/aws_account_attribute.py
deleted file mode 100644
index 23f311da42..0000000000
--- a/lib/ansible/plugins/lookup/aws_account_attribute.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
-lookup: aws_account_attribute
-author:
- - Sloane Hertel <shertel@redhat.com>
-version_added: "2.5"
-requirements:
- - boto3
- - botocore
-extends_documentation_fragment:
- - aws_credentials
- - aws_region
-short_description: Look up AWS account attributes.
-description:
- - Describes attributes of your AWS account. You can specify one of the listed
- attribute choices or omit it to see all attributes.
-options:
- attribute:
- description: The attribute for which to get the value(s).
- choices:
- - supported-platforms
- - default-vpc
- - max-instances
- - vpc-max-security-groups-per-interface
- - max-elastic-ips
- - vpc-max-elastic-ips
- - has-ec2-classic
-"""
-
-EXAMPLES = """
-vars:
- has_ec2_classic: "{{ lookup('aws_account_attribute', attribute='has-ec2-classic') }}"
- # true | false
-
- default_vpc_id: "{{ lookup('aws_account_attribute', attribute='default-vpc') }}"
- # vpc-xxxxxxxx | none
-
- account_details: "{{ lookup('aws_account_attribute', wantlist='true') }}"
- # {'default-vpc': ['vpc-xxxxxxxx'], 'max-elastic-ips': ['5'], 'max-instances': ['20'],
- # 'supported-platforms': ['VPC', 'EC2'], 'vpc-max-elastic-ips': ['5'], 'vpc-max-security-groups-per-interface': ['5']}
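-
-  # Illustrative only; the returned values vary per account:
-  max_instances: "{{ lookup('aws_account_attribute', attribute='max-instances') }}"
-  # ['20']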
-
-"""
-
-RETURN = """
-_raw:
- description:
-    Returns a boolean when I(attribute) is C(has-ec2-classic). Otherwise returns the value(s) of the attribute
- (or all attributes if one is not specified).
-"""
-
-from ansible.errors import AnsibleError
-
-try:
- import boto3
- import botocore
-except ImportError:
- raise AnsibleError("The lookup aws_account_attribute requires boto3 and botocore.")
-
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils._text import to_native
-
-
-def _boto3_conn(region, credentials):
- boto_profile = credentials.pop('aws_profile', None)
-
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region, **credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- if boto_profile:
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- raise AnsibleError("Insufficient credentials found.")
- else:
- raise AnsibleError("Insufficient credentials found.")
- return connection
-
-
-def _get_credentials(options):
- credentials = {}
- credentials['aws_profile'] = options['aws_profile']
- credentials['aws_secret_access_key'] = options['aws_secret_key']
- credentials['aws_access_key_id'] = options['aws_access_key']
- credentials['aws_session_token'] = options['aws_security_token']
-
- return credentials
-
-
-class LookupModule(LookupBase):
- def run(self, terms, variables, **kwargs):
-
- self.set_options(var_options=variables, direct=kwargs)
- boto_credentials = _get_credentials(self._options)
-
- region = self._options['region']
- client = _boto3_conn(region, boto_credentials)
-
- attribute = kwargs.get('attribute')
- params = {'AttributeNames': []}
- check_ec2_classic = False
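-        # 'has-ec2-classic' is answered by requesting 'supported-platforms'
-        # and checking whether 'EC2' appears among the returned values.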
- if 'has-ec2-classic' == attribute:
- check_ec2_classic = True
- params['AttributeNames'] = ['supported-platforms']
- elif attribute:
- params['AttributeNames'] = [attribute]
-
- try:
- response = client.describe_account_attributes(**params)['AccountAttributes']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- raise AnsibleError("Failed to describe account attributes: %s" % to_native(e))
-
- if check_ec2_classic:
- attr = response[0]
- return any(value['AttributeValue'] == 'EC2' for value in attr['AttributeValues'])
-
- if attribute:
- attr = response[0]
- return [value['AttributeValue'] for value in attr['AttributeValues']]
-
- flattened = {}
- for k_v_dict in response:
- flattened[k_v_dict['AttributeName']] = [value['AttributeValue'] for value in k_v_dict['AttributeValues']]
- return flattened
diff --git a/lib/ansible/plugins/lookup/aws_secret.py b/lib/ansible/plugins/lookup/aws_secret.py
deleted file mode 100644
index fa100e7df5..0000000000
--- a/lib/ansible/plugins/lookup/aws_secret.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r"""
-lookup: aws_secret
-author:
- - Aaron Smith <ajsmith10381@gmail.com>
-version_added: "2.8"
-requirements:
- - boto3
- - botocore>=1.10.0
-extends_documentation_fragment:
- - aws_credentials
- - aws_region
-short_description: Look up secrets stored in AWS Secrets Manager.
-description:
- - Look up secrets stored in AWS Secrets Manager provided the caller
- has the appropriate permissions to read the secret.
-  - Lookup is based on the secret's C(Name) value.
-  - Optional parameters can be passed into this lookup: C(version_id) and C(version_stage).
-options:
- _terms:
- description: Name of the secret to look up in AWS Secrets Manager.
- required: True
- version_id:
- description: Version of the secret(s).
- required: False
- version_stage:
- description: Stage of the secret version.
- required: False
- join:
- description:
- - Join two or more entries to form an extended secret.
- - This is useful for overcoming the 4096 character limit imposed by AWS.
- type: boolean
- default: false
-"""
-
-EXAMPLES = r"""
- - name: Create RDS instance with aws_secret lookup for password param
- rds:
- command: create
- instance_name: app-db
- db_engine: MySQL
- size: 10
- instance_type: db.m1.small
- username: dbadmin
- password: "{{ lookup('aws_secret', 'DbSecret') }}"
- tags:
- Environment: staging
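-
- # A further sketch; the secret name is a placeholder and AWSPREVIOUS is the
- # staging label Secrets Manager assigns to the prior version:
- - name: Look up the previous version of a secret
-   debug:
-     msg: "{{ lookup('aws_secret', 'DbSecret', version_stage='AWSPREVIOUS') }}"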
-"""
-
-RETURN = r"""
-_raw:
- description:
- Returns the value of the secret stored in AWS Secrets Manager.
-"""
-
-from ansible.errors import AnsibleError
-
-try:
- import boto3
- import botocore
-except ImportError:
- raise AnsibleError("The lookup aws_secret requires boto3 and botocore.")
-
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils._text import to_native
-
-
-def _boto3_conn(region, credentials):
- boto_profile = credentials.pop('aws_profile', None)
-
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region, **credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- if boto_profile:
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- raise AnsibleError("Insufficient credentials found.")
- else:
- raise AnsibleError("Insufficient credentials found.")
- return connection
-
-
-class LookupModule(LookupBase):
- def _get_credentials(self):
- credentials = {}
- credentials['aws_profile'] = self.get_option('aws_profile')
- credentials['aws_secret_access_key'] = self.get_option('aws_secret_key')
- credentials['aws_access_key_id'] = self.get_option('aws_access_key')
- credentials['aws_session_token'] = self.get_option('aws_security_token')
-
- # fallback to IAM role credentials
- if not credentials['aws_profile'] and not (credentials['aws_access_key_id'] and credentials['aws_secret_access_key']):
- session = botocore.session.get_session()
- if session.get_credentials() is not None:
- credentials['aws_access_key_id'] = session.get_credentials().access_key
- credentials['aws_secret_access_key'] = session.get_credentials().secret_key
- credentials['aws_session_token'] = session.get_credentials().token
-
- return credentials
-
- def run(self, terms, variables, **kwargs):
-
- self.set_options(var_options=variables, direct=kwargs)
- boto_credentials = self._get_credentials()
-
- region = self.get_option('region')
- client = _boto3_conn(region, boto_credentials)
-
- secrets = []
- for term in terms:
- params = {}
- params['SecretId'] = term
- if kwargs.get('version_id'):
- params['VersionId'] = kwargs.get('version_id')
- if kwargs.get('version_stage'):
- params['VersionStage'] = kwargs.get('version_stage')
-
- try:
- response = client.get_secret_value(**params)
- if 'SecretBinary' in response:
- secrets.append(response['SecretBinary'])
- if 'SecretString' in response:
- secrets.append(response['SecretString'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- raise AnsibleError("Failed to retrieve secret: %s" % to_native(e))
-
- if kwargs.get('join'):
- joined_secret = []
- joined_secret.append(''.join(secrets))
- return joined_secret
- else:
- return secrets
diff --git a/lib/ansible/plugins/lookup/aws_service_ip_ranges.py b/lib/ansible/plugins/lookup/aws_service_ip_ranges.py
deleted file mode 100644
index 89072f6962..0000000000
--- a/lib/ansible/plugins/lookup/aws_service_ip_ranges.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# (c) 2016 James Turner <turnerjsm@gmail.com>
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
-lookup: aws_service_ip_ranges
-author:
- - James Turner <turnerjsm@gmail.com>
-version_added: "2.5"
-requirements:
- - must have public internet connectivity
-short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
-description:
- - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
-  - This lookup produces a list of all the ranges (by default) or can narrow the list to the specified region or service.
-options:
- service:
-      description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEBUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
- region:
- description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
-"""
-
-EXAMPLES = """
-vars:
- ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
-tasks:
-
-- name: "use list return option and iterate as a loop"
- debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
-# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "
-
-- name: "Pull S3 IP ranges, and print the default return style"
- debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
-# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
-"""
-
-RETURN = """
-_raw:
- description: comma-separated list of CIDR ranges
-"""
-
-
-import json
-
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
-from ansible.module_utils._text import to_native
-from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
-
-
-class LookupModule(LookupBase):
- def run(self, terms, variables, **kwargs):
- try:
- resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
- amazon_response = json.load(resp)['prefixes']
- except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
- # on Python 3+, json.decoder.JSONDecodeError is raised for bad
- # JSON. On 2.x it's a ValueError
- raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
- except HTTPError as e:
- raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
- except SSLValidationError as e:
- raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
- except URLError as e:
- raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
- except ConnectionError as e:
- raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))
-
- if 'region' in kwargs:
- region = kwargs['region']
- amazon_response = (item for item in amazon_response if item['region'] == region)
- if 'service' in kwargs:
- service = str.upper(kwargs['service'])
- amazon_response = (item for item in amazon_response if item['service'] == service)
-
- return [item['ip_prefix'] for item in amazon_response]
diff --git a/lib/ansible/plugins/lookup/aws_ssm.py b/lib/ansible/plugins/lookup/aws_ssm.py
deleted file mode 100644
index 7d875ce3e5..0000000000
--- a/lib/ansible/plugins/lookup/aws_ssm.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# (c) 2016, Bill Wang <ozbillwang(at)gmail.com>
-# (c) 2017, Marat Bakeev <hawara(at)gmail.com>
-# (c) 2018, Michael De La Rue <siblemitcom.mddlr(at)spamgourmet.com>
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
-lookup: aws_ssm
-author:
- - Bill Wang <ozbillwang(at)gmail.com>
- - Marat Bakeev <hawara(at)gmail.com>
- - Michael De La Rue <siblemitcom.mddlr@spamgourmet.com>
-version_added: 2.5
-requirements:
- - boto3
- - botocore
-short_description: Get the value for an SSM parameter or all parameters under a path.
-description:
-  - Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters.
-    The first argument you pass to the lookup can either be a parameter name or a hierarchy of
-    parameters. Hierarchies start with a forward slash and end with the parameter name. Up to
-    5 layers may be specified.
-  - If you look up an explicitly named parameter that does not exist, the lookup returns None,
-    which Jinja2 interprets as an empty string. You can use the C(default) filter to supply a
-    fallback value in this case, but you must set the filter's second parameter to true (see the
-    examples below).
-  - When looking up parameters under a path, a dictionary is returned for each path. If there are
-    no parameters under that path, the lookup still succeeds but the dictionary is empty.
-  - If the lookup fails because of missing permissions or an AWS client error, aws_ssm raises an
-    error, normally failing the current Ansible task. This is usually the right behaviour, since
-    silently ignoring a value that IAM denies access to could cause bigger problems, wrong
-    behaviour, or loss of data. If you want to continue in that case, set up two Ansible tasks:
-    one that sets a variable and ignores failures, and one that uses the value of that variable
-    with a default. See the examples below.
-
-options:
- decrypt:
- description: A boolean to indicate whether to decrypt the parameter.
- default: true
- type: boolean
- bypath:
- description: A boolean to indicate whether the parameter is provided as a hierarchy.
- default: false
- type: boolean
- recursive:
- description: A boolean to indicate whether to retrieve all parameters within a hierarchy.
- default: false
- type: boolean
- shortnames:
-      description: Indicates whether to return only the parameter name, without its path, when using a parameter hierarchy.
- default: false
- type: boolean
-'''
-
-EXAMPLES = '''
-# lookup sample:
-- name: lookup ssm parameter store in the current region
- debug: msg="{{ lookup('aws_ssm', 'Hello' ) }}"
-
-- name: lookup ssm parameter store in nominated region
- debug: msg="{{ lookup('aws_ssm', 'Hello', region='us-east-2' ) }}"
-
-- name: lookup ssm parameter store without decryption
- debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=False ) }}"
-
-- name: lookup ssm parameter store in nominated aws profile
- debug: msg="{{ lookup('aws_ssm', 'Hello', aws_profile='myprofile' ) }}"
-
-- name: lookup ssm parameter store using explicit aws credentials
- debug: msg="{{ lookup('aws_ssm', 'Hello', aws_access_key=my_aws_access_key, aws_secret_key=my_aws_secret_key, aws_security_token=my_security_token ) }}"
-
-- name: lookup ssm parameter store with all options.
- debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=false, region='us-east-2', aws_profile='myprofile') }}"
-
-- name: lookup a key which doesn't exist, returns ""
- debug: msg="{{ lookup('aws_ssm', 'NoKey') }}"
-
-- name: lookup a key which doesn't exist, returning a default ('root')
- debug: msg="{{ lookup('aws_ssm', 'AdminID') | default('root', true) }}"
-
-- name: lookup a key which doesn't exist failing to store it in a fact
- set_fact:
- temp_secret: "{{ lookup('aws_ssm', '/NoAccess/hiddensecret') }}"
- ignore_errors: true
-
-- name: show fact default to "access failed" if we don't have access
- debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}"
-
-- name: return a dictionary of ssm parameters from a hierarchy path
- debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}"
-
-- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param)
- debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}"
-
-- name: Iterate over a parameter hierarchy (one iteration per parameter)
- debug: msg='Key contains {{ item.key }} , with value {{ item.value }}'
- loop: '{{ lookup("aws_ssm", "/demo/", region="ap-southeast-2", bypath=True) | dict2items }}'
-
-- name: Iterate over multiple paths as dictionaries (one iteration per path)
- debug: msg='Path contains {{ item }}'
- loop: '{{ lookup("aws_ssm", "/demo/", "/demo1/", bypath=True)}}'
-
-'''
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.ec2 import HAS_BOTO3, boto3_tag_list_to_ansible_dict
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-from ansible.utils.display import Display
-
-try:
- from botocore.exceptions import ClientError
- import botocore
- import boto3
-except ImportError:
- pass # will be captured by imported HAS_BOTO3
-
-display = Display()
-
-
-def _boto3_conn(region, credentials):
- if 'boto_profile' in credentials:
- boto_profile = credentials.pop('boto_profile')
- else:
- boto_profile = None
-
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region, **credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
- if boto_profile:
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region)
- # FIXME: we should probably do better passing on of the error information
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
- raise AnsibleError("Insufficient credentials found.")
- else:
- raise AnsibleError("Insufficient credentials found.")
- return connection
-
-
-class LookupModule(LookupBase):
- def run(self, terms, variables=None, boto_profile=None, aws_profile=None,
- aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None,
- bypath=False, shortnames=False, recursive=False, decrypt=True):
- '''
- :arg terms: a list of lookups to run.
- e.g. ['parameter_name', 'parameter_name_too' ]
- :kwarg variables: ansible variables active at the time of the lookup
-        :kwarg aws_secret_key: AWS secret key matching the access key
-        :kwarg aws_access_key: identity of the AWS access key to use
- :kwarg aws_security_token: AWS session key if using STS
- :kwarg decrypt: Set to True to get decrypted parameters
- :kwarg region: AWS region in which to do the lookup
- :kwarg bypath: Set to True to do a lookup of variables under a path
- :kwarg recursive: Set to True to recurse below the path (requires bypath=True)
- :returns: A list of parameter values or a list of dictionaries if bypath=True.
- '''
-
- if not HAS_BOTO3:
- raise AnsibleError('botocore and boto3 are required for aws_ssm lookup.')
-
- ret = []
- response = {}
- ssm_dict = {}
-
- credentials = {}
- if aws_profile:
- credentials['boto_profile'] = aws_profile
- else:
- credentials['boto_profile'] = boto_profile
- credentials['aws_secret_access_key'] = aws_secret_key
- credentials['aws_access_key_id'] = aws_access_key
- credentials['aws_session_token'] = aws_security_token
-
- client = _boto3_conn(region, credentials)
-
- ssm_dict['WithDecryption'] = decrypt
-
- # Lookup by path
- if bypath:
- ssm_dict['Recursive'] = recursive
- for term in terms:
- ssm_dict["Path"] = term
- display.vvv("AWS_ssm path lookup term: %s in region: %s" % (term, region))
- try:
- response = client.get_parameters_by_path(**ssm_dict)
- except ClientError as e:
- raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
- paramlist = list()
- paramlist.extend(response['Parameters'])
-
- # Manual pagination, since boto doesn't support it yet for get_parameters_by_path
- while 'NextToken' in response:
- response = client.get_parameters_by_path(NextToken=response['NextToken'], **ssm_dict)
- paramlist.extend(response['Parameters'])
-
- # shorten parameter names. yes, this will return duplicate names with different values.
- if shortnames:
- for x in paramlist:
- x['Name'] = x['Name'][x['Name'].rfind('/') + 1:]
-
- display.vvvv("AWS_ssm path lookup returned: %s" % str(paramlist))
- if len(paramlist):
- ret.append(boto3_tag_list_to_ansible_dict(paramlist,
- tag_name_key_name="Name",
- tag_value_key_name="Value"))
- else:
- ret.append({})
- # Lookup by parameter name - always returns a list with one or no entry.
- else:
- display.vvv("AWS_ssm name lookup term: %s" % terms)
- ssm_dict["Names"] = terms
- try:
- response = client.get_parameters(**ssm_dict)
- except ClientError as e:
- raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
- params = boto3_tag_list_to_ansible_dict(response['Parameters'], tag_name_key_name="Name",
- tag_value_key_name="Value")
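-            # A term may carry a ':version' or ':label' selector; strip it
-            # before matching against the returned parameter names.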
- for i in terms:
- if i.split(':', 1)[0] in params:
- ret.append(params[i])
- elif i in response['InvalidParameters']:
- ret.append(None)
- else:
- raise AnsibleError("Ansible internal error: aws_ssm lookup failed to understand boto3 return value: {0}".format(str(response)))
- return ret
-
- display.vvvv("AWS_ssm path lookup returning: %s " % str(ret))
- return ret
diff --git a/test/integration/targets/aws_caller_info/aliases b/test/integration/targets/aws_caller_info/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/aws_caller_info/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/aws_caller_info/tasks/main.yaml b/test/integration/targets/aws_caller_info/tasks/main.yaml
deleted file mode 100644
index 5645de6bc4..0000000000
--- a/test/integration/targets/aws_caller_info/tasks/main.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-- name: retrieve caller facts
- aws_caller_info:
- region: "{{ aws_region }}"
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- register: result
-
-- name: assert correct keys are returned
- assert:
- that:
- - result.account is not none
- - result.arn is not none
- - result.user_id is not none
- - result.account_alias is not none
diff --git a/test/integration/targets/aws_s3/aliases b/test/integration/targets/aws_s3/aliases
deleted file mode 100644
index 72a9fb4f57..0000000000
--- a/test/integration/targets/aws_s3/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group4
diff --git a/test/integration/targets/aws_s3/defaults/main.yml b/test/integration/targets/aws_s3/defaults/main.yml
deleted file mode 100644
index eb7dd2d371..0000000000
--- a/test/integration/targets/aws_s3/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# defaults file for s3
-bucket_name: '{{resource_prefix}}'
diff --git a/test/integration/targets/aws_s3/files/hello.txt b/test/integration/targets/aws_s3/files/hello.txt
deleted file mode 100644
index 8ab686eafe..0000000000
--- a/test/integration/targets/aws_s3/files/hello.txt
+++ /dev/null
@@ -1 +0,0 @@
-Hello, World!
diff --git a/test/integration/targets/aws_s3/meta/main.yml b/test/integration/targets/aws_s3/meta/main.yml
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/aws_s3/meta/main.yml
+++ /dev/null
diff --git a/test/integration/targets/aws_s3/tasks/main.yml b/test/integration/targets/aws_s3/tasks/main.yml
deleted file mode 100644
index a0a7aa7950..0000000000
--- a/test/integration/targets/aws_s3/tasks/main.yml
+++ /dev/null
@@ -1,590 +0,0 @@
----
-# tasks file for test_s3
-
-- name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
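-
-# The anchor above is merged into each task below via "<<: *aws_connection_info",
-# so every call shares the same connection parameters without repetition.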
-
-- block:
- - name: Create temporary directory
- tempfile:
- state: directory
- register: tmpdir
-
- - name: Create content
- set_fact:
- content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}"
-
- - name: test create bucket without permissions
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: create
- register: result
- ignore_errors: yes
-
- - assert:
- that:
- - result is failed
- - "result.msg != 'MODULE FAILURE'"
-
- - name: test create bucket
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: create
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: trying to create a bucket name that already exists
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: create
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is not changed
-
- - name: Create local upload.txt
- copy:
- content: "{{ content }}"
- dest: "{{ tmpdir.path }}/upload.txt"
-
- - name: stat the file
- stat:
- path: "{{ tmpdir.path }}/upload.txt"
- get_checksum: yes
- register: upload_file
-
- - name: test putting an object in the bucket
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: put
- src: "{{ tmpdir.path }}/upload.txt"
- object: delete.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is changed
- - result.msg == "PUT operation complete"
-
- - name: test using aws_s3 with async
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: put
- src: "{{ tmpdir.path }}/upload.txt"
- object: delete.txt
- <<: *aws_connection_info
- register: test_async
- async: 30
- poll: 0
-
- - name: ensure it completed
- async_status:
- jid: "{{ test_async.ansible_job_id }}"
- register: status
- until: status is finished
- retries: 10
-
- - name: test put with overwrite=different and unmodified object
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: put
- src: "{{ tmpdir.path }}/upload.txt"
- object: delete.txt
- overwrite: different
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is not changed
-
- - name: check that roles file lookups work as expected
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: put
- src: hello.txt
- object: delete.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is changed
- - result.msg == "PUT operation complete"
-
- - name: test put with overwrite=never
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: put
- src: "{{ tmpdir.path }}/upload.txt"
- object: delete.txt
- overwrite: never
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is not changed
-
- - name: test put with overwrite=different and modified object
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: put
- src: "{{ tmpdir.path }}/upload.txt"
- object: delete.txt
- overwrite: different
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: test put with overwrite=always
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: put
- src: "{{ tmpdir.path }}/upload.txt"
- object: delete.txt
- overwrite: always
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: test get object
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: get
- dest: "{{ tmpdir.path }}/download.txt"
- object: delete.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
- until: "result.msg == 'GET operation complete'"
-
- - name: stat the file so we can compare the checksums
- stat:
- path: "{{ tmpdir.path }}/download.txt"
- get_checksum: yes
- register: download_file
-
- - assert:
- that:
- - upload_file.stat.checksum == download_file.stat.checksum
-
- - name: test get with overwrite=different and identical files
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: get
- dest: "{{ tmpdir.path }}/download.txt"
- object: delete.txt
- overwrite: different
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is not changed
-
- - name: modify destination
- copy:
- dest: "{{ tmpdir.path }}/download.txt"
- src: hello.txt
-
- - name: test get with overwrite=never
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: get
- dest: "{{ tmpdir.path }}/download.txt"
- object: delete.txt
- overwrite: never
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is not changed
-
- - name: test get with overwrite=different and modified file
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: get
- dest: "{{ tmpdir.path }}/download.txt"
- object: delete.txt
- overwrite: different
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: test get with overwrite=always
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: get
- dest: "{{ tmpdir.path }}/download.txt"
- object: delete.txt
- overwrite: always
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: test geturl of the object
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: geturl
- object: delete.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
- until: result is changed
-
- - assert:
- that:
- - "'Download url:' in result.msg"
- - result is changed
-
- - name: test getstr of the object
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: getstr
- object: delete.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result.msg == "GET operation complete"
- - result.contents == content
-
- - name: test list to get all objects in the bucket
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: list
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - "'delete.txt' in result.s3_keys"
- - result.msg == "LIST operation complete"
-
- - name: test delobj to just delete an object in the bucket
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: delobj
- object: delete.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - "'Object deleted from bucket' in result.msg"
- - result is changed
-
- - name: test putting an encrypted object in the bucket
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: put
- src: "{{ tmpdir.path }}/upload.txt"
- encrypt: yes
- object: delete_encrypt.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is changed
- - result.msg == "PUT operation complete"
-
- - name: test get encrypted object
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: get
- dest: "{{ tmpdir.path }}/download_encrypted.txt"
- object: delete_encrypt.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
- until: "result.msg == 'GET operation complete'"
-
- - name: stat the file so we can compare the checksums
- stat:
- path: "{{ tmpdir.path }}/download_encrypted.txt"
- get_checksum: yes
- register: download_file
-
- - assert:
- that:
- - upload_file.stat.checksum == download_file.stat.checksum
-
- - name: delete encrypted file
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: delobj
- object: delete_encrypt.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
-
- - name: test putting an aws:kms encrypted object in the bucket
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: put
- src: "{{ tmpdir.path }}/upload.txt"
- encrypt: yes
- encryption_mode: aws:kms
- object: delete_encrypt_kms.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - result is changed
- - result.msg == "PUT operation complete"
-
- - name: test get KMS encrypted object
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: get
- dest: "{{ tmpdir.path }}/download_kms.txt"
- object: delete_encrypt_kms.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
- until: "result.msg == 'GET operation complete'"
-
- - name: get the stat of the file so we can compare the checksums
- stat:
- path: "{{ tmpdir.path }}/download_kms.txt"
- get_checksum: yes
- register: download_file
-
- - assert:
- that:
- - upload_file.stat.checksum == download_file.stat.checksum
-
- # FIXME - could use a test that checks uploaded file is *actually* aws:kms encrypted
-
- - name: delete KMS encrypted file
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: delobj
- object: delete_encrypt_kms.txt
- <<: *aws_connection_info
- retries: 3
- delay: 3
-
- # FIXME: could use a test that checks non standard KMS key
- # but that would require ability to create and remove such keys.
- # PRs exist for that, but propose deferring until after merge.
-
- - name: test creation of empty path
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: create
- object: foo/bar/baz/
- <<: *aws_connection_info
- retries: 3
- delay: 3
- register: result
-
- - assert:
- that:
- - "'Virtual directory foo/bar/baz/ created' in result.msg"
- - result is changed
-
- - name: test deletion of empty path
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: delobj
- object: foo/bar/baz/
- <<: *aws_connection_info
- retries: 3
- delay: 3
-
- - name: test delete bucket
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: delete
- <<: *aws_connection_info
- register: result
- retries: 3
- delay: 3
- until: result is changed
-
- - assert:
- that:
- - result is changed
-
- - name: test create a bucket with a dot in the name
- aws_s3:
- bucket: "{{ bucket_name + '.bucket' }}"
- mode: create
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: test delete a bucket with a dot in the name
- aws_s3:
- bucket: "{{ bucket_name + '.bucket' }}"
- mode: delete
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: test delete a nonexistent bucket
- aws_s3:
- bucket: "{{ bucket_name + '.bucket' }}"
- mode: delete
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is not changed
-
- - name: make tempfile 4 GB for OSX
- command:
- _raw_params: "dd if=/dev/zero of={{ tmpdir.path }}/largefile bs=1m count=4096"
- when: ansible_distribution == 'MacOSX'
-
- - name: make tempfile 4 GB for linux
- command:
- _raw_params: "dd if=/dev/zero of={{ tmpdir.path }}/largefile bs=1M count=4096"
- when: ansible_system == 'Linux'
-
- - name: test multipart download - platform specific
- block:
- - name: make a bucket to upload the file
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: create
- <<: *aws_connection_info
-
- - name: upload the file to the bucket
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: put
- src: "{{ tmpdir.path }}/largefile"
- object: multipart.txt
- <<: *aws_connection_info
-
- - name: download file once
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: get
- dest: "{{ tmpdir.path }}/download.txt"
- object: multipart.txt
- overwrite: different
- <<: *aws_connection_info
- retries: 3
- delay: 3
- until: "result.msg == 'GET operation complete'"
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: download file again
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: get
- dest: "{{ tmpdir.path }}/download.txt"
- object: multipart.txt
- overwrite: different
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is not changed
- when: ansible_system == 'Linux' or ansible_distribution == 'MacOSX'
-
- always:
- - name: remove uploaded files
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: delobj
- object: "{{ item }}"
- <<: *aws_connection_info
- loop:
- - hello.txt
- - delete.txt
- - delete_encrypt.txt
- - delete_encrypt_kms.txt
- ignore_errors: yes
-
- - name: delete temporary files
- file:
- state: absent
- path: "{{ tmpdir.path }}"
- ignore_errors: yes
-
- - name: delete the bucket
- aws_s3:
- bucket: "{{ bucket_name }}"
- mode: delete
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/cloudformation/aliases b/test/integration/targets/cloudformation/aliases
deleted file mode 100644
index 55555be789..0000000000
--- a/test/integration/targets/cloudformation/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-shippable/aws/group2
-cloudformation_info
diff --git a/test/integration/targets/cloudformation/defaults/main.yml b/test/integration/targets/cloudformation/defaults/main.yml
deleted file mode 100644
index aaf0ca7e61..0000000000
--- a/test/integration/targets/cloudformation/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-stack_name: "{{ resource_prefix }}"
-
-vpc_name: '{{ resource_prefix }}-vpc'
-vpc_seed: '{{ resource_prefix }}'
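-# random() is seeded with resource_prefix so vpc_cidr and subnet_cidr pick the
-# same second octet and stay consistent for the whole test run.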
-vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
-subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
-
-ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'
diff --git a/test/integration/targets/cloudformation/files/cf_template.json b/test/integration/targets/cloudformation/files/cf_template.json
deleted file mode 100644
index ff4c5693b0..0000000000
--- a/test/integration/targets/cloudformation/files/cf_template.json
+++ /dev/null
@@ -1,37 +0,0 @@
-{
- "AWSTemplateFormatVersion" : "2010-09-09",
-
- "Description" : "Create an Amazon EC2 instance.",
-
- "Parameters" : {
- "InstanceType" : {
- "Description" : "EC2 instance type",
- "Type" : "String",
- "Default" : "t3.nano",
- "AllowedValues" : [ "t3.micro", "t3.nano"]
- },
- "ImageId" : {
- "Type" : "String"
- },
- "SubnetId" : {
- "Type" : "String"
- }
- },
-
- "Resources" : {
- "EC2Instance" : {
- "Type" : "AWS::EC2::Instance",
- "Properties" : {
- "InstanceType" : { "Ref" : "InstanceType" },
- "ImageId" : { "Ref" : "ImageId" },
- "SubnetId": { "Ref" : "SubnetId" }
- }
- }
- },
-
- "Outputs" : {
- "InstanceId" : {
- "Value" : { "Ref" : "EC2Instance" }
- }
- }
-}
diff --git a/test/integration/targets/cloudformation/tasks/main.yml b/test/integration/targets/cloudformation/tasks/main.yml
deleted file mode 100644
index 9b89722b20..0000000000
--- a/test/integration/targets/cloudformation/tasks/main.yml
+++ /dev/null
@@ -1,463 +0,0 @@
----
-
-- module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key | default(omit) }}'
- aws_secret_key: '{{ aws_secret_key | default(omit) }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region | default(omit) }}'
-
- block:
-
- # ==== Env setup ==========================================================
- - name: list available AZs
- aws_az_info:
- register: region_azs
-
- - name: pick an AZ for testing
- set_fact:
- availability_zone: "{{ region_azs.availability_zones[0].zone_name }}"
-
- - name: Create a test VPC
- ec2_vpc_net:
- name: "{{ vpc_name }}"
- cidr_block: "{{ vpc_cidr }}"
- tags:
- Name: Cloudformation testing
- register: testing_vpc
-
- - name: Create a test subnet
- ec2_vpc_subnet:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: "{{ subnet_cidr }}"
- az: "{{ availability_zone }}"
- register: testing_subnet
-
- - name: Find AMI to use
- ec2_ami_info:
- owners: 'amazon'
- filters:
- name: '{{ ec2_ami_name }}'
- register: ec2_amis
-
- - name: Set fact with latest AMI
- vars:
- latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
- set_fact:
- ec2_ami_image: '{{ latest_ami.image_id }}'
-
- # ==== Cloudformation tests ===============================================
-
- # 1. Basic stack creation (check mode, actual run and idempotency)
- # 2. Tags
- # 3. cloudformation_info tests (basic + all_facts)
- # 4. termination_protection
- # 5. create_changeset + changeset_name
-
- # There is still scope to add tests for -
- # 1. capabilities
- # 2. stack_policy
- # 3. on_create_failure (covered in unit tests)
- # 4. Passing in a role
- # 5. nested stacks?
-
-
- - name: create a cloudformation stack (check mode)
- cloudformation:
- stack_name: "{{ stack_name }}"
- template_body: "{{ lookup('file','cf_template.json') }}"
- template_parameters:
- InstanceType: "t3.nano"
- ImageId: "{{ ec2_ami_image }}"
- SubnetId: "{{ testing_subnet.subnet.id }}"
- tags:
- Stack: "{{ stack_name }}"
- test: "{{ resource_prefix }}"
- register: cf_stack
- check_mode: yes
-
- - name: check task return attributes
- assert:
- that:
- - cf_stack.changed
- - "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg"
-
- - name: create a cloudformation stack
- cloudformation:
- stack_name: "{{ stack_name }}"
- template_body: "{{ lookup('file','cf_template.json') }}"
- template_parameters:
- InstanceType: "t3.nano"
- ImageId: "{{ ec2_ami_image }}"
- SubnetId: "{{ testing_subnet.subnet.id }}"
- tags:
- Stack: "{{ stack_name }}"
- test: "{{ resource_prefix }}"
- register: cf_stack
-
- - name: check task return attributes
- assert:
- that:
- - cf_stack.changed
- - "'events' in cf_stack"
- - "'output' in cf_stack and 'Stack CREATE complete' in cf_stack.output"
- - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
- - "'stack_resources' in cf_stack"
-
- - name: create a cloudformation stack (check mode) (idempotent)
- cloudformation:
- stack_name: "{{ stack_name }}"
- template_body: "{{ lookup('file','cf_template.json') }}"
- template_parameters:
- InstanceType: "t3.nano"
- ImageId: "{{ ec2_ami_image }}"
- SubnetId: "{{ testing_subnet.subnet.id }}"
- tags:
- Stack: "{{ stack_name }}"
- test: "{{ resource_prefix }}"
- register: cf_stack
- check_mode: yes
-
- - name: check task return attributes
- assert:
- that:
- - not cf_stack.changed
-
- - name: create a cloudformation stack (idempotent)
- cloudformation:
- stack_name: "{{ stack_name }}"
- template_body: "{{ lookup('file','cf_template.json') }}"
- template_parameters:
- InstanceType: "t3.nano"
- ImageId: "{{ ec2_ami_image }}"
- SubnetId: "{{ testing_subnet.subnet.id }}"
- tags:
- Stack: "{{ stack_name }}"
- test: "{{ resource_prefix }}"
- register: cf_stack
-
- - name: check task return attributes
- assert:
- that:
- - not cf_stack.changed
- - "'output' in cf_stack and 'Stack is already up-to-date.' in cf_stack.output"
- - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
- - "'stack_resources' in cf_stack"
-
- - name: get stack details
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- register: stack_info
-
- - name: assert stack info
- assert:
- that:
- - "'cloudformation' in stack_info"
- - "stack_info.cloudformation | length == 1"
- - "stack_name in stack_info.cloudformation"
- - "'stack_description' in stack_info.cloudformation[stack_name]"
- - "'stack_outputs' in stack_info.cloudformation[stack_name]"
- - "'stack_parameters' in stack_info.cloudformation[stack_name]"
- - "'stack_tags' in stack_info.cloudformation[stack_name]"
- - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
-
- - name: get stack details (checkmode)
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- register: stack_info
- check_mode: yes
-
- - name: assert stack info
- assert:
- that:
- - "'cloudformation' in stack_info"
- - "stack_info.cloudformation | length == 1"
- - "stack_name in stack_info.cloudformation"
- - "'stack_description' in stack_info.cloudformation[stack_name]"
- - "'stack_outputs' in stack_info.cloudformation[stack_name]"
- - "'stack_parameters' in stack_info.cloudformation[stack_name]"
- - "'stack_tags' in stack_info.cloudformation[stack_name]"
- - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
-
- - name: get stack details (all_facts)
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- all_facts: yes
- register: stack_info
-
- - name: assert stack info
- assert:
- that:
- - "'stack_events' in stack_info.cloudformation[stack_name]"
- - "'stack_policy' in stack_info.cloudformation[stack_name]"
- - "'stack_resource_list' in stack_info.cloudformation[stack_name]"
- - "'stack_resources' in stack_info.cloudformation[stack_name]"
- - "'stack_template' in stack_info.cloudformation[stack_name]"
-
- - name: get stack details (all_facts) (checkmode)
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- all_facts: yes
- register: stack_info
- check_mode: yes
-
- - name: assert stack info
- assert:
- that:
- - "'stack_events' in stack_info.cloudformation[stack_name]"
- - "'stack_policy' in stack_info.cloudformation[stack_name]"
- - "'stack_resource_list' in stack_info.cloudformation[stack_name]"
- - "'stack_resources' in stack_info.cloudformation[stack_name]"
- - "'stack_template' in stack_info.cloudformation[stack_name]"
-
- # ==== Cloudformation tests (create changeset) ============================
-
- # try to create a changeset by changing instance type
- - name: create a changeset
- cloudformation:
- stack_name: "{{ stack_name }}"
- create_changeset: yes
- changeset_name: "test-changeset"
- template_body: "{{ lookup('file','cf_template.json') }}"
- template_parameters:
- InstanceType: "t3.micro"
- ImageId: "{{ ec2_ami_image }}"
- SubnetId: "{{ testing_subnet.subnet.id }}"
- tags:
- Stack: "{{ stack_name }}"
- test: "{{ resource_prefix }}"
- register: create_changeset_result
-
- - name: assert changeset created
- assert:
- that:
- - "create_changeset_result.changed"
- - "'change_set_id' in create_changeset_result"
- - "'Stack CREATE_CHANGESET complete' in create_changeset_result.output"
-
- - name: get stack details with changesets
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- stack_change_sets: True
- register: stack_info
-
- - name: assert changesets in info
- assert:
- that:
- - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
-
- - name: get stack details with changesets (checkmode)
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- stack_change_sets: True
- register: stack_info
- check_mode: yes
-
- - name: assert changesets in info
- assert:
- that:
- - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
-
- # try to create an empty changeset by passing in unchanged template
- - name: create a changeset
- cloudformation:
- stack_name: "{{ stack_name }}"
- create_changeset: yes
- template_body: "{{ lookup('file','cf_template.json') }}"
- template_parameters:
- InstanceType: "t3.nano"
- ImageId: "{{ ec2_ami_image }}"
- SubnetId: "{{ testing_subnet.subnet.id }}"
- tags:
- Stack: "{{ stack_name }}"
- test: "{{ resource_prefix }}"
- register: create_changeset_result
-
- - name: assert changeset created
- assert:
- that:
- - "not create_changeset_result.changed"
- - "'The created Change Set did not contain any changes to this stack and was deleted.' in create_changeset_result.output"
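-
-  # The named changeset created earlier is neither executed nor removed by
-  # this suite (deleting the stack later disposes of it). Assuming the AWS
-  # CLI is available on the controller, explicit cleanup could be sketched as:
-  #- name: delete the named changeset (sketch, not run)
-  #  command: >-
-  #    aws cloudformation delete-change-set
-  #    --stack-name {{ stack_name }}
-  #    --change-set-name test-changeset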
-
- # ==== Cloudformation tests (termination_protection) ======================
-
- - name: set termination protection to true
- cloudformation:
- stack_name: "{{ stack_name }}"
- termination_protection: yes
- template_body: "{{ lookup('file','cf_template.json') }}"
- template_parameters:
- InstanceType: "t3.nano"
- ImageId: "{{ ec2_ami_image }}"
- SubnetId: "{{ testing_subnet.subnet.id }}"
- tags:
- Stack: "{{ stack_name }}"
- test: "{{ resource_prefix }}"
- register: cf_stack
-
-# This fails - see issue #65592
-# - name: check task return attributes
-# assert:
-# that:
-# - cf_stack.changed
-
- - name: get stack details
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- register: stack_info
-
- - name: assert stack info
- assert:
- that:
- - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
-
- - name: get stack details (checkmode)
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- register: stack_info
- check_mode: yes
-
- - name: assert stack info
- assert:
- that:
- - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
-
- - name: set termination protection to false
- cloudformation:
- stack_name: "{{ stack_name }}"
- termination_protection: no
- template_body: "{{ lookup('file','cf_template.json') }}"
- template_parameters:
- InstanceType: "t3.nano"
- ImageId: "{{ ec2_ami_image }}"
- SubnetId: "{{ testing_subnet.subnet.id }}"
- tags:
- Stack: "{{ stack_name }}"
- test: "{{ resource_prefix }}"
- register: cf_stack
-
-# This fails - see issue #65592
-# - name: check task return attributes
-# assert:
-# that:
-# - cf_stack.changed
-
- - name: get stack details
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- register: stack_info
-
- - name: assert stack info
- assert:
- that:
- - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
-
- - name: get stack details (checkmode)
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- register: stack_info
- check_mode: yes
-
- - name: assert stack info
- assert:
- that:
- - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
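-
-  # AWS refuses to delete a stack while termination protection is enabled,
-  # so a negative test could be sketched as follows (not run here, since the
-  # protection flag was just turned off):
-  #- name: try to delete a protected stack (sketch, expected failure)
-  #  cloudformation:
-  #    stack_name: "{{ stack_name }}"
-  #    state: absent
-  #  register: delete_protected
-  #  ignore_errors: yes
-  #- assert:
-  #    that:
-  #      - delete_protected is failed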
-
- # ==== Cloudformation tests (delete stack tests) ==========================
-
- - name: delete cloudformation stack (check mode)
- cloudformation:
- stack_name: "{{ stack_name }}"
- state: absent
- check_mode: yes
- register: cf_stack
-
- - name: check task return attributes
- assert:
- that:
- - cf_stack.changed
- - "'msg' in cf_stack and 'Stack would be deleted' in cf_stack.msg"
-
- - name: delete cloudformation stack
- cloudformation:
- stack_name: "{{ stack_name }}"
- state: absent
- register: cf_stack
-
- - name: check task return attributes
- assert:
- that:
- - cf_stack.changed
- - "'output' in cf_stack and 'Stack Deleted' in cf_stack.output"
-
- - name: delete cloudformation stack (check mode) (idempotent)
- cloudformation:
- stack_name: "{{ stack_name }}"
- state: absent
- check_mode: yes
- register: cf_stack
-
- - name: check task return attributes
- assert:
- that:
- - not cf_stack.changed
- - "'msg' in cf_stack"
- - >-
- "Stack doesn't exist" in cf_stack.msg
-
- - name: delete cloudformation stack (idempotent)
- cloudformation:
- stack_name: "{{ stack_name }}"
- state: absent
- register: cf_stack
-
- - name: check task return attributes
- assert:
- that:
- - not cf_stack.changed
- - "'output' in cf_stack and 'Stack not found.' in cf_stack.output"
-
- - name: get stack details
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- register: stack_info
-
- - name: assert stack info
- assert:
- that:
- - "not stack_info.cloudformation"
-
- - name: get stack details (checkmode)
- cloudformation_info:
- stack_name: "{{ stack_name }}"
- register: stack_info
- check_mode: yes
-
- - name: assert stack info
- assert:
- that:
- - "not stack_info.cloudformation"
-
- # ==== Cleanup ============================================================
-
- always:
-
- - name: delete stack
- cloudformation:
- stack_name: "{{ stack_name }}"
- state: absent
- ignore_errors: yes
-
- - name: Delete test subnet
- ec2_vpc_subnet:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: "{{ subnet_cidr }}"
- state: absent
- ignore_errors: yes
-
- - name: Delete test VPC
- ec2_vpc_net:
- name: "{{ vpc_name }}"
- cidr_block: "{{ vpc_cidr }}"
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_ami/aliases b/test/integration/targets/ec2_ami/aliases
deleted file mode 100644
index 0e61c5bb7b..0000000000
--- a/test/integration/targets/ec2_ami/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/aws
-shippable/aws/group2
-unstable
-ec2_ami_info
diff --git a/test/integration/targets/ec2_ami/defaults/main.yml b/test/integration/targets/ec2_ami/defaults/main.yml
deleted file mode 100644
index 86665321a2..0000000000
--- a/test/integration/targets/ec2_ami/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-# defaults file for test_ec2_ami
-ec2_ami_name: '{{resource_prefix}}'
-ec2_ami_description: 'Created by ansible integration tests'
-# AMI IDs for Amazon Linux AMI 2017.03.1 (HVM), SSD Volume Type
-ec2_ami_image:
- us-east-1: ami-4fffc834
- us-east-2: ami-ea87a78f
diff --git a/test/integration/targets/ec2_ami/meta/main.yml b/test/integration/targets/ec2_ami/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_ami/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_ami/tasks/main.yml b/test/integration/targets/ec2_ami/tasks/main.yml
deleted file mode 100644
index 2cb15b5da1..0000000000
--- a/test/integration/targets/ec2_ami/tasks/main.yml
+++ /dev/null
@@ -1,462 +0,0 @@
----
-# tasks file for test_ec2_ami
-
-- block:
-
- # ============================================================
-
- # SETUP: vpc, ec2 key pair, subnet, security group, ec2 instance, snapshot
- - name: set aws_connection_info fact
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_region: '{{aws_region}}'
- aws_access_key: '{{aws_access_key}}'
- aws_secret_key: '{{aws_secret_key}}'
- security_token: '{{security_token}}'
- no_log: yes
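-
-  # The &aws_connection_info anchor above lets every task in this file merge
-  # the same connection arguments via '<<: *aws_connection_info' instead of
-  # repeating the four credential parameters each time.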
-
- - name: create a VPC to work in
- ec2_vpc_net:
- cidr_block: 10.0.0.0/24
- state: present
- name: '{{ ec2_ami_name }}_setup'
- resource_tags:
- Name: '{{ ec2_ami_name }}_setup'
- <<: *aws_connection_info
- register: setup_vpc
-
- - name: create a key pair to use for creating an ec2 instance
- ec2_key:
- name: '{{ ec2_ami_name }}_setup'
- state: present
- <<: *aws_connection_info
- register: setup_key
-
- - name: create a subnet to use for creating an ec2 instance
- ec2_vpc_subnet:
- az: '{{ ec2_region }}a'
- vpc_id: '{{ setup_vpc.vpc.id }}'
- cidr: 10.0.0.0/24
- state: present
- resource_tags:
- Name: '{{ ec2_ami_name }}_setup'
- <<: *aws_connection_info
- register: setup_subnet
-
- - name: create a security group to use for creating an ec2 instance
- ec2_group:
- name: '{{ ec2_ami_name }}_setup'
- description: 'created by Ansible integration tests'
- state: present
- vpc_id: '{{ setup_vpc.vpc.id }}'
- <<: *aws_connection_info
- register: setup_sg
-
- - name: provision ec2 instance to create an image
- ec2:
- key_name: '{{ setup_key.key.name }}'
- instance_type: t2.micro
- state: present
- image: '{{ ec2_region_images[ec2_region] }}'
- wait: yes
- instance_tags:
- '{{ec2_ami_name}}_instance_setup': 'integration_tests'
- group_id: '{{ setup_sg.group_id }}'
- vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
- <<: *aws_connection_info
- register: setup_instance
-
- - name: take a snapshot of the instance to create an image
- ec2_snapshot:
- instance_id: '{{ setup_instance.instance_ids[0] }}'
- device_name: /dev/xvda
- state: present
- <<: *aws_connection_info
- register: setup_snapshot
-
- # ============================================================
-
- - name: test clean failure if not providing image_id or name with state=present
- ec2_ami:
- instance_id: '{{ setup_instance.instance_ids[0] }}'
- state: present
- description: '{{ ec2_ami_description }}'
- tags:
- Name: '{{ ec2_ami_name }}_ami'
- wait: yes
- root_device_name: /dev/xvda
- <<: *aws_connection_info
- register: result
- ignore_errors: yes
-
- - name: assert error message is helpful
- assert:
- that:
- - result.failed
- - "result.msg == 'one of the following is required: name, image_id'"
-
- # ============================================================
-
- - name: create an image from the instance
- ec2_ami:
- instance_id: '{{ setup_instance.instance_ids[0] }}'
- state: present
- name: '{{ ec2_ami_name }}_ami'
- description: '{{ ec2_ami_description }}'
- tags:
- Name: '{{ ec2_ami_name }}_ami'
- wait: yes
- root_device_name: /dev/xvda
- <<: *aws_connection_info
- register: result
-
- - name: set image id fact for deletion later
- set_fact:
- ec2_ami_image_id: "{{ result.image_id }}"
-
- - name: assert that image has been created
- assert:
- that:
- - "result.changed"
- - "result.image_id.startswith('ami-')"
- - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'"
-
- # ============================================================
-
- - name: gather facts about the image created
- ec2_ami_info:
- image_ids: '{{ ec2_ami_image_id }}'
- <<: *aws_connection_info
- register: ami_facts_result
- ignore_errors: true
-
- - name: assert that the right image was found
- assert:
- that:
- - "ami_facts_result.images[0].image_id == ec2_ami_image_id"
-
- # ============================================================
-
- - name: delete the image
- ec2_ami:
- instance_id: '{{ setup_instance.instance_ids[0] }}'
- state: absent
- delete_snapshot: yes
- name: '{{ ec2_ami_name }}_ami'
- description: '{{ ec2_ami_description }}'
- image_id: '{{ result.image_id }}'
- tags:
- Name: '{{ ec2_ami_name }}_ami'
- wait: yes
- <<: *aws_connection_info
- ignore_errors: true
- register: result
-
- - name: assert that the image has been deleted
- assert:
- that:
- - "result.changed"
- - "'image_id' not in result"
- - "result.snapshots_deleted"
-
- # ============================================================
-
- - name: test removing an ami if no image ID is provided (expected failed=true)
- ec2_ami:
- state: absent
- <<: *aws_connection_info
- register: result
- ignore_errors: yes
-
- - name: assert that an image ID is required
- assert:
- that:
- - "result.failed"
- - "result.msg == 'state is absent but all of the following are missing: image_id'"
-
- # ============================================================
-
- - name: create an image from the snapshot
- ec2_ami:
- name: '{{ ec2_ami_name }}_ami'
- description: '{{ ec2_ami_description }}'
- state: present
- launch_permissions:
- user_ids: []
- tags:
- Name: '{{ ec2_ami_name }}_ami'
- root_device_name: /dev/xvda
- device_mapping:
- - device_name: /dev/xvda
- volume_type: gp2
- size: 8
- delete_on_termination: true
- snapshot_id: '{{ setup_snapshot.snapshot_id }}'
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: set image id fact for deletion later
- set_fact:
- ec2_ami_image_id: "{{ result.image_id }}"
- ec2_ami_snapshot: "{{ result.block_device_mapping['/dev/xvda'].snapshot_id }}"
-
- - name: assert a new ami has been created
- assert:
- that:
- - "result.changed"
- - "result.image_id.startswith('ami-')"
-
- # ============================================================
-
- - name: test default launch permissions idempotence
- ec2_ami:
- description: '{{ ec2_ami_description }}'
- state: present
- name: '{{ ec2_ami_name }}_ami'
- tags:
- Name: '{{ ec2_ami_name }}_ami'
- root_device_name: /dev/xvda
- image_id: '{{ result.image_id }}'
- launch_permissions:
- user_ids: []
- device_mapping:
- - device_name: /dev/xvda
- volume_type: gp2
- size: 8
- delete_on_termination: true
- snapshot_id: '{{ setup_snapshot.snapshot_id }}'
- <<: *aws_connection_info
- register: result
-
- - name: assert a new ami has not been created
- assert:
- that:
- - "not result.changed"
- - "result.image_id.startswith('ami-')"
-
- # ============================================================
-
- - name: add a tag to the AMI
- ec2_ami:
- state: present
- description: '{{ ec2_ami_description }}'
- image_id: '{{ result.image_id }}'
- name: '{{ ec2_ami_name }}_ami'
- tags:
- New: Tag
- <<: *aws_connection_info
- register: result
-
- - name: assert a tag was added
- assert:
- that:
- - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'"
- - "'New' in result.tags and result.tags.New == 'Tag'"
-
- - name: use purge_tags to remove a tag from the AMI
- ec2_ami:
- state: present
- description: '{{ ec2_ami_description }}'
- image_id: '{{ result.image_id }}'
- name: '{{ ec2_ami_name }}_ami'
- tags:
- New: Tag
- purge_tags: yes
- <<: *aws_connection_info
- register: result
-
- - name: assert a tag was removed
- assert:
- that:
- - "'Name' not in result.tags"
- - "'New' in result.tags and result.tags.New == 'Tag'"
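-
-  # With purge_tags: yes, any tag not listed under 'tags' is removed - hence
-  # 'Name' disappears above while 'New' survives.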
-
- # ============================================================
-
- - name: update AMI launch permissions
- ec2_ami:
- state: present
- image_id: '{{ result.image_id }}'
- description: '{{ ec2_ami_description }}'
- tags:
- Name: '{{ ec2_ami_name }}_ami'
- launch_permissions:
- group_names: ['all']
- <<: *aws_connection_info
- register: result
-
- - name: assert launch permissions were updated
- assert:
- that:
- - "result.changed"
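-
-  # The assertion above only checks 'changed'; reading the attribute back
-  # would be stronger. A sketch, assuming ec2_ami_info supports the
-  # describe_image_attributes option (which adds launch_permissions to each
-  # returned image):
-  #- name: fetch launch permissions (sketch, not run)
-  #  ec2_ami_info:
-  #    image_ids: '{{ result.image_id }}'
-  #    describe_image_attributes: yes
-  #    <<: *aws_connection_info
-  #  register: ami_perms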
-
- # ============================================================
-
- - name: modify the AMI description
- ec2_ami:
- state: present
- image_id: '{{ result.image_id }}'
- name: '{{ ec2_ami_name }}_ami'
- description: '{{ ec2_ami_description }}CHANGED'
- tags:
- Name: '{{ ec2_ami_name }}_ami'
- launch_permissions:
- group_names: ['all']
- <<: *aws_connection_info
- register: result
-
- - name: assert the description changed
- assert:
- that:
- - "result.changed"
-
- # ============================================================
-
- - name: remove public launch permissions
- ec2_ami:
- state: present
- image_id: '{{ result.image_id }}'
- name: '{{ ec2_ami_name }}_ami'
- tags:
- Name: '{{ ec2_ami_name }}_ami'
- launch_permissions:
- group_names: []
- <<: *aws_connection_info
- register: result
-
- - name: assert launch permissions were updated
- assert:
- that:
- - "result.changed"
-
- # ============================================================
-
- - name: delete ami without deleting the snapshot (default is not to delete)
- ec2_ami:
- instance_id: '{{ setup_instance.instance_ids[0] }}'
- state: absent
- name: '{{ ec2_ami_name }}_ami'
- image_id: '{{ ec2_ami_image_id }}'
- tags:
- Name: '{{ ec2_ami_name }}_ami'
- wait: yes
- <<: *aws_connection_info
- ignore_errors: true
- register: result
-
- - name: assert that the image has been deleted
- assert:
- that:
- - "result.changed"
- - "'image_id' not in result"
-
- - name: ensure the snapshot still exists
- ec2_snapshot_info:
- snapshot_ids:
- - '{{ ec2_ami_snapshot }}'
- <<: *aws_connection_info
- register: snapshot_result
-
- - name: assert the snapshot wasn't deleted
- assert:
- that:
- - "snapshot_result.snapshots[0].snapshot_id == ec2_ami_snapshot"
-
- - name: delete ami for a second time
- ec2_ami:
- instance_id: '{{ setup_instance.instance_ids[0] }}'
- state: absent
- name: '{{ ec2_ami_name }}_ami'
- image_id: '{{ ec2_ami_image_id }}'
- tags:
- Name: '{{ ec2_ami_name }}_ami'
- wait: yes
- <<: *aws_connection_info
- register: result
-
- - name: assert that image does not exist
- assert:
- that:
- - not result.changed
- - not result.failed
-
-
- # ============================================================
-
- always:
-
- # ============================================================
-
- # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc
- - name: Announce teardown start
- debug:
- msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"
-
- - name: delete ami
- ec2_ami:
- state: absent
- image_id: "{{ ec2_ami_image_id }}"
- name: '{{ ec2_ami_name }}_ami'
- wait: yes
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove setup snapshot of ec2 instance
- ec2_snapshot:
- state: absent
- snapshot_id: '{{ setup_snapshot.snapshot_id }}'
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- '{{ec2_ami_name}}_instance_setup': 'integration_tests'
- group_id: '{{ setup_sg.group_id }}'
- vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove setup keypair
- ec2_key:
- name: '{{ec2_ami_name}}_setup'
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove setup security group
- ec2_group:
- name: '{{ ec2_ami_name }}_setup'
- description: 'created by Ansible integration tests'
- state: absent
- vpc_id: '{{ setup_vpc.vpc.id }}'
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove setup subnet
- ec2_vpc_subnet:
- az: '{{ ec2_region }}a'
- vpc_id: '{{ setup_vpc.vpc.id }}'
- cidr: 10.0.0.0/24
- state: absent
- resource_tags:
- Name: '{{ ec2_ami_name }}_setup'
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove setup VPC
- ec2_vpc_net:
- cidr_block: 10.0.0.0/24
- state: absent
- name: '{{ ec2_ami_name }}_setup'
- resource_tags:
- Name: '{{ ec2_ami_name }}_setup'
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_ami/vars/main.yml b/test/integration/targets/ec2_ami/vars/main.yml
deleted file mode 100644
index dac1fda2e9..0000000000
--- a/test/integration/targets/ec2_ami/vars/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-# vars file for test_ec2_ami
-
-# based on Amazon Linux AMI 2017.09.0 (HVM), SSD Volume Type
-ec2_region_images:
- us-east-1: ami-8c1be5f6
- us-east-2: ami-c5062ba0
- us-west-1: ami-02eada62
- us-west-2: ami-e689729e
- ca-central-1: ami-fd55ec99
- eu-west-1: ami-acd005d5
- eu-central-1: ami-c7ee5ca8
- eu-west-2: ami-1a7f6d7e
- ap-southeast-1: ami-0797ea64
- ap-southeast-2: ami-8536d6e7
- ap-northeast-2: ami-9bec36f5
- ap-northeast-1: ami-2a69be4c
- ap-south-1: ami-4fc58420
- sa-east-1: ami-f1344b9d
- cn-north-1: ami-fba67596
diff --git a/test/integration/targets/ec2_elb_lb/aliases b/test/integration/targets/ec2_elb_lb/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/ec2_elb_lb/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/ec2_elb_lb/defaults/main.yml b/test/integration/targets/ec2_elb_lb/defaults/main.yml
deleted file mode 100644
index 76164523d4..0000000000
--- a/test/integration/targets/ec2_elb_lb/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# defaults file for test_ec2_elb_lb
-tag_prefix: '{{resource_prefix}}'
diff --git a/test/integration/targets/ec2_elb_lb/meta/main.yml b/test/integration/targets/ec2_elb_lb/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_elb_lb/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_elb_lb/tasks/main.yml b/test/integration/targets/ec2_elb_lb/tasks/main.yml
deleted file mode 100644
index 4f25493023..0000000000
--- a/test/integration/targets/ec2_elb_lb/tasks/main.yml
+++ /dev/null
@@ -1,425 +0,0 @@
----
-# __Test Info__
-# Create a self-signed cert and upload it to AWS
-# http://www.akadia.com/services/ssh_test_certificate.html
-# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/ssl-server-cert.html
-
-# __Test Outline__
-#
-# __ec2_elb_lb__
-# create test elb with listeners and certificate
-# change AZs
-# change listeners
-# remove listeners
-# remove elb
-
-# __ec2-common__
-# test environment variable EC2_REGION
-# test with no parameters
-# test with only instance_id
-# test invalid region parameter
-# test valid region parameter
-# test invalid ec2_url parameter
-# test valid ec2_url parameter
-# test credentials from environment
-# test credential parameters
-
-- block:
-
- # ============================================================
- # create test elb with listeners, certificate, and health check
-
- - name: Create ELB
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- - protocol: http
- load_balancer_port: 8080
- instance_port: 8080
- health_check:
- ping_protocol: http
- ping_port: 80
- ping_path: "/index.html"
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 10
- register: info
-
- - assert:
- that:
- - 'info.changed'
- - 'info.elb.status == "created"'
- - '"{{ ec2_region }}a" in info.elb.zones'
- - '"{{ ec2_region }}b" in info.elb.zones'
- - 'info.elb.health_check.healthy_threshold == 10'
- - 'info.elb.health_check.interval == 30'
- - 'info.elb.health_check.target == "HTTP:80/index.html"'
- - 'info.elb.health_check.timeout == 5'
- - 'info.elb.health_check.unhealthy_threshold == 2'
- - '[80, 80, "HTTP", "HTTP"] in info.elb.listeners'
- - '[8080, 8080, "HTTP", "HTTP"] in info.elb.listeners'
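-
-  # For reference: the health_check parameters collapse into a single ELB
-  # "target" string (ping_protocol:ping_port/ping_path, hence
-  # "HTTP:80/index.html"), and each returned listener is the 4-tuple
-  # [load_balancer_port, instance_port, lb_protocol, instance_protocol].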
-
- # ============================================================
-
-  # Checking that these ports actually answer would be ideal, but we are at
-  # the mercy of AWS to start things in a timely manner.
-
- #- name: check to make sure 80 is listening
- # wait_for: host={{ info.elb.dns_name }} port=80 timeout=600
- # register: result
-
- #- name: assert can connect to port#
- # assert: 'result.state == "started"'
-
- #- name: check to make sure 443 is listening
- # wait_for: host={{ info.elb.dns_name }} port=443 timeout=600
- # register: result
-
- #- name: assert can connect to port#
- # assert: 'result.state == "started"'
-
- # ============================================================
-
-  # Change AZs
-
-  - name: Change AZs
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- state: present
- zones:
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- purge_zones: yes
- health_check:
- ping_protocol: http
- ping_port: 80
- ping_path: "/index.html"
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 10
- register: info
-
-
-
- - assert:
- that:
- - 'info.elb.status == "ok"'
- - 'info.changed'
- - 'info.elb.zones[0] == "{{ ec2_region }}c"'
-
- # ============================================================
-
-  # Update AZs
-
-  - name: Update AZs
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- purge_zones: yes
- register: info
-
- - assert:
- that:
- - 'info.changed'
- - 'info.elb.status == "ok"'
- - '"{{ ec2_region }}a" in info.elb.zones'
- - '"{{ ec2_region }}b" in info.elb.zones'
- - '"{{ ec2_region }}c" in info.elb.zones'
-
-
- # ============================================================
-
- # Purge Listeners
-
- - name: Purge Listeners
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 81
- purge_listeners: yes
- register: info
-
- - assert:
- that:
- - 'info.elb.status == "ok"'
- - 'info.changed'
- - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
- - 'info.elb.listeners|length == 1'
-
-
-
- # ============================================================
-
- # add Listeners
-
- - name: Add Listeners
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 8081
- instance_port: 8081
- purge_listeners: no
- register: info
-
- - assert:
- that:
- - 'info.elb.status == "ok"'
- - 'info.changed'
- - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
- - '[8081, 8081, "HTTP", "HTTP"] in info.elb.listeners'
- - 'info.elb.listeners|length == 2'
-
-
- # ============================================================
-
- - name: test with no parameters
- ec2_elb_lb:
- register: result
- ignore_errors: true
-
- - name: assert failure when called with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("missing required arguments: ")'
-
-
-
- # ============================================================
- - name: test with only name
- ec2_elb_lb:
-      name: "{{ tag_prefix }}"
- register: result
- ignore_errors: true
-
- - name: assert failure when called with only name
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "missing required arguments: state"'
-
-
- # ============================================================
- - name: test invalid region parameter
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: 'asdf querty 1234'
- state: present
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- register: result
- ignore_errors: true
-
- - name: assert invalid region parameter
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("Region asdf querty 1234 does not seem to be available ")'
-
-
- # ============================================================
- - name: test valid region parameter
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
-
- register: result
- ignore_errors: true
-
- - name: assert valid region parameter
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("No handler was ready to authenticate.")'
-
-
- # ============================================================
-
- - name: test invalid ec2_url parameter
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- environment:
- EC2_URL: bogus.example.com
- register: result
- ignore_errors: true
-
- - name: assert invalid ec2_url parameter
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("No handler was ready to authenticate.")'
-
-
- # ============================================================
- - name: test valid ec2_url parameter
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- environment:
- EC2_URL: '{{ec2_url}}'
- register: result
- ignore_errors: true
-
- - name: assert valid ec2_url parameter
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("No handler was ready to authenticate.")'
-
-
- # ============================================================
- - name: test credentials from environment
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- environment:
- EC2_ACCESS_KEY: bogus_access_key
- EC2_SECRET_KEY: bogus_secret_key
- register: result
- ignore_errors: true
-
- - name: assert credentials from environment
- assert:
- that:
- - 'result.failed'
- - '"InvalidClientTokenId" in result.exception'
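-
-  # Note the contrast with the earlier credential cases: bogus keys reach the
-  # AWS API and are rejected as InvalidClientTokenId, whereas missing keys
-  # fail locally in boto with "No handler was ready to authenticate".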
-
-
- # ============================================================
- - name: test credential parameters
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- register: result
- ignore_errors: true
-
- - name: assert credential parameters
- assert:
- that:
- - 'result.failed'
- - '"No handler was ready to authenticate. 1 handlers were checked." in result.msg'
-
- always:
-
- # ============================================================
- - name: remove the test load balancer completely
- ec2_elb_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: absent
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- register: result
-
- - name: assert the load balancer was removed
- assert:
- that:
- - 'result.changed'
- - 'result.elb.name == "{{tag_prefix}}"'
- - 'result.elb.status == "deleted"'
diff --git a/test/integration/targets/ec2_elb_lb/vars/main.yml b/test/integration/targets/ec2_elb_lb/vars/main.yml
deleted file mode 100644
index 79194af1ef..0000000000
--- a/test/integration/targets/ec2_elb_lb/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for test_ec2_elb_lb
diff --git a/test/integration/targets/ec2_group/aliases b/test/integration/targets/ec2_group/aliases
deleted file mode 100644
index 5e7a8d3877..0000000000
--- a/test/integration/targets/ec2_group/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-shippable/aws/group2
-unstable
diff --git a/test/integration/targets/ec2_group/defaults/main.yml b/test/integration/targets/ec2_group/defaults/main.yml
deleted file mode 100644
index a48051eac7..0000000000
--- a/test/integration/targets/ec2_group/defaults/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# defaults file for test_ec2_group
-ec2_group_name: '{{resource_prefix}}'
-ec2_group_description: 'Created by ansible integration tests'
diff --git a/test/integration/targets/ec2_group/meta/main.yml b/test/integration/targets/ec2_group/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_group/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_group/tasks/credential_tests.yml b/test/integration/targets/ec2_group/tasks/credential_tests.yml
deleted file mode 100644
index 1957eaae18..0000000000
--- a/test/integration/targets/ec2_group/tasks/credential_tests.yml
+++ /dev/null
@@ -1,161 +0,0 @@
----
-# A Note about ec2 environment variable name preference:
-# - EC2_URL -> AWS_URL
-# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
-# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
-# - EC2_REGION -> AWS_REGION
-#
-
-# - include: ../../setup_ec2/tasks/common.yml module_name: ec2_group
-
-- block:
- # ============================================================
- - name: test failure with no parameters
- ec2_group:
- register: result
- ignore_errors: true
-
- - name: assert failure with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "one of the following is required: name, group_id"'
-
- # ============================================================
- - name: test failure with only name
- ec2_group:
- name: '{{ec2_group_name}}'
- register: result
- ignore_errors: true
-
- - name: assert failure with only name
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "Must provide description when state is present."'
-
- # ============================================================
- - name: test failure with only description
- ec2_group:
- description: '{{ec2_group_description}}'
- register: result
- ignore_errors: true
-
- - name: assert failure with only description
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "one of the following is required: name, group_id"'
-
- # ============================================================
- - name: test failure with empty description (AWS API requires non-empty string desc)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: ''
- region: '{{ec2_region}}'
- register: result
- ignore_errors: true
-
- - name: assert failure with empty description
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "Must provide description when state is present."'
-
- # ============================================================
- - name: test valid region parameter
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- region: '{{ec2_region}}'
- register: result
- ignore_errors: true
-
- - name: assert valid region parameter
- assert:
- that:
- - 'result.failed'
- - '"Unable to locate credentials" in result.msg'
-
- # ============================================================
- - name: test environment variable EC2_REGION
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- environment:
- EC2_REGION: '{{ec2_region}}'
- register: result
- ignore_errors: true
-
- - name: assert environment variable EC2_REGION
- assert:
- that:
- - 'result.failed'
- - '"Unable to locate credentials" in result.msg'
-
- # ============================================================
- - name: test invalid ec2_url parameter
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- environment:
- EC2_URL: bogus.example.com
- register: result
- ignore_errors: true
-
- - name: assert invalid ec2_url parameter
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("The ec2_group module requires a region")'
-
- # ============================================================
- - name: test valid ec2_url parameter
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- environment:
- EC2_URL: '{{ec2_url}}'
- register: result
- ignore_errors: true
-
- - name: assert valid ec2_url parameter
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("The ec2_group module requires a region")'
-
- # ============================================================
- - name: test credentials from environment
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- environment:
- EC2_REGION: '{{ec2_region}}'
- EC2_ACCESS_KEY: bogus_access_key
- EC2_SECRET_KEY: bogus_secret_key
- register: result
- ignore_errors: true
-
-  - name: assert credentials from environment
- assert:
- that:
- - 'result.failed'
- - '"validate the provided access credentials" in result.msg'
-
- # ============================================================
- - name: test credential parameters
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- ec2_region: '{{ec2_region}}'
- ec2_access_key: 'bogus_access_key'
- ec2_secret_key: 'bogus_secret_key'
- register: result
- ignore_errors: true
-
- - name: assert credential parameters
- assert:
- that:
- - 'result.failed'
- - '"validate the provided access credentials" in result.msg'
diff --git a/test/integration/targets/ec2_group/tasks/data_validation.yml b/test/integration/targets/ec2_group/tasks/data_validation.yml
deleted file mode 100644
index 9c37e64713..0000000000
--- a/test/integration/targets/ec2_group/tasks/data_validation.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
- - name: Create a group with only the default rule
- ec2_group:
- name: '{{ec2_group_name}}-input-tests'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
-
- - name: Run through some common weird port specs
- ec2_group:
- name: '{{ec2_group_name}}-input-tests'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- rules:
- - "{{ item }}"
- with_items:
- - proto: tcp
- from_port: "8182"
- to_port: 8182
- cidr_ipv6: "64:ff9b::/96"
- rule_desc: Mixed string and non-string ports
- - proto: tcp
- ports:
- - "9000"
- - 9001
- - 9002-9005
- cidr_ip: "1.2.3.0/24"
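-
-  # Both loop items exercise ec2_group's input normalisation: string ports
-  # ("8182", "9000") are cast to integers and "9002-9005" is expanded into a
-  # from_port/to_port range, so all of the mixed specs above are accepted.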
- always:
- - name: tidy up input testing group
- ec2_group:
- name: '{{ec2_group_name}}-input-tests'
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_group/tasks/diff_mode.yml b/test/integration/targets/ec2_group/tasks/diff_mode.yml
deleted file mode 100644
index c4bf13bc14..0000000000
--- a/test/integration/targets/ec2_group/tasks/diff_mode.yml
+++ /dev/null
@@ -1,184 +0,0 @@
----
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- # ============================================================
-
- - name: create a group with a rule (CHECK MODE + DIFF)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- rules_egress:
- - proto: all
- cidr_ip: 0.0.0.0/0
- <<: *aws_connection_info
- register: check_mode_result
- check_mode: true
- diff: true
-
- - assert:
- that:
- - check_mode_result.changed
-
- - name: create a group with a rule (DIFF)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- rules_egress:
- - proto: all
- cidr_ip: 0.0.0.0/0
- <<: *aws_connection_info
- register: result
- diff: true
-
- - assert:
- that:
- - result.changed
- - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
- - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
-
- - name: add rules to make sorting occur (CHECK MODE + DIFF)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 20.0.0.0/8
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 10.0.0.0/8
- rules_egress:
- - proto: all
- cidr_ip: 0.0.0.0/0
- <<: *aws_connection_info
- register: check_mode_result
- check_mode: true
- diff: true
-
- - assert:
- that:
- - check_mode_result.changed
-
- - name: add rules in a different order to test sorting consistency (DIFF)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 20.0.0.0/8
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 10.0.0.0/8
- rules_egress:
- - proto: all
- cidr_ip: 0.0.0.0/0
- <<: *aws_connection_info
- register: result
- diff: true
-
- - assert:
- that:
- - result.changed
- - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
- - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
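-
-  # The two tasks above submit the same three rules in different orders;
-  # because the module sorts rule lists into a canonical order before
-  # diffing, both runs must report identical permissions in the diff.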
-
- - name: purge rules (CHECK MODE + DIFF)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- rules_egress: []
- <<: *aws_connection_info
- register: check_mode_result
- check_mode: true
- diff: true
-
- - assert:
- that:
- - check_mode_result.changed
-
- - name: purge rules (DIFF)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- rules_egress: []
- <<: *aws_connection_info
- register: result
- diff: true
-
- - assert:
- that:
- - result.changed
- - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
- - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
-
- - name: delete the security group (CHECK MODE + DIFF)
- ec2_group:
- name: '{{ ec2_group_name }}'
- state: absent
- <<: *aws_connection_info
- register: check_mode_result
- diff: true
- check_mode: true
-
- - assert:
- that:
- - check_mode_result.changed
-
- - name: delete the security group (DIFF)
- ec2_group:
- name: '{{ ec2_group_name }}'
- state: absent
- <<: *aws_connection_info
- register: result
- diff: true
-
- - assert:
- that:
- - result.changed
- - not result.diff.0.after and not check_mode_result.diff.0.after
diff --git a/test/integration/targets/ec2_group/tasks/ec2_classic.yml b/test/integration/targets/ec2_group/tasks/ec2_classic.yml
deleted file mode 100644
index 9019af95d4..0000000000
--- a/test/integration/targets/ec2_group/tasks/ec2_classic.yml
+++ /dev/null
@@ -1,88 +0,0 @@
-- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- block:
- - name: Get available AZs
- aws_az_facts:
- aws_access_key: "{{ aws_connection_info['aws_access_key'] }}"
- aws_secret_key: "{{ aws_connection_info['aws_secret_key'] }}"
- filters:
- region-name: "{{ aws_connection_info['region'] }}"
- register: az_facts
-
- - name: Create a classic ELB with classic networking
- ec2_elb_lb:
- name: "{{ resource_prefix }}-elb"
- state: present
- zones:
- - "{{ az_facts['availability_zones'][0]['zone_name'] }}"
- - "{{ az_facts['availability_zones'][1]['zone_name'] }}"
- listeners:
- - protocol: http # options are http, https, ssl, tcp
- load_balancer_port: 80
- instance_port: 80
- proxy_protocol: True
- register: classic_elb
-
- - name: Assert the elb was created
- assert:
- that:
- - classic_elb.changed
-
- - name: Create a security group with a classic elb-sg rule
- ec2_group:
- name: "{{ resource_prefix }}-sg-a"
- description: "EC2 classic test security group"
- rules:
- - proto: tcp
- ports: 80
- group_id: amazon-elb/amazon-elb-sg
- state: present
- register: classic_sg
-
- - name: Assert the SG was created
- assert:
- that:
- - classic_sg.changed
-        - classic_sg.ip_permissions | length == 1
-
- - set_fact:
- elb_sg_id: "{{ classic_sg.ip_permissions[0].user_id_group_pairs[0].user_id }}/{{ classic_sg.ip_permissions[0].user_id_group_pairs[0].group_id }}/{{ classic_sg.ip_permissions[0].user_id_group_pairs[0].group_name }}"
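-
-  # EC2-Classic security group rules reference foreign groups as the triple
-  # "<owner id>/<group id>/<group name>"; the set_fact above rebuilds that
-  # triple from the rule created by the previous task so it can be reused.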
-
- - name: Update the security group
- ec2_group:
- name: "{{ resource_prefix }}-sg-a"
- description: "EC2 classic test security group"
- rules:
- - proto: tcp
- ports: 8080
- group_id: "{{ elb_sg_id }}"
- - proto: tcp
- ports:
- - 80
- cidr_ip: 0.0.0.0/0
- state: present
- register: updated_classic_sg
-
-
- - name: Assert the SG was updated
- assert:
- that:
- - updated_classic_sg.changed
-        - updated_classic_sg.ip_permissions | length == 2
-        - classic_sg.ip_permissions[0] not in updated_classic_sg.ip_permissions
-
- # ===========================================
- always:
- - name: Terminate classic ELB
- ec2_elb_lb:
-      name: "{{ resource_prefix }}-elb"
- state: absent
-
- - name: Delete security group
- ec2_group:
- name: "{{ resource_prefix }}-sg-a"
- state: absent
diff --git a/test/integration/targets/ec2_group/tasks/egress_tests.yml b/test/integration/targets/ec2_group/tasks/egress_tests.yml
deleted file mode 100644
index aafb16ec80..0000000000
--- a/test/integration/targets/ec2_group/tasks/egress_tests.yml
+++ /dev/null
@@ -1,198 +0,0 @@
----
-- block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
-
- - name: Create a group with only the default rule
- ec2_group:
- name: '{{ec2_group_name}}-egress-tests'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- register: result
-
- - name: assert default rule is in place (expected changed=true)
- assert:
- that:
- - result is changed
- - result.ip_permissions|length == 0
- - result.ip_permissions_egress|length == 1
- - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
-
- - name: Create a group with only the default rule
- ec2_group:
- name: '{{ec2_group_name}}-egress-tests'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ec2_group_description}}'
- purge_rules_egress: false
- <<: *aws_connection_info
- state: present
- register: result
-
- - name: assert default rule is not purged (expected changed=false)
- assert:
- that:
- - result is not changed
- - result.ip_permissions|length == 0
- - result.ip_permissions_egress|length == 1
- - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
-
- - name: Pass empty egress rules without purging, should leave default rule in place
- ec2_group:
- name: '{{ec2_group_name}}-egress-tests'
- description: '{{ec2_group_description}}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- purge_rules_egress: false
- rules_egress: []
- <<: *aws_connection_info
- state: present
- register: result
-
- - name: assert default rule is not purged (expected changed=false)
- assert:
- that:
- - result is not changed
- - result.ip_permissions|length == 0
- - result.ip_permissions_egress|length == 1
- - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
-
- - name: Purge rules, including the default
- ec2_group:
- name: '{{ec2_group_name}}-egress-tests'
- description: '{{ec2_group_description}}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- purge_rules_egress: true
- rules_egress: []
- <<: *aws_connection_info
- state: present
- register: result
-
-  - name: assert all egress rules were purged (expected changed=true)
- assert:
- that:
- - result is changed
- - result.ip_permissions|length == 0
- - result.ip_permissions_egress|length == 0
-
- - name: Add a custom egress rule
- ec2_group:
- name: '{{ec2_group_name}}-egress-tests'
- description: '{{ec2_group_description}}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- rules_egress:
- - proto: tcp
- ports:
- - 1212
- cidr_ip: 1.2.1.2/32
- <<: *aws_connection_info
- state: present
- register: result
-
- - name: assert first rule is here
- assert:
- that:
- - result.ip_permissions_egress|length == 1
-
- - name: Add a second custom egress rule
- ec2_group:
- name: '{{ec2_group_name}}-egress-tests'
- description: '{{ec2_group_description}}'
- purge_rules_egress: false
- vpc_id: '{{ vpc_result.vpc.id }}'
- rules_egress:
- - proto: tcp
- ports:
- - 2323
- cidr_ip: 2.3.2.3/32
- <<: *aws_connection_info
- state: present
- register: result
-
- - name: assert the first rule is not purged
- assert:
- that:
- - result.ip_permissions_egress|length == 2
-
- - name: Purge the second rule (CHECK MODE) (DIFF MODE)
- ec2_group:
- name: '{{ec2_group_name}}-egress-tests'
- description: '{{ec2_group_description}}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- rules_egress:
- - proto: tcp
- ports:
- - 1212
- cidr_ip: 1.2.1.2/32
- <<: *aws_connection_info
- state: present
- register: result
- check_mode: True
- diff: True
-
- - name: assert first rule will be left
- assert:
- that:
- - result.changed
- - result.diff.0.after.ip_permissions_egress|length == 1
- - result.diff.0.after.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '1.2.1.2/32'
-
- - name: Purge the second rule
- ec2_group:
- name: '{{ec2_group_name}}-egress-tests'
- description: '{{ec2_group_description}}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- rules_egress:
- - proto: tcp
- ports:
- - 1212
- cidr_ip: 1.2.1.2/32
- <<: *aws_connection_info
- state: present
- register: result
-
- - name: assert first rule is here
- assert:
- that:
- - result.ip_permissions_egress|length == 1
- - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '1.2.1.2/32'
-
- - name: add a rule for all TCP ports
- ec2_group:
- name: '{{ec2_group_name}}-egress-tests'
- description: '{{ec2_group_description}}'
- rules_egress:
- - proto: tcp
- ports: 0-65535
- cidr_ip: 0.0.0.0/0
- <<: *aws_connection_info
- state: present
- vpc_id: '{{ vpc_result.vpc.id }}'
- register: result
-
- - name: Re-add the default rule
- ec2_group:
- name: '{{ec2_group_name}}-egress-tests'
- description: '{{ec2_group_description}}'
- rules_egress:
- - proto: -1
- cidr_ip: 0.0.0.0/0
- <<: *aws_connection_info
- state: present
- vpc_id: '{{ vpc_result.vpc.id }}'
- register: result
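-
-  # proto: -1 is the AWS wildcard for "all protocols"; re-adding it restores
-  # the default egress rule (allow all traffic to 0.0.0.0/0) that the earlier
-  # purge removed, returning the group to its out-of-the-box state.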
- always:
- - name: tidy up egress rule test security group
- ec2_group:
- name: '{{ec2_group_name}}-egress-tests'
- state: absent
- vpc_id: '{{ vpc_result.vpc.id }}'
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_group/tasks/ipv6_default_tests.yml b/test/integration/targets/ec2_group/tasks/ipv6_default_tests.yml
deleted file mode 100644
index eadaf30f6b..0000000000
--- a/test/integration/targets/ec2_group/tasks/ipv6_default_tests.yml
+++ /dev/null
@@ -1,103 +0,0 @@
----
-- name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-# ============================================================
-- name: test state=present for ipv6 (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6: "64:ff9b::/96"
- check_mode: true
- register: result
-
-- name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
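-
-# (64:ff9b::/96 is the NAT64 well-known prefix from RFC 6052; it is used here
-# simply as a stable, valid IPv6 CIDR for exercising the ipv6 rule options.)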
-# ============================================================
-- name: test state=present for ipv6 (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6: "64:ff9b::/96"
- register: result
-
-- name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
-
-# ============================================================
-- name: test rules_egress state=present for ipv6 (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6: "64:ff9b::/96"
- rules_egress:
- - proto: "tcp"
- from_port: 8181
- to_port: 8181
- cidr_ipv6: "64:ff9b::/96"
- check_mode: true
- register: result
-
-- name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
-# ============================================================
-- name: test rules_egress state=present for ipv6 (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6: "64:ff9b::/96"
- rules_egress:
- - proto: "tcp"
- from_port: 8181
- to_port: 8181
- cidr_ipv6: "64:ff9b::/96"
- register: result
-
-- name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
-- name: delete it
- ec2_group:
- name: '{{ec2_group_name}}'
- <<: *aws_connection_info
- state: absent
diff --git a/test/integration/targets/ec2_group/tasks/main.yml b/test/integration/targets/ec2_group/tasks/main.yml
deleted file mode 100644
index 9b558656cd..0000000000
--- a/test/integration/targets/ec2_group/tasks/main.yml
+++ /dev/null
@@ -1,1536 +0,0 @@
----
-# A Note about ec2 environment variable name preference:
-# - EC2_URL -> AWS_URL
-# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
-# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
-# - EC2_REGION -> AWS_REGION
-#
-
-# - include: ../../setup_ec2/tasks/common.yml module_name: ec2_group
-
-- include: ./credential_tests.yml
-# ============================================================
-# EC2 Classic tests can only be run on a pre-2013 AWS account with supported-platforms=EC2
-# Ansible CI does NOT have classic EC2 support; these tests are provided as-is for the
-# community and can be run if you have access to a classic account. To check if your account
-# has support for EC2 Classic you can use the `aws_account_attribute` plugin.
-
-- name: determine if this is an EC2 Classic account
- set_fact:
- has_ec2_classic: "{{ lookup('aws_account_attribute',
- attribute='has-ec2-classic',
- region=aws_region,
- aws_access_key=aws_access_key,
- aws_secret_key=aws_secret_key,
- aws_security_token=security_token,
- wantlist=True) }}"
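-# (wantlist=True above makes the lookup return a list rather than a
-# comma-joined string)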
-# ============================================================
-
-- name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
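-# The &aws_connection_info anchor lets later tasks merge these keys in with
-# "<<: *aws_connection_info" instead of repeating the credentials each time.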
-
-# ============================================================
-- name: Run EC2 Classic tests if the account type is EC2 Classic
- include: ./ec2_classic.yml
- when: has_ec2_classic
-
-# ============================================================
-# Other tests depend on attribute='default-vpc', i.e. no vpc_id is set. This is
-# incompatible with EC2 classic accounts, so these tests can only be run in a
-# VPC-type account. See "Q. I really want a default VPC for my existing EC2
-# account. Is that possible?" in https://aws.amazon.com/vpc/faqs/#Default_VPCs
-- name: Run all other tests if account type is VPC
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- block:
- - name: determine if there is a default VPC
- set_fact:
- defaultvpc: "{{ lookup('aws_account_attribute',
- attribute='default-vpc',
- region=aws_region,
- aws_access_key=aws_access_key,
- aws_secret_key=aws_secret_key,
- aws_security_token=security_token) }}"
- register: default_vpc
-
- - name: create a VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: present
- cidr_block: "10.232.232.128/26"
- <<: *aws_connection_info
- tags:
- Name: "{{ resource_prefix }}-vpc"
- Description: "Created by ansible-test"
- register: vpc_result
- #TODO(ryansb): Update CI for VPC peering permissions
- #- include: ./multi_account.yml
- - include: ./diff_mode.yml
- - include: ./numeric_protos.yml
- - include: ./rule_group_create.yml
- - include: ./egress_tests.yml
- - include: ./data_validation.yml
- - include: ./multi_nested_target.yml
-
- # ============================================================
- - name: test state=absent (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: absent
- check_mode: true
- register: result
-
- - name: assert no changes would be made
- assert:
- that:
- - not result.changed
-
- # ===========================================================
- - name: test state=absent
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: absent
- register: result
-
- # ============================================================
- - name: test state=present (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test state=present (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
-
- # ============================================================
- - name: test state=present different description (expected changed=false) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}CHANGED'
- <<: *aws_connection_info
- state: present
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=false)
- assert:
- that:
- - 'not result.changed'
-
- # ============================================================
- - name: test state=present different description (expected changed=false)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}CHANGED'
- <<: *aws_connection_info
- state: present
- ignore_errors: true
- register: result
-
- - name: assert state=present (expected changed=false)
- assert:
- that:
- - 'not result.changed'
- - 'result.group_id.startswith("sg-")'
-
- # ============================================================
- - name: test state=present (expected changed=false)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- register: result
-
- - name: assert state=present (expected changed=false)
- assert:
- that:
- - 'not result.changed'
- - 'result.group_id.startswith("sg-")'
-
- # ============================================================
- - name: test IPv6 with the default VPC
- include: ./ipv6_default_tests.yml
- when: default_vpc
-
- - name: test IPv6 with a specified VPC
- block:
-
- # ============================================================
- - name: test state=present (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ ec2_group_name }}-2'
- description: '{{ ec2_group_description }}-2'
- state: present
- vpc_id: '{{ vpc_result.vpc.id }}'
- <<: *aws_connection_info
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test state=present (expected changed=true)
- ec2_group:
- name: '{{ ec2_group_name }}-2'
- description: '{{ ec2_group_description }}-2'
- state: present
- vpc_id: '{{ vpc_result.vpc.id }}'
- <<: *aws_connection_info
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
-
- # ============================================================
- - name: test state=present for ipv6 (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ ec2_group_name }}-2'
- description: '{{ ec2_group_description }}-2'
- state: present
- vpc_id: '{{ vpc_result.vpc.id }}'
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6: "64:ff9b::/96"
- <<: *aws_connection_info
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test state=present for ipv6 (expected changed=true)
- ec2_group:
- name: '{{ ec2_group_name }}-2'
- description: '{{ ec2_group_description }}-2'
- state: present
- vpc_id: '{{ vpc_result.vpc.id }}'
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6: "64:ff9b::/96"
- <<: *aws_connection_info
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
-
- # ============================================================
- - name: test state=present for ipv6 (expected changed=false) (CHECK MODE)
- ec2_group:
- name: '{{ ec2_group_name }}-2'
- description: '{{ ec2_group_description }}-2'
- state: present
- vpc_id: '{{ vpc_result.vpc.id }}'
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6: "64:ff9b::/96"
- <<: *aws_connection_info
- check_mode: true
- register: result
-
- - name: assert nothing changed
- assert:
- that:
- - 'not result.changed'
-
- # ============================================================
- - name: test state=present for ipv6 (expected changed=false)
- ec2_group:
- name: '{{ ec2_group_name }}-2'
- description: '{{ ec2_group_description }}-2'
- state: present
- vpc_id: '{{ vpc_result.vpc.id }}'
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6: "64:ff9b::/96"
- <<: *aws_connection_info
- register: result
-
- - name: assert nothing changed
- assert:
- that:
- - 'not result.changed'
-
- # ============================================================
- - name: test rules_egress state=present for ipv6 (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ ec2_group_name }}-2'
- description: '{{ ec2_group_description }}-2'
- state: present
- vpc_id: '{{ vpc_result.vpc.id }}'
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6: "64:ff9b::/96"
- rules_egress:
- - proto: "tcp"
- from_port: 8181
- to_port: 8181
- cidr_ipv6: "64:ff9b::/96"
- <<: *aws_connection_info
- check_mode: true
- diff: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.diff.0.before.ip_permissions == result.diff.0.after.ip_permissions'
- - 'result.diff.0.before.ip_permissions_egress != result.diff.0.after.ip_permissions_egress'
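- # With diff: true the module reports before/after snapshots even in check
- # mode, which is what the two assertions above compare.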
-
- # ============================================================
- - name: test rules_egress state=present for ipv6 (expected changed=true)
- ec2_group:
- name: '{{ ec2_group_name }}-2'
- description: '{{ ec2_group_description }}-2'
- state: present
- vpc_id: '{{ vpc_result.vpc.id }}'
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6: "64:ff9b::/96"
- rules_egress:
- - proto: "tcp"
- from_port: 8181
- to_port: 8181
- cidr_ipv6: "64:ff9b::/96"
- <<: *aws_connection_info
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
-
- # ============================================================
- - name: test state=absent (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ ec2_group_name }}-2'
- description: '{{ ec2_group_description }}-2'
- state: absent
- vpc_id: '{{ vpc_result.vpc.id }}'
- <<: *aws_connection_info
- check_mode: true
- diff: true
- register: result
-
- - name: assert group was removed
- assert:
- that:
- - 'result.changed'
- - 'not result.diff.0.after'
-
- # ============================================================
- - name: test state=absent (expected changed=true)
- ec2_group:
- name: '{{ ec2_group_name }}-2'
- description: '{{ ec2_group_description }}-2'
- state: absent
- vpc_id: '{{ vpc_result.vpc.id }}'
- <<: *aws_connection_info
- register: result
-
- - name: assert group was removed
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test state=present for ipv4 (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test state=present for ipv4 (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
- - 'result.ip_permissions|length == 1'
- - 'result.ip_permissions_egress|length == 1'
-
- # ============================================================
- - name: add same rule to the existing group (expected changed=false) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- check_mode: true
- diff: true
- register: check_result
-
- - assert:
- that:
- - not check_result.changed
- - check_result.diff.0.before.ip_permissions.0 == check_result.diff.0.after.ip_permissions.0
-
- # ============================================================
- - name: add same rule to the existing group (expected changed=false)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- register: result
-
- - name: assert state=present (expected changed=false)
- assert:
- that:
- - 'not result.changed'
- - 'result.group_id.startswith("sg-")'
-
- - name: assert state=present (expected changed=false)
- assert:
- that:
- - 'not check_result.changed'
-
- # ============================================================
- - name: add a rule that auto creates another security group (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- purge_rules: no
- rules:
- - proto: "tcp"
- group_name: "{{ resource_prefix }} - Another security group"
- group_desc: Another security group
- ports: 7171
- check_mode: true
- register: result
-
- - name: assert the new rule would be added (CHECK MODE)
- assert:
- that:
- - result.changed
-
- # ============================================================
- - name: add a rule that auto creates another security group
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- purge_rules: no
- rules:
- - proto: "tcp"
- group_name: "{{ resource_prefix }} - Another security group"
- group_desc: Another security group
- ports: 7171
- register: result
-
- - name: check that there are now two rules
- assert:
- that:
- - result.changed
- - result.ip_permissions|length == 2
- - result.ip_permissions[0].user_id_group_pairs or
- result.ip_permissions[1].user_id_group_pairs
- - 'result.ip_permissions_egress[0].ip_protocol == "-1"'
-
- # ============================================================
- - name: test ip rules convert port numbers from string to int (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- rules:
- - proto: "tcp"
- from_port: "8183"
- to_port: "8183"
- cidr_ip: "1.1.1.1/32"
- rules_egress:
- - proto: "tcp"
- from_port: "8184"
- to_port: "8184"
- cidr_ip: "1.1.1.1/32"
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test ip rules convert port numbers from string to int (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- rules:
- - proto: "tcp"
- from_port: "8183"
- to_port: "8183"
- cidr_ip: "1.1.1.1/32"
- rules_egress:
- - proto: "tcp"
- from_port: "8184"
- to_port: "8184"
- cidr_ip: "1.1.1.1/32"
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
- - 'result.ip_permissions|length == 1'
- - 'result.ip_permissions_egress[0].ip_protocol == "tcp"'
-
-
- # ============================================================
- - name: test group rules convert port numbers from string to int (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- rules:
- - proto: "tcp"
- from_port: "8185"
- to_port: "8185"
- group_id: "{{result.group_id}}"
- rules_egress:
- - proto: "tcp"
- from_port: "8186"
- to_port: "8186"
- group_id: "{{result.group_id}}"
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test group rules convert port numbers from string to int (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- rules:
- - proto: "tcp"
- from_port: "8185"
- to_port: "8185"
- group_id: "{{result.group_id}}"
- rules_egress:
- - proto: "tcp"
- from_port: "8186"
- to_port: "8186"
- group_id: "{{result.group_id}}"
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
-
- # ============================================================
- - name: test adding a range of ports and ports given as strings (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- # set purge_rules to false so we don't get a false positive from previously added rules
- purge_rules: false
- rules:
- - proto: "tcp"
- ports:
- - 8183-8190
- - '8192'
- cidr_ip: 1.1.1.1/32
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test adding a range of ports and ports given as strings (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- # set purge_rules to false so we don't get a false positive from previously added rules
- purge_rules: false
- rules:
- - proto: "tcp"
- ports:
- - 8183-8190
- - '8192'
- cidr_ip: 1.1.1.1/32
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
-
- # ============================================================
- - name: test adding a rule with an IPv4 CIDR with host bits set (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- # set purge_rules to false so we don't get a false positive from previously added rules
- purge_rules: false
- rules:
- - proto: "tcp"
- ports:
- - 8195
- cidr_ip: 10.0.0.1/8
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test adding a rule with an IPv4 CIDR with host bits set (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- # set purge_rules to false so we don't get a false positive from previously added rules
- purge_rules: false
- rules:
- - proto: "tcp"
- ports:
- - 8195
- cidr_ip: 10.0.0.1/8
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
-
- # ============================================================
- - name: test adding the same rule with an IPv4 CIDR with host bits set (expected changed=false) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- # set purge_rules to false so we don't get a false positive from previously added rules
- purge_rules: false
- rules:
- - proto: "tcp"
- ports:
- - 8195
- cidr_ip: 10.0.0.1/8
- check_mode: true
- register: check_result
-
- # ============================================================
- - name: test adding the same rule with an IPv4 CIDR with host bits set (expected changed=false and a warning)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- # set purge_rules to false so we don't get a false positive from previously added rules
- purge_rules: false
- rules:
- - proto: "tcp"
- ports:
- - 8195
- cidr_ip: 10.0.0.1/8
- register: result
-
- - name: assert state=present (expected changed=false and a warning)
- assert:
- that:
- - 'not check_result.changed'
-
- - name: assert state=present (expected changed=false and a warning)
- assert:
- that:
- # No way to assert for warnings?
- - 'not result.changed'
- - 'result.group_id.startswith("sg-")'
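- # AWS masks the host bits off the CIDR (10.0.0.1/8 is stored as 10.0.0.0/8),
- # so re-adding the rule is idempotent; the module is expected to warn rather
- # than fail.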
-
- # ============================================================
- - name: test using the default VPC
- block:
-
- - name: test adding a rule with an IPv6 CIDR with host bits set (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- # set purge_rules to false so we don't get a false positive from previously added rules
- purge_rules: false
- rules:
- - proto: "tcp"
- ports:
- - 8196
- cidr_ipv6: '2001:db00::1/24'
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test adding a rule with an IPv6 CIDR with host bits set (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- # set purge_rules to false so we don't get a false positive from previously added rules
- purge_rules: false
- rules:
- - proto: "tcp"
- ports:
- - 8196
- cidr_ipv6: '2001:db00::1/24'
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.group_id.startswith("sg-")'
-
- # ============================================================
-
- - name: test adding a rule again with an IPv6 CIDR with host bits set (expected changed=false and a warning)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- # set purge_rules to false so we don't get a false positive from previously added rules
- purge_rules: false
- rules:
- - proto: "tcp"
- ports:
- - 8196
- cidr_ipv6: '2001:db00::1/24'
- register: result
-
- - name: assert state=present (expected changed=false and a warning)
- assert:
- that:
- # No way to assert for warnings?
- - 'not result.changed'
- - 'result.group_id.startswith("sg-")'
-
- when: default_vpc
-
- # ============================================================
- - name: test state=absent (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- state: absent
- <<: *aws_connection_info
- check_mode: true
- register: result
-
- - name: assert state=absent (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test state=absent (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: assert state=absent (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'not result.group_id'
-
- # ============================================================
- - name: create security group in the VPC (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: create security group in the VPC
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.vpc_id == vpc_result.vpc.id'
- - 'result.group_id.startswith("sg-")'
-
- # ============================================================
- - name: test adding tags (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- tags:
- tag1: test1
- tag2: test2
- check_mode: true
- diff: true
- register: result
-
- - name: assert that tags were added (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'not result.diff.0.before.tags'
- - 'result.diff.0.after.tags.tag1 == "test1"'
- - 'result.diff.0.after.tags.tag2 == "test2"'
-
- # ============================================================
- - name: test adding tags (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- tags:
- tag1: test1
- tag2: test2
- register: result
-
- - name: assert that tags were added (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.tags == {"tag1": "test1", "tag2": "test2"}'
-
- # ============================================================
- - name: test that tags are present (expected changed=False) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- purge_rules_egress: false
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- tags:
- tag1: test1
- tag2: test2
- check_mode: true
- register: result
-
- - name: assert that tags were not changed (expected changed=False)
- assert:
- that:
- - 'not result.changed'
-
- # ============================================================
- - name: test that tags are present (expected changed=False)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- purge_rules_egress: false
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- tags:
- tag1: test1
- tag2: test2
- register: result
-
- - name: assert that tags were not changed (expected changed=False)
- assert:
- that:
- - 'not result.changed'
- - 'result.tags == {"tag1": "test1", "tag2": "test2"}'
-
- # ============================================================
- - name: test purging tags (expected changed=True) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- tags:
- tag1: test1
- check_mode: true
- register: result
-
- - name: assert that tag2 was removed (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test purging tags (expected changed=True)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- tags:
- tag1: test1
- register: result
-
- - name: assert that tag2 was removed (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.tags == {"tag1": "test1"}'
-
- # ============================================================
-
- - name: test that tags are left as-is if not specified (expected changed=False)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- register: result
-
- - name: assert that the tags stayed the same (expected changed=false)
- assert:
- that:
- - 'not result.changed'
- - 'result.tags == {"tag1": "test1"}'
-
- # ============================================================
-
- - name: test purging all tags (expected changed=True)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ip: "1.1.1.1/32"
- tags: {}
- register: result
-
- - name: assert that tag1 was removed (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'not result.tags'
-
- # ============================================================
- - name: test adding a rule and egress rule descriptions (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- # purge the other rules so assertions work for the subsequent tests for rule descriptions
- purge_rules_egress: true
- purge_rules: true
- state: present
- rules:
- - proto: "tcp"
- ports:
- - 8281
- cidr_ipv6: 1001:d00::/24
- rule_desc: ipv6 rule desc 1
- rules_egress:
- - proto: "tcp"
- ports:
- - 8282
- cidr_ip: 2.2.2.2/32
- rule_desc: egress rule desc 1
- check_mode: true
- register: result
-
- - name: assert that rule descriptions are created (expected changed=true)
- # Only assert this if the rule description is defined, as the botocore version may be < 1.7.2.
- # It's still helpful to have these tests run on older versions since they verify backwards
- # compatibility with this feature.
- assert:
- that:
- - 'result.changed'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
-
- - name: if an older version of botocore is installed the result should still be changed due to the purged rules (expected changed=true)
- assert:
- that:
- - 'result.changed'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
-
- # =========================================================================================
- - name: add rules without descriptions, in preparation for adding descriptions to the existing rules
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- # purge the other rules so assertions work for the subsequent tests for rule descriptions
- purge_rules_egress: true
- purge_rules: true
- state: present
- rules:
- - proto: "tcp"
- ports:
- - 8281
- cidr_ipv6: 1001:d00::/24
- rules_egress:
- - proto: "tcp"
- ports:
- - 8282
- cidr_ip: 2.2.2.2/32
- register: result
-
- # ============================================================
- - name: test adding a rule and egress rule descriptions (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- # purge the other rules so assertions work for the subsequent tests for rule descriptions
- purge_rules_egress: true
- purge_rules: true
- state: present
- rules:
- - proto: "tcp"
- ports:
- - 8281
- cidr_ipv6: 1001:d00::/24
- rule_desc: ipv6 rule desc 1
- rules_egress:
- - proto: "tcp"
- ports:
- - 8282
- cidr_ip: 2.2.2.2/32
- rule_desc: egress rule desc 1
- register: result
-
- - name: assert that rule descriptions are created (expected changed=true)
- # Only assert this if the rule description is defined, as the botocore version may be < 1.7.2.
- # It's still helpful to have these tests run on older versions since they verify backwards
- # compatibility with this feature.
- assert:
- that:
- - 'result.changed'
- - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 1"'
- - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 1"'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
-
- - name: if an older version of botocore is installed the result should still be changed due to the purged rules (expected changed=true)
- assert:
- that:
- - 'result.changed'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
-
- # ============================================================
- - name: test modifying rule and egress rule descriptions (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- purge_rules_egress: false
- purge_rules: false
- state: present
- rules:
- - proto: "tcp"
- ports:
- - 8281
- cidr_ipv6: 1001:d00::/24
- rule_desc: ipv6 rule desc 2
- rules_egress:
- - proto: "tcp"
- ports:
- - 8282
- cidr_ip: 2.2.2.2/32
- rule_desc: egress rule desc 2
- check_mode: true
- register: result
-
- - name: assert that rule descriptions were modified (expected changed=true)
- # Only assert this if the rule description is defined, as the botocore version may be < 1.7.2.
- # It's still helpful to have these tests run on older versions since they verify backwards
- # compatibility with this feature.
- assert:
- that:
- - 'result.ip_permissions | length > 0'
- - 'result.changed'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
-
- - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
- assert:
- that:
- - 'not result.changed'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined and result.ip_permissions_egress[1].ip_ranges[0].description is undefined
-
- # ============================================================
- - name: test modifying rule and egress rule descriptions (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- purge_rules_egress: false
- purge_rules: false
- state: present
- rules:
- - proto: "tcp"
- ports:
- - 8281
- cidr_ipv6: 1001:d00::/24
- rule_desc: ipv6 rule desc 2
- rules_egress:
- - proto: "tcp"
- ports:
- - 8282
- cidr_ip: 2.2.2.2/32
- rule_desc: egress rule desc 2
- register: result
-
- - name: assert that rule descriptions were modified (expected changed=true)
- # Only assert this if the rule description is defined, as the botocore version may be < 1.7.2.
- # It's still helpful to have these tests run on older versions since they verify backwards
- # compatibility with this feature.
- assert:
- that:
- - 'result.changed'
- - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2"'
- - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2"'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
-
- - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
- assert:
- that:
- - 'not result.changed'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
-
- # ============================================================
-
- - name: test creating a rule in the default VPC with an egress rule (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}-default-vpc'
- description: '{{ec2_group_description}} default VPC'
- <<: *aws_connection_info
- purge_rules_egress: true
- state: present
- rules:
- - proto: "tcp"
- ports:
- - 8281
- cidr_ip: 1.1.1.1/24
- rule_desc: ipv4 rule desc
- rules_egress:
- - proto: "tcp"
- ports:
- - 8282
- cidr_ip: 2.2.2.2/32
- rule_desc: egress rule desc 2
- register: result
-
- - name: assert that the egress rules were replaced (expected changed=true)
- # Only assert this if the rule description is defined, as the botocore version may be < 1.7.2.
- # It's still helpful to have these tests run on older versions since they verify backwards
- # compatibility with this feature.
- assert:
- that:
- - 'result.changed'
- - 'result.ip_permissions_egress|length == 1'
-
- # ============================================================
- - name: test keeping the same rule descriptions (expected changed=false) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- purge_rules_egress: false
- purge_rules: false
- state: present
- rules:
- - proto: "tcp"
- ports:
- - 8281
- cidr_ipv6: 1001:d00::/24
- rule_desc: ipv6 rule desc 2
- rules_egress:
- - proto: "tcp"
- ports:
- - 8282
- cidr_ip: 2.2.2.2/32
- rule_desc: egress rule desc 2
- check_mode: true
- register: result
-
- - name: assert that rule descriptions stayed the same (expected changed=false)
- # Only assert this if the rule description is defined, as the botocore version may be < 1.7.2.
- # It's still helpful to have these tests run on older versions since they verify backwards
- # compatibility with this feature.
- assert:
- that:
- - 'not result.changed'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
-
- - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
- assert:
- that:
- - 'not result.changed'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
-
- # ============================================================
- - name: test keeping the same rule descriptions (expected changed=false)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- purge_rules_egress: false
- purge_rules: false
- state: present
- rules:
- - proto: "tcp"
- ports:
- - 8281
- cidr_ipv6: 1001:d00::/24
- rule_desc: ipv6 rule desc 2
- rules_egress:
- - proto: "tcp"
- ports:
- - 8282
- cidr_ip: 2.2.2.2/32
- rule_desc: egress rule desc 2
- register: result
-
- - name: assert that rule descriptions stayed the same (expected changed=false)
- # Only assert this if the rule description is defined, as the botocore version may be < 1.7.2.
- # It's still helpful to have these tests run on older versions since they verify backwards
- # compatibility with this feature.
- assert:
- that:
- - 'not result.changed'
- - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2"'
- - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2"'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
-
- - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
- assert:
- that:
- - 'not result.changed'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
-
- # ============================================================
- - name: test removing rule descriptions (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- purge_rules_egress: false
- purge_rules: false
- state: present
- rules:
- - proto: "tcp"
- ports:
- - 8281
- cidr_ipv6: 1001:d00::/24
- rule_desc:
- rules_egress:
- - proto: "tcp"
- ports:
- - 8282
- cidr_ip: 2.2.2.2/32
- rule_desc:
- check_mode: true
- register: result
-
- - name: assert that rule descriptions were removed (expected changed=true)
- # Only assert this if the rule description is defined, as the botocore version may be < 1.7.2.
- # It's still helpful to have these tests run on older versions since they verify backwards
- # compatibility with this feature.
- assert:
- that:
- - 'result.changed'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is defined
-
- - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
- assert:
- that:
- - 'not result.changed'
- when: result.ip_permissions_egress[0].ip_ranges[0].description is undefined
-
- # ============================================================
- - name: test removing rule descriptions (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- vpc_id: '{{ vpc_result.vpc.id }}'
- purge_rules_egress: false
- purge_rules: false
- state: present
- rules:
- - proto: "tcp"
- ports:
- - 8281
- cidr_ipv6: 1001:d00::/24
- rule_desc:
- rules_egress:
- - proto: "tcp"
- ports:
- - 8282
- cidr_ip: 2.2.2.2/32
- rule_desc:
- register: result
- ignore_errors: true
-
- - name: assert that rule descriptions were removed (expected changed=true with newer botocore)
- # Only assert this if the rule description is defined, as the botocore version may be < 1.7.2.
- # It's still helpful to have these tests run on older versions since they verify backwards
- # compatibility with this feature.
- assert:
- that:
- - 'result.ip_permissions[0].ipv6_ranges[0].description is undefined'
- - 'result.ip_permissions_egress[0].ip_ranges[0].description is undefined'
- when: result is changed
-
- - name: if an older version of botocore is installed everything should stay the same (expected changed=false)
- assert:
- that:
- - 'not result.changed'
- when: result.failed
-
- # ============================================================
-
- - name: test state=absent (expected changed=true)
- ec2_group:
- name: '{{ec2_group_name}}'
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: assert state=absent (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'not result.group_id'
- when: not has_ec2_classic
-
- always:
- # ============================================================
- - name: tidy up security group
- ec2_group:
- name: '{{ec2_group_name}}'
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: tidy up security group used for the specified-VPC IPv6 tests
- ec2_group:
- name: '{{ ec2_group_name }}-2'
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: tidy up default VPC security group
- ec2_group:
- name: '{{ec2_group_name}}-default-vpc'
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: tidy up automatically created SG
- ec2_group:
- name: "{{ resource_prefix }} - Another security group"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: tidy up VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: absent
- cidr_block: "10.232.232.128/26"
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_group/tasks/multi_account.yml b/test/integration/targets/ec2_group/tasks/multi_account.yml
deleted file mode 100644
index d557938350..0000000000
--- a/test/integration/targets/ec2_group/tasks/multi_account.yml
+++ /dev/null
@@ -1,124 +0,0 @@
-- block:
- - aws_caller_info:
- register: caller_facts
- - name: create a VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc-2"
- state: present
- cidr_block: "10.232.233.128/26"
- tags:
- Description: "Created by ansible-test"
- register: vpc_result_2
- - name: Peer the secondary-VPC to the main VPC
- ec2_vpc_peer:
- vpc_id: '{{ vpc_result_2.vpc.id }}'
- peer_vpc_id: '{{ vpc_result.vpc.id }}'
- peer_owner_id: '{{ caller_facts.account }}'
- peer_region: '{{ aws_region }}'
- register: peer_origin
- - name: Accept the secondary-VPC peering connection in the main VPC
- ec2_vpc_peer:
- peer_vpc_id: '{{ vpc_result_2.vpc.id }}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: accept
- peering_id: '{{ peer_origin.peering_id }}'
- peer_owner_id: '{{ caller_facts.account }}'
- peer_region: '{{ aws_region }}'
- - name: Create group in second VPC
- ec2_group:
- name: '{{ ec2_group_name }}-external'
- description: '{{ ec2_group_description }}'
- vpc_id: '{{ vpc_result_2.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- cidr_ip: 0.0.0.0/0
- ports:
- - 80
- rule_desc: 'http whoo'
- register: external
- - name: Create group in internal VPC
- ec2_group:
- name: '{{ ec2_group_name }}-internal'
- description: '{{ ec2_group_description }}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external'
- ports:
- - 80
- - name: Re-make same rule, expecting changed=false in internal VPC
- ec2_group:
- name: '{{ ec2_group_name }}-internal'
- description: '{{ ec2_group_description }}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external'
- ports:
- - 80
- register: out
- - assert:
- that:
- - out is not changed
- - name: Try again with a malformed group_id in the internal VPC
- ec2_group:
- name: '{{ ec2_group_name }}-internal'
- description: '{{ ec2_group_description }}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: present
- rules:
- - proto: "tcp"
- group_id: '{{ external.group_id }}/{{ caller_facts.account }}/{{ ec2_group_name }}-external'
- ports:
- - 80
- register: out
- ignore_errors: true
- - assert:
- that:
- - out is failed
- always:
- - pause: seconds=5
- - name: Delete secondary-VPC side of peer
- ec2_vpc_peer:
- vpc_id: '{{ vpc_result_2.vpc.id }}'
- peer_vpc_id: '{{ vpc_result.vpc.id }}'
- peering_id: '{{ peer_origin.peering_id }}'
- state: absent
- peer_owner_id: '{{ caller_facts.account }}'
- peer_region: '{{ aws_region }}'
- ignore_errors: yes
- - name: Delete main-VPC side of peer
- ec2_vpc_peer:
- peer_vpc_id: '{{ vpc_result_2.vpc.id }}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- state: absent
- peering_id: '{{ peer_origin.peering_id }}'
- peer_owner_id: '{{ caller_facts.account }}'
- peer_region: '{{ aws_region }}'
- ignore_errors: yes
- - name: Clean up group in second VPC
- ec2_group:
- name: '{{ ec2_group_name }}-external'
- description: '{{ ec2_group_description }}'
- state: absent
- vpc_id: '{{ vpc_result_2.vpc.id }}'
- ignore_errors: yes
- - name: Clean up group in main VPC
- ec2_group:
- name: '{{ ec2_group_name }}-internal'
- description: '{{ ec2_group_description }}'
- state: absent
- vpc_id: '{{ vpc_result.vpc.id }}'
- ignore_errors: yes
- - name: tidy up VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc-2"
- state: absent
- cidr_block: "10.232.233.128/26"
- ignore_errors: yes
- register: removed
- retries: 10
- until: removed is not failed
diff --git a/test/integration/targets/ec2_group/tasks/multi_nested_target.yml b/test/integration/targets/ec2_group/tasks/multi_nested_target.yml
deleted file mode 100644
index 876f2a30a3..0000000000
--- a/test/integration/targets/ec2_group/tasks/multi_nested_target.yml
+++ /dev/null
@@ -1,230 +0,0 @@
----
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- # ============================================================
-
- - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6:
- - "64:ff9b::/96"
- - ["2620::/32"]
- - proto: "tcp"
- ports: 5665
- cidr_ip:
- - 172.16.1.0/24
- - 172.16.17.0/24
- - ["10.0.0.0/24", "20.0.0.0/24"]
- <<: *aws_connection_info
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
-
- - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6:
- - "64:ff9b::/96"
- - ["2620::/32"]
- - proto: "tcp"
- ports: 5665
- cidr_ip:
- - 172.16.1.0/24
- - 172.16.17.0/24
- - ["10.0.0.0/24", "20.0.0.0/24"]
- <<: *aws_connection_info
- register: result
-
- - name: assert state=present (expected changed=true)
- assert:
- that:
- - 'result.changed'
- - 'result.ip_permissions | length == 2'
- - 'result.ip_permissions[0].ip_ranges | length == 4 or result.ip_permissions[1].ip_ranges | length == 4'
- - 'result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2'
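- # the nested CIDR lists are flattened, so the two flat ipv4 entries plus the
- # nested pair count as four ranges, and the ipv6 entries as two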
-
- - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false) (CHECK MODE)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6:
- - "64:ff9b::/96"
- - ["2620::/32"]
- - proto: "tcp"
- ports: 5665
- cidr_ip:
- - 172.16.1.0/24
- - 172.16.17.0/24
- - ["10.0.0.0/24", "20.0.0.0/24"]
- <<: *aws_connection_info
- check_mode: true
- register: result
-
- - name: assert state=present (expected changed=false)
- assert:
- that:
- - 'not result.changed'
-
- - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6:
- - "64:ff9b::/96"
- - ["2620::/32"]
- - proto: "tcp"
- ports: 5665
- cidr_ip:
- - 172.16.1.0/24
- - 172.16.17.0/24
- - ["10.0.0.0/24", "20.0.0.0/24"]
- <<: *aws_connection_info
- register: result
-
- - name: assert state=present (expected changed=false)
- assert:
- that:
- - 'not result.changed'
-
- - name: test state=present purging a nested ipv4 target (expected changed=true) (CHECK MODE)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6:
- - "64:ff9b::/96"
- - ["2620::/32"]
- - proto: "tcp"
- ports: 5665
- cidr_ip:
- - 172.16.1.0/24
- - 172.16.17.0/24
- - ["10.0.0.0/24"]
- <<: *aws_connection_info
- check_mode: true
- register: result
-
- - assert:
- that:
- - result.changed
-
- - name: test state=present purging a nested ipv4 target (expected changed=true)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6:
- - "64:ff9b::/96"
- - ["2620::/32"]
- - proto: "tcp"
- ports: 5665
- cidr_ip:
- - 172.16.1.0/24
- - 172.16.17.0/24
- - ["10.0.0.0/24"]
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3'
- - 'result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2'
-
- - name: test state=present with both associated ipv6 targets nested (expected changed=false)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6:
- - ["2620::/32", "64:ff9b::/96"]
- - proto: "tcp"
- ports: 5665
- cidr_ip:
- - 172.16.1.0/24
- - 172.16.17.0/24
- - ["10.0.0.0/24"]
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
-
- - name: test state=present add another nested ipv6 target (expected changed=true)
- ec2_group:
- name: '{{ ec2_group_name }}'
- description: '{{ ec2_group_description }}'
- state: present
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- cidr_ipv6:
- - ["2620::/32", "64:ff9b::/96"]
- - ["2001:DB8:A0B:12F0::1/64"]
- - proto: "tcp"
- ports: 5665
- cidr_ip:
- - 172.16.1.0/24
- - 172.16.17.0/24
- - ["10.0.0.0/24"]
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3'
- - 'result.ip_permissions[0].ipv6_ranges | length == 3 or result.ip_permissions[1].ipv6_ranges | length == 3'
-
- - name: delete it
- ec2_group:
- name: '{{ ec2_group_name }}'
- state: absent
- <<: *aws_connection_info
diff --git a/test/integration/targets/ec2_group/tasks/numeric_protos.yml b/test/integration/targets/ec2_group/tasks/numeric_protos.yml
deleted file mode 100644
index ba4f7e90dc..0000000000
--- a/test/integration/targets/ec2_group/tasks/numeric_protos.yml
+++ /dev/null
@@ -1,71 +0,0 @@
----
-- block:
- - name: set up aws connection info
- set_fact:
- group_tmp_name: '{{ec2_group_name}}-numbered-protos'
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Create a group with numbered protocol (GRE)
- ec2_group:
- name: '{{ group_tmp_name }}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ ec2_group_description }}'
- rules:
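- # protocol number 47 is GRE, matching the task name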
- - proto: 47
- to_port: -1
- from_port: -1
- cidr_ip: 0.0.0.0/0
- <<: *aws_connection_info
- state: present
- register: result
-
- - name: Create a group with a quoted proto
- ec2_group:
- name: '{{ group_tmp_name }}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ ec2_group_description }}'
- rules:
- - proto: '47'
- to_port: -1
- from_port: -1
- cidr_ip: 0.0.0.0/0
- <<: *aws_connection_info
- state: present
- register: result
- - assert:
- that:
- - result is not changed
- - name: Add a tag with a numeric value
- ec2_group:
- name: '{{ group_tmp_name }}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ ec2_group_description }}'
- tags:
- foo: 1
- <<: *aws_connection_info
- - name: Read a tag with a numeric value
- ec2_group:
- name: '{{ group_tmp_name }}'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ ec2_group_description }}'
- tags:
- foo: 1
- <<: *aws_connection_info
- register: result
- - assert:
- that:
- - result is not changed
-
- always:
- - name: tidy up numbered-protocol test security group
- ec2_group:
- name: '{{group_tmp_name}}'
- state: absent
- vpc_id: '{{ vpc_result.vpc.id }}'
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_group/tasks/rule_group_create.yml b/test/integration/targets/ec2_group/tasks/rule_group_create.yml
deleted file mode 100644
index 465bdc569f..0000000000
--- a/test/integration/targets/ec2_group/tasks/rule_group_create.yml
+++ /dev/null
@@ -1,132 +0,0 @@
----
-- block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Create a group with self-referring rule
- ec2_group:
- name: '{{ec2_group_name}}-auto-create-1'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ec2_group_description}}'
- rules:
- - proto: "tcp"
- from_port: 8000
- to_port: 8100
- group_name: '{{ec2_group_name}}-auto-create-1'
- <<: *aws_connection_info
- state: present
- register: result
-
- - name: Create a second group
- ec2_group:
- name: '{{ec2_group_name}}-auto-create-2'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
-
- - name: Create a series of rules with a recently created group as target
- ec2_group:
- name: '{{ec2_group_name}}-auto-create-1'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ec2_group_description}}'
- purge_rules: false
- rules:
- - proto: "tcp"
- from_port: "{{ item }}"
- to_port: "{{ item }}"
- group_name: '{{ec2_group_name}}-auto-create-2'
- <<: *aws_connection_info
- state: present
- register: result
- with_items:
- - 20
- - 40
- - 60
- - 80
-
- - name: Try to target a group that does not exist yet, without group_desc (expected failure)
- ec2_group:
- name: '{{ec2_group_name}}-auto-create-1'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ec2_group_description}}'
- rules:
- - proto: "tcp"
- from_port: 8182
- to_port: 8182
- group_name: '{{ec2_group_name}}-auto-create-3'
- <<: *aws_connection_info
- state: present
- register: result
- ignore_errors: true
-
- - name: assert you can't create a new group from a rule target with no description
- assert:
- that:
- - result is failed
-
- - name: Create a group with a target of a separate group
- ec2_group:
- name: '{{ec2_group_name}}-auto-create-1'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ec2_group_description}}'
- rules:
- - proto: tcp
- ports:
- - 22
- - 80
- group_name: '{{ec2_group_name}}-auto-create-3'
- group_desc: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- register: result
-
- - name: Create a 4th group
- ec2_group:
- name: '{{ec2_group_name}}-auto-create-4'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ec2_group_description}}'
- <<: *aws_connection_info
- state: present
- rules:
- - proto: tcp
- ports:
- - 22
- cidr_ip: 0.0.0.0/0
-
- - name: use recently created group in a rule
- ec2_group:
- name: '{{ec2_group_name}}-auto-create-5'
- vpc_id: '{{ vpc_result.vpc.id }}'
- description: '{{ec2_group_description}}'
- rules:
- - proto: tcp
- ports:
- - 443
- group_name: '{{ec2_group_name}}-auto-create-4'
- <<: *aws_connection_info
- state: present
-
- always:
- - name: tidy up egress rule test security group
- ec2_group:
- name: '{{ec2_group_name}}-auto-create-{{ item }}'
- state: absent
- vpc_id: '{{ vpc_result.vpc.id }}'
- <<: *aws_connection_info
- ignore_errors: yes
- with_items: [5, 4, 3, 2, 1]
- - name: tidy up egress rule test security group
- ec2_group:
- name: '{{ec2_group_name}}-auto-create-{{ item }}'
- state: absent
- vpc_id: '{{ vpc_result.vpc.id }}'
- <<: *aws_connection_info
- ignore_errors: yes
- with_items: [1, 2, 3, 4, 5]
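The removed ec2_group tests splice AWS credentials into every task with a YAML anchor plus a merge key instead of repeating four parameters per call. A minimal sketch of that pattern, assuming the same aws_access_key/aws_secret_key/security_token/aws_region variables are defined by the test environment (the group name below is purely illustrative):

    - name: set up aws connection info
      set_fact:
        aws_connection_info: &aws_connection_info
          aws_access_key: "{{ aws_access_key }}"
          aws_secret_key: "{{ aws_secret_key }}"
          security_token: "{{ security_token }}"
          region: "{{ aws_region }}"
      no_log: yes

    - name: any later task reuses the anchor via a merge key
      ec2_group:
        name: example-group        # hypothetical name, for illustration only
        description: example group # hypothetical
        state: present
        <<: *aws_connection_info   # expands to the four credential keys above

The merge key is resolved by the YAML loader before Ansible templates the task, and no_log keeps the credentials out of task output.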
diff --git a/test/integration/targets/ec2_key/aliases b/test/integration/targets/ec2_key/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/ec2_key/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/ec2_key/defaults/main.yml b/test/integration/targets/ec2_key/defaults/main.yml
deleted file mode 100644
index df0082d999..0000000000
--- a/test/integration/targets/ec2_key/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# defaults file for test_ec2_key
-ec2_key_name: '{{resource_prefix}}'
diff --git a/test/integration/targets/ec2_key/meta/main.yml b/test/integration/targets/ec2_key/meta/main.yml
deleted file mode 100644
index 45f0cedf8e..0000000000
--- a/test/integration/targets/ec2_key/meta/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_sshkey
- - setup_ec2
diff --git a/test/integration/targets/ec2_key/tasks/main.yml b/test/integration/targets/ec2_key/tasks/main.yml
deleted file mode 100644
index c39bc5385d..0000000000
--- a/test/integration/targets/ec2_key/tasks/main.yml
+++ /dev/null
@@ -1,164 +0,0 @@
----
-# A Note about ec2 environment variable name preference:
-# - EC2_URL -> AWS_URL
-# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
-# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
-# - EC2_REGION -> AWS_REGION
-#
-# TODO - name: test 'validate_certs' parameter
-# TODO - name: test creating key pair with another_key_material with force=yes
-# ============================================================
-# - include: ../../setup_ec2/tasks/common.yml module_name=ec2_key
-
-- block:
-
- # ============================================================
- - name: test with no parameters
- ec2_key:
- register: result
- ignore_errors: true
-
- - name: assert failure when called with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "missing required arguments: name"'
-
- # ============================================================
- - name: test removing a non-existent key pair
- ec2_key:
- name='{{ec2_key_name}}'
- ec2_region={{ec2_region}}
- ec2_access_key={{ec2_access_key}}
- ec2_secret_key={{ec2_secret_key}}
- security_token={{security_token}}
- state=absent
- register: result
-
- - name: assert removing a non-existent key pair
- assert:
- that:
- - 'not result.changed'
-
- # ============================================================
- - name: test creating a new key pair
- ec2_key:
- name='{{ec2_key_name}}'
- ec2_region={{ec2_region}}
- ec2_access_key={{ec2_access_key}}
- ec2_secret_key={{ec2_secret_key}}
- security_token={{security_token}}
- state=present
- register: result
-
- - name: assert creating a new key pair
- assert:
- that:
- - 'result.changed'
- - '"key" in result'
- - '"name" in result.key'
- - '"fingerprint" in result.key'
- - '"private_key" in result.key'
- - 'result.key.name == "{{ec2_key_name}}"'
-
- # ============================================================
- - name: test removing an existent key
- ec2_key:
- name='{{ec2_key_name}}'
- state=absent
- environment:
- EC2_REGION: '{{ec2_region}}'
- EC2_ACCESS_KEY: '{{ec2_access_key}}'
- EC2_SECRET_KEY: '{{ec2_secret_key}}'
- EC2_SECURITY_TOKEN: '{{security_token|default("")}}'
- register: result
-
- - name: assert removing an existent key
- assert:
- that:
- - 'result.changed'
- - '"key" in result'
- - 'result.key == None'
-
- # ============================================================
- - name: test state=present with key_material
- ec2_key:
- name='{{ec2_key_name}}'
- key_material='{{key_material}}'
- state=present
- environment:
- EC2_REGION: '{{ec2_region}}'
- EC2_ACCESS_KEY: '{{ec2_access_key}}'
- EC2_SECRET_KEY: '{{ec2_secret_key}}'
- EC2_SECURITY_TOKEN: '{{security_token|default("")}}'
- register: result
-
- - name: assert state=present with key_material
- assert:
- that:
- - 'result.changed == True'
- - '"key" in result'
- - '"name" in result.key'
- - '"fingerprint" in result.key'
- - '"private_key" not in result.key'
- - 'result.key.name == "{{ec2_key_name}}"'
- - 'result.key.fingerprint == "{{fingerprint}}"'
-
- # ============================================================
-
- - name: test force=no with another_key_material (expect changed=false)
- ec2_key:
- name: '{{ ec2_key_name }}'
- ec2_region: '{{ ec2_region }}'
- ec2_access_key: '{{ ec2_access_key }}'
- ec2_secret_key: '{{ ec2_secret_key }}'
- security_token: '{{ security_token }}'
- key_material: '{{ another_key_material }}'
- force: no
- register: result
-
- - name: assert force=no with another_key_material (expect changed=false)
- assert:
- that:
- - 'not result.changed'
- - 'result.key.fingerprint == "{{ fingerprint }}"'
-
- # ============================================================
-
- - name: test updating a key pair using another_key_material (expect changed=True)
- ec2_key:
- name: '{{ ec2_key_name }}'
- ec2_region: '{{ ec2_region }}'
- ec2_access_key: '{{ ec2_access_key }}'
- ec2_secret_key: '{{ ec2_secret_key }}'
- security_token: '{{ security_token }}'
- key_material: '{{ another_key_material }}'
- register: result
-
- - name: assert updating a key pair using another_key_material (expect changed=True)
- assert:
- that:
- - 'result.changed'
- - 'result.key.fingerprint != "{{ fingerprint }}"'
-
- # ============================================================
-
- always:
-
- # ============================================================
- - name: test state=absent (expect changed=true)
- ec2_key:
- name='{{ec2_key_name}}'
- ec2_region='{{ec2_region}}'
- ec2_access_key='{{ec2_access_key}}'
- ec2_secret_key='{{ec2_secret_key}}'
- security_token='{{security_token}}'
- state=absent
- register: result
-
- - name: assert state=absent with key_material (expect changed=true)
- assert:
- that:
- - 'result.changed'
- - '"key" in result'
- - 'result.key == None'
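The note at the top of the removed ec2_key tests documents the preference order of the EC2_*/AWS_* environment variables, and several tasks exercise it by supplying credentials through environment: rather than module parameters. A minimal sketch of that style, assuming the same ec2_region/ec2_access_key/ec2_secret_key/security_token variables (key name hypothetical):

    - name: call an AWS module with credentials from environment variables
      ec2_key:
        name: example-key          # hypothetical key name
        state: present
      environment:
        EC2_REGION: '{{ ec2_region }}'
        EC2_ACCESS_KEY: '{{ ec2_access_key }}'
        EC2_SECRET_KEY: '{{ ec2_secret_key }}'
        EC2_SECURITY_TOKEN: '{{ security_token | default("") }}'

Since the EC2_* names appear first in the documented preference order, they take effect even when AWS_* variables are already set in the controller's environment.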
diff --git a/test/integration/targets/ec2_metadata_facts/aliases b/test/integration/targets/ec2_metadata_facts/aliases
deleted file mode 100644
index f9961d9a2f..0000000000
--- a/test/integration/targets/ec2_metadata_facts/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-shippable/aws/group2
-shippable/aws/smoketest
diff --git a/test/integration/targets/ec2_metadata_facts/defaults/main.yml b/test/integration/targets/ec2_metadata_facts/defaults/main.yml
deleted file mode 100644
index c25743914a..0000000000
--- a/test/integration/targets/ec2_metadata_facts/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for test_ec2_facts
diff --git a/test/integration/targets/ec2_metadata_facts/meta/main.yml b/test/integration/targets/ec2_metadata_facts/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_metadata_facts/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_metadata_facts/tasks/main.yml b/test/integration/targets/ec2_metadata_facts/tasks/main.yml
deleted file mode 100644
index 8ea9fcf1f8..0000000000
--- a/test/integration/targets/ec2_metadata_facts/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# tasks file for test_ec2_facts
diff --git a/test/integration/targets/ec2_metadata_facts/vars/main.yml b/test/integration/targets/ec2_metadata_facts/vars/main.yml
deleted file mode 100644
index bb8f6c1875..0000000000
--- a/test/integration/targets/ec2_metadata_facts/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for test_ec2_facts
diff --git a/test/integration/targets/ec2_snapshot/aliases b/test/integration/targets/ec2_snapshot/aliases
deleted file mode 100644
index 1dcb36b283..0000000000
--- a/test/integration/targets/ec2_snapshot/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-shippable/aws/group4
-ec2_snapshot_info
diff --git a/test/integration/targets/ec2_snapshot/defaults/main.yml b/test/integration/targets/ec2_snapshot/defaults/main.yml
deleted file mode 100644
index dc1f0f703d..0000000000
--- a/test/integration/targets/ec2_snapshot/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for ec2_snapshot
diff --git a/test/integration/targets/ec2_snapshot/tasks/main.yml b/test/integration/targets/ec2_snapshot/tasks/main.yml
deleted file mode 100644
index b8cdec3045..0000000000
--- a/test/integration/targets/ec2_snapshot/tasks/main.yml
+++ /dev/null
@@ -1,256 +0,0 @@
----
-# Tests for EC2 Snapshot
-#
-# Tests ec2_snapshot:
-# - Snapshot creation
-# - Create with last_snapshot_min_age
-# - Snapshot deletion
-#
-# Tests ec2_snapshot_info:
-# - Listing snapshots for filter: tag
-#
-# Possible Bugs:
-# - check_mode not supported
-#
-- name: Integration testing for ec2_snapshot
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
-
- block:
- - ec2_ami_info:
- owners: amazon
- filters:
- architecture: x86_64
- virtualization-type: hvm
- root-device-type: ebs
- name: "amzn-ami-hvm*"
- register: amis
-
- - name: Setup an instance for testing
- ec2_instance:
- name: '{{ resource_prefix }}'
- instance_type: t2.nano
- image_id: "{{ (amis.images | sort(attribute='creation_date') | last).image_id }}"
- wait: yes
- volumes:
- - device_name: /dev/xvda
- ebs:
- volume_size: 8
- delete_on_termination: true
- register: instance
-
- - set_fact:
- volume_id: '{{ instance.instances[0].block_device_mappings[0].ebs.volume_id }}'
- instance_id: '{{ instance.instances[0].instance_id }}'
- device_name: '{{ instance.instances[0].block_device_mappings[0].device_name }}'
-
-# JR: Check mode not supported
-# - name: Take snapshot (check mode)
-# ec2_snapshot:
-# instance_id: '{{ instance_id }}'
-# check_mode: true
-# snapshot_tags:
-# Test: '{{ resource_prefix }}'
-# register: result
-# - assert:
-# that:
-# - result is changed
-
- - name: Take snapshot of volume
- ec2_snapshot:
- volume_id: '{{ volume_id }}'
- register: result
-
- # The Name tag is created automatically as the instance_name, i.e. the resource_prefix
- - name: Get info about snapshots
- ec2_snapshot_info:
- filters:
- "tag:Name": '{{ resource_prefix }}'
- register: info_result
-
- - assert:
- that:
- - result is changed
- - info_result.snapshots| length == 1
- - info_result.snapshots[0].snapshot_id == result.snapshot_id
- - info_result.snapshots[0].volume_id == result.volume_id
- - info_result.snapshots[0].volume_size == result.volume_size
- - info_result.snapshots[0].tags == result.tags
-
-# JR: Check mode not supported
-# - name: Take snapshot if most recent >1hr (False) (check mode)
-# ec2_snapshot:
-# volume_id: '{{ volume_id }}'
-# snapshot_tags:
-# Name: '{{ resource_prefix }}'
-# last_snapshot_min_age: 60
-# check_mode: true
-# register: result
-# - assert:
-# that:
-# - result is not changed
-
- - name: Take snapshot if most recent >1hr (False)
- ec2_snapshot:
- volume_id: '{{ volume_id }}'
- last_snapshot_min_age: 60
- register: result
-
- - name: Get info about snapshots
- ec2_snapshot_info:
- filters:
- "tag:Name": '{{ resource_prefix }}'
- register: info_result
-
- - assert:
- that:
- - result is not changed
- - info_result.snapshots| length == 1
-
- - name: Pause so we can do a last_snapshot_min_age test
- pause:
- minutes: 1
-
-# JR: Check mode not supported
-# - name: Take snapshot if most recent >1min (True) (check mode)
-# ec2_snapshot:
-# volume_id: '{{ volume_id }}'
-# snapshot_tags:
-# Name: '{{ resource_prefix }}'
-# last_snapshot_min_age: 1
-# check_mode: true
-# register: result
-# - assert:
-# that:
-# - result is changed
-
- - name: Take snapshot if most recent >1min (True)
- ec2_snapshot:
- volume_id: '{{ volume_id }}'
- last_snapshot_min_age: 1
- register: result
-
- - name: Get info about snapshots
- ec2_snapshot_info:
- filters:
- "tag:Name": '{{ resource_prefix }}'
- register: info_result
-
- - assert:
- that:
- - result is changed
- - info_result.snapshots| length == 2
- - '"{{ result.snapshot_id }}" in "{{ info_result| json_query("snapshots[].snapshot_id") }}"'
-
-# JR: Check mode not supported
-# - name: Take snapshot with a tag (check mode)
-# ec2_snapshot:
-# volume_id: '{{ volume_id }}'
-# snapshot_tags:
-# MyTag: '{{ resource_prefix }}'
-# register: result
-# - assert:
-# that:
-# - result is changed
-
- # Wait at least 15 seconds between concurrent volume snapshots.
- - name: Prevent SnapshotCreationPerVolumeRateExceeded errors
- pause:
- seconds: 15
-
- - name: Take snapshot and tag it
- ec2_snapshot:
- volume_id: '{{ volume_id }}'
- snapshot_tags:
- MyTag: '{{ resource_prefix }}'
- register: tagged_result
-
- - name: Get info about snapshots by tag
- ec2_snapshot_info:
- filters:
- "tag:MyTag": '{{ resource_prefix }}'
- register: tag_info_result
-
- - set_fact:
- tagged_snapshot_id: '{{ tag_info_result.snapshots[0].snapshot_id }}'
-
- - assert:
- that:
- - tagged_result is changed
- - tagged_result.tags| length == 2
- - tag_info_result.snapshots| length == 1
- - tagged_result.tags.MyTag == "{{ resource_prefix }}"
- - '"{{ tagged_result.snapshot_id }}" == "{{ tagged_snapshot_id }}"'
-
- - name: Get info about all snapshots for this test
- ec2_snapshot_info:
- filters:
- "tag:Name": '{{ resource_prefix }}'
- register: info_result
-
- - assert:
- that:
- - info_result.snapshots| length == 3
-
- - name: Delete the tagged snapshot
- ec2_snapshot:
- state: absent
- snapshot_id: '{{ tagged_snapshot_id }}'
-
- - name: Get info about all snapshots for this test
- ec2_snapshot_info:
- filters:
- "tag:Name": '{{ resource_prefix }}'
- register: info_result
-
- - assert:
- that:
- - info_result.snapshots| length == 2
- - '"{{ tagged_snapshot_id }}" not in "{{ info_result| json_query("snapshots[].snapshot_id") }}"'
-
- - name: Delete snapshots
- ec2_snapshot:
- state: absent
- snapshot_id: '{{ item.snapshot_id }}'
- with_items: '{{ info_result.snapshots }}'
-
- - name: Get info about all snapshots for this test
- ec2_snapshot_info:
- filters:
- "tag:Name": '{{ resource_prefix }}'
- register: info_result
-
- - assert:
- that:
- - info_result.snapshots| length == 0
-
- always:
-
- - name: Snapshots to delete
- ec2_snapshot_info:
- filters:
- "tag:Name": '{{ resource_prefix }}'
- register: tagged_snapshots
-
- - name: Delete tagged snapshots
- ec2_snapshot:
- state: absent
- snapshot_id: '{{ item.snapshot_id }}'
- with_items: '{{ tagged_snapshots.snapshots }}'
- ignore_errors: true
-
- - name: Delete instance
- ec2_instance:
- instance_ids: '{{ instance_id }}'
- state: absent
- ignore_errors: true
-
- - name: Delete volume
- ec2_vol:
- id: '{{ volume_id }}'
- state: absent
- ignore_errors: true \ No newline at end of file
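The last_snapshot_min_age steps above gate snapshot creation on the age of the newest existing snapshot: the value is in minutes, and the module only creates a new snapshot when the most recent one is older than that threshold. A minimal sketch of the idiom, assuming volume_id was registered by an earlier task as in the tests:

    - name: snapshot the volume only if the newest snapshot is over an hour old
      ec2_snapshot:
        volume_id: '{{ volume_id }}'
        last_snapshot_min_age: 60  # minutes; reports ok (not changed) if a fresher snapshot exists
      register: result

Run twice in quick succession, the first call is changed and the second is not, which is exactly what the paired asserts above verify.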
diff --git a/test/integration/targets/ec2_tag/aliases b/test/integration/targets/ec2_tag/aliases
deleted file mode 100644
index be56eee894..0000000000
--- a/test/integration/targets/ec2_tag/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-shippable/aws/group2
-ec2_tag_info
diff --git a/test/integration/targets/ec2_tag/defaults/main.yml b/test/integration/targets/ec2_tag/defaults/main.yml
deleted file mode 100644
index 6aa39c7360..0000000000
--- a/test/integration/targets/ec2_tag/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for test_ec2_tag
diff --git a/test/integration/targets/ec2_tag/meta/main.yml b/test/integration/targets/ec2_tag/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_tag/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_tag/tasks/main.yml b/test/integration/targets/ec2_tag/tasks/main.yml
deleted file mode 100644
index 7e8cd8d128..0000000000
--- a/test/integration/targets/ec2_tag/tasks/main.yml
+++ /dev/null
@@ -1,144 +0,0 @@
----
-# tasks file for test_ec2_tag
-- name: Set up AWS connection info
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- - name: Create an EC2 volume so we have something to tag
- ec2_vol:
- name: "{{ resource_prefix }} ec2_tag volume"
- volume_size: 1
- state: present
- zone: "{{ aws_region }}a"
- register: volume
-
- - name: List the tags on the volume (ec2_tag)
- ec2_tag:
- resource: "{{ volume.volume_id }}"
- state: list
- register: result
- - name: List the tags on the volume (ec2_tag_info)
- ec2_tag_info:
- resource: "{{ volume.volume_id }}"
- register: result_info
-
- - assert:
- that:
- - result.tags | length == 1
- - result.tags.Name == '{{ resource_prefix }} ec2_tag volume'
- - result_info.tags | length == 1
- - result_info.tags.Name == '{{ resource_prefix }} ec2_tag volume'
-
- - name: Set some new tags on the volume
- ec2_tag:
- resource: "{{ volume.volume_id }}"
- state: present
- tags:
- foo: foo
- bar: baz
- baz: also baz
- register: result
- - name: List the new tags on the volume
- ec2_tag_info:
- resource: "{{ volume.volume_id }}"
- register: result_info
-
- - assert:
- that:
- - result is changed
- - result.tags | length == 4
- - result.added_tags | length == 3
- - result.tags.Name == '{{ resource_prefix }} ec2_tag volume'
- - result.tags.foo == 'foo'
- - result.tags.bar == 'baz'
- - result.tags.baz == 'also baz'
- - result_info.tags | length == 4
- - result_info.tags.Name == '{{ resource_prefix }} ec2_tag volume'
- - result_info.tags.foo == 'foo'
- - result_info.tags.bar == 'baz'
- - result_info.tags.baz == 'also baz'
-
- - name: Remove a tag by name
- ec2_tag:
- resource: "{{ volume.volume_id }}"
- state: absent
- tags:
- baz:
- register: result
-
- - assert:
- that:
- - result is changed
- - result.removed_tags | length == 1
- - "'baz' in result.removed_tags"
-
- - name: Don't remove a tag
- ec2_tag:
- resource: "{{ volume.volume_id }}"
- state: absent
- tags:
- foo: baz
- register: result
-
- - assert:
- that:
- - result is not changed
-
- - name: Remove a tag
- ec2_tag:
- resource: "{{ volume.volume_id }}"
- state: absent
- tags:
- foo: foo
- register: result
-
- - assert:
- that:
- - result is changed
- - result.tags | length == 2
- - "'added_tags' not in result"
- - result.removed_tags | length == 1
- - result.tags.Name == '{{ resource_prefix }} ec2_tag volume'
- - result.tags.bar == 'baz'
-
- - name: Set an exclusive tag
- ec2_tag:
- resource: "{{ volume.volume_id }}"
- purge_tags: true
- tags:
- baz: quux
- register: result
-
- - assert:
- that:
- - result is changed
- - result.tags | length == 1
- - result.added_tags | length == 1
- - result.removed_tags | length == 2
- - result.tags.baz == 'quux'
-
- - name: Remove all tags
- ec2_tag:
- resource: "{{ volume.volume_id }}"
- purge_tags: true
- tags: {}
- register: result
-
- - assert:
- that:
- - result is changed
- - result.tags | length == 0
-
- always:
- - name: Remove the volume
- ec2_vol:
- id: "{{ volume.volume_id }}"
- state: absent
- register: result
- until: result is not failed
- ignore_errors: yes
- retries: 10
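The exclusive-tag and remove-all steps above hinge on purge_tags: without it, ec2_tag only adds or updates the tags you list, while purge_tags: true also deletes every tag absent from the tags mapping. A minimal sketch, assuming a registered volume as in the tests:

    - name: make 'baz' the only tag on the resource
      ec2_tag:
        resource: "{{ volume.volume_id }}"
        purge_tags: true
        tags:
          baz: quux

    - name: strip all tags from the resource
      ec2_tag:
        resource: "{{ volume.volume_id }}"
        purge_tags: true
        tags: {}

An empty tags mapping combined with purge_tags: true is the "remove everything" form, which the final assert (result.tags | length == 0) confirms.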
diff --git a/test/integration/targets/ec2_tag/vars/main.yml b/test/integration/targets/ec2_tag/vars/main.yml
deleted file mode 100644
index c2d0654aef..0000000000
--- a/test/integration/targets/ec2_tag/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for test_ec2_tag
diff --git a/test/integration/targets/ec2_vol/aliases b/test/integration/targets/ec2_vol/aliases
deleted file mode 100644
index 157ce0c9d4..0000000000
--- a/test/integration/targets/ec2_vol/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group3
diff --git a/test/integration/targets/ec2_vol/defaults/main.yml b/test/integration/targets/ec2_vol/defaults/main.yml
deleted file mode 100644
index eb2594bc99..0000000000
--- a/test/integration/targets/ec2_vol/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-vpc_name: '{{ resource_prefix }}-vpc'
-vpc_seed: '{{ resource_prefix }}'
-vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
-subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
-ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2' \ No newline at end of file
diff --git a/test/integration/targets/ec2_vol/tasks/main.yml b/test/integration/targets/ec2_vol/tasks/main.yml
deleted file mode 100644
index aa81248e30..0000000000
--- a/test/integration/targets/ec2_vol/tasks/main.yml
+++ /dev/null
@@ -1,373 +0,0 @@
----
-
-- module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key | default(omit) }}'
- aws_secret_key: '{{ aws_secret_key | default(omit) }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region | default(omit) }}'
-
- block:
-
- # ==== Env setup ==========================================================
- - name: list available AZs
- aws_az_info:
- register: region_azs
-
- - name: pick an AZ for testing
- set_fact:
- availability_zone: "{{ region_azs.availability_zones[0].zone_name }}"
-
- - name: Create a test VPC
- ec2_vpc_net:
- name: "{{ vpc_name }}"
- cidr_block: "{{ vpc_cidr }}"
- tags:
- Name: ec2_vol testing
- ResourcePrefix: "{{ resource_prefix }}"
- register: testing_vpc
-
- - name: Create a test subnet
- ec2_vpc_subnet:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: "{{ subnet_cidr }}"
- tags:
- Name: ec2_vol testing
- ResourcePrefix: "{{ resource_prefix }}"
- az: '{{ availability_zone }}'
- register: testing_subnet
-
- - name: Find AMI to use
- ec2_ami_info:
- owners: 'amazon'
- filters:
- name: '{{ ec2_ami_name }}'
- register: ec2_amis
-
- - name: Set fact with latest AMI
- vars:
- latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
- set_fact:
- ec2_ami_image: '{{ latest_ami.image_id }}'
-
- # ==== ec2_vol tests ===============================================
-
- - name: create a volume (validate module defaults)
- ec2_vol:
- volume_size: 1
- zone: "{{ availability_zone }}"
- tags:
- ResourcePrefix: "{{ resource_prefix }}"
- register: volume1
-
- - name: check task return attributes
- assert:
- that:
- - volume1.changed
- - "'volume' in volume1"
- - "'volume_id' in volume1"
- - "'volume_type' in volume1"
- - "'device' in volume1"
- - "volume1.volume.status == 'available'"
- - "volume1.volume_type == 'standard'"
- - "'attachment_set' in volume1.volume and 'instance_id' in volume1.volume.attachment_set and not volume1.volume.attachment_set.instance_id"
- - "not volume1.volume.encrypted"
-
- # no idempotency check needed here
-
- - name: create another volume (override module defaults)
- ec2_vol:
- encrypted: yes
- volume_size: 4
- volume_type: io1
- iops: 101
- name: "{{ resource_prefix }}"
- tags:
- ResourcePrefix: "{{ resource_prefix }}"
- zone: "{{ availability_zone }}"
- register: volume2
-
- - name: check task return attributes
- assert:
- that:
- - volume2.changed
- - "'volume' in volume2"
- - "'volume_id' in volume2"
- - "'volume_type' in volume2"
- - "'device' in volume2"
- - "volume2.volume.status == 'available'"
- - "volume2.volume_type == 'io1'"
- - "volume2.volume.iops == 101"
- - "volume2.volume.size == 4"
- - "volume2.volume.encrypted"
-
- - name: create another volume (override module defaults) (idempotent)
- ec2_vol:
- encrypted: yes
- volume_size: 4
- volume_type: io1
- iops: 101
- name: "{{ resource_prefix }}"
- tags:
- ResourcePrefix: "{{ resource_prefix }}"
- zone: "{{ availability_zone }}"
- register: volume2_idem
-
- - name: check task return attributes
- assert:
- that:
- - not volume2_idem.changed
-
- - name: create snapshot from volume
- ec2_snapshot:
- volume_id: "{{ volume1.volume_id }}"
- description: "Resource Prefix - {{ resource_prefix }}"
- snapshot_tags:
- ResourcePrefix: "{{ resource_prefix }}"
- register: vol1_snapshot
-
- - name: check task return attributes
- assert:
- that:
- - vol1_snapshot.changed
-
- - name: create a volume from a snapshot
- ec2_vol:
- snapshot: "{{ vol1_snapshot.snapshot_id }}"
- encrypted: yes
- volume_type: gp2
- volume_size: 1
- zone: "{{ availability_zone }}"
- tags:
- ResourcePrefix: "{{ resource_prefix }}"
- register: volume3
-
- - name: check task return attributes
- assert:
- that:
- - volume3.changed
- - "volume3.volume.snapshot_id == vol1_snapshot.snapshot_id"
-
- - name: create an ec2 instance
- ec2_instance:
- name: "{{ resource_prefix }}"
- vpc_subnet_id: "{{ testing_subnet.subnet.id }}"
- instance_type: t3.nano
- image_id: "{{ ec2_ami_image }}"
- tags:
- ResourcePrefix: "{{ resource_prefix }}"
- register: test_instance
-
- - name: check task return attributes
- assert:
- that:
- - test_instance.changed
-
- - name: attach existing volume to an instance
- ec2_vol:
- id: "{{ volume1.volume_id }}"
- instance: "{{ test_instance.instance_ids[0] }}"
- device_name: /dev/sdg
- delete_on_termination: no
- register: vol_attach_result
-
- - name: check task return attributes
- assert:
- that:
- - "vol_attach_result.changed"
- - "'device' in vol_attach_result and vol_attach_result.device == '/dev/sdg'"
- - "'volume' in vol_attach_result"
- - "vol_attach_result.volume.attachment_set.status == 'attached'"
- - "vol_attach_result.volume.attachment_set.instance_id == test_instance.instance_ids[0]"
- - "vol_attach_result.volume.attachment_set.device == '/dev/sdg'"
-
-# Failing
-# - "vol_attach_result.volume.attachment_set.deleteOnTermination"
-
- - name: attach existing volume to an instance (idempotent)
- ec2_vol:
- id: "{{ volume1.volume_id }}"
- instance: "{{ test_instance.instance_ids[0] }}"
- device_name: /dev/sdg
- delete_on_termination: no
- register: vol_attach_result
-
- - name: check task return attributes
- assert:
- that:
- - "not vol_attach_result.changed"
-
- - name: attach a new volume to an instance
- ec2_vol:
- instance: "{{ test_instance.instance_ids[0] }}"
- device_name: /dev/sdh
- volume_size: 1
- volume_type: gp2
- tags:
- ResourcePrefix: "{{ resource_prefix }}"
- register: new_vol_attach_result
-
- - name: check task return attributes
- assert:
- that:
- - "new_vol_attach_result.changed"
- - "'device' in new_vol_attach_result and new_vol_attach_result.device == '/dev/sdh'"
- - "'volume' in new_vol_attach_result"
- - "new_vol_attach_result.volume.attachment_set.status == 'attached'"
- - "new_vol_attach_result.volume.attachment_set.instance_id == test_instance.instance_ids[0]"
- - "new_vol_attach_result.volume.attachment_set.device == '/dev/sdh'"
-
- - name: attach a new volume to an instance (idempotent)
- ec2_vol:
- instance: "{{ test_instance.instance_ids[0] }}"
- device_name: /dev/sdh
- volume_size: 1
- volume_type: gp2
- tags:
- ResourcePrefix: "{{ resource_prefix }}"
- register: new_vol_attach_result_idem
-
- - name: check task return attributes
- assert:
- that:
- - "not new_vol_attach_result_idem.changed"
- - "'Volume mapping for /dev/sdh already exists' in new_vol_attach_result_idem.msg"
-
- - name: create a volume from a snapshot and attach to the instance
- ec2_vol:
- instance: "{{ test_instance.instance_ids[0] }}"
- device_name: /dev/sdi
- snapshot: "{{ vol1_snapshot.snapshot_id }}"
- tags:
- ResourcePrefix: "{{ resource_prefix }}"
- register: attach_new_vol_from_snapshot_result
-
- - name: check task return attributes
- assert:
- that:
- - "attach_new_vol_from_snapshot_result.changed"
- - "'device' in attach_new_vol_from_snapshot_result and attach_new_vol_from_snapshot_result.device == '/dev/sdi'"
- - "'volume' in attach_new_vol_from_snapshot_result"
- - "attach_new_vol_from_snapshot_result.volume.attachment_set.status == 'attached'"
- - "attach_new_vol_from_snapshot_result.volume.attachment_set.instance_id == test_instance.instance_ids[0]"
-
- - name: list volumes attached to instance
- ec2_vol:
- instance: "{{ test_instance.instance_ids[0] }}"
- state: list
- register: inst_vols
-
- - name: check task return attributes
- assert:
- that:
- - "not inst_vols.changed"
- - "'volumes' in inst_vols"
- - "inst_vols.volumes | length == 4"
-
- - name: get info on ebs volumes
- ec2_vol_info:
- register: ec2_vol_info
-
- - name: check task return attributes
- assert:
- that:
- - "not ec2_vol_info.failed"
-
- - name: get info on ebs volumes
- ec2_vol_info:
- filters:
- attachment.instance-id: "{{ test_instance.instance_ids[0] }}"
- register: ec2_vol_info
-
- - name: check task return attributes
- assert:
- that:
- - "{{ ec2_vol_info.volumes | length == 4 }}"
-
- - name: detach volume from the instance
- ec2_vol:
- id: "{{ new_vol_attach_result.volume_id }}"
- instance: ""
- register: new_vol_attach_result
-
- - name: check task return attributes
- assert:
- that:
- - "new_vol_attach_result.changed"
- - "new_vol_attach_result.volume.status == 'available'"
-
- - name: detach volume from the instance (idempotent)
- ec2_vol:
- id: "{{ new_vol_attach_result.volume_id }}"
- instance: ""
- register: new_vol_attach_result_idem
-
- - name: check task return attributes
- assert:
- that:
- - "not new_vol_attach_result_idem.changed"
-
- - name: delete volume
- ec2_vol:
- id: "{{ volume2.volume_id }}"
- state: absent
- register: delete_volume_result
-
- - name: check task return attributes
- assert:
- that:
- - "delete_volume_result.changed"
-
- - name: delete volume (idempotent)
- ec2_vol:
- id: "{{ volume2.volume_id }}"
- state: absent
- register: delete_volume_result_idem
-
- - name: check task return attributes
- assert:
- that:
- - "not delete_volume_result_idem.changed"
-
- # ==== Cleanup ============================================================
-
- always:
-
- - name: delete test instance
- ec2_instance:
- instance_ids:
- - "{{ test_instance.instance_ids[0] }}"
- state: terminated
- ignore_errors: yes
-
- - name: delete volumes
- ec2_vol:
- id: "{{ item.volume_id }}"
- state: absent
- ignore_errors: yes
- with_items:
- - "{{ volume1 }}"
- - "{{ volume2 }}"
- - "{{ volume3 }}"
- - "{{ new_vol_attach_result }}"
- - "{{ attach_new_vol_from_snapshot_result }}"
-
- - name: delete snapshot
- ec2_snapshot:
- snapshot_id: "{{ vol1_snapshot.snapshot_id }}"
- state: absent
- ignore_errors: yes
-
- - name: delete test subnet
- ec2_vpc_subnet:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: "{{ subnet_cidr }}"
- state: absent
- ignore_errors: yes
-
- - name: delete test VPC
- ec2_vpc_net:
- name: "{{ vpc_name }}"
- cidr_block: "{{ vpc_cidr }}"
- state: absent
- ignore_errors: yes
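One detail worth calling out in the removed ec2_vol flow: attachment state is driven by the instance parameter, so detaching is expressed by setting instance to the empty string rather than by a separate state value. A minimal sketch, assuming volume and instance ids registered earlier (variable names illustrative):

    - name: attach the volume
      ec2_vol:
        id: "{{ volume_id }}"              # assumed registered earlier
        instance: "{{ instance_id }}"      # assumed registered earlier
        device_name: /dev/sdg
        delete_on_termination: no

    - name: detach the same volume
      ec2_vol:
        id: "{{ volume_id }}"
        instance: ""                       # empty string means 'attached to nothing'
      register: detach_result

After the detach the module reports volume.status == 'available', and repeating either task is a no-op, as the paired (idempotent) steps above assert.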
diff --git a/test/integration/targets/ec2_vol_info/aliases b/test/integration/targets/ec2_vol_info/aliases
deleted file mode 100644
index 157ce0c9d4..0000000000
--- a/test/integration/targets/ec2_vol_info/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group3
diff --git a/test/integration/targets/ec2_vol_info/meta/main.yml b/test/integration/targets/ec2_vol_info/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_vol_info/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_vol_info/tasks/main.yml b/test/integration/targets/ec2_vol_info/tasks/main.yml
deleted file mode 100644
index 9bde030761..0000000000
--- a/test/integration/targets/ec2_vol_info/tasks/main.yml
+++ /dev/null
@@ -1,123 +0,0 @@
----
-# tasks file for test_ec2_vol_info
-- name: Set up AWS connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: true
-
-- block:
- - ec2_ami_info:
- owners: amazon
- <<: *aws_connection_info
- filters:
- architecture: x86_64
- virtualization-type: hvm
- root-device-type: ebs
- name: "amzn-ami-hvm*"
- register: amis
-
- - name: Create test instance
- ec2_instance:
- name: "{{ resource_prefix }}_ansible_ec2_vol_info_test"
- instance_type: t2.nano
- image_id: "{{ (amis.images | sort(attribute='creation_date') | last).image_id }}"
- wait: yes
- tags:
- Environment: test
- <<: *aws_connection_info
- register: instance
-
- - name: Ensure there's only one matching instance
- assert:
- that:
- - "instance.instance_ids|length == 1"
- - "instance.instances|length == 1"
-
- - name: Create test volume
- ec2_vol:
- instance: "{{ instance.instance_ids[0] }}"
- volume_size: 4
- name: "{{ resource_prefix }}_ansible_ec2_vol_info_test.db"
- device_name: /dev/xvdf
- iops: 100
- tags:
- Tag Name with Space-and-dash: Tag Value with Space-and-dash
- <<: *aws_connection_info
- delete_on_termination: yes
- register: volume
-
- - name: Gather volume info without any filters
- ec2_vol_info:
- <<: *aws_connection_info
- register: volume_facts_wo_filters
- check_mode: no
-
- - name: Check if facts are returned without filters
- assert:
- that:
- - "volume_facts_wo_filters.volumes is defined"
-
- - name: Gather volume info
- ec2_vol_info:
- <<: *aws_connection_info
- filters:
- "tag:Name": "{{ resource_prefix }}_ansible_ec2_vol_info_test.db"
- register: volume_facts
- check_mode: no
-
- - name: Format check
- assert:
- that:
- - "volume_facts.volumes|length == 1"
- - "v.attachment_set.attach_time is defined"
- - "v.attachment_set.device is defined and v.attachment_set.device == volume.device"
- - "v.attachment_set.instance_id is defined and v.attachment_set.instance_id == instance.instance_ids[0]"
- - "v.attachment_set.status is defined and v.attachment_set.status == 'attached'"
- - "v.create_time is defined"
- - "v.encrypted is defined and v.encrypted == false"
- - "v.id is defined and v.id == volume.volume_id"
- - "v.iops is defined and v.iops == 100"
- - "v.region is defined and v.region == aws_region"
- - "v.size is defined and v.size == 4"
- - "v.snapshot_id is defined and v.snapshot_id == ''"
- - "v.status is defined and v.status == 'in-use'"
- - "v.tags.Name is defined and v.tags.Name == resource_prefix + '_ansible_ec2_vol_info_test.db'"
- - "v.tags['Tag Name with Space-and-dash'] == 'Tag Value with Space-and-dash'"
- - "v.type is defined and v.type == 'io1'"
- - "v.zone is defined and v.zone == instance.instances[0].placement.availability_zone"
- vars:
- v: "{{ volume_facts.volumes[0] }}"
-
- - name: New format check
- assert:
- that:
- - "v.attachment_set.delete_on_termination is defined"
- vars:
- v: "{{ volume_facts.volumes[0] }}"
- when: ansible_version.full is version('2.7', '>=')
-
- always:
- - name: Remove the instance
- ec2_instance:
- state: absent
- filters:
- "tag:Name": "{{ resource_prefix }}_ansible_ec2_vol_info_test"
- <<: *aws_connection_info
- register: result
- until: result is not failed
- ignore_errors: yes
- retries: 10
-
- - name: Remove the volume
- ec2_vol:
- id: "{{ volume.volume_id }}"
- state: absent
- <<: *aws_connection_info
- register: result
- until: result is not failed
- ignore_errors: yes
- retries: 10
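The removed ec2_vol_info checks lean on the EC2 DescribeVolumes filter syntax: tag filters use the tag:<key> form, and attachment attributes are addressed with dotted names. A minimal sketch of both, assuming an instance id registered by a prior task:

    - name: find volumes by Name tag
      ec2_vol_info:
        filters:
          "tag:Name": "{{ resource_prefix }}_ansible_ec2_vol_info_test.db"
      register: by_tag

    - name: find volumes attached to one instance
      ec2_vol_info:
        filters:
          attachment.instance-id: "{{ instance.instance_ids[0] }}"
      register: by_attachment

Filter keys are passed through to the AWS API, so any filter DescribeVolumes accepts can be used here.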
diff --git a/test/integration/targets/ec2_vpc_net/aliases b/test/integration/targets/ec2_vpc_net/aliases
deleted file mode 100644
index fb765ef767..0000000000
--- a/test/integration/targets/ec2_vpc_net/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-ec2_vpc_net_info
-cloud/aws
-shippable/aws/group1
diff --git a/test/integration/targets/ec2_vpc_net/defaults/main.yml b/test/integration/targets/ec2_vpc_net/defaults/main.yml
deleted file mode 100644
index 3289b27835..0000000000
--- a/test/integration/targets/ec2_vpc_net/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-# defaults file for ec2_vpc_net
-vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24'
-vpc_cidr_a: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24'
-vpc_cidr_b: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24'
diff --git a/test/integration/targets/ec2_vpc_net/meta/main.yml b/test/integration/targets/ec2_vpc_net/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_vpc_net/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_vpc_net/tasks/main.yml b/test/integration/targets/ec2_vpc_net/tasks/main.yml
deleted file mode 100644
index c0e5e1b45f..0000000000
--- a/test/integration/targets/ec2_vpc_net/tasks/main.yml
+++ /dev/null
@@ -1,1306 +0,0 @@
----
-- name: Setup AWS Environment
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
-
- block:
-
- # ============================================================
-
- - name: Get the current caller identity facts
- aws_caller_info:
- register: caller_facts
-
- - name: run the module without parameters
- ec2_vpc_net:
- ignore_errors: yes
- register: result
-
- - name: assert failure
- assert:
- that:
- - result is failed
- - result.msg.startswith("missing required arguments")
-
- # ============================================================
-
- - name: attempt to create a VPC without providing connection information
- module_defaults: { group/aws: {} }
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- region: us-east-1
- ignore_errors: yes
- register: result
-
- - name: assert connection failure
- assert:
- that:
- - result is failed
- - '"Unable to locate credentials" in result.msg'
-
- # ============================================================
-
- - name: Fetch existing VPC info
- ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
- - name: Check no-one is using the Prefix before we start
- assert:
- that:
- - vpc_info.vpcs | length == 0
-
- - name: test check mode creating a VPC
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- check_mode: true
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: check for a change
- assert:
- that:
- - result is changed
- - vpc_info.vpcs | length == 0
-
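The check-mode steps in this file all follow the same two-step verification: run the module with check_mode: true and assert the reported change, then query with ec2_vpc_net_info to prove nothing was actually created. A minimal sketch of the idiom, using the same variables as the tests:

    - name: create a VPC (check mode)
      ec2_vpc_net:
        state: present
        cidr_block: "{{ vpc_cidr }}"
        name: "{{ resource_prefix }}"
      check_mode: true
      register: result

    - ec2_vpc_net_info:
        filters:
          "tag:Name": "{{ resource_prefix }}"
      register: vpc_info

    - assert:
        that:
          - result is changed               # the module predicts a change...
          - vpc_info.vpcs | length == 0     # ...but made none

Pairing the predicted change with an out-of-band info query is what distinguishes a real check-mode test from simply trusting the module's changed flag.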
- # ============================================================
-
- - name: create a VPC
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- ipv6_cidr: True
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the VPC was created successfully
- assert:
- that:
- - result is successful
- - result is changed
- - vpc_info.vpcs | length == 1
-
- - name: assert the output
- assert:
- that:
- - '"cidr_block" in result.vpc'
- - result.vpc.cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set | length == 1
- - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - '"classic_link_enabled" in result.vpc'
- - result.vpc.dhcp_options_id.startswith("dopt-")
- - result.vpc.id.startswith("vpc-")
- - '"instance_tenancy" in result.vpc'
- - result.vpc.ipv6_cidr_block_association_set | length == 1
- - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ipv6
- - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
- - '"is_default" in result.vpc'
- - '"state" in result.vpc'
- - result.vpc.tags.keys() | length == 1
- - result.vpc.tags.Name == resource_prefix
-
- - name: set the first VPC's details as facts for comparison and cleanup
- set_fact:
- vpc_1_result: "{{ result }}"
- vpc_1: "{{ result.vpc.id }}"
- vpc_1_ipv6_cidr: "{{ result.vpc.ipv6_cidr_block_association_set.0.ipv6_cidr_block }}"
- default_dhcp_options_id: "{{ result.vpc.dhcp_options_id }}"
-
- - name: create a VPC (retry)
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- ipv6_cidr: True
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert nothing changed
- assert:
- that:
- - result is successful
- - result is not changed
- - vpc_info.vpcs | length == 1
- - '"cidr_block" in result.vpc'
- - result.vpc.cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set | length == 1
- - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - '"classic_link_enabled" in result.vpc'
- - result.vpc.dhcp_options_id.startswith("dopt-")
- - result.vpc.id.startswith("vpc-")
- - '"instance_tenancy" in result.vpc'
- - result.vpc.ipv6_cidr_block_association_set | length == 1
- - result.vpc.ipv6_cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | ipv6
- - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
- - '"is_default" in result.vpc'
- - '"state" in result.vpc'
- - result.vpc.tags.keys() | length == 1
- - result.vpc.tags.Name == resource_prefix
- - result.vpc.id == vpc_1
-
- # ============================================================
-
- - name: VPC info (no filters)
- ec2_vpc_net_info:
- register: vpc_info
-
- - name: Test that our new VPC shows up in the results
- assert:
- that:
- - vpc_1 in ( vpc_info | json_query("vpcs[].vpc_id") | list )
-
- - name: VPC info (Simple tag filter)
- ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: Test vpc_info results
- assert:
- that:
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block_association_set | length == 1
- - vpc_info.vpcs[0].cidr_block_association_set[0].association_id == result.vpc.cidr_block_association_set[0].association_id
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block == result.vpc.cidr_block_association_set[0].cidr_block
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - '"classic_link_dns_supported" in vpc_info.vpcs[0]'
- - '"classic_link_enabled" in vpc_info.vpcs[0]'
- - vpc_info.vpcs[0].dhcp_options_id == result.vpc.dhcp_options_id
- - ( vpc_info.vpcs[0].enable_dns_hostnames | bool ) == True
- - ( vpc_info.vpcs[0].enable_dns_support | bool ) == True
- - vpc_info.vpcs[0].id == result.vpc.id
- - '"instance_tenancy" in vpc_info.vpcs[0]'
- - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1
- - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].association_id == result.vpc.ipv6_cidr_block_association_set[0].association_id
- - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block == result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block
- - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
- - '"is_default" in vpc_info.vpcs[0]'
- - vpc_info.vpcs[0].owner_id == caller_facts.account
- - '"state" in vpc_info.vpcs[0]'
- - vpc_info.vpcs[0].vpc_id == result.vpc.id
-
- # ============================================================
-
- - name: Try to add IPv6 CIDR when one already exists
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- ipv6_cidr: True
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: Assert no changes made
- assert:
- that:
- - '"Only one IPv6 CIDR is permitted per VPC, {{ result.vpc.id }} already has CIDR {{ vpc_1_ipv6_cidr }}" in result.warnings'
- - result is not changed
- - vpc_info.vpcs | length == 1
-
- # ============================================================
-
- - name: test check mode creating an identical VPC (multi_ok)
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- ipv6_cidr: True
- multi_ok: yes
- check_mode: true
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert a change would be made
- assert:
- that:
- - result is changed
- - name: assert a change was not actually made
- assert:
- that:
- - vpc_info.vpcs | length == 1
-
- # ============================================================
-
- - name: create a VPC with a dedicated tenancy using the same CIDR and name
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- ipv6_cidr: True
- tenancy: dedicated
- multi_ok: yes
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert a new VPC was created
- assert:
- that:
- - result is successful
- - result is changed
- - result.vpc.instance_tenancy == "dedicated"
- - result.vpc.id != vpc_1
- - vpc_info.vpcs | length == 2
-
- - name: set the second VPC's details as facts for comparison and cleanup
- set_fact:
- vpc_2_result: "{{ result }}"
- vpc_2: "{{ result.vpc.id }}"
-
- # ============================================================
-
- - name: VPC info (Simple VPC-ID filter)
- ec2_vpc_net_info:
- filters:
- "vpc-id": "{{ vpc_2 }}"
- register: vpc_info
-
- - name: Test vpc_info results
- assert:
- that:
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block_association_set | length == 1
- - vpc_info.vpcs[0].cidr_block_association_set[0].association_id == result.vpc.cidr_block_association_set[0].association_id
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block == result.vpc.cidr_block_association_set[0].cidr_block
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - '"classic_link_dns_supported" in vpc_info.vpcs[0]'
- - '"classic_link_enabled" in vpc_info.vpcs[0]'
- - vpc_info.vpcs[0].dhcp_options_id == result.vpc.dhcp_options_id
- - ( vpc_info.vpcs[0].enable_dns_hostnames | bool ) == True
- - ( vpc_info.vpcs[0].enable_dns_support | bool ) == True
- - vpc_info.vpcs[0].id == vpc_2
- - '"instance_tenancy" in vpc_info.vpcs[0]'
- - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1
- - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].association_id == result.vpc.ipv6_cidr_block_association_set[0].association_id
- - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block == result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block
- - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["associated", "associating"]
- - '"is_default" in vpc_info.vpcs[0]'
- - vpc_info.vpcs[0].owner_id == caller_facts.account
- - '"state" in vpc_info.vpcs[0]'
- - vpc_info.vpcs[0].vpc_id == vpc_2
-
- # ============================================================
-
- # This will only fail if there are already *2* vpcs otherwise ec2_vpc_net
- # assumes you want to update your existing VPC...
- - name: attempt to create another VPC with the same CIDR and name without multi_ok
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- ipv6_cidr: True
- tenancy: dedicated
- multi_ok: no
- register: new_result
- ignore_errors: yes
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert failure
- assert:
- that:
- - new_result is failed
- - '"If you would like to create the VPC anyway please pass True to the multi_ok param" in new_result.msg'
- - vpc_info.vpcs | length == 2
-
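As the comment above notes, ec2_vpc_net matches on CIDR and Name: with one match and multi_ok: no it updates the existing VPC, with two or more matches it fails outright, and only multi_ok: yes creates a duplicate deliberately. A minimal sketch of the two modes, with the same parameters as the tests:

    - name: update the existing VPC if exactly one matches, fail if several do
      ec2_vpc_net:
        state: present
        cidr_block: "{{ vpc_cidr }}"
        name: "{{ resource_prefix }}"
        multi_ok: no

    - name: deliberately create another VPC with the same CIDR and name
      ec2_vpc_net:
        state: present
        cidr_block: "{{ vpc_cidr }}"
        name: "{{ resource_prefix }}"
        multi_ok: yes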
- # ============================================================
-
- # FIXME: right now if there are multiple matching VPCs they cannot be removed,
- # as there is no vpc_id option for idempotence. A workaround is to retag the VPC.
- - name: remove Name tag on new VPC
- ec2_tag:
- state: absent
- resource: "{{ vpc_2 }}"
- tags:
- Name: "{{ resource_prefix }}"
-
- - name: add a unique name tag
- ec2_tag:
- state: present
- resource: "{{ vpc_2 }}"
- tags:
- Name: "{{ resource_prefix }}-changed"
-
- - name: delete one of the VPCs
- ec2_vpc_net:
- state: absent
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}-changed"
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert success
- assert:
- that:
- - result is changed
- - not result.vpc
- - vpc_info.vpcs | length == 1
-
- # ============================================================
-
- - name: attempt to delete a VPC that doesn't exist
- ec2_vpc_net:
- state: absent
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}-changed"
- register: result
-
- - name: assert no changes were made
- assert:
- that:
- - result is not changed
- - not result.vpc
-
- # ============================================================
-
- - name: create a DHCP option set to use in next test
- ec2_vpc_dhcp_option:
- dns_servers:
- - 4.4.4.4
- - 8.8.8.8
- tags:
- Name: "{{ resource_prefix }}"
- register: new_dhcp
- - name: assert the DHCP option set was successfully created
- assert:
- that:
- - new_dhcp is changed
-
- - name: modify the DHCP options set for a VPC (check_mode)
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}"
- register: result
- check_mode: True
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert check mode reported a change but did not update the DHCP option set
- assert:
- that:
- - result is changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].dhcp_options_id == default_dhcp_options_id
-
- - name: modify the DHCP options set for a VPC
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}"
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the DHCP option set changed
- assert:
- that:
- - result is changed
- - result.vpc.id == vpc_1
- - default_dhcp_options_id != result.vpc.dhcp_options_id
- - result.vpc.dhcp_options_id == new_dhcp.dhcp_options_id
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].dhcp_options_id == new_dhcp.dhcp_options_id
-
- - name: modify the DHCP options set for a VPC (retry)
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}"
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the DHCP option set was unchanged on retry
- assert:
- that:
- - result is not changed
- - result.vpc.id == vpc_1
- - result.vpc.dhcp_options_id == new_dhcp.dhcp_options_id
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].dhcp_options_id == new_dhcp.dhcp_options_id
-
- # ============================================================
-
- # XXX #62677
- #- name: disable dns_hostnames (check mode)
- # ec2_vpc_net:
- # state: present
- # cidr_block: "{{ vpc_cidr }}"
- # name: "{{ resource_prefix }}"
- # dns_hostnames: False
- # register: result
- # check_mode: True
- #- ec2_vpc_net_info:
- # filters:
- # "tag:Name": "{{ resource_prefix }}"
- # register: vpc_info
-
- #- name: assert changed was set but not made
- # assert:
- # that:
- # - result is successful
- # - result is changed
- # - vpc_info.vpcs | length == 1
- # - vpc_info.vpcs[0].enable_dns_hostnames | bool == True
- # - vpc_info.vpcs[0].enable_dns_support | bool == True
-
- - name: disable dns_hostnames
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- dns_hostnames: False
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert a change was made
- assert:
- that:
- - result is successful
- - result is changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
- - vpc_info.vpcs[0].enable_dns_support | bool == True
-
- - name: disable dns_hostnames (retry)
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- dns_hostnames: False
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert a change was made
- assert:
- that:
- - result is successful
- - result is not changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
- - vpc_info.vpcs[0].enable_dns_support | bool == True
-
- # XXX #62677
- #- name: disable dns_support (check mode)
- # ec2_vpc_net:
- # state: present
- # cidr_block: "{{ vpc_cidr }}"
- # name: "{{ resource_prefix }}"
- # dns_hostnames: False
- # dns_support: False
- # check_mode: True
- # register: result
- #- ec2_vpc_net_info:
- # filters:
- # "tag:Name": "{{ resource_prefix }}"
- # register: vpc_info
-
- #- name: assert changed was set but not made
- # assert:
- # that:
- # - result is successful
- # - result is changed
- # - result.vpc.id == vpc_1
- # - vpc_info.vpcs | length == 1
- # - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
- # - vpc_info.vpcs[0].enable_dns_support | bool == True
-
- - name: disable dns_support
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- dns_hostnames: False
- dns_support: False
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert a change was made
- assert:
- that:
- - result is successful
- - result is changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
- - vpc_info.vpcs[0].enable_dns_support | bool == False
-
- - name: disable dns_support (retry)
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- dns_hostnames: False
- dns_support: False
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert a change was not made
- assert:
- that:
- - result is successful
- - result is not changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].enable_dns_hostnames | bool == False
- - vpc_info.vpcs[0].enable_dns_support | bool == False
-
- # XXX #62677
- #- name: re-enable dns_support (check mode)
- # ec2_vpc_net:
- # state: present
- # cidr_block: "{{ vpc_cidr }}"
- # name: "{{ resource_prefix }}"
- # register: result
- # check_mode: True
- #- ec2_vpc_net_info:
- # filters:
- # "tag:Name": "{{ resource_prefix }}"
- # register: vpc_info
-
- #- name: assert a change was made
- # assert:
- # that:
- # - result is successful
- # - result is changed
- # - result.vpc.id == vpc_1
- # - vpc_info.vpcs | length == 1
- # - vpc_info.vpcs[0].enable_dns_hostnames | bool == True
- # - vpc_info.vpcs[0].enable_dns_support | bool == True
-
- - name: re-enable dns_support
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert a change was made
- assert:
- that:
- - result is successful
- - result is changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].enable_dns_hostnames | bool == True
- - vpc_info.vpcs[0].enable_dns_support | bool == True
-
- - name: re-enable dns_support (retry)
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert a change was not made
- assert:
- that:
- - result is successful
- - result is not changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].enable_dns_hostnames | bool == True
- - vpc_info.vpcs[0].enable_dns_support | bool == True
-
- # ============================================================
-
- - name: modify tags (check mode)
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- tags:
- Ansible: Test
- check_mode: true
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the VPC has Name but not Ansible tag
- assert:
- that:
- - result is successful
- - result is changed
- - result.vpc.id == vpc_1
- - result.vpc.tags | length == 1
- - result.vpc.tags.Name == resource_prefix
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].tags | length == 1
- - vpc_info.vpcs[0].tags.Name == resource_prefix
-
- - name: modify tags
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- tags:
- Ansible: Test
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the VPC has Name and Ansible tags
- assert:
- that:
- - result is successful
- - result is changed
- - result.vpc.id == vpc_1
- - result.vpc.tags | length == 2
- - result.vpc.tags.Ansible == "Test"
- - result.vpc.tags.Name == resource_prefix
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].tags | length == 2
- - vpc_info.vpcs[0].tags.Ansible == "Test"
- - vpc_info.vpcs[0].tags.Name == resource_prefix
-
- - name: modify tags (no change)
- ec2_vpc_net:
- state: present
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- dns_support: True
- dns_hostnames: True
- tags:
- Ansible: Test
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the VPC has Name and Ansible tags
- assert:
- that:
- - result is successful
- - result is not changed
- - result.vpc.id == vpc_1
- - result.vpc.tags | length == 2
- - result.vpc.tags.Ansible == "Test"
- - result.vpc.tags.Name == resource_prefix
- - vpc_info.vpcs | length == 1
- - vpc_info.vpcs[0].tags | length == 2
- - vpc_info.vpcs[0].tags.Ansible == "Test"
- - vpc_info.vpcs[0].tags.Name == resource_prefix
-
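- # Editorial note: ec2_vpc_net folds its name parameter into the Name tag,
- # so the single extra tag Ansible=Test above yields tags | length == 2.
- # A sketch of inspecting the merged tags (hypothetical):
- #- debug:
- #    msg: "{{ vpc_info.vpcs[0].tags }}"  # expect Name and Ansible keys
-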
- # ============================================================
-
- # #62678
- #- name: modify CIDR (check mode)
- # ec2_vpc_net:
- # state: present
- # cidr_block:
- # - "{{ vpc_cidr }}"
- # - "{{ vpc_cidr_a }}"
- # name: "{{ resource_prefix }}"
- # check_mode: true
- # register: result
- #- ec2_vpc_net_info:
- # filters:
- # "tag:Name": "{{ resource_prefix }}"
- # register: vpc_info
-
- #- name: Check the CIDRs weren't changed
- # assert:
- # that:
- # - result is successful
- # - result is changed
- # - result.vpc.id == vpc_1
- # - vpc_info.vpcs | length == 1
- # - vpc_info.vpcs[0].cidr_block == vpc_cidr
- # - vpc_cidr in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_cidr_a not in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_cidr_b not in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_info.vpcs[0].cidr_block_association_set | length == 1
- # - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- # - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- # - vpc_cidr in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_cidr_a not in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_cidr_b not in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
-
- - name: modify CIDR
- ec2_vpc_net:
- state: present
- cidr_block:
- - "{{ vpc_cidr }}"
- - "{{ vpc_cidr_a }}"
- name: "{{ resource_prefix }}"
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the CIDRs changed
- assert:
- that:
- - result is successful
- - result is changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - result.vpc.cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set | length == 2
- - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b not in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_info.vpcs[0].cidr_block_association_set | length == 2
- - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b not in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
-
- - name: modify CIDR (no change)
- ec2_vpc_net:
- state: present
- cidr_block:
- - "{{ vpc_cidr }}"
- - "{{ vpc_cidr_a }}"
- name: "{{ resource_prefix }}"
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the CIDRs didn't change
- assert:
- that:
- - result is successful
- - result is not changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - result.vpc.cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set | length == 2
- - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b not in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_info.vpcs[0].cidr_block_association_set | length == 2
- - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b not in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
-
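- # Editorial note: the json_query filter used in these assertions is backed
- # by the jmespath Python library, which must be installed on the
- # controller. A standalone sketch of the projection it performs
- # (hypothetical, result registered as above):
- #- debug:
- #    msg: "{{ result.vpc | json_query('cidr_block_association_set[*].cidr_block') }}"
-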
- # #62678
- #- name: modify CIDR - no purge (check mode)
- # ec2_vpc_net:
- # state: present
- # cidr_block:
- # - "{{ vpc_cidr }}"
- # - "{{ vpc_cidr_b }}"
- # name: "{{ resource_prefix }}"
- # check_mode: true
- # register: result
- #- ec2_vpc_net_info:
- # filters:
- # "tag:Name": "{{ resource_prefix }}"
- # register: vpc_info
-
- #- name: Check the CIDRs weren't changed
- # assert:
- # that:
- # - result is successful
- # - result is changed
- # - vpc_info.vpcs | length == 1
- # - vpc_info.vpcs[0].cidr_block == vpc_cidr
- # - vpc_cidr in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_cidr_a in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_cidr_b not in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_info.vpcs[0].cidr_block_association_set | length == 2
- # - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- # - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- # - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- # - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- # - vpc_cidr in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_cidr_a in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_cidr_b not in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
-
- - name: modify CIDR - no purge
- ec2_vpc_net:
- state: present
- cidr_block:
- - "{{ vpc_cidr }}"
- - "{{ vpc_cidr_b }}"
- name: "{{ resource_prefix }}"
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the CIDRs changed
- assert:
- that:
- - result is successful
- - result is changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - result.vpc.cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set | length == 3
- - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_info.vpcs[0].cidr_block_association_set | length == 3
- - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
-
- - name: modify CIDR - no purge (no change)
- ec2_vpc_net:
- state: present
- cidr_block:
- - "{{ vpc_cidr }}"
- - "{{ vpc_cidr_b }}"
- name: "{{ resource_prefix }}"
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the CIDRs didn't change
- assert:
- that:
- - result is successful
- - result is not changed
- - vpc_info.vpcs | length == 1
- - result.vpc.cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set | length == 3
- - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_info.vpcs[0].cidr_block_association_set | length == 3
- - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
-
- - name: modify CIDR - no purge (no change - list all - check mode)
- ec2_vpc_net:
- state: present
- cidr_block:
- - "{{ vpc_cidr }}"
- - "{{ vpc_cidr_a }}"
- - "{{ vpc_cidr_b }}"
- name: "{{ resource_prefix }}"
- check_mode: true
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the CIDRs didn't change
- assert:
- that:
- - result is successful
- - result is not changed
- - vpc_info.vpcs | length == 1
- - result.vpc.cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set | length == 3
- - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_info.vpcs[0].cidr_block_association_set | length == 3
- - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
-
- - name: modify CIDR - no purge (no change - list all)
- ec2_vpc_net:
- state: present
- cidr_block:
- - "{{ vpc_cidr }}"
- - "{{ vpc_cidr_a }}"
- - "{{ vpc_cidr_b }}"
- name: "{{ resource_prefix }}"
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the CIDRs didn't change
- assert:
- that:
- - result is successful
- - result is not changed
- - vpc_info.vpcs | length == 1
- - result.vpc.cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set | length == 3
- - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_info.vpcs[0].cidr_block_association_set | length == 3
- - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
-
- - name: modify CIDR - no purge (no change - different order - check mode)
- ec2_vpc_net:
- state: present
- cidr_block:
- - "{{ vpc_cidr }}"
- - "{{ vpc_cidr_b }}"
- - "{{ vpc_cidr_a }}"
- name: "{{ resource_prefix }}"
- check_mode: true
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the CIDRs didn't change
- assert:
- that:
- - result is successful
- - result is not changed
- - vpc_info.vpcs | length == 1
- - result.vpc.cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set | length == 3
- - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_info.vpcs[0].cidr_block_association_set | length == 3
- - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
-
- - name: modify CIDR - no purge (no change - different order)
- ec2_vpc_net:
- state: present
- cidr_block:
- - "{{ vpc_cidr }}"
- - "{{ vpc_cidr_b }}"
- - "{{ vpc_cidr_a }}"
- name: "{{ resource_prefix }}"
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the CIDRs didn't change
- assert:
- that:
- - result is successful
- - result is not changed
- - vpc_info.vpcs | length == 1
- - result.vpc.cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - result.vpc.cidr_block_association_set | length == 3
- - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - result.vpc.cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (result.vpc | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_info.vpcs[0].cidr_block_association_set | length == 3
- - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- - vpc_cidr in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_a in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- - vpc_cidr_b in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
-
- # #62678
- #- name: modify CIDR - purge (check mode)
- # ec2_vpc_net:
- # state: present
- # cidr_block:
- # - "{{ vpc_cidr }}"
- # - "{{ vpc_cidr_b }}"
- # name: "{{ resource_prefix }}"
- # purge_cidrs: yes
- # check_mode: true
- # register: result
- #- ec2_vpc_net_info:
- # filters:
- # "tag:Name": "{{ resource_prefix }}"
- # register: vpc_info
-
- #- name: Check the CIDRs weren't changed
- # assert:
- # that:
- # - result is successful
- # - result is changed
- # - vpc_info.vpcs | length == 1
- # - vpc_info.vpcs[0].cidr_block == vpc_cidr
- # - vpc_info.vpcs[0].cidr_block_association_set | length == 3
- # - vpc_info.vpcs[0].cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-")
- # - vpc_info.vpcs[0].cidr_block_association_set[1].association_id.startswith("vpc-cidr-assoc-")
- # - vpc_info.vpcs[0].cidr_block_association_set[2].association_id.startswith("vpc-cidr-assoc-")
- # - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"]
- # - vpc_info.vpcs[0].cidr_block_association_set[1].cidr_block_state.state in ["associated", "associating"]
- # - vpc_info.vpcs[0].cidr_block_association_set[2].cidr_block_state.state in ["associated", "associating"]
- # - vpc_cidr in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_cidr_a in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
- # - vpc_cidr_b in (vpc_info.vpcs[0] | json_query("cidr_block_association_set[*].cidr_block") | list)
-
- - name: modify CIDR - purge
- ec2_vpc_net:
- state: present
- cidr_block:
- - "{{ vpc_cidr }}"
- - "{{ vpc_cidr_b }}"
- name: "{{ resource_prefix }}"
- purge_cidrs: yes
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the CIDRs changed
- vars:
- cidr_query: 'cidr_block_association_set[?cidr_block_state.state == `associated`].cidr_block'
- assert:
- that:
- - result is successful
- - result is changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - result.vpc.cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - result.vpc | json_query(cidr_query) | list | length == 2
- - vpc_cidr in (result.vpc | json_query(cidr_query) | list)
- - vpc_cidr_a not in (result.vpc | json_query(cidr_query) | list)
- - vpc_cidr_b in (result.vpc | json_query(cidr_query) | list)
- - vpc_info.vpcs[0] | json_query(cidr_query) | list | length == 2
- - vpc_cidr in (vpc_info.vpcs[0] | json_query(cidr_query) | list)
- - vpc_cidr_a not in (vpc_info.vpcs[0] | json_query(cidr_query) | list)
- - vpc_cidr_b in (vpc_info.vpcs[0] | json_query(cidr_query) | list)
-
- - name: modify CIDR - purge (no change)
- ec2_vpc_net:
- state: present
- cidr_block:
- - "{{ vpc_cidr }}"
- - "{{ vpc_cidr_b }}"
- name: "{{ resource_prefix }}"
- purge_cidrs: yes
- register: result
- - ec2_vpc_net_info:
- filters:
- "tag:Name": "{{ resource_prefix }}"
- register: vpc_info
-
- - name: assert the CIDRs didn't change
- vars:
- cidr_query: 'cidr_block_association_set[?cidr_block_state.state == `associated`].cidr_block'
- assert:
- that:
- - result is successful
- - result is not changed
- - result.vpc.id == vpc_1
- - vpc_info.vpcs | length == 1
- - result.vpc.cidr_block == vpc_cidr
- - vpc_info.vpcs[0].cidr_block == vpc_cidr
- - result.vpc | json_query(cidr_query) | list | length == 2
- - vpc_cidr in (result.vpc | json_query(cidr_query) | list)
- - vpc_cidr_a not in (result.vpc | json_query(cidr_query) | list)
- - vpc_cidr_b in (result.vpc | json_query(cidr_query) | list)
- - vpc_info.vpcs[0] | json_query(cidr_query) | list | length == 2
- - vpc_cidr in (vpc_info.vpcs[0] | json_query(cidr_query) | list)
- - vpc_cidr_a not in (vpc_info.vpcs[0] | json_query(cidr_query) | list)
- - vpc_cidr_b in (vpc_info.vpcs[0] | json_query(cidr_query) | list)
-
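- # Editorial note: after purge_cidrs, the removed block can linger in the
- # association set in a "disassociated" state, which is why the assertions
- # above filter on cidr_block_state.state via the cidr_query JMESPath
- # expression instead of counting raw entries. Sketch (hypothetical):
- #- debug:
- #    msg: "{{ result.vpc | json_query(cidr_query) | list }}"
- #  vars:
- #    cidr_query: 'cidr_block_association_set[?cidr_block_state.state == `associated`].cidr_block'
-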
- # ============================================================
-
- - name: test check mode to delete a VPC
- ec2_vpc_net:
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- state: absent
- check_mode: true
- register: result
-
- - name: assert that a change would have been made
- assert:
- that:
- - result is changed
-
- # ============================================================
-
- always:
-
- - name: replace the DHCP options set so the new one can be deleted
- ec2_vpc_net:
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- state: present
- multi_ok: no
- dhcp_opts_id: "{{ default_dhcp_options_id }}"
- ignore_errors: true
-
- - name: remove the DHCP option set
- ec2_vpc_dhcp_option:
- dhcp_options_id: "{{ new_dhcp.dhcp_options_id }}"
- state: absent
- ignore_errors: true
-
- - name: remove the VPC
- ec2_vpc_net:
- cidr_block: "{{ vpc_cidr }}"
- name: "{{ resource_prefix }}"
- state: absent
- ignore_errors: true
diff --git a/test/integration/targets/ec2_vpc_subnet/aliases b/test/integration/targets/ec2_vpc_subnet/aliases
deleted file mode 100644
index 5e7a8d3877..0000000000
--- a/test/integration/targets/ec2_vpc_subnet/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-shippable/aws/group2
-unstable
diff --git a/test/integration/targets/ec2_vpc_subnet/defaults/main.yml b/test/integration/targets/ec2_vpc_subnet/defaults/main.yml
deleted file mode 100644
index 9c529aff02..0000000000
--- a/test/integration/targets/ec2_vpc_subnet/defaults/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# defaults file for ec2_vpc_subnet
-ec2_vpc_subnet_name: '{{resource_prefix}}'
-ec2_vpc_subnet_description: 'Created by ansible integration tests'
diff --git a/test/integration/targets/ec2_vpc_subnet/meta/main.yml b/test/integration/targets/ec2_vpc_subnet/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_vpc_subnet/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_vpc_subnet/tasks/main.yml b/test/integration/targets/ec2_vpc_subnet/tasks/main.yml
deleted file mode 100644
index fa79901db0..0000000000
--- a/test/integration/targets/ec2_vpc_subnet/tasks/main.yml
+++ /dev/null
@@ -1,618 +0,0 @@
----
-- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
-
- - name: list available AZs
- aws_az_info:
- register: region_azs
-
- - name: pick an AZ for testing
- set_fact:
- subnet_az: "{{ region_azs.availability_zones[0].zone_name }}"
-
- # ============================================================
- - name: create a VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: present
- cidr_block: "10.232.232.128/26"
- ipv6_cidr: True
- tags:
- Name: "{{ resource_prefix }}-vpc"
- Description: "Created by ansible-test"
- register: vpc_result
-
- - set_fact:
- vpc_ipv6_cidr: "{{ vpc_result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block }}"
-
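- # Editorial note: with ipv6_cidr on the VPC, AWS assigns an Amazon-provided
- # /56 block, while subnets must carve out a /64; the tasks below derive one
- # with regex_replace('::/56', '::/64'). Standalone sketch (hypothetical
- # prefix):
- #- debug:
- #    msg: "{{ '2001:db8:0:100::/56' | regex_replace('::/56', '::/64') }}"  # -> 2001:db8:0:100::/64
-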
- # ============================================================
- - name: create subnet (expected changed=true) (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- state: present
- check_mode: true
- register: vpc_subnet_create
-
- - name: assert creation would happen
- assert:
- that:
- - vpc_subnet_create is changed
-
- - name: create subnet (expected changed=true)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- state: present
- register: vpc_subnet_create
-
- - name: assert creation happened (expected changed=true)
- assert:
- that:
- - vpc_subnet_create is changed
- - 'vpc_subnet_create.subnet.id.startswith("subnet-")'
- - '"Name" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Name"] == ec2_vpc_subnet_name'
- - '"Description" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Description"] == ec2_vpc_subnet_description'
- # ============================================================
- - name: recreate subnet (expected changed=false) (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- state: present
- check_mode: true
- register: vpc_subnet_recreate
-
- - name: assert recreation changed nothing (expected changed=false)
- assert:
- that:
- - vpc_subnet_recreate is not changed
-
- - name: recreate subnet (expected changed=false)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- state: present
- register: vpc_subnet_recreate
-
- - name: assert recreation changed nothing (expected changed=false)
- assert:
- that:
- - vpc_subnet_recreate is not changed
- - 'vpc_subnet_recreate.subnet == vpc_subnet_create.subnet'
-
- # ============================================================
- - name: update subnet so instances launched in it are assigned a public IP (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- state: present
- map_public: true
- check_mode: true
- register: vpc_subnet_modify
-
- - name: assert subnet changed
- assert:
- that:
- - vpc_subnet_modify is changed
-
- - name: update subnet so instances launched in it are assigned a public IP
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- state: present
- map_public: true
- register: vpc_subnet_modify
-
- - name: assert subnet changed
- assert:
- that:
- - vpc_subnet_modify is changed
- - vpc_subnet_modify.subnet.map_public_ip_on_launch
-
- # ============================================================
- - name: add invalid ipv6 block to subnet (expected failed)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- ipv6_cidr: 2001:db8::/64
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- state: present
- register: vpc_subnet_ipv6_failed
- ignore_errors: yes
-
- - name: assert failure happened (expected failed)
- assert:
- that:
- - vpc_subnet_ipv6_failed is failed
- - "'Couldn\\'t associate ipv6 cidr' in vpc_subnet_ipv6_failed.msg"
-
- # ============================================================
- - name: add a tag (expected changed=true) (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- AnotherTag: SomeValue
- state: present
- check_mode: true
- register: vpc_subnet_add_a_tag
-
- - name: assert tag addition happened (expected changed=true)
- assert:
- that:
- - vpc_subnet_add_a_tag is changed
-
- - name: add a tag (expected changed=true)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- AnotherTag: SomeValue
- state: present
- register: vpc_subnet_add_a_tag
-
- - name: assert tag addition happened (expected changed=true)
- assert:
- that:
- - vpc_subnet_add_a_tag is changed
- - '"Name" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Name"] == ec2_vpc_subnet_name'
- - '"Description" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Description"] == ec2_vpc_subnet_description'
- - '"AnotherTag" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["AnotherTag"] == "SomeValue"'
-
- # ============================================================
- - name: remove tags with default purge_tags=true (expected changed=true) (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- AnotherTag: SomeValue
- state: present
- check_mode: true
- register: vpc_subnet_remove_tags
-
- - name: assert tag removal happened (expected changed=true)
- assert:
- that:
- - vpc_subnet_remove_tags is changed
-
- - name: remove tags with default purge_tags=true (expected changed=true)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- AnotherTag: SomeValue
- state: present
- register: vpc_subnet_remove_tags
-
- - name: assert tag removal happened (expected changed=true)
- assert:
- that:
- - vpc_subnet_remove_tags is changed
- - '"Name" not in vpc_subnet_remove_tags.subnet.tags'
- - '"Description" not in vpc_subnet_remove_tags.subnet.tags'
- - '"AnotherTag" in vpc_subnet_remove_tags.subnet.tags and vpc_subnet_remove_tags.subnet.tags["AnotherTag"] == "SomeValue"'
-
- # ============================================================
- - name: change tags with purge_tags=false (expected changed=true) (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- state: present
- purge_tags: false
- check_mode: true
- register: vpc_subnet_change_tags
-
- - name: assert tag addition happened (expected changed=true)
- assert:
- that:
- - vpc_subnet_change_tags is changed
-
- - name: change tags with purge_tags=false (expected changed=true)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- state: present
- purge_tags: false
- register: vpc_subnet_change_tags
-
- - name: assert tag addition happened (expected changed=true)
- assert:
- that:
- - vpc_subnet_change_tags is changed
- - '"Name" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Name"] == ec2_vpc_subnet_name'
- - '"Description" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Description"] == ec2_vpc_subnet_description'
- - '"AnotherTag" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["AnotherTag"] == "SomeValue"'
-
- # ============================================================
- - name: test state=absent (expected changed=true) (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: absent
- check_mode: true
- register: result
-
- - name: assert state=absent (expected changed=true)
- assert:
- that:
- - result is changed
-
- - name: test state=absent (expected changed=true)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: absent
- register: result
-
- - name: assert state=absent (expected changed=true)
- assert:
- that:
- - result is changed
-
- # ============================================================
- - name: test state=absent (expected changed=false) (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: absent
- check_mode: true
- register: result
-
- - name: assert state=absent (expected changed=false)
- assert:
- that:
- - result is not changed
-
- - name: test state=absent (expected changed=false)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: absent
- register: result
-
- - name: assert state=absent (expected changed=false)
- assert:
- that:
- - result is not changed
-
- # ============================================================
- - name: create subnet without AZ (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: present
- check_mode: true
- register: subnet_without_az
-
- - name: check that subnet without AZ works fine
- assert:
- that:
- - subnet_without_az is changed
-
- - name: create subnet without AZ
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: present
- register: subnet_without_az
-
- - name: check that subnet without AZ works fine
- assert:
- that:
- - subnet_without_az is changed
-
- # ============================================================
- - name: remove subnet without AZ (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: absent
- check_mode: true
- register: result
-
- - name: assert state=absent (expected changed=true)
- assert:
- that:
- - result is changed
-
- - name: remove subnet without AZ
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: absent
- register: result
-
- - name: assert state=absent (expected changed=true)
- assert:
- that:
- - result is changed
-
-
- # ============================================================
- - name: create subnet with IPv6 (expected changed=true) (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}"
- assign_instances_ipv6: true
- state: present
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- check_mode: true
- register: vpc_subnet_ipv6_create
-
- - name: assert creation with IPv6 happened (expected changed=true)
- assert:
- that:
- - vpc_subnet_ipv6_create is changed
-
- - name: create subnet with IPv6 (expected changed=true)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}"
- assign_instances_ipv6: true
- state: present
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- register: vpc_subnet_ipv6_create
-
- - name: assert creation with IPv6 happened (expected changed=true)
- assert:
- that:
- - vpc_subnet_ipv6_create is changed
- - 'vpc_subnet_ipv6_create.subnet.id.startswith("subnet-")'
- - "vpc_subnet_ipv6_create.subnet.ipv6_cidr_block == '{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}'"
- - '"Name" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Name"] == ec2_vpc_subnet_name'
- - '"Description" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Description"] == ec2_vpc_subnet_description'
- - 'vpc_subnet_ipv6_create.subnet.assign_ipv6_address_on_creation'
-
- # ============================================================
- - name: recreate subnet (expected changed=false) (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}"
- assign_instances_ipv6: true
- state: present
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- check_mode: true
- register: vpc_subnet_ipv6_recreate
-
- - name: assert recreation changed nothing (expected changed=false)
- assert:
- that:
- - vpc_subnet_ipv6_recreate is not changed
-
- - name: recreate subnet (expected changed=false)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}"
- assign_instances_ipv6: true
- state: present
- tags:
- Name: '{{ec2_vpc_subnet_name}}'
- Description: '{{ec2_vpc_subnet_description}}'
- register: vpc_subnet_ipv6_recreate
-
- - name: assert recreation changed nothing (expected changed=false)
- assert:
- that:
- - vpc_subnet_ipv6_recreate is not changed
- - 'vpc_subnet_ipv6_recreate.subnet == vpc_subnet_ipv6_create.subnet'
-
- # ============================================================
- - name: change subnet ipv6 attribute (expected changed=true) (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}"
- assign_instances_ipv6: false
- state: present
- purge_tags: false
- check_mode: true
- register: vpc_change_attribute
-
- - name: assert assign_instances_ipv6 attribute changed (expected changed=true)
- assert:
- that:
- - vpc_change_attribute is changed
-
- - name: change subnet ipv6 attribute (expected changed=true)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}"
- assign_instances_ipv6: false
- state: present
- purge_tags: false
- register: vpc_change_attribute
-
- - name: assert assign_instances_ipv6 attribute changed (expected changed=true)
- assert:
- that:
- - vpc_change_attribute is changed
- - 'not vpc_change_attribute.subnet.assign_ipv6_address_on_creation'
-
- # ============================================================
- - name: add second subnet with duplicate ipv6 cidr (expected failure)
- ec2_vpc_subnet:
- cidr: "10.232.232.144/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}"
- state: present
- purge_tags: false
- register: vpc_add_duplicate_ipv6
- ignore_errors: true
-
- - name: assert graceful failure (expected failed)
- assert:
- that:
- - vpc_add_duplicate_ipv6 is failed
- - "'The IPv6 CIDR \\'{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}\\' conflicts with another subnet' in vpc_add_duplicate_ipv6.msg"
-
- # ============================================================
- - name: remove subnet ipv6 cidr (expected changed=true) (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: present
- purge_tags: false
- check_mode: true
- register: vpc_remove_ipv6_cidr
-
- - name: assert subnet ipv6 cidr removed (expected changed=true)
- assert:
- that:
- - vpc_remove_ipv6_cidr is changed
-
- - name: remove subnet ipv6 cidr (expected changed=true)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: present
- purge_tags: false
- register: vpc_remove_ipv6_cidr
-
- - name: assert subnet ipv6 cidr removed (expected changed=true)
- assert:
- that:
- - vpc_remove_ipv6_cidr is changed
- - "vpc_remove_ipv6_cidr.subnet.ipv6_cidr_block == ''"
- - 'not vpc_remove_ipv6_cidr.subnet.assign_ipv6_address_on_creation'
-
- # ============================================================
- - name: test adding a tag that looks like a boolean to the subnet (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: present
- purge_tags: false
- tags:
- looks_like_boolean: true
- check_mode: true
- register: vpc_subnet_info
-
- - name: assert a tag was added
- assert:
- that:
- - vpc_subnet_info is changed
-
- - name: test adding a tag that looks like a boolean to the subnet
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: present
- purge_tags: false
- tags:
- looks_like_boolean: true
- register: vpc_subnet_info
-
- - name: assert a tag was added
- assert:
- that:
- - vpc_subnet_info is changed
- - 'vpc_subnet_info.subnet.tags.looks_like_boolean == "True"'
-
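- # Editorial note: AWS stores tag values as strings, so the YAML boolean
- # true comes back as the string "True"; the assertion above compares
- # against that string deliberately. A sketch of the coercion (hypothetical):
- #- set_fact:
- #    coerced: "{{ true | string }}"  # -> "True", matching what AWS returns
-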
- # ============================================================
- - name: test idempotence adding a tag that looks like a boolean (CHECK MODE)
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: present
- purge_tags: false
- tags:
- looks_like_boolean: true
- check_mode: true
- register: vpc_subnet_info
-
- - name: assert tags haven't changed
- assert:
- that:
- - vpc_subnet_info is not changed
-
- - name: test idempotence adding a tag that looks like a boolean
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: present
- purge_tags: false
- tags:
- looks_like_boolean: true
- register: vpc_subnet_info
-
- - name: assert tags haven't changed
- assert:
- that:
- - vpc_subnet_info is not changed
-
- always:
-
- ################################################
- # TEARDOWN STARTS HERE
- ################################################
-
- - name: tidy up subnet
- ec2_vpc_subnet:
- cidr: "10.232.232.128/28"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: absent
-
- - name: tidy up VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: absent
- cidr_block: "10.232.232.128/26"
diff --git a/test/integration/targets/inventory_aws_ec2/aliases b/test/integration/targets/inventory_aws_ec2/aliases
deleted file mode 100644
index a112c3d1bb..0000000000
--- a/test/integration/targets/inventory_aws_ec2/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group1
diff --git a/test/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml b/test/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml
deleted file mode 100644
index 8680c38d01..0000000000
--- a/test/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- vars:
- template_name: "../templates/{{ template | default('inventory.yml') }}"
- tasks:
- - name: write inventory config file
- copy:
- dest: ../test.aws_ec2.yml
- content: "{{ lookup('template', template_name) }}"
diff --git a/test/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml b/test/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml
deleted file mode 100644
index f67fff1a93..0000000000
--- a/test/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- tasks:
- - name: write inventory config file
- copy:
- dest: ../test.aws_ec2.yml
- content: ""
diff --git a/test/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml b/test/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml
deleted file mode 100644
index 07b0eec4c5..0000000000
--- a/test/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- environment: "{{ ansible_test.environment }}"
- tasks:
-
- - block:
-
- # Create VPC, subnet, security group, and find image_id to create instance
-
- - include_tasks: setup.yml
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
-
- # Create new host, add it to inventory and then terminate it without updating the cache
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- region: '{{ aws_region }}'
- no_log: yes
-
- - name: create a new host
- ec2:
- image: '{{ image_id }}'
- exact_count: 1
- count_tag:
- Name: '{{ resource_prefix }}'
- instance_tags:
- Name: '{{ resource_prefix }}'
- instance_type: t2.micro
- wait: yes
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
- register: setup_instance
-
- - meta: refresh_inventory
-
- always:
-
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
- ignore_errors: yes
- when: setup_instance is defined
-
- - include_tasks: tear_down.yml
diff --git a/test/integration/targets/inventory_aws_ec2/playbooks/setup.yml b/test/integration/targets/inventory_aws_ec2/playbooks/setup.yml
deleted file mode 100644
index 8a9b88937f..0000000000
--- a/test/integration/targets/inventory_aws_ec2/playbooks/setup.yml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- region: '{{ aws_region }}'
- no_log: yes
-
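-# Editorial note: these older playbooks share credentials via a YAML anchor
-# (&aws_connection_info) merged into each task with the merge key
-# (<<: *aws_connection_info); module_defaults, used by the ec2_vpc_net and
-# ec2_vpc_subnet targets above, is the newer replacement. Minimal anchor
-# sketch (hypothetical values):
-#- set_fact:
-#    common: &common
-#      region: us-east-1
-#- ec2_ami_info:
-#    <<: *common
-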
-- name: get image ID to create an instance
- ec2_ami_info:
- filters:
- architecture: x86_64
- owner-id: '125523088429'
- virtualization-type: hvm
- root-device-type: ebs
- name: 'Fedora-Atomic-27*'
- <<: *aws_connection_info
- register: fedora_images
-
-- set_fact:
- image_id: '{{ fedora_images.images.0.image_id }}'
-
-- name: create a VPC to work in
- ec2_vpc_net:
- cidr_block: 10.10.0.0/24
- state: present
- name: '{{ resource_prefix }}_setup'
- resource_tags:
- Name: '{{ resource_prefix }}_setup'
- <<: *aws_connection_info
- register: setup_vpc
-
-- set_fact:
- vpc_id: '{{ setup_vpc.vpc.id }}'
-
-- name: create a subnet to use for creating an ec2 instance
- ec2_vpc_subnet:
- az: '{{ aws_region }}a'
- vpc_id: '{{ setup_vpc.vpc.id }}'
- cidr: 10.10.0.0/24
- state: present
- resource_tags:
- Name: '{{ resource_prefix }}_setup'
- <<: *aws_connection_info
- register: setup_subnet
-
-- set_fact:
- subnet_id: '{{ setup_subnet.subnet.id }}'
-
-- name: create a security group to use for creating an ec2 instance
- ec2_group:
- name: '{{ resource_prefix }}_setup'
- description: 'created by Ansible integration tests'
- state: present
- vpc_id: '{{ setup_vpc.vpc.id }}'
- <<: *aws_connection_info
- register: setup_sg
-
-- set_fact:
- sg_id: '{{ setup_sg.group_id }}'
diff --git a/test/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml b/test/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml
deleted file mode 100644
index 4c8240e46d..0000000000
--- a/test/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-- name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- region: '{{ aws_region }}'
- no_log: yes
-
-- name: remove setup security group
- ec2_group:
- name: '{{ resource_prefix }}_setup'
- description: 'created by Ansible integration tests'
- state: absent
- vpc_id: '{{ vpc_id }}'
- <<: *aws_connection_info
- ignore_errors: yes
-
-- name: remove setup subnet
- ec2_vpc_subnet:
- az: '{{ aws_region }}a'
- vpc_id: '{{ vpc_id }}'
- cidr: 10.10.0.0/24
- state: absent
- resource_tags:
- Name: '{{ resource_prefix }}_setup'
- <<: *aws_connection_info
- ignore_errors: yes
-
-- name: remove setup VPC
- ec2_vpc_net:
- cidr_block: 10.10.0.0/24
- state: absent
- name: '{{ resource_prefix }}_setup'
- resource_tags:
- Name: '{{ resource_prefix }}_setup'
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml b/test/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
deleted file mode 100644
index cc1b9a5a5e..0000000000
--- a/test/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- tasks:
- - name: assert inventory was not populated by aws_ec2 inventory plugin
- assert:
- that:
- - "'aws_ec2' not in groups"
diff --git a/test/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml b/test/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml
deleted file mode 100644
index d83cb0bfe6..0000000000
--- a/test/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- tasks:
- - name: assert cache was used to populate inventory
- assert:
- that:
- - "'aws_ec2' in groups"
- - "groups.aws_ec2 | length == 1"
-
- - meta: refresh_inventory
-
- - name: assert refresh_inventory updated the cache
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
diff --git a/test/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml b/test/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml
deleted file mode 100644
index 73a67db065..0000000000
--- a/test/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml
+++ /dev/null
@@ -1,91 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- environment: "{{ ansible_test.environment }}"
- tasks:
-
- - block:
-
- # Create VPC, subnet, security group, and find image_id to create instance
-
- - include_tasks: setup.yml
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
-
- # Create new host, refresh inventory, remove host, refresh inventory
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- region: '{{ aws_region }}'
- no_log: yes
-
- - name: create a new host
- ec2:
- image: '{{ image_id }}'
- exact_count: 1
- count_tag:
- Name: '{{ resource_prefix }}'
- instance_tags:
- Name: '{{ resource_prefix }}'
- instance_type: t2.micro
- wait: yes
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
- register: setup_instance
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory and is no longer empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "groups.aws_ec2 | length == 1"
- - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
-
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
-
- always:
-
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
- ignore_errors: yes
- when: setup_instance is defined
-
- - include_tasks: tear_down.yml
diff --git a/test/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml b/test/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
deleted file mode 100644
index fdeeeeff42..0000000000
--- a/test/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
+++ /dev/null
@@ -1,79 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- environment: "{{ ansible_test.environment }}"
- tasks:
-
- - block:
-
- # Create VPC, subnet, security group, and find image_id to create instance
-
- - include_tasks: setup.yml
-
- # Create new host, refresh inventory
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- region: '{{ aws_region }}'
- no_log: yes
-
- - name: create a new host
- ec2:
- image: '{{ image_id }}'
- exact_count: 1
- count_tag:
- Name: '{{ resource_prefix }}'
- instance_tags:
- Name: '{{ resource_prefix }}'
- tag1: value1
- tag2: value2
- instance_type: t2.micro
- wait: yes
- group_id: '{{ sg_id }}'
- vpc_subnet_id: '{{ subnet_id }}'
- <<: *aws_connection_info
- register: setup_instance
-
- - meta: refresh_inventory
-
- - name: register the keyed sg group name
- set_fact:
- sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}"
-
- - name: register one of the keyed tag group names
- set_fact:
- tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}"
-
- - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars
- assert:
- that:
- # There are 9 groups: all, ungrouped, aws_ec2, sg keyed group, 3 tag keyed groups (one per tag), arch keyed group, constructed group
- - "groups | length == 9"
- - "groups[tag_group_name] | length == 1"
- - "groups[sg_group_name] | length == 1"
- - "groups.arch_x86_64 | length == 1"
- - "groups.tag_with_name_key | length == 1"
- - vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2'
-
- always:
-
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: "{{ sg_id }}"
- vpc_subnet_id: "{{ subnet_id }}"
- <<: *aws_connection_info
- ignore_errors: yes
- when: setup_instance is defined
-
- - include_tasks: tear_down.yml
diff --git a/test/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml b/test/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml
deleted file mode 100644
index 6b46599b5b..0000000000
--- a/test/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml
+++ /dev/null
@@ -1,74 +0,0 @@
-- name: test updating inventory
- block:
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create a new host
- ec2:
- image: "{{ images[aws_region] }}"
- exact_count: 1
- count_tag:
- Name: '{{ resource_prefix }}'
- instance_tags:
- Name: '{{ resource_prefix }}'
- instance_type: t2.micro
- wait: yes
- group_id: '{{ setup_sg.group_id }}'
- vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
- <<: *aws_connection_info
- register: setup_instance
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory and is no longer empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "groups.aws_ec2 | length == 1"
- - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
-
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: '{{ setup_sg.group_id }}'
- vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
- <<: *aws_connection_info
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_ec2' in groups"
- - "not groups.aws_ec2"
-
- always:
- - name: remove setup ec2 instance
- ec2:
- instance_type: t2.micro
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- instance_tags:
- Name: '{{ resource_prefix }}'
- group_id: '{{ setup_sg.group_id }}'
- vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/inventory_aws_ec2/runme.sh b/test/integration/targets/inventory_aws_ec2/runme.sh
deleted file mode 100755
index 916f7e8f7a..0000000000
--- a/test/integration/targets/inventory_aws_ec2/runme.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-# ensure test config is empty
-ansible-playbook playbooks/empty_inventory_config.yml "$@"
-
-export ANSIBLE_INVENTORY_ENABLED=aws_ec2
-
-# test with default inventory file
-ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
-
-export ANSIBLE_INVENTORY=test.aws_ec2.yml
-
-# test empty inventory config
-ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
-
-# generate inventory config and test using it
-ansible-playbook playbooks/create_inventory_config.yml "$@"
-ansible-playbook playbooks/test_populating_inventory.yml "$@"
-
-# generate inventory config with caching and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml'" "$@"
-ansible-playbook playbooks/populate_cache.yml "$@"
-ansible-playbook playbooks/test_inventory_cache.yml "$@"
-
-# remove inventory cache
-rm -r aws_ec2_cache_dir/
-
-# generate inventory config with constructed features and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml'" "$@"
-ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
-
-# cleanup inventory config
-ansible-playbook playbooks/empty_inventory_config.yml "$@"
diff --git a/test/integration/targets/inventory_aws_ec2/templates/inventory.yml b/test/integration/targets/inventory_aws_ec2/templates/inventory.yml
deleted file mode 100644
index 942edb309b..0000000000
--- a/test/integration/targets/inventory_aws_ec2/templates/inventory.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-plugin: aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
-aws_security_token: '{{ security_token }}'
-regions:
- - '{{ aws_region }}'
-filters:
- tag:Name:
- - '{{ resource_prefix }}'
-hostnames:
- - tag:Name
- - dns-name
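
In this template, `hostnames` is an ordered preference list: the aws_ec2
plugin names each instance after the first entry that resolves, so the Name
tag wins and `dns-name` only applies to untagged instances. A rough sketch of
the resulting inventory for one matching instance (the hostname is
illustrative):

    # hypothetical `ansible-inventory --list`-style output, rendered as YAML:
    all:
      children:
        aws_ec2:
          hosts:
            ansible-test-xyz: {}   # taken from tag:Name, not the EC2 DNS name
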
diff --git a/test/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml b/test/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml
deleted file mode 100644
index e35bf9010b..0000000000
--- a/test/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-plugin: aws_ec2
-cache: True
-cache_plugin: jsonfile
-cache_connection: aws_ec2_cache_dir
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
-aws_security_token: '{{ security_token }}'
-regions:
- - '{{ aws_region }}'
-filters:
- tag:Name:
- - '{{ resource_prefix }}'
diff --git a/test/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml b/test/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml
deleted file mode 100644
index 6befb4e339..0000000000
--- a/test/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-plugin: aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
-aws_security_token: '{{ security_token }}'
-regions:
- - '{{ aws_region }}'
-filters:
- tag:Name:
- - '{{ resource_prefix }}'
-keyed_groups:
- - key: 'security_groups|json_query("[].group_id")'
- prefix: 'security_groups'
- - key: 'tags'
- prefix: 'tag'
- - prefix: 'arch'
- key: "architecture"
-compose:
- test_compose_var_sum: tags.tag1 + tags.tag2
-groups:
- tag_with_name_key: "'Name' in (tags | list)"
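
`keyed_groups` derives each group name as the prefix plus an underscore plus
the key's value, with characters that are invalid in group names replaced by
underscores; `compose` evaluates its Jinja2 expression per host and stores the
result as a hostvar; `groups` adds a host to a group whenever the condition is
true. For an instance with one security group, tags Name/tag1/tag2 and an
x86_64 image, this config should yield roughly the following (the ids and
prefix are illustrative, which is also why the test computes the sg and tag
group names at runtime):

    # hypothetical groups created in addition to all/ungrouped/aws_ec2:
    # security_groups_sg_0123456789abcdef0   <- sg keyed group ('-' -> '_')
    # tag_Name_ansible_test_xyz              <- one group per tag key/value pair
    # tag_tag1_value1
    # tag_tag2_value2
    # arch_x86_64                            <- architecture keyed group
    # tag_with_name_key                      <- conditional group
    # plus the composed hostvar: test_compose_var_sum == 'value1value2'

That is nine groups in total, matching the length assertion in
test_populating_inventory_with_constructed.yml above.
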
diff --git a/test/integration/targets/inventory_aws_ec2/test.aws_ec2.yml b/test/integration/targets/inventory_aws_ec2/test.aws_ec2.yml
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/inventory_aws_ec2/test.aws_ec2.yml
+++ /dev/null
diff --git a/test/integration/targets/inventory_aws_rds/aliases b/test/integration/targets/inventory_aws_rds/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/inventory_aws_rds/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml b/test/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml
deleted file mode 100644
index f0a9030a0f..0000000000
--- a/test/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- vars:
- template_name: "../templates/{{ template | default('inventory.j2') }}"
- tasks:
- - name: write inventory config file
- copy:
- dest: ../test.aws_rds.yml
- content: "{{ lookup('template', template_name) }}"
diff --git a/test/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml b/test/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml
deleted file mode 100644
index d7e2cda3a7..0000000000
--- a/test/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- tasks:
- - name: write inventory config file
- copy:
- dest: ../test.aws_rds.yml
- content: ""
diff --git a/test/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml b/test/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml
deleted file mode 100644
index bd7dc6b494..0000000000
--- a/test/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- environment: "{{ ansible_test.environment }}"
- tasks:
-
- - module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region }}'
- block:
- - set_fact:
- instance_id: '{{ resource_prefix }}-mariadb'
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_rds' in groups"
- - "not groups.aws_rds"
-
- # Create new host, add it to inventory and then terminate it without updating the cache
-
- - name: create minimal mariadb instance in default VPC and default subnet group
- rds_instance:
- state: present
- engine: mariadb
- db_instance_class: db.t2.micro
- allocated_storage: 20
- instance_id: '{{ instance_id }}'
- master_username: 'ansibletestuser'
- master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
- tags:
- workload_type: other
- register: setup_instance
-
- - meta: refresh_inventory
-
- - assert:
- that:
- - groups.aws_rds
-
- always:
-
- - name: remove mariadb instance
- rds_instance:
- state: absent
- engine: mariadb
- skip_final_snapshot: yes
- instance_id: '{{ instance_id }}'
- ignore_errors: yes
- when: setup_instance is defined
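
Unlike the aws_ec2 playbooks above, these RDS tests set credentials once via
`module_defaults` for the `group/aws` action group rather than merging a YAML
anchor into every task, which is why the individual rds_instance calls carry
no connection arguments. A minimal standalone sketch of the pattern (the
credentials are placeholders):

    - hosts: localhost
      connection: local
      gather_facts: no
      module_defaults:
        group/aws:
          aws_access_key: AKIAEXAMPLEKEY   # placeholder
          aws_secret_key: example-secret   # placeholder
          region: us-east-1
      tasks:
        - name: inherits the defaults without repeating them
          rds_instance:
            state: absent
            engine: mariadb
            skip_final_snapshot: yes
            instance_id: example-instance
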
diff --git a/test/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml b/test/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml
deleted file mode 100644
index 499513570b..0000000000
--- a/test/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- tasks:
- - name: assert inventory was not populated by aws_rds inventory plugin
- assert:
- that:
- - "'aws_rds' not in groups"
diff --git a/test/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml b/test/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml
deleted file mode 100644
index 7eadbad853..0000000000
--- a/test/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- tasks:
- - name: assert cache was used to populate inventory
- assert:
- that:
- - "'aws_rds' in groups"
- - "groups.aws_rds | length == 1"
-
- - meta: refresh_inventory
-
- - name: assert refresh_inventory updated the cache
- assert:
- that:
- - "'aws_rds' in groups"
- - "not groups.aws_rds"
diff --git a/test/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml b/test/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml
deleted file mode 100644
index d79f2a01a9..0000000000
--- a/test/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- environment: "{{ ansible_test.environment }}"
- tasks:
-
- - module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region }}'
- block:
-
- - set_fact:
- instance_id: "{{ resource_prefix }}-mariadb"
-
- - debug: var=groups
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_rds' in groups"
- - "not groups.aws_rds"
-
- # Create new host, refresh inventory, remove host, refresh inventory
-
- - name: create minimal mariadb instance in default VPC and default subnet group
- rds_instance:
- state: present
- engine: mariadb
- db_instance_class: db.t2.micro
- allocated_storage: 20
- instance_id: '{{ instance_id }}'
- master_username: 'ansibletestuser'
- master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
- tags:
- workload_type: other
- register: setup_instance
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory and is no longer empty
- assert:
- that:
- - "'aws_rds' in groups"
- - "groups.aws_rds | length == 1"
- - "groups.aws_rds.0 == '{{ instance_id }}'"
-
- - name: remove mariadb instance
- rds_instance:
- state: absent
- engine: mariadb
- skip_final_snapshot: yes
- instance_id: '{{ instance_id }}'
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_rds' in groups"
- - "not groups.aws_rds"
-
- always:
-
- - name: remove mariadb instance
- rds_instance:
- state: absent
- engine: mariadb
- skip_final_snapshot: yes
- instance_id: '{{ instance_id }}'
- ignore_errors: yes
- when: setup_instance is defined
diff --git a/test/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml b/test/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml
deleted file mode 100644
index c6ddb57340..0000000000
--- a/test/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml
+++ /dev/null
@@ -1,62 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- gather_facts: no
- environment: "{{ ansible_test.environment }}"
- tasks:
-
- - module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region }}'
- block:
-
- - set_fact:
- instance_id: "{{ resource_prefix }}-mariadb"
-
- - name: create minimal mariadb instance in default VPC and default subnet group
- rds_instance:
- state: present
- engine: mariadb
- db_instance_class: db.t2.micro
- allocated_storage: 20
- instance_id: '{{ resource_prefix }}-mariadb'
- master_username: 'ansibletestuser'
- master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
- tags:
- workload_type: other
- register: setup_instance
-
- - meta: refresh_inventory
- - debug: var=groups
-
- - name: 'generate expected group name based on the db parameter groups'
- vars:
- parameter_group_name: '{{ setup_instance.db_parameter_groups[0].db_parameter_group_name }}'
- set_fact:
- parameter_group_key: 'rds_parameter_group_{{ parameter_group_name | replace(".", "_") }}'
-
- - name: assert the keyed groups from constructed config were added to inventory
- assert:
- that:
- # There are 6 groups: all, ungrouped, aws_rds, tag keyed group, engine keyed group, parameter group keyed group
- - "groups | length == 6"
- - '"all" in groups'
- - '"ungrouped" in groups'
- - '"aws_rds" in groups'
- - '"tag_workload_type_other" in groups'
- - '"rds_mariadb" in groups'
- - 'parameter_group_key in groups'
-
- always:
-
- - name: remove mariadb instance
- rds_instance:
- state: absent
- engine: mariadb
- skip_final_snapshot: yes
- instance_id: '{{ instance_id }}'
- ignore_errors: yes
- when: setup_instance is defined
diff --git a/test/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml b/test/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml
deleted file mode 100644
index 565803800c..0000000000
--- a/test/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-- name: test updating inventory
- module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region }}'
- block:
- - set_fact:
- instance_id: "{{ resource_prefix }}update"
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_rds' in groups"
- - "not groups.aws_rds"
-
- - name: create minimal mariadb instance in default VPC and default subnet group
- rds_instance:
- state: present
- engine: mariadb
- db_instance_class: db.t2.micro
- allocated_storage: 20
- instance_id: '{{ instance_id }}'
- master_username: 'ansibletestuser'
- master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
- tags:
- workload_type: other
- register: setup_instance
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory and is no longer empty
- assert:
- that:
- - "'aws_rds' in groups"
- - "groups.aws_rds | length == 1"
- - "groups.aws_rds.0 == '{{ resource_prefix }}'"
-
- - name: remove mariadb instance
- rds_instance:
- state: absent
- engine: mariadb
- skip_final_snapshot: yes
- instance_id: '{{ instance_id }}'
-
- - meta: refresh_inventory
-
- - name: assert group was populated with inventory but is empty
- assert:
- that:
- - "'aws_rds' in groups"
- - "not groups.aws_rds"
-
- always:
-
- - name: remove mariadb instance
- rds_instance:
- state: absent
- engine: mariadb
- skip_final_snapshot: yes
- instance_id: '{{ instance_id }}'
- ignore_errors: yes
- when: setup_instance is defined
diff --git a/test/integration/targets/inventory_aws_rds/runme.sh b/test/integration/targets/inventory_aws_rds/runme.sh
deleted file mode 100755
index d759349e76..0000000000
--- a/test/integration/targets/inventory_aws_rds/runme.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-# ensure test config is empty
-ansible-playbook playbooks/empty_inventory_config.yml "$@"
-
-export ANSIBLE_INVENTORY_ENABLED=aws_rds
-
-# test with default inventory file
-ansible-playbook playbooks/test_invalid_aws_rds_inventory_config.yml "$@"
-
-export ANSIBLE_INVENTORY=test.aws_rds.yml
-
-# test empty inventory config
-ansible-playbook playbooks/test_invalid_aws_rds_inventory_config.yml "$@"
-
-# generate inventory config and test using it
-ansible-playbook playbooks/create_inventory_config.yml "$@"
-ansible-playbook playbooks/test_populating_inventory.yml "$@"
-
-# generate inventory config with caching and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.j2'" "$@"
-ansible-playbook playbooks/populate_cache.yml "$@"
-ansible-playbook playbooks/test_inventory_cache.yml "$@"
-
-# remove inventory cache
-rm -r aws_rds_cache_dir/
-
-# generate inventory config with constructed features and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.j2'" "$@"
-ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
-
-# cleanup inventory config
-ansible-playbook playbooks/empty_inventory_config.yml "$@"
diff --git a/test/integration/targets/inventory_aws_rds/templates/inventory.j2 b/test/integration/targets/inventory_aws_rds/templates/inventory.j2
deleted file mode 100644
index 3d9df9affc..0000000000
--- a/test/integration/targets/inventory_aws_rds/templates/inventory.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-plugin: aws_rds
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
-{% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
-{% endif %}
-regions:
- - '{{ aws_region }}'
-filters:
- db-instance-id: "{{ resource_prefix }}-mariadb"
diff --git a/test/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 b/test/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2
deleted file mode 100644
index ba227e3082..0000000000
--- a/test/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-plugin: aws_rds
-cache: True
-cache_plugin: jsonfile
-cache_connection: aws_rds_cache_dir
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
-{% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
-{% endif %}
-regions:
- - '{{ aws_region }}'
-filters:
- db-instance-id: "{{ resource_prefix }}-mariadb"
diff --git a/test/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 b/test/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2
deleted file mode 100644
index 7239497478..0000000000
--- a/test/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-plugin: aws_rds
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
-{% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
-{% endif %}
-regions:
- - '{{ aws_region }}'
-keyed_groups:
- - key: 'db_parameter_groups|json_query("[].db_parameter_group_name")'
- prefix: rds_parameter_group
- - key: tags
- prefix: tag
- - key: engine
- prefix: rds
-filters:
- db-instance-id: "{{ resource_prefix }}-mariadb"
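
The keyed_groups here follow the same naming rule as in the ec2 template: for
the instance these tests create, engine mariadb should land in `rds_mariadb`
and the workload_type tag in `tag_workload_type_other`, while the DB parameter
group name embeds the engine version, which is why
test_populating_inventory_with_constructed.yml computes that group name at
runtime instead of hard-coding it. Roughly (version-dependent, illustrative):

    # hypothetical groups beyond all/ungrouped/aws_rds:
    # rds_mariadb                               <- engine keyed group
    # tag_workload_type_other                   <- tag keyed group
    # rds_parameter_group_default_mariadb10_3   <- dots in the parameter group
    #                                              name become underscores
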
diff --git a/test/integration/targets/inventory_aws_rds/test.aws_rds.yml b/test/integration/targets/inventory_aws_rds/test.aws_rds.yml
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/inventory_aws_rds/test.aws_rds.yml
+++ /dev/null
diff --git a/test/integration/targets/s3_bucket/aliases b/test/integration/targets/s3_bucket/aliases
deleted file mode 100644
index a112c3d1bb..0000000000
--- a/test/integration/targets/s3_bucket/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group1
diff --git a/test/integration/targets/s3_bucket/inventory b/test/integration/targets/s3_bucket/inventory
deleted file mode 100644
index 2968f764cf..0000000000
--- a/test/integration/targets/s3_bucket/inventory
+++ /dev/null
@@ -1,12 +0,0 @@
-[tests]
-missing
-simple
-complex
-dotted
-tags
-encryption_kms
-encryption_sse
-
-[all:vars]
-ansible_connection=local
-ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/s3_bucket/main.yml b/test/integration/targets/s3_bucket/main.yml
deleted file mode 100644
index 22fc0d64f7..0000000000
--- a/test/integration/targets/s3_bucket/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# Beware: most of our tests here are run in parallel.
-# To add new tests you'll need to add a new host to the inventory and a matching
-# '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/
-
-# VPC should get cleaned up once all hosts have run
-- hosts: all
- gather_facts: no
- strategy: free
- #serial: 10
- roles:
- - s3_bucket
diff --git a/test/integration/targets/s3_bucket/meta/main.yml b/test/integration/targets/s3_bucket/meta/main.yml
deleted file mode 100644
index 38b31be072..0000000000
--- a/test/integration/targets/s3_bucket/meta/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
- - setup_remote_tmp_dir
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml b/test/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml
deleted file mode 100644
index b4fd58adfc..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-bucket_name: '{{ resource_prefix }}-{{ inventory_hostname | regex_replace("_","-") }}'
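
The regex_replace above matters because S3 bucket names must be DNS-compliant
(lowercase letters, digits, hyphens and dots), while the inventory hostnames
these tests use contain underscores. For example, assuming a resource_prefix
of ansible-test-123 (illustrative):

    # inventory_hostname 'encryption_kms' would yield:
    bucket_name: ansible-test-123-encryption-kms
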
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml b/test/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml
deleted file mode 100644
index 38b31be072..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
- - setup_remote_tmp_dir
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml b/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml
deleted file mode 100644
index 41a03a4a55..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml
+++ /dev/null
@@ -1,146 +0,0 @@
----
-- block:
- - name: 'Create more complex s3_bucket'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- policy: "{{ lookup('template','policy.json') }}"
- requester_pays: yes
- versioning: yes
- tags:
- example: tag1
- another: tag2
- register: output
-
- - assert:
- that:
- - output is changed
- - output.name == '{{ bucket_name }}'
- - output.requester_pays
- - output.versioning.MfaDelete == 'Disabled'
- - output.versioning.Versioning == 'Enabled'
- - output.tags.example == 'tag1'
- - output.tags.another == 'tag2'
- - output.policy.Statement[0].Action == 's3:GetObject'
- - output.policy.Statement[0].Effect == 'Allow'
- - output.policy.Statement[0].Principal == '*'
- - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ bucket_name }}/*'
- - output.policy.Statement[0].Sid == 'AddPerm'
-
- # ============================================================
-
- - name: 'Pause to help with s3 bucket eventual consistency'
- wait_for:
- timeout: 10
- delegate_to: localhost
-
- - name: 'Try to update the same complex s3_bucket'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- policy: "{{ lookup('template','policy.json') }}"
- requester_pays: yes
- versioning: yes
- tags:
- example: tag1
- another: tag2
- register: output
-
- - assert:
- that:
- - output is not changed
- - output.name == '{{ bucket_name }}'
- - output.requester_pays
- - output.versioning.MfaDelete == 'Disabled'
- - output.versioning.Versioning == 'Enabled'
- - output.tags.example == 'tag1'
- - output.tags.another == 'tag2'
- - output.policy.Statement[0].Action == 's3:GetObject'
- - output.policy.Statement[0].Effect == 'Allow'
- - output.policy.Statement[0].Principal == '*'
- - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ bucket_name }}/*'
- - output.policy.Statement[0].Sid == 'AddPerm'
-
- # ============================================================
- - name: 'Update bucket policy on complex bucket'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- policy: "{{ lookup('template','policy-updated.json') }}"
- requester_pays: yes
- versioning: yes
- tags:
- example: tag1
- another: tag2
- register: output
-
- - assert:
- that:
- - output is changed
- - output.policy.Statement[0].Action == 's3:GetObject'
- - output.policy.Statement[0].Effect == 'Deny'
- - output.policy.Statement[0].Principal == '*'
- - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ bucket_name }}/*'
- - output.policy.Statement[0].Sid == 'AddPerm'
-
- # ============================================================
-
- - name: 'Pause to help with s3 bucket eventual consistency'
- wait_for:
- timeout: 10
- delegate_to: localhost
-
- - name: Update attributes for s3_bucket
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- policy: "{{ lookup('template','policy.json') }}"
- requester_pays: no
- versioning: no
- tags:
- example: tag1-updated
- another: tag2
- register: output
-
- - assert:
- that:
- - output is changed
- - output.name == '{{ bucket_name }}'
- - not output.requester_pays
- - output.versioning.MfaDelete == 'Disabled'
- - output.versioning.Versioning in ['Suspended', 'Disabled']
- output.tags.example == 'tag1-updated'
- - output.tags.another == 'tag2'
- - output.policy.Statement[0].Action == 's3:GetObject'
- - output.policy.Statement[0].Effect == 'Allow'
- - output.policy.Statement[0].Principal == '*'
- - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ bucket_name }}/*'
- - output.policy.Statement[0].Sid == 'AddPerm'
-
- - name: 'Delete complex test bucket'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- register: output
-
- - assert:
- that:
- - output is changed
-
- - name: 'Re-delete complex test bucket'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- register: output
-
- - assert:
- that:
- - output is not changed
-
- # ============================================================
- always:
- - name: 'Ensure all buckets are deleted'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml b/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml
deleted file mode 100644
index 7d4e0ae9ea..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-- block:
- - name: 'Ensure bucket_name contains a .'
- set_fact:
- bucket_name: '{{ bucket_name }}.something'
-
- # ============================================================
- #
- - name: 'Create bucket with dot in name'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- register: output
-
- - assert:
- that:
- - output is changed
- - output.name == '{{ bucket_name }}'
-
-
- # ============================================================
-
- - name: 'Pause to help with s3 bucket eventual consistency'
- wait_for:
- timeout: 10
- delegate_to: localhost
-
- - name: 'Delete s3_bucket with dot in name'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- register: output
-
- - assert:
- that:
- - output is changed
-
- - name: 'Re-delete s3_bucket with dot in name'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- register: output
-
- - assert:
- that:
- - output is not changed
-
- # ============================================================
- always:
- - name: 'Ensure all buckets are deleted'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml b/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml
deleted file mode 100644
index 869dd40236..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml
+++ /dev/null
@@ -1,88 +0,0 @@
----
-- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
-
- # ============================================================
-
- - name: 'Create a simple bucket'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- register: output
-
- - name: 'Enable aws:kms encryption with KMS master key'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- encryption: "aws:kms"
- register: output
-
- - assert:
- that:
- - output.changed
- - output.encryption
- - output.encryption.SSEAlgorithm == 'aws:kms'
-
- - name: 'Re-enable aws:kms encryption with KMS master key (idempotent)'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- encryption: "aws:kms"
- register: output
-
- - assert:
- that:
- - not output.changed
- - output.encryption
- - output.encryption.SSEAlgorithm == 'aws:kms'
-
- # ============================================================
-
- - name: Disable encryption from bucket
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- encryption: "none"
- register: output
-
- - assert:
- that:
- - output.changed
- - not output.encryption
-
- - name: Disable encryption from bucket
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- encryption: "none"
- register: output
-
- - assert:
- that:
- - output is not changed
- - not output.encryption
-
- # ============================================================
-
- - name: Delete encryption test s3 bucket
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- register: output
-
- - assert:
- that:
- - output.changed
-
- # ============================================================
- always:
- - name: Ensure all buckets are deleted
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- ignore_errors: yes
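
With `encryption: "aws:kms"` and no key specified, the bucket falls back to
the account's default aws/s3 KMS key. A hedged variant that pins a
customer-managed key instead, via the module's encryption_key_id parameter
(the key ARN below is made up):

    - name: enable aws:kms encryption with a specific CMK
      s3_bucket:
        name: example-bucket
        state: present
        encryption: "aws:kms"
        encryption_key_id: arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555
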
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml b/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml
deleted file mode 100644
index 699e8ae410..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml
+++ /dev/null
@@ -1,88 +0,0 @@
----
-- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
-
- # ============================================================
-
- - name: 'Create a simple bucket'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- register: output
-
- - name: 'Enable AES256 encryption'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- encryption: 'AES256'
- register: output
-
- - assert:
- that:
- - output.changed
- - output.encryption
- - output.encryption.SSEAlgorithm == 'AES256'
-
- - name: 'Re-enable AES256 encryption (idempotency)'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- encryption: 'AES256'
- register: output
-
- - assert:
- that:
- - not output.changed
- - output.encryption
- - output.encryption.SSEAlgorithm == 'AES256'
-
- # ============================================================
-
- - name: Disable encryption from bucket
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- encryption: "none"
- register: output
-
- - assert:
- that:
- - output.changed
- - not output.encryption
-
- - name: Disable encryption from bucket
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- encryption: "none"
- register: output
-
- - assert:
- that:
- - output is not changed
- - not output.encryption
-
- # ============================================================
-
- - name: Delete encryption test s3 bucket
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- register: output
-
- - assert:
- that:
- - output.changed
-
- # ============================================================
- always:
- - name: Ensure all buckets are deleted
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml b/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml
deleted file mode 100644
index 8eba03ba1a..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-# Beware: most of our tests here are run in parallel.
-# To add new tests you'll need to add a new host to the inventory and a matching
-# '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/
-#
-# ###############################################################################
-
-- name: "Wrap up all tests and setup AWS credentials"
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- - debug:
- msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}"
- - include_tasks: '{{ inventory_hostname }}.yml'
- - debug:
- msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}"
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml b/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml
deleted file mode 100644
index 4d827680ee..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: 'Attempt to delete non-existent buckets'
- block:
- # ============================================================
- #
- # While in theory the 'simple' test case covers this there are
- # ways in which eventual-consistency could catch us out.
- #
- - name: 'Delete non-existent s3_bucket (never created)'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- register: output
-
- - assert:
- that:
- - output is success
- - output is not changed
-
- # ============================================================
- always:
- - name: 'Ensure all buckets are deleted'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml b/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml
deleted file mode 100644
index 3c39c5b4cb..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- name: 'Run simple tests'
- block:
- # Note: s3_bucket doesn't support check_mode
-
- # ============================================================
- - name: 'Create a simple s3_bucket'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- register: output
-
- - assert:
- that:
- - output is success
- - output is changed
- - output.name == '{{ bucket_name }}'
- - not output.requester_pays
-
- # ============================================================
- - name: 'Try to update the simple bucket with the same values'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- register: output
-
- - assert:
- that:
- - output is success
- - output is not changed
- - output.name == '{{ bucket_name }}'
- - not output.requester_pays
-
- # ============================================================
- - name: 'Delete the simple s3_bucket'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- register: output
-
- - assert:
- that:
- - output is success
- - output is changed
-
- # ============================================================
- - name: 'Re-delete the simple s3_bucket (idempotency)'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- register: output
-
- - assert:
- that:
- - output is success
- - output is not changed
-
- # ============================================================
- always:
- - name: 'Ensure all buckets are deleted'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml b/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml
deleted file mode 100644
index 437dd2ca5f..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml
+++ /dev/null
@@ -1,256 +0,0 @@
----
-- name: 'Run tagging tests'
- block:
-
- # ============================================================
- - name: 'Create simple s3_bucket for testing tagging'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- register: output
-
- - assert:
- that:
- - output.changed
- - output.name == '{{ bucket_name }}'
-
- # ============================================================
-
- - name: 'Add tags to s3 bucket'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- tags:
- example: tag1
- another: tag2
- register: output
-
- - assert:
- that:
- - output.changed
- - output.name == '{{ bucket_name }}'
- - output.tags.example == 'tag1'
- - output.tags.another == 'tag2'
-
- - name: 'Re-Add tags to s3 bucket'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- tags:
- example: tag1
- another: tag2
- register: output
-
- - assert:
- that:
- - output is not changed
- - output.name == '{{ bucket_name }}'
- - output.tags.example == 'tag1'
- - output.tags.another == 'tag2'
-
- # ============================================================
-
- - name: Remove a tag from an s3_bucket
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- tags:
- example: tag1
- register: output
-
- - assert:
- that:
- - output.changed
- - output.name == '{{ bucket_name }}'
- - output.tags.example == 'tag1'
- - "'another' not in output.tags"
-
- - name: Re-remove the tag from an s3_bucket
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- tags:
- example: tag1
- register: output
-
- - assert:
- that:
- - output is not changed
- - output.name == '{{ bucket_name }}'
- - output.tags.example == 'tag1'
- - "'another' not in output.tags"
-
- ## ============================================================
-
- #- name: 'Pause to help with s3 bucket eventual consistency'
- # wait_for:
- # timeout: 10
- # delegate_to: localhost
-
- ## ============================================================
-
- - name: 'Add a tag for s3_bucket with purge_tags False'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- purge_tags: no
- tags:
- anewtag: here
- register: output
-
- - assert:
- that:
- - output.changed
- - output.name == '{{ bucket_name }}'
- - output.tags.example == 'tag1'
- - output.tags.anewtag == 'here'
-
- - name: 'Re-add a tag for s3_bucket with purge_tags False'
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- purge_tags: no
- tags:
- anewtag: here
- register: output
-
- - assert:
- that:
- - output is not changed
- - output.name == '{{ bucket_name }}'
- - output.tags.example == 'tag1'
- - output.tags.anewtag == 'here'
-
- ## ============================================================
-
- #- name: 'Pause to help with s3 bucket eventual consistency'
- # wait_for:
- # timeout: 10
- # delegate_to: localhost
-
- ## ============================================================
-
- - name: Update a tag for s3_bucket with purge_tags False
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- purge_tags: no
- tags:
- anewtag: next
- register: output
-
- - assert:
- that:
- - output.changed
- - output.name == '{{ bucket_name }}'
- - output.tags.example == 'tag1'
- - output.tags.anewtag == 'next'
-
- - name: Re-update a tag for s3_bucket with purge_tags False
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- purge_tags: no
- tags:
- anewtag: next
- register: output
-
- - assert:
- that:
- - output is not changed
- - output.name == '{{ bucket_name }}'
- - output.tags.example == 'tag1'
- - output.tags.anewtag == 'next'
-
- ## ============================================================
-
- #- name: 'Pause to help with s3 bucket eventual consistency'
- # wait_for:
- # timeout: 10
- # delegate_to: localhost
-
- ## ============================================================
-
- - name: Pass empty tags dict for s3_bucket with purge_tags False
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- purge_tags: no
- tags: {}
- register: output
-
- - assert:
- that:
- - output is not changed
- - output.name == '{{ bucket_name }}'
- - output.tags.example == 'tag1'
- - output.tags.anewtag == 'next'
-
- ## ============================================================
-
- #- name: 'Pause to help with s3 bucket eventual consistency'
- # wait_for:
- # timeout: 10
- # delegate_to: localhost
-
- ## ============================================================
-
- - name: Do not specify any tag to ensure previous tags are not removed
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- register: output
-
- - assert:
- that:
- - not output.changed
- - output.name == '{{ bucket_name }}'
- - output.tags.example == 'tag1'
-
- # ============================================================
-
- - name: Remove all tags
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- tags: {}
- register: output
-
- - assert:
- that:
- - output.changed
- - output.name == '{{ bucket_name }}'
- - output.tags == {}
-
- - name: Re-remove all tags
- s3_bucket:
- name: '{{ bucket_name }}'
- state: present
- tags: {}
- register: output
-
- - assert:
- that:
- - output is not changed
- - output.name == '{{ bucket_name }}'
- - output.tags == {}
-
- # ============================================================
-
- - name: Delete bucket
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- register: output
-
- - assert:
- that:
- - output.changed
-
- # ============================================================
- always:
- - name: Ensure all buckets are deleted
- s3_bucket:
- name: '{{ bucket_name }}'
- state: absent
- ignore_errors: yes
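
A compact summary of the tagging semantics the tasks above exercise: with the
default purge_tags: yes, the tags dict replaces the full set (so an empty dict
strips everything); purge_tags: no merges new keys into the existing set; and
omitting tags entirely leaves the current tags alone. Sketch (the bucket name
is illustrative):

    - s3_bucket: { name: example-bucket, state: present, tags: { a: '1' } }                  # set exactly {a: '1'}
    - s3_bucket: { name: example-bucket, state: present, purge_tags: no, tags: { b: '2' } }  # merge -> {a: '1', b: '2'}
    - s3_bucket: { name: example-bucket, state: present, tags: {} }                          # remove all tags
    - s3_bucket: { name: example-bucket, state: present }                                    # tags left untouched
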
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json b/test/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json
deleted file mode 100644
index 5775c5eb2c..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "Version":"2012-10-17",
- "Statement":[
- {
- "Sid":"AddPerm",
- "Effect":"Deny",
- "Principal": "*",
- "Action":["s3:GetObject"],
- "Resource":["arn:aws:s3:::{{bucket_name}}/*"]
- }
- ]
-}
diff --git a/test/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json b/test/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json
deleted file mode 100644
index a2720aed60..0000000000
--- a/test/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "Version":"2012-10-17",
- "Statement":[
- {
- "Sid":"AddPerm",
- "Effect":"Allow",
- "Principal": "*",
- "Action":["s3:GetObject"],
- "Resource":["arn:aws:s3:::{{bucket_name}}/*"]
- }
- ]
-}
diff --git a/test/integration/targets/s3_bucket/runme.sh b/test/integration/targets/s3_bucket/runme.sh
deleted file mode 100755
index aa324772bb..0000000000
--- a/test/integration/targets/s3_bucket/runme.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-#
-# Beware: most of our tests here are run in parallel.
-# To add new tests you'll need to add a new host to the inventory and a matching
-# '{{ inventory_hostname }}'.yml file in roles/s3_bucket/tasks/
-
-
-set -eux
-
-export ANSIBLE_ROLES_PATH=../
-
-ansible-playbook main.yml -i inventory "$@"
diff --git a/test/sanity/ignore.txt b/test/sanity/ignore.txt
index e6bf7868ed..3448786ddd 100644
--- a/test/sanity/ignore.txt
+++ b/test/sanity/ignore.txt
@@ -234,35 +234,6 @@ lib/ansible/module_utils/urls.py replace-urlopen
lib/ansible/module_utils/yumdnf.py future-import-boilerplate
lib/ansible/module_utils/yumdnf.py metaclass-boilerplate
lib/ansible/modules/cloud/amazon/aws_netapp_cvs_FileSystems.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_s3.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_s3.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/cloudformation.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/cloudformation.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_ami.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_ami.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_ami_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_ami_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_elb_lb.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_eni.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_eni.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_group.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_group.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_snapshot_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_snapshot_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_tag.py validate-modules:parameter-state-invalid-choice
-lib/ansible/modules/cloud/amazon/ec2_vol.py validate-modules:parameter-state-invalid-choice
-lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_net.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_net.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_net_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_net_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_info.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/azure/azure_rm_acs.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/azure/azure_rm_acs.py validate-modules:parameter-type-not-in-doc
lib/ansible/modules/cloud/azure/azure_rm_acs.py validate-modules:required_if-requirements-unknown
@@ -4013,12 +3984,6 @@ lib/ansible/plugins/action/vyos.py action-plugin-docs # base class for deprecate
lib/ansible/plugins/cache/base.py ansible-doc!skip # not a plugin, but a stub for backwards compatibility
lib/ansible/plugins/doc_fragments/asa.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/asa.py metaclass-boilerplate
-lib/ansible/plugins/doc_fragments/aws.py future-import-boilerplate
-lib/ansible/plugins/doc_fragments/aws.py metaclass-boilerplate
-lib/ansible/plugins/doc_fragments/aws_credentials.py future-import-boilerplate
-lib/ansible/plugins/doc_fragments/aws_credentials.py metaclass-boilerplate
-lib/ansible/plugins/doc_fragments/aws_region.py future-import-boilerplate
-lib/ansible/plugins/doc_fragments/aws_region.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/azure.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/azure.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/azure_tags.py future-import-boilerplate
@@ -4037,8 +4002,6 @@ lib/ansible/plugins/doc_fragments/dellos6.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/dellos6.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/dellos9.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/dellos9.py metaclass-boilerplate
-lib/ansible/plugins/doc_fragments/ec2.py future-import-boilerplate
-lib/ansible/plugins/doc_fragments/ec2.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/eos.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/eos.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/f5.py future-import-boilerplate
@@ -4314,7 +4277,6 @@ test/units/mock/path.py future-import-boilerplate
test/units/mock/path.py metaclass-boilerplate
test/units/mock/yaml_helper.py future-import-boilerplate
test/units/mock/yaml_helper.py metaclass-boilerplate
-test/units/module_utils/aws/test_aws_module.py metaclass-boilerplate
test/units/module_utils/basic/test__symbolic_mode_to_octal.py future-import-boilerplate
test/units/module_utils/basic/test_deprecate_warn.py future-import-boilerplate
test/units/module_utils/basic/test_deprecate_warn.py metaclass-boilerplate
diff --git a/test/units/module_utils/aws/test_aws_module.py b/test/units/module_utils/aws/test_aws_module.py
deleted file mode 100644
index 425282cc54..0000000000
--- a/test/units/module_utils/aws/test_aws_module.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2017, Michael De La Rue
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-from pytest import importorskip
-import unittest
-from ansible.module_utils import basic
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils._text import to_bytes
-from units.compat.mock import Mock, patch
-import json
-
-importorskip("boto3")
-botocore = importorskip("botocore")
-
-
-class AWSModuleTestCase(unittest.TestCase):
-
- basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {'_ansible_tmpdir': '/tmp/ansible-abc'}}))
-
- def test_create_aws_module_should_set_up_params(self):
- m = AnsibleAWSModule(argument_spec=dict(
- win_string_arg=dict(type='list', default=['win'])
- ))
- m_noretry_no_customargs = AnsibleAWSModule(
- auto_retry=False, default_args=False,
- argument_spec=dict(
- success_string_arg=dict(type='list', default=['success'])
- )
- )
- assert m, "module wasn't true!!"
- assert m_noretry_no_customargs, "module wasn't true!!"
-
- m_params = m.params
- m_no_defs_params = m_noretry_no_customargs.params
- assert 'region' in m_params
- assert 'win' in m_params["win_string_arg"]
- assert 'success' in m_no_defs_params["success_string_arg"]
- assert 'aws_secret_key' not in m_no_defs_params
-
-
-class ErrorReportingTestcase(unittest.TestCase):
-
- def test_botocore_exception_reports_nicely_via_fail_json_aws(self):
-
- basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {'_ansible_tmpdir': '/tmp/ansible-abc'}}))
- module = AnsibleAWSModule(argument_spec=dict(
- fail_mode=dict(type='list', default=['success'])
- ))
-
- fail_json_double = Mock()
- err_msg = {'Error': {'Code': 'FakeClass.FakeError'}}
- with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
- try:
- raise botocore.exceptions.ClientError(err_msg, 'Could not find you')
- except Exception as e:
- print("exception is " + str(e))
- module.fail_json_aws(e, msg="Fake failure for testing boto exception messages")
-
-        assert(len(fail_json_double.mock_calls) >
-               0), "failed to call fail_json when it should have"
- assert(len(fail_json_double.mock_calls) <
- 2), "called fail_json multiple times when once would do"
- assert("test_botocore_exception_reports_nicely"
- in fail_json_double.mock_calls[0][2]["exception"]), \
- "exception traceback doesn't include correct function, fail call was actually: " \
- + str(fail_json_double.mock_calls[0])
-
- assert("Fake failure for testing boto exception messages:"
- in fail_json_double.mock_calls[0][2]["msg"]), \
- "error message doesn't include the local message; was: " \
- + str(fail_json_double.mock_calls[0])
- assert("Could not find you" in fail_json_double.mock_calls[0][2]["msg"]), \
- "error message doesn't include the botocore exception message; was: " \
- + str(fail_json_double.mock_calls[0])
- try:
- fail_json_double.mock_calls[0][2]["error"]
- except KeyError:
- raise Exception("error was missing; call was: " + str(fail_json_double.mock_calls[0]))
- assert("FakeClass.FakeError" == fail_json_double.mock_calls[0][2]["error"]["code"]), \
- "Failed to find error/code; was: " + str(fail_json_double.mock_calls[0])
-
- def test_botocore_exception_without_response_reports_nicely_via_fail_json_aws(self):
- basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {'_ansible_tmpdir': '/tmp/ansible-abc'}}))
- module = AnsibleAWSModule(argument_spec=dict(
- fail_mode=dict(type='list', default=['success'])
- ))
-
- fail_json_double = Mock()
- err_msg = None
- with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
- try:
- raise botocore.exceptions.ClientError(err_msg, 'Could not find you')
- except Exception as e:
- print("exception is " + str(e))
- module.fail_json_aws(e, msg="Fake failure for testing boto exception messages")
-
- assert(len(fail_json_double.mock_calls) > 0), "failed to call fail_json when should have"
- assert(len(fail_json_double.mock_calls) < 2), "called fail_json multiple times"
-
- assert("test_botocore_exception_without_response_reports_nicely_via_fail_json_aws"
- in fail_json_double.mock_calls[0][2]["exception"]), \
- "exception traceback doesn't include correct function, fail call was actually: " \
- + str(fail_json_double.mock_calls[0])
-
- assert("Fake failure for testing boto exception messages"
- in fail_json_double.mock_calls[0][2]["msg"]), \
- "error message doesn't include the local message; was: " \
- + str(fail_json_double.mock_calls[0])
-
-        # This assertion ought to work as well, but botocore raises
-        # "argument of type 'NoneType' is not iterable" while formatting the
-        # message, so ClientError is evidently not designed to accept None
-        # as an error response.
- #
- # assert("Could not find you" in fail_json_double.mock_calls[0][2]["msg"]), \
- # "error message doesn't include the botocore exception message; was: " \
- # + str(fail_json_double.mock_calls[0])
-
-
-# TODO:
-# - an exception without a message
-# - plain boto exception
-# - socket errors and other standard things.
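The deleted AWSModuleTestCase and ErrorReportingTestcase above pin down the fail_json_aws contract: the caller's msg and the botocore message are combined, and the traceback plus the parsed error dict travel along. A minimal sketch of the module-side pattern under test, assuming the pre-migration ansible.module_utils.aws.core import path (the ec2 client and argument names are illustrative):

import botocore.exceptions

from ansible.module_utils.aws.core import AnsibleAWSModule


def main():
    # AnsibleAWSModule injects the shared AWS argument_spec (region,
    # aws_secret_key, ...) unless default_args=False is passed, which is
    # what test_create_aws_module_should_set_up_params asserts.
    module = AnsibleAWSModule(argument_spec=dict(name=dict(type='str', required=True)))
    client = module.client('ec2')
    try:
        client.describe_instances()
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        # fail_json_aws merges msg with the botocore message and attaches the
        # traceback and parsed error code, as the assertions above require.
        module.fail_json_aws(e, msg="Could not describe instances")


if __name__ == '__main__':
    main()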
diff --git a/test/units/module_utils/ec2/test_aws.py b/test/units/module_utils/ec2/test_aws.py
deleted file mode 100644
index 7c66442264..0000000000
--- a/test/units/module_utils/ec2/test_aws.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2015, Allen Sanabria <asanabria@linuxdynasty.org>
-#
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-try:
- import boto3
- import botocore
- HAS_BOTO3 = True
-except Exception:
- HAS_BOTO3 = False
-
-import pytest
-
-from units.compat import unittest
-from ansible.module_utils.ec2 import AWSRetry
-
-if not HAS_BOTO3:
- pytestmark = pytest.mark.skip("test_aws.py requires the python modules 'boto3' and 'botocore'")
-
-
-class RetryTestCase(unittest.TestCase):
-
- def test_no_failures(self):
- self.counter = 0
-
- @AWSRetry.backoff(tries=2, delay=0.1)
- def no_failures():
- self.counter += 1
-
-        no_failures()
- self.assertEqual(self.counter, 1)
-
- def test_extend_boto3_failures(self):
- self.counter = 0
- err_msg = {'Error': {'Code': 'MalformedPolicyDocument'}}
-
- @AWSRetry.backoff(tries=2, delay=0.1, catch_extra_error_codes=['MalformedPolicyDocument'])
- def extend_failures():
- self.counter += 1
- if self.counter < 2:
- raise botocore.exceptions.ClientError(err_msg, 'You did something wrong.')
- else:
- return 'success'
-
- r = extend_failures()
- self.assertEqual(r, 'success')
- self.assertEqual(self.counter, 2)
-
- def test_retry_once(self):
- self.counter = 0
- err_msg = {'Error': {'Code': 'InternalFailure'}}
-
- @AWSRetry.backoff(tries=2, delay=0.1)
- def retry_once():
- self.counter += 1
- if self.counter < 2:
- raise botocore.exceptions.ClientError(err_msg, 'Something went wrong!')
- else:
- return 'success'
-
- r = retry_once()
- self.assertEqual(r, 'success')
- self.assertEqual(self.counter, 2)
-
- def test_reached_limit(self):
- self.counter = 0
- err_msg = {'Error': {'Code': 'RequestLimitExceeded'}}
-
- @AWSRetry.backoff(tries=4, delay=0.1)
- def fail():
- self.counter += 1
- raise botocore.exceptions.ClientError(err_msg, 'toooo fast!!')
-
-        # AWSRetry re-raises the last ClientError once the retry limit is reached
-        with self.assertRaises(botocore.exceptions.ClientError) as context:
-            fail()
-        self.assertEqual(context.exception.response['Error']['Code'], 'RequestLimitExceeded')
-        self.assertEqual(self.counter, 4)
-
- def test_unexpected_exception_does_not_retry(self):
- self.counter = 0
- err_msg = {'Error': {'Code': 'AuthFailure'}}
-
- @AWSRetry.backoff(tries=4, delay=0.1)
- def raise_unexpected_error():
- self.counter += 1
- raise botocore.exceptions.ClientError(err_msg, 'unexpected error')
-
-        with self.assertRaises(botocore.exceptions.ClientError) as context:
-            raise_unexpected_error()
-        self.assertEqual(context.exception.response['Error']['Code'], 'AuthFailure')
-
-        self.assertEqual(self.counter, 1)
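RetryTestCase fixes the retry semantics of AWSRetry.backoff: retryable codes such as RequestLimitExceeded are retried up to tries, extra codes can be whitelisted via catch_extra_error_codes, and anything else propagates on the first call. A short sketch of how the decorator was typically applied to a throttled boto3 call (region and the extra error code are illustrative):

import boto3

from ansible.module_utils.ec2 import AWSRetry


@AWSRetry.backoff(tries=5, delay=2, backoff=2.0,
                  catch_extra_error_codes=['InvalidInstanceID.NotFound'])
def describe_instances(client, **kwargs):
    # Retried with exponential backoff on throttling codes plus the extra
    # codes listed above; any other ClientError propagates immediately.
    return client.describe_instances(**kwargs)


client = boto3.client('ec2', region_name='us-east-1')
reservations = describe_instances(client)['Reservations']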
diff --git a/test/units/module_utils/test_ec2.py b/test/units/module_utils/test_ec2.py
deleted file mode 100644
index dc748276e2..0000000000
--- a/test/units/module_utils/test_ec2.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# (c) 2017 Red Hat Inc.
-#
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import unittest
-
-from ansible.module_utils.ec2 import map_complex_type, compare_policies
-
-
-class Ec2Utils(unittest.TestCase):
-
- def setUp(self):
- # A pair of simple IAM Trust relationships using bools, the first a
- # native bool the second a quoted string
- self.bool_policy_bool = {
- 'Version': '2012-10-17',
- 'Statement': [
- {
- "Action": "sts:AssumeRole",
- "Condition": {
- "Bool": {"aws:MultiFactorAuthPresent": True}
- },
- "Effect": "Allow",
- "Principal": {"AWS": "arn:aws:iam::XXXXXXXXXXXX:root"},
- "Sid": "AssumeRoleWithBoolean"
- }
- ]
- }
-
- self.bool_policy_string = {
- 'Version': '2012-10-17',
- 'Statement': [
- {
- "Action": "sts:AssumeRole",
- "Condition": {
- "Bool": {"aws:MultiFactorAuthPresent": "true"}
- },
- "Effect": "Allow",
- "Principal": {"AWS": "arn:aws:iam::XXXXXXXXXXXX:root"},
- "Sid": "AssumeRoleWithBoolean"
- }
- ]
- }
-
- # A pair of simple bucket policies using numbers, the first a
- # native int the second a quoted string
- self.numeric_policy_number = {
- 'Version': '2012-10-17',
- 'Statement': [
- {
- "Action": "s3:ListBucket",
- "Condition": {
- "NumericLessThanEquals": {"s3:max-keys": 15}
- },
- "Effect": "Allow",
- "Resource": "arn:aws:s3:::examplebucket",
- "Sid": "s3ListBucketWithNumericLimit"
- }
- ]
- }
-
- self.numeric_policy_string = {
- 'Version': '2012-10-17',
- 'Statement': [
- {
- "Action": "s3:ListBucket",
- "Condition": {
- "NumericLessThanEquals": {"s3:max-keys": "15"}
- },
- "Effect": "Allow",
- "Resource": "arn:aws:s3:::examplebucket",
- "Sid": "s3ListBucketWithNumericLimit"
- }
- ]
- }
-
- self.small_policy_one = {
- 'Version': '2012-10-17',
- 'Statement': [
- {
- 'Action': 's3:PutObjectAcl',
- 'Sid': 'AddCannedAcl2',
- 'Resource': 'arn:aws:s3:::test_policy/*',
- 'Effect': 'Allow',
- 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
- }
- ]
- }
-
- # The same as small_policy_one, except the single resource is in a list and the contents of Statement are jumbled
- self.small_policy_two = {
- 'Version': '2012-10-17',
- 'Statement': [
- {
- 'Effect': 'Allow',
- 'Action': 's3:PutObjectAcl',
- 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']},
- 'Resource': ['arn:aws:s3:::test_policy/*'],
- 'Sid': 'AddCannedAcl2'
- }
- ]
- }
-
- self.larger_policy_one = {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "Test",
- "Effect": "Allow",
- "Principal": {
- "AWS": [
- "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
- "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
- ]
- },
- "Action": "s3:PutObjectAcl",
- "Resource": "arn:aws:s3:::test_policy/*"
- },
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
- },
- "Action": [
- "s3:PutObject",
- "s3:PutObjectAcl"
- ],
- "Resource": "arn:aws:s3:::test_policy/*"
- }
- ]
- }
-
- # The same as larger_policy_one, except having a list of length 1 and jumbled contents
- self.larger_policy_two = {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Principal": {
- "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]
- },
- "Effect": "Allow",
- "Resource": "arn:aws:s3:::test_policy/*",
- "Action": [
- "s3:PutObject",
- "s3:PutObjectAcl"
- ]
- },
- {
- "Action": "s3:PutObjectAcl",
- "Principal": {
- "AWS": [
- "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
- "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
- ]
- },
- "Sid": "Test",
- "Resource": "arn:aws:s3:::test_policy/*",
- "Effect": "Allow"
- }
- ]
- }
-
- # Different than larger_policy_two: a different principal is given
- self.larger_policy_three = {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Principal": {
- "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]
- },
- "Effect": "Allow",
- "Resource": "arn:aws:s3:::test_policy/*",
- "Action": [
- "s3:PutObject",
- "s3:PutObjectAcl"]
- },
- {
- "Action": "s3:PutObjectAcl",
- "Principal": {
- "AWS": [
- "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
- "arn:aws:iam::XXXXXXXXXXXX:user/testuser3"
- ]
- },
- "Sid": "Test",
- "Resource": "arn:aws:s3:::test_policy/*",
- "Effect": "Allow"
- }
- ]
- }
-
- def test_map_complex_type_over_dict(self):
- complex_type = {'minimum_healthy_percent': "75", 'maximum_percent': "150"}
- type_map = {'minimum_healthy_percent': 'int', 'maximum_percent': 'int'}
- complex_type_mapped = map_complex_type(complex_type, type_map)
- complex_type_expected = {'minimum_healthy_percent': 75, 'maximum_percent': 150}
- self.assertEqual(complex_type_mapped, complex_type_expected)
-
- def test_compare_small_policies_without_differences(self):
- """ Testing two small policies which are identical except for:
- * The contents of the statement are in different orders
- * The second policy contains a list of length one whereas in the first it is a string
- """
- self.assertFalse(compare_policies(self.small_policy_one, self.small_policy_two))
-
- def test_compare_large_policies_without_differences(self):
- """ Testing two larger policies which are identical except for:
- * The statements are in different orders
- * The contents of the statements are also in different orders
- * The second contains a list of length one for the Principal whereas in the first it is a string
- """
- self.assertFalse(compare_policies(self.larger_policy_one, self.larger_policy_two))
-
- def test_compare_larger_policies_with_difference(self):
- """ Testing two larger policies which are identical except for:
- * one different principal
- """
- self.assertTrue(compare_policies(self.larger_policy_two, self.larger_policy_three))
-
- def test_compare_smaller_policy_with_larger(self):
- """ Testing two policies of different sizes """
- self.assertTrue(compare_policies(self.larger_policy_one, self.small_policy_one))
-
- def test_compare_boolean_policy_bool_and_string_are_equal(self):
- """ Testing two policies one using a quoted boolean, the other a bool """
- self.assertFalse(compare_policies(self.bool_policy_string, self.bool_policy_bool))
-
- def test_compare_numeric_policy_number_and_string_are_equal(self):
- """ Testing two policies one using a quoted number, the other an int """
- self.assertFalse(compare_policies(self.numeric_policy_string, self.numeric_policy_number))
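The Ec2Utils cases document what compare_policies normalises away: statement order, quoted-vs-native booleans and numbers, and one-element lists versus scalars. Since it returns True only on a real difference, it serves as the idempotency guard in policy-managing modules; a minimal sketch of that use (the helper name is illustrative):

from ansible.module_utils.ec2 import compare_policies


def policy_needs_update(current_policy, desired_policy):
    # True only when the policies differ after normalisation, i.e. when an
    # AWS API call is actually required to converge them.
    return compare_policies(current_policy, desired_policy)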
diff --git a/test/units/modules/cloud/amazon/test_aws_s3.py b/test/units/modules/cloud/amazon/test_aws_s3.py
deleted file mode 100644
index a752c67fcb..0000000000
--- a/test/units/modules/cloud/amazon/test_aws_s3.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-
-import unittest
-
-try:
- import ansible.modules.cloud.amazon.aws_s3 as s3
-except ImportError:
- pytestmark = pytest.mark.skip("This test requires the s3 Python libraries")
-
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-
-boto3 = pytest.importorskip("boto3")
-
-
-class TestUrlparse(unittest.TestCase):
-
- def test_urlparse(self):
- actual = urlparse("http://test.com/here")
- self.assertEqual("http", actual.scheme)
- self.assertEqual("test.com", actual.netloc)
- self.assertEqual("/here", actual.path)
-
- def test_is_fakes3(self):
- actual = s3.is_fakes3("fakes3://bla.blubb")
- self.assertEqual(True, actual)
-
- def test_get_s3_connection(self):
- aws_connect_kwargs = dict(aws_access_key_id="access_key",
- aws_secret_access_key="secret_key")
- location = None
- rgw = True
- s3_url = "http://bla.blubb"
- actual = s3.get_s3_connection(None, aws_connect_kwargs, location, rgw, s3_url)
- self.assertEqual(bool("bla.blubb" in str(actual._endpoint)), True)
diff --git a/test/units/modules/cloud/amazon/test_cloudformation.py b/test/units/modules/cloud/amazon/test_cloudformation.py
deleted file mode 100644
index fe99a8510a..0000000000
--- a/test/units/modules/cloud/amazon/test_cloudformation.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# (c) 2017 Red Hat Inc.
-#
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-
-from units.utils.amazon_placebo_fixtures import placeboify, maybe_sleep
-from ansible.modules.cloud.amazon import cloudformation as cfn_module
-
-basic_yaml_tpl = """
----
-AWSTemplateFormatVersion: '2010-09-09'
-Description: 'Basic template that creates an S3 bucket'
-Resources:
- MyBucket:
- Type: "AWS::S3::Bucket"
-Outputs:
- TheName:
- Value:
- !Ref MyBucket
-"""
-
-bad_json_tpl = """{
- "AWSTemplateFormatVersion": "2010-09-09",
- "Description": "Broken template, no comma here ->"
- "Resources": {
- "MyBucket": {
- "Type": "AWS::S3::Bucket"
- }
- }
-}"""
-
-failing_yaml_tpl = """
----
-AWSTemplateFormatVersion: 2010-09-09
-Resources:
- ECRRepo:
- Type: AWS::ECR::Repository
- Properties:
- RepositoryPolicyText:
- Version: 3000-10-17 # <--- invalid version
- Statement:
- - Effect: Allow
- Action:
- - 'ecr:*'
- Principal:
- AWS: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:root
-"""
-
-default_events_limit = 10
-
-
-class FakeModule(object):
- def __init__(self, **kwargs):
- self.params = kwargs
-
- def fail_json(self, *args, **kwargs):
- self.exit_args = args
- self.exit_kwargs = kwargs
- raise Exception('FAIL')
-
- def exit_json(self, *args, **kwargs):
- self.exit_args = args
- self.exit_kwargs = kwargs
- raise Exception('EXIT')
-
-
-def test_invalid_template_json(placeboify):
- connection = placeboify.client('cloudformation')
- params = {
- 'StackName': 'ansible-test-wrong-json',
- 'TemplateBody': bad_json_tpl,
- }
- m = FakeModule(disable_rollback=False)
-    # pytest.raises itself fails the test if create_stack does not raise
-    with pytest.raises(Exception) as exc_info:
-        cfn_module.create_stack(m, params, connection, default_events_limit)
-
- assert exc_info.match('FAIL')
- assert "ValidationError" in m.exit_kwargs['msg']
-
-
-def test_client_request_token_s3_stack(maybe_sleep, placeboify):
- connection = placeboify.client('cloudformation')
- params = {
- 'StackName': 'ansible-test-client-request-token-yaml',
- 'TemplateBody': basic_yaml_tpl,
- 'ClientRequestToken': '3faf3fb5-b289-41fc-b940-44151828f6cf',
- }
- m = FakeModule(disable_rollback=False)
- result = cfn_module.create_stack(m, params, connection, default_events_limit)
- assert result['changed']
- assert len(result['events']) > 1
- # require that the final recorded stack state was CREATE_COMPLETE
- # events are retrieved newest-first, so 0 is the latest
- assert 'CREATE_COMPLETE' in result['events'][0]
- connection.delete_stack(StackName='ansible-test-client-request-token-yaml')
-
-
-def test_basic_s3_stack(maybe_sleep, placeboify):
- connection = placeboify.client('cloudformation')
- params = {
- 'StackName': 'ansible-test-basic-yaml',
- 'TemplateBody': basic_yaml_tpl
- }
- m = FakeModule(disable_rollback=False)
- result = cfn_module.create_stack(m, params, connection, default_events_limit)
- assert result['changed']
- assert len(result['events']) > 1
- # require that the final recorded stack state was CREATE_COMPLETE
- # events are retrieved newest-first, so 0 is the latest
- assert 'CREATE_COMPLETE' in result['events'][0]
- connection.delete_stack(StackName='ansible-test-basic-yaml')
-
-
-def test_delete_nonexistent_stack(maybe_sleep, placeboify):
- connection = placeboify.client('cloudformation')
- result = cfn_module.stack_operation(connection, 'ansible-test-nonexist', 'DELETE', default_events_limit)
- assert result['changed']
- assert 'Stack does not exist.' in result['log']
-
-
-def test_get_nonexistent_stack(placeboify):
- connection = placeboify.client('cloudformation')
- assert cfn_module.get_stack_facts(connection, 'ansible-test-nonexist') is None
-
-
-def test_missing_template_body():
- m = FakeModule()
-    # pytest.raises itself fails the test if create_stack does not raise
-    with pytest.raises(Exception) as exc_info:
-        cfn_module.create_stack(
-            module=m,
-            stack_params={},
-            cfn=None,
-            events_limit=default_events_limit
-        )
-
- assert exc_info.match('FAIL')
- assert not m.exit_args
- assert "Either 'template', 'template_body' or 'template_url' is required when the stack does not exist." == m.exit_kwargs['msg']
-
-
-def test_on_create_failure_delete(maybe_sleep, placeboify):
- m = FakeModule(
- on_create_failure='DELETE',
- disable_rollback=False,
- )
- connection = placeboify.client('cloudformation')
- params = {
- 'StackName': 'ansible-test-on-create-failure-delete',
- 'TemplateBody': failing_yaml_tpl
- }
- result = cfn_module.create_stack(m, params, connection, default_events_limit)
- assert result['changed']
- assert result['failed']
- assert len(result['events']) > 1
- # require that the final recorded stack state was DELETE_COMPLETE
- # events are retrieved newest-first, so 0 is the latest
- assert 'DELETE_COMPLETE' in result['events'][0]
-
-
-def test_on_create_failure_rollback(maybe_sleep, placeboify):
- m = FakeModule(
- on_create_failure='ROLLBACK',
- disable_rollback=False,
- )
- connection = placeboify.client('cloudformation')
- params = {
- 'StackName': 'ansible-test-on-create-failure-rollback',
- 'TemplateBody': failing_yaml_tpl
- }
- result = cfn_module.create_stack(m, params, connection, default_events_limit)
- assert result['changed']
- assert result['failed']
- assert len(result['events']) > 1
- # require that the final recorded stack state was ROLLBACK_COMPLETE
- # events are retrieved newest-first, so 0 is the latest
- assert 'ROLLBACK_COMPLETE' in result['events'][0]
- connection.delete_stack(StackName=params['StackName'])
-
-
-def test_on_create_failure_do_nothing(maybe_sleep, placeboify):
- m = FakeModule(
- on_create_failure='DO_NOTHING',
- disable_rollback=False,
- )
- connection = placeboify.client('cloudformation')
- params = {
- 'StackName': 'ansible-test-on-create-failure-do-nothing',
- 'TemplateBody': failing_yaml_tpl
- }
- result = cfn_module.create_stack(m, params, connection, default_events_limit)
- assert result['changed']
- assert result['failed']
- assert len(result['events']) > 1
- # require that the final recorded stack state was CREATE_FAILED
- # events are retrieved newest-first, so 0 is the latest
- assert 'CREATE_FAILED' in result['events'][0]
- connection.delete_stack(StackName=params['StackName'])
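These CloudFormation tests run against the placeboify fixture, which wraps a boto3 session in placebo so previously recorded AWS responses can be replayed in CI without credentials. A minimal sketch of that record/replay mechanism using placebo's attach/record/playback API (the helper name and data path are illustrative):

import boto3
import placebo


def cloudformation_client(data_path, record=False):
    session = boto3.Session(region_name='us-east-1')
    pill = placebo.attach(session, data_path=data_path)
    if record:
        pill.record()    # capture live responses once, against real AWS
    else:
        pill.playback()  # replay the stored JSON responses offline
    return session.client('cloudformation')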
diff --git a/test/units/modules/cloud/amazon/test_ec2_group.py b/test/units/modules/cloud/amazon/test_ec2_group.py
deleted file mode 100644
index 14f597f69d..0000000000
--- a/test/units/modules/cloud/amazon/test_ec2_group.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.modules.cloud.amazon import ec2_group as group_module
-
-
-def test_from_permission():
- internal_http = {
- u'FromPort': 80,
- u'IpProtocol': 'tcp',
- u'IpRanges': [
- {
- u'CidrIp': '10.0.0.0/8',
- u'Description': 'Foo Bar Baz'
- },
- ],
- u'Ipv6Ranges': [
- {u'CidrIpv6': 'fe80::94cc:8aff:fef6:9cc/64'},
- ],
- u'PrefixListIds': [],
- u'ToPort': 80,
- u'UserIdGroupPairs': [],
- }
- perms = list(group_module.rule_from_group_permission(internal_http))
- assert len(perms) == 2
- assert perms[0].target == '10.0.0.0/8'
- assert perms[0].target_type == 'ipv4'
- assert perms[0].description == 'Foo Bar Baz'
- assert perms[1].target == 'fe80::94cc:8aff:fef6:9cc/64'
-
- global_egress = {
- 'IpProtocol': '-1',
- 'IpRanges': [{'CidrIp': '0.0.0.0/0'}],
- 'Ipv6Ranges': [],
- 'PrefixListIds': [],
- 'UserIdGroupPairs': []
- }
- perms = list(group_module.rule_from_group_permission(global_egress))
- assert len(perms) == 1
- assert perms[0].target == '0.0.0.0/0'
- assert perms[0].port_range == (None, None)
-
- internal_prefix_http = {
- u'FromPort': 80,
- u'IpProtocol': 'tcp',
- u'PrefixListIds': [
- {'PrefixListId': 'p-1234'}
- ],
- u'ToPort': 80,
- u'UserIdGroupPairs': [],
- }
- perms = list(group_module.rule_from_group_permission(internal_prefix_http))
- assert len(perms) == 1
- assert perms[0].target == 'p-1234'
-
-
-def test_rule_to_permission():
- tests = [
- group_module.Rule((22, 22), 'udp', 'sg-1234567890', 'group', None),
- group_module.Rule((1, 65535), 'tcp', '0.0.0.0/0', 'ipv4', "All TCP from everywhere"),
- group_module.Rule((443, 443), 'tcp', 'ip-123456', 'ip_prefix', "Traffic to privatelink IPs"),
-        group_module.Rule((443, 443), 'tcp', 'feed:dead::beef/64', 'ipv6', None),
- ]
- for test in tests:
- perm = group_module.to_permission(test)
-        assert (perm['FromPort'], perm['ToPort']) == test.port_range
- assert perm['IpProtocol'] == test.protocol
-
-
-def test_validate_ip():
- class Warner(object):
- def warn(self, msg):
- return
- ips = [
- ('1.1.1.1/24', '1.1.1.0/24'),
- ('192.168.56.101/16', '192.168.0.0/16'),
- # Don't modify IPv6 CIDRs, AWS supports /128 and device ranges
- ('1203:8fe0:fe80:b897:8990:8a7c:99bf:323d/128', '1203:8fe0:fe80:b897:8990:8a7c:99bf:323d/128'),
- ]
-
- for ip, net in ips:
- assert group_module.validate_ip(Warner(), ip) == net
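test_validate_ip pins the normalisation of an IPv4 CIDR to its network address while leaving IPv6 host routes untouched. The tested cases can be reproduced with the stdlib ipaddress module; this is a restatement of the expected results, not the module's own implementation:

import ipaddress


def normalise_cidr(cidr):
    # 192.168.56.101/16 -> 192.168.0.0/16; an already-normalised network,
    # including an IPv6 /128 host route, comes back unchanged.
    return str(ipaddress.ip_network(cidr, strict=False))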
diff --git a/test/units/plugins/inventory/test_aws_ec2.py b/test/units/plugins/inventory/test_aws_ec2.py
deleted file mode 100644
index 06137a3ec2..0000000000
--- a/test/units/plugins/inventory/test_aws_ec2.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2017 Sloane Hertel <shertel@redhat.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-import datetime
-
-# Skip the whole module unless boto3 and botocore, which InventoryModule and
-# instance_data_filter_to_boto_attr require, are available
-boto3 = pytest.importorskip('boto3')
-botocore = pytest.importorskip('botocore')
-
-from ansible.errors import AnsibleError
-from ansible.plugins.inventory.aws_ec2 import InventoryModule, instance_data_filter_to_boto_attr
-
-instances = {
- u'Instances': [
- {u'Monitoring': {u'State': 'disabled'},
- u'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com',
- u'State': {u'Code': 16, u'Name': 'running'},
- u'EbsOptimized': False,
- u'LaunchTime': datetime.datetime(2017, 10, 31, 12, 59, 25),
- u'PublicIpAddress': '12.345.67.890',
- u'PrivateIpAddress': '098.76.54.321',
- u'ProductCodes': [],
- u'VpcId': 'vpc-12345678',
- u'StateTransitionReason': '',
- u'InstanceId': 'i-00000000000000000',
- u'EnaSupport': True,
- u'ImageId': 'ami-12345678',
- u'PrivateDnsName': 'ip-098-76-54-321.ec2.internal',
- u'KeyName': 'testkey',
- u'SecurityGroups': [{u'GroupName': 'default', u'GroupId': 'sg-12345678'}],
- u'ClientToken': '',
- u'SubnetId': 'subnet-12345678',
- u'InstanceType': 't2.micro',
- u'NetworkInterfaces': [
- {u'Status': 'in-use',
- u'MacAddress': '12:a0:50:42:3d:a4',
- u'SourceDestCheck': True,
- u'VpcId': 'vpc-12345678',
- u'Description': '',
- u'NetworkInterfaceId': 'eni-12345678',
- u'PrivateIpAddresses': [
- {u'PrivateDnsName': 'ip-098-76-54-321.ec2.internal',
- u'PrivateIpAddress': '098.76.54.321',
- u'Primary': True,
- u'Association':
- {u'PublicIp': '12.345.67.890',
- u'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com',
- u'IpOwnerId': 'amazon'}}],
- u'PrivateDnsName': 'ip-098-76-54-321.ec2.internal',
- u'Attachment':
- {u'Status': 'attached',
- u'DeviceIndex': 0,
- u'DeleteOnTermination': True,
- u'AttachmentId': 'eni-attach-12345678',
- u'AttachTime': datetime.datetime(2017, 10, 31, 12, 59, 25)},
- u'Groups': [
- {u'GroupName': 'default',
- u'GroupId': 'sg-12345678'}],
- u'Ipv6Addresses': [],
- u'OwnerId': '123456789000',
- u'PrivateIpAddress': '098.76.54.321',
- u'SubnetId': 'subnet-12345678',
- u'Association':
- {u'PublicIp': '12.345.67.890',
- u'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com',
- u'IpOwnerId': 'amazon'}}],
- u'SourceDestCheck': True,
- u'Placement':
- {u'Tenancy': 'default',
- u'GroupName': '',
- u'AvailabilityZone': 'us-east-1c'},
- u'Hypervisor': 'xen',
- u'BlockDeviceMappings': [
- {u'DeviceName': '/dev/xvda',
- u'Ebs':
- {u'Status': 'attached',
- u'DeleteOnTermination': True,
- u'VolumeId': 'vol-01234567890000000',
- u'AttachTime': datetime.datetime(2017, 10, 31, 12, 59, 26)}}],
- u'Architecture': 'x86_64',
- u'RootDeviceType': 'ebs',
- u'RootDeviceName': '/dev/xvda',
- u'VirtualizationType': 'hvm',
- u'Tags': [{u'Value': 'test', u'Key': 'ansible'}, {u'Value': 'aws_ec2', u'Key': 'name'}],
- u'AmiLaunchIndex': 0}],
- u'ReservationId': 'r-01234567890000000',
- u'Groups': [],
- u'OwnerId': '123456789000'
-}
-
-
-@pytest.fixture(scope="module")
-def inventory():
- return InventoryModule()
-
-
-def test_compile_values(inventory):
- found_value = instances['Instances'][0]
- chain_of_keys = instance_data_filter_to_boto_attr['instance.group-id']
- for attr in chain_of_keys:
- found_value = inventory._compile_values(found_value, attr)
- assert found_value == "sg-12345678"
-
-
-def test_get_boto_attr_chain(inventory):
- instance = instances['Instances'][0]
- assert inventory._get_boto_attr_chain('network-interface.addresses.private-ip-address', instance) == "098.76.54.321"
-
-
-def test_boto3_conn(inventory):
- inventory._options = {"aws_profile": "first_precedence",
- "aws_access_key": "test_access_key",
- "aws_secret_key": "test_secret_key",
- "aws_security_token": "test_security_token",
- "iam_role_arn": None}
- inventory._set_credentials()
-    with pytest.raises(AnsibleError) as error_message:
-        for connection, region in inventory._boto3_conn(regions=['us-east-1']):
-            pass
-    assert "Insufficient credentials found" in str(error_message.value)
-
-
-def test_get_hostname_default(inventory):
- instance = instances['Instances'][0]
- assert inventory._get_hostname(instance, hostnames=None) == "ec2-12-345-67-890.compute-1.amazonaws.com"
-
-
-def test_get_hostname(inventory):
- hostnames = ['ip-address', 'dns-name']
- instance = instances['Instances'][0]
- assert inventory._get_hostname(instance, hostnames) == "12.345.67.890"
-
-
-def test_set_credentials(inventory):
- inventory._options = {'aws_access_key': 'test_access_key',
- 'aws_secret_key': 'test_secret_key',
- 'aws_security_token': 'test_security_token',
- 'aws_profile': 'test_profile',
- 'iam_role_arn': 'arn:aws:iam::112233445566:role/test-role'}
- inventory._set_credentials()
-
- assert inventory.boto_profile == "test_profile"
- assert inventory.aws_access_key_id == "test_access_key"
- assert inventory.aws_secret_access_key == "test_secret_key"
- assert inventory.aws_security_token == "test_security_token"
- assert inventory.iam_role_arn == "arn:aws:iam::112233445566:role/test-role"
-
-
-def test_insufficient_credentials(inventory):
- inventory._options = {
- 'aws_access_key': None,
- 'aws_secret_key': None,
- 'aws_security_token': None,
- 'aws_profile': None,
- 'iam_role_arn': None
- }
-    with pytest.raises(AnsibleError) as error_message:
-        inventory._set_credentials()
-    assert "Insufficient credentials found" in str(error_message.value)
-
-
-def test_verify_file_bad_config(inventory):
- assert inventory.verify_file('not_aws_config.yml') is False
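The two hostname tests encode the plugin's precedence rule: with no configuration the public DNS name wins, and with a hostnames list the first candidate that resolves to a non-empty value on the instance is used. A compact illustration of that rule (the attribute mapping and helper are illustrative, not the plugin's internals):

def pick_hostname(instance, hostnames=None):
    # Default preference is the public DNS name; otherwise the first
    # configured candidate with a non-empty value wins.
    boto_attr = {'ip-address': 'PublicIpAddress', 'dns-name': 'PublicDnsName'}
    for name in (hostnames or ['dns-name']):
        value = instance.get(boto_attr.get(name, name))
        if value:
            return value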
diff --git a/test/units/plugins/lookup/fixtures/avi.json b/test/units/plugins/lookup/fixtures/avi.json
deleted file mode 100644
index ae89ca689c..0000000000
--- a/test/units/plugins/lookup/fixtures/avi.json
+++ /dev/null
@@ -1,104 +0,0 @@
-{
- "mock_single_obj": {
- "_last_modified": "",
- "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "dhcp_enabled": true,
- "exclude_discovered_subnets": false,
- "name": "PG-123",
- "synced_from_se": true,
- "tenant_ref": "https://192.0.2.132/api/tenant/admin",
- "url": "https://192.0.2.132/api/network/dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "uuid": "dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vcenter_dvs": true,
- "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
- },
- "mock_multiple_obj": {
- "results": [
- {
- "_last_modified": "",
- "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "dhcp_enabled": true,
- "exclude_discovered_subnets": false,
- "name": "J-PG-0682",
- "synced_from_se": true,
- "tenant_ref": "https://192.0.2.132/api/tenant/admin",
- "url": "https://192.0.2.132/api/network/dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "uuid": "dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vcenter_dvs": true,
- "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-2084-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
- },
- {
- "_last_modified": "",
- "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "dhcp_enabled": true,
- "exclude_discovered_subnets": false,
- "name": "J-PG-0231",
- "synced_from_se": true,
- "tenant_ref": "https://192.0.2.132/api/tenant/admin",
- "url": "https://192.0.2.132/api/network/dvportgroup-1627-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "uuid": "dvportgroup-1627-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vcenter_dvs": true,
- "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-1627-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
- },
- {
- "_last_modified": "",
- "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "dhcp_enabled": true,
- "exclude_discovered_subnets": false,
- "name": "J-PG-0535",
- "synced_from_se": true,
- "tenant_ref": "https://192.0.2.132/api/tenant/admin",
- "url": "https://192.0.2.132/api/network/dvportgroup-1934-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "uuid": "dvportgroup-1934-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vcenter_dvs": true,
- "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-1934-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
- },
- {
- "_last_modified": "",
- "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "dhcp_enabled": true,
- "exclude_discovered_subnets": false,
- "name": "J-PG-0094",
- "synced_from_se": true,
- "tenant_ref": "https://192.0.2.132/api/tenant/admin",
- "url": "https://192.0.2.132/api/network/dvportgroup-1458-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "uuid": "dvportgroup-1458-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vcenter_dvs": true,
- "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-1458-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
- },
- {
- "_last_modified": "",
- "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "dhcp_enabled": true,
- "exclude_discovered_subnets": false,
- "name": "J-PG-0437",
- "synced_from_se": true,
- "tenant_ref": "https://192.0.2.132/api/tenant/admin",
- "url": "https://192.0.2.132/api/network/dvportgroup-1836-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "uuid": "dvportgroup-1836-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vcenter_dvs": true,
- "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-1836-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
- },
- {
- "_last_modified": "",
- "cloud_ref": "https://192.0.2.132/api/cloud/cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "dhcp_enabled": true,
- "exclude_discovered_subnets": false,
- "name": "J-PG-0673",
- "synced_from_se": true,
- "tenant_ref": "https://192.0.2.132/api/tenant/admin",
- "url": "https://192.0.2.132/api/network/dvportgroup-2075-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "uuid": "dvportgroup-2075-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vcenter_dvs": true,
- "vimgrnw_ref": "https://192.0.2.132/api/vimgrnwruntime/dvportgroup-2075-cloud-4d063be1-99c2-44cf-8b28-977bd970524c",
- "vrf_context_ref": "https://192.0.2.132/api/vrfcontext/vrfcontext-31f1b55f-319c-44eb-862f-69d79ffdf295"
- }
- ]
- }
-}
diff --git a/test/units/plugins/lookup/test_aws_secret.py b/test/units/plugins/lookup/test_aws_secret.py
deleted file mode 100644
index ae7734501c..0000000000
--- a/test/units/plugins/lookup/test_aws_secret.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# (c) 2019 Robert Williams
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-import datetime
-
-from ansible.errors import AnsibleError
-
-from ansible.plugins.loader import lookup_loader
-
-try:
- import boto3
- from botocore.exceptions import ClientError
-except ImportError:
- pytestmark = pytest.mark.skip("This test requires the boto3 and botocore Python libraries")
-
-
-@pytest.fixture
-def dummy_credentials():
- dummy_credentials = {}
- dummy_credentials['boto_profile'] = None
- dummy_credentials['aws_secret_key'] = "notasecret"
- dummy_credentials['aws_access_key'] = "notakey"
- dummy_credentials['aws_security_token'] = None
- dummy_credentials['region'] = 'eu-west-1'
- return dummy_credentials
-
-
-def test_lookup_variable(mocker, dummy_credentials):
- dateutil_tz = pytest.importorskip("dateutil.tz")
- simple_variable_success_response = {
- 'Name': 'secret',
- 'VersionId': 'cafe8168-e6ce-4e59-8830-5b143faf6c52',
- 'SecretString': '{"secret":"simplesecret"}',
- 'VersionStages': ['AWSCURRENT'],
- 'CreatedDate': datetime.datetime(2019, 4, 4, 11, 41, 0, 878000, tzinfo=dateutil_tz.tzlocal()),
- 'ResponseMetadata': {
- 'RequestId': '21099462-597c-490a-800f-8b7a41e5151c',
- 'HTTPStatusCode': 200,
- 'HTTPHeaders': {
- 'date': 'Thu, 04 Apr 2019 10:43:12 GMT',
- 'content-type': 'application/x-amz-json-1.1',
- 'content-length': '252',
- 'connection': 'keep-alive',
- 'x-amzn-requestid': '21099462-597c-490a-800f-8b7a41e5151c'
- },
- 'RetryAttempts': 0
- }
- }
- lookup = lookup_loader.get('aws_secret')
- boto3_double = mocker.MagicMock()
- boto3_double.Session.return_value.client.return_value.get_secret_value.return_value = simple_variable_success_response
- boto3_client_double = boto3_double.Session.return_value.client
-
- mocker.patch.object(boto3, 'session', boto3_double)
- retval = lookup.run(["simple_variable"], None, **dummy_credentials)
- assert(retval[0] == '{"secret":"simplesecret"}')
- boto3_client_double.assert_called_with('secretsmanager', 'eu-west-1', aws_access_key_id='notakey',
- aws_secret_access_key="notasecret", aws_session_token=None)
-
-
-error_response = {'Error': {'Code': 'ResourceNotFoundException', 'Message': 'Fake Testing Error'}}
-operation_name = 'FakeOperation'
-
-
-def test_warn_denied_variable(mocker, dummy_credentials):
- boto3_double = mocker.MagicMock()
- boto3_double.Session.return_value.client.return_value.get_secret_value.side_effect = ClientError(error_response, operation_name)
-
- with pytest.raises(AnsibleError):
- mocker.patch.object(boto3, 'session', boto3_double)
- lookup_loader.get('aws_secret').run(["denied_variable"], None, **dummy_credentials)
diff --git a/test/units/plugins/lookup/test_aws_ssm.py b/test/units/plugins/lookup/test_aws_ssm.py
deleted file mode 100644
index 811ccfb489..0000000000
--- a/test/units/plugins/lookup/test_aws_ssm.py
+++ /dev/null
@@ -1,166 +0,0 @@
-#
-# (c) 2017 Michael De La Rue
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-from copy import copy
-
-from ansible.errors import AnsibleError
-
-from ansible.plugins.lookup import aws_ssm
-
-try:
- import boto3
- from botocore.exceptions import ClientError
-except ImportError:
- pytestmark = pytest.mark.skip("This test requires the boto3 and botocore Python libraries")
-
-simple_variable_success_response = {
- 'Parameters': [
- {
- 'Name': 'simple_variable',
- 'Type': 'String',
- 'Value': 'simplevalue',
- 'Version': 1
- }
- ],
- 'InvalidParameters': [],
- 'ResponseMetadata': {
- 'RequestId': '12121212-3434-5656-7878-9a9a9a9a9a9a',
- 'HTTPStatusCode': 200,
- 'HTTPHeaders': {
- 'x-amzn-requestid': '12121212-3434-5656-7878-9a9a9a9a9a9a',
- 'content-type': 'application/x-amz-json-1.1',
- 'content-length': '116',
- 'date': 'Tue, 23 Jan 2018 11:04:27 GMT'
- },
- 'RetryAttempts': 0
- }
-}
-
-path_success_response = copy(simple_variable_success_response)
-path_success_response['Parameters'] = [
- {'Name': '/testpath/too', 'Type': 'String', 'Value': 'simple_value_too', 'Version': 1},
- {'Name': '/testpath/won', 'Type': 'String', 'Value': 'simple_value_won', 'Version': 1}
-]
-
-missing_variable_response = copy(simple_variable_success_response)
-missing_variable_response['Parameters'] = []
-missing_variable_response['InvalidParameters'] = ['missing_variable']
-
-some_missing_variable_response = copy(simple_variable_success_response)
-some_missing_variable_response['Parameters'] = [
- {'Name': 'simple', 'Type': 'String', 'Value': 'simple_value', 'Version': 1},
- {'Name': '/testpath/won', 'Type': 'String', 'Value': 'simple_value_won', 'Version': 1}
-]
-some_missing_variable_response['InvalidParameters'] = ['missing_variable']
-
-
-dummy_credentials = {}
-dummy_credentials['boto_profile'] = None
-dummy_credentials['aws_secret_key'] = "notasecret"
-dummy_credentials['aws_access_key'] = "notakey"
-dummy_credentials['aws_security_token'] = None
-dummy_credentials['region'] = 'eu-west-1'
-
-
-def test_lookup_variable(mocker):
- lookup = aws_ssm.LookupModule()
- lookup._load_name = "aws_ssm"
-
- boto3_double = mocker.MagicMock()
- boto3_double.Session.return_value.client.return_value.get_parameters.return_value = simple_variable_success_response
- boto3_client_double = boto3_double.Session.return_value.client
-
- mocker.patch.object(boto3, 'session', boto3_double)
- retval = lookup.run(["simple_variable"], {}, **dummy_credentials)
- assert(retval[0] == "simplevalue")
- boto3_client_double.assert_called_with('ssm', 'eu-west-1', aws_access_key_id='notakey',
- aws_secret_access_key="notasecret", aws_session_token=None)
-
-
-def test_path_lookup_variable(mocker):
- lookup = aws_ssm.LookupModule()
- lookup._load_name = "aws_ssm"
-
- boto3_double = mocker.MagicMock()
- get_path_fn = boto3_double.Session.return_value.client.return_value.get_parameters_by_path
- get_path_fn.return_value = path_success_response
- boto3_client_double = boto3_double.Session.return_value.client
-
- mocker.patch.object(boto3, 'session', boto3_double)
- args = copy(dummy_credentials)
- args["bypath"] = 'true'
- retval = lookup.run(["/testpath"], {}, **args)
- assert(retval[0]["/testpath/won"] == "simple_value_won")
- assert(retval[0]["/testpath/too"] == "simple_value_too")
- boto3_client_double.assert_called_with('ssm', 'eu-west-1', aws_access_key_id='notakey',
- aws_secret_access_key="notasecret", aws_session_token=None)
- get_path_fn.assert_called_with(Path="/testpath", Recursive=False, WithDecryption=True)
-
-
-def test_return_none_for_missing_variable(mocker):
- """
-    During Jinja2 templating we shouldn't normally raise exceptions, since that
-    blocks the ability to use defaults.
-
-    For this reason we return ``None`` for missing variables.
- """
- lookup = aws_ssm.LookupModule()
- lookup._load_name = "aws_ssm"
-
- boto3_double = mocker.MagicMock()
- boto3_double.Session.return_value.client.return_value.get_parameters.return_value = missing_variable_response
-
- mocker.patch.object(boto3, 'session', boto3_double)
- retval = lookup.run(["missing_variable"], {}, **dummy_credentials)
- assert(retval[0] is None)
-
-
-def test_match_retvals_to_call_params_even_with_some_missing_variables(mocker):
- """
-    If we get a complex list of variables with some missing and some not, we still have to
-    return a list whose entries line up positionally with the original variable list.
- """
- lookup = aws_ssm.LookupModule()
- lookup._load_name = "aws_ssm"
-
- boto3_double = mocker.MagicMock()
- boto3_double.Session.return_value.client.return_value.get_parameters.return_value = some_missing_variable_response
-
- mocker.patch.object(boto3, 'session', boto3_double)
- retval = lookup.run(["simple", "missing_variable", "/testpath/won", "simple"], {}, **dummy_credentials)
- assert(retval == ["simple_value", None, "simple_value_won", "simple_value"])
-
-
-error_response = {'Error': {'Code': 'ResourceNotFoundException', 'Message': 'Fake Testing Error'}}
-operation_name = 'FakeOperation'
-
-
-def test_warn_denied_variable(mocker):
- lookup = aws_ssm.LookupModule()
- lookup._load_name = "aws_ssm"
-
- boto3_double = mocker.MagicMock()
- boto3_double.Session.return_value.client.return_value.get_parameters.side_effect = ClientError(error_response, operation_name)
-
- with pytest.raises(AnsibleError):
- mocker.patch.object(boto3, 'session', boto3_double)
- lookup.run(["denied_variable"], {}, **dummy_credentials)