author     Sloane Hertel <shertel@redhat.com>    2018-01-08 11:21:49 -0500
committer  Ryan Brown <sb@ryansb.com>            2018-01-08 11:21:49 -0500
commit     788010d0f0b6523cab8ab3dae6a5350ccc746cb5 (patch)
tree       e61f40e8fdf2ce63db5fd7bfa3ad91c40e82d83d
parent     46263d77f145ffd46a0ff05ce61c5b96026c02f8 (diff)
download   ansible-788010d0f0b6523cab8ab3dae6a5350ccc746cb5.tar.gz
[cloud][testing] New integration tests for ec2_asg (#30554)
* Add some integration tests for ec2_asg.
* Remove exception handling from the ec2_asg boto3 connection since it is handled in boto3_conn().
* Update test failure assertions.
* Use a yaml anchor for credentials and remove unnecessary dependencies.
* Move AWS boto3 module credentials tests to a separate target.
* Remove filters from tests.
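The yaml anchor mentioned above relies on standard YAML anchors and merge keys. A minimal sketch of how the pattern expands at load time, assuming PyYAML is available (the values are placeholders, not real credentials):

import yaml  # PyYAML

doc = """
credentials: &aws_connection_info
  aws_access_key: PLACEHOLDER_KEY
  region: us-east-1
task_args:
  name: example-asg
  <<: *aws_connection_info
"""

data = yaml.safe_load(doc)
# The `<<` merge key copies the anchored mapping's keys into task_args.
assert data["task_args"]["region"] == "us-east-1"
assert data["task_args"]["name"] == "example-asg"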
-rw-r--r--  lib/ansible/module_utils/ec2.py                       5
-rw-r--r--  lib/ansible/modules/cloud/amazon/ec2_asg.py          16
-rw-r--r--  test/integration/targets/boto3_conn/aliases           1
-rw-r--r--  test/integration/targets/boto3_conn/tasks/main.yml   50
-rw-r--r--  test/integration/targets/ec2_asg/aliases              1
-rw-r--r--  test/integration/targets/ec2_asg/defaults/main.yml    6
-rw-r--r--  test/integration/targets/ec2_asg/meta/main.yml        2
-rw-r--r--  test/integration/targets/ec2_asg/tasks/main.yml     440
-rw-r--r--  test/integration/targets/ec2_asg/vars/main.yml        0
9 files changed, 510 insertions(+), 11 deletions(-)
diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py
index e90a17d01b..1fe742173f 100644
--- a/lib/ansible/module_utils/ec2.py
+++ b/lib/ansible/module_utils/ec2.py
@@ -242,7 +242,10 @@ def get_aws_connection_info(module, boto3=False):
module.fail_json(msg="boto is required for this module. Please install boto and try again")
elif HAS_BOTO3:
# here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
- region = botocore.session.Session(profile=profile_name).get_config_variable('region')
+ try:
+ region = botocore.session.Session(profile=profile_name).get_config_variable('region')
+            except botocore.exceptions.ProfileNotFound:
+ pass
else:
module.fail_json(msg="Boto3 is required for this module. Please install boto3 and try again")
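Sketched outside the module for illustration, the effect of this hunk is that an unknown profile no longer raises during the region lookup; the error is deferred so the later connection attempt can report it with a clearer message:

import botocore.session
from botocore.exceptions import ProfileNotFound

def lookup_region(profile_name):
    # Mirrors the patched logic: tolerate a missing profile here and let
    # the connection call surface the error instead.
    try:
        return botocore.session.Session(profile=profile_name).get_config_variable('region')
    except ProfileNotFound:
        return None

print(lookup_region('notavalidprofile'))  # None instead of a traceback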
diff --git a/lib/ansible/modules/cloud/amazon/ec2_asg.py b/lib/ansible/modules/cloud/amazon/ec2_asg.py
index ec871d2593..31a7bd1669 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_asg.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_asg.py
@@ -1416,16 +1416,12 @@ def main():
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
- try:
- connection = boto3_conn(module,
- conn_type='client',
- resource='autoscaling',
- region=region,
- endpoint=ec2_url,
- **aws_connect_params)
- except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
- module.fail_json(msg="Can't authorize connection. Check your credentials and profile.",
- exceptions=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+ connection = boto3_conn(module,
+ conn_type='client',
+ resource='autoscaling',
+ region=region,
+ endpoint=ec2_url,
+ **aws_connect_params)
changed = create_changed = replace_changed = False
if state == 'present':
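The deleted try/except is redundant because, per the commit message, boto3_conn() already turns credential and profile errors into a module failure. A rough sketch of that centralized pattern (illustrative only, not the actual boto3_conn() source; create_client_somehow is a hypothetical stand-in):

import traceback
import botocore.exceptions

def create_client_somehow(**conn_params):
    # Hypothetical stand-in for the real connection setup.
    raise botocore.exceptions.ProfileNotFound(profile=conn_params.get('profile', 'unknown'))

def boto3_conn_sketch(module, **conn_params):
    try:
        return create_client_somehow(**conn_params)
    except (botocore.exceptions.NoCredentialsError,
            botocore.exceptions.ProfileNotFound):
        # One central fail_json, rather than a try/except in every caller.
        module.fail_json(msg="Can't authorize connection. Check your credentials and profile.",
                         exception=traceback.format_exc())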
diff --git a/test/integration/targets/boto3_conn/aliases b/test/integration/targets/boto3_conn/aliases
new file mode 100644
index 0000000000..4ef4b2067d
--- /dev/null
+++ b/test/integration/targets/boto3_conn/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/test/integration/targets/boto3_conn/tasks/main.yml b/test/integration/targets/boto3_conn/tasks/main.yml
new file mode 100644
index 0000000000..d53973cedb
--- /dev/null
+++ b/test/integration/targets/boto3_conn/tasks/main.yml
@@ -0,0 +1,50 @@
+---
+# tasks file for boto3_conn
+
+- name: Test incomplete credentials with ec2_asg
+
+ block:
+
+ # ============================================================
+
+ - name: test invalid profile
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ region: "{{ aws_region }}"
+ profile: notavalidprofile
+ ignore_errors: yes
+ register: result
+
+    - name: assert the invalid profile produces a profile-not-found error
+ assert:
+ that:
+ - "'The config profile (notavalidprofile) could not be found' in result.msg"
+
+ - name: test partial credentials
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ region: "{{ aws_region }}"
+ aws_access_key: "{{ aws_access_key }}"
+ ignore_errors: yes
+ register: result
+
+    - name: assert partial credentials produce a missing-secret-key error
+ assert:
+ that:
+ - "'Partial credentials found in explicit, missing: aws_secret_access_key' in result.msg"
+
+ - name: test without specifying region
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ ignore_errors: yes
+ register: result
+
+    - name: assert a missing region produces a clear error message
+ assert:
+ that:
+ - result.msg == 'The ec2_asg module requires a region and none was found in configuration, environment variables or module parameters'
+
+ # ============================================================
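The messages asserted above come from botocore itself, so their exact wording can drift between botocore releases. The partial-credentials case can be reproduced directly (AKIAEXAMPLE is a placeholder key id):

import botocore.session
from botocore.exceptions import PartialCredentialsError

session = botocore.session.Session()
try:
    # An access key without a secret key triggers the error the second
    # test asserts on.
    session.create_client('autoscaling', region_name='us-east-1',
                          aws_access_key_id='AKIAEXAMPLE')
except PartialCredentialsError as e:
    print(e)  # Partial credentials found in explicit, missing: aws_secret_access_key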
diff --git a/test/integration/targets/ec2_asg/aliases b/test/integration/targets/ec2_asg/aliases
new file mode 100644
index 0000000000..4ef4b2067d
--- /dev/null
+++ b/test/integration/targets/ec2_asg/aliases
@@ -0,0 +1 @@
+cloud/aws
diff --git a/test/integration/targets/ec2_asg/defaults/main.yml b/test/integration/targets/ec2_asg/defaults/main.yml
new file mode 100644
index 0000000000..9547e36e59
--- /dev/null
+++ b/test/integration/targets/ec2_asg/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# defaults file for ec2_asg
+# Amazon Linux AMI 2017.09.0 (HVM), SSD Volume Type
+ec2_ami_image:
+ us-east-1: ami-8c1be5f6
+ us-east-2: ami-c5062ba0
diff --git a/test/integration/targets/ec2_asg/meta/main.yml b/test/integration/targets/ec2_asg/meta/main.yml
new file mode 100644
index 0000000000..79dc5650f4
--- /dev/null
+++ b/test/integration/targets/ec2_asg/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - boto3_conn
diff --git a/test/integration/targets/ec2_asg/tasks/main.yml b/test/integration/targets/ec2_asg/tasks/main.yml
new file mode 100644
index 0000000000..47f535ff23
--- /dev/null
+++ b/test/integration/targets/ec2_asg/tasks/main.yml
@@ -0,0 +1,440 @@
+---
+# tasks file for ec2_asg
+
+- name: Test incomplete arguments with ec2_asg
+
+ block:
+
+ # ============================================================
+
+ - name: test without specifying required module options
+ ec2_asg:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ ignore_errors: yes
+ register: result
+
+ - name: assert name is a required module option
+ assert:
+ that:
+ - "result.msg == 'missing required arguments: name'"
+
+- name: Run ec2_asg integration tests.
+
+ block:
+
+ # ============================================================
+
+ # Set up the testing dependencies: VPC, subnet, security group, and two launch configurations
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: Create VPC for use in testing
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.55.77.0/24
+ tags:
+ Name: Ansible Testing VPC
+ tenancy: default
+ <<: *aws_connection_info
+ register: testing_vpc
+
+ - name: Create internet gateway for use in testing
+ ec2_vpc_igw:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: present
+ <<: *aws_connection_info
+ register: igw
+
+ - name: Create subnet for use in testing
+ ec2_vpc_subnet:
+ tags: "{{ resource_prefix }}-subnet"
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.55.77.0/24
+ az: "{{ aws_region }}a"
+ resource_tags:
+ Name: Ansible Testing Subnet
+ <<: *aws_connection_info
+ register: testing_subnet
+
+ - name: create routing rules
+ ec2_vpc_route_table:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ testing_subnet.subnet.id }}"
+ <<: *aws_connection_info
+
+ - name: create a security group with the vpc created in the ec2_setup
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ <<: *aws_connection_info
+ register: sg
+
+ - name: ensure launch configs exist
+ ec2_lc:
+ name: "{{ item }}"
+ assign_public_ip: true
+ image_id: "{{ ec2_ami_image[aws_region] }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ packages:
+ - httpd
+ runcmd:
+ - "service httpd start"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: t2.micro
+ <<: *aws_connection_info
+ with_items:
+ - "{{ resource_prefix }}-lc"
+ - "{{ resource_prefix }}-lc-2"
+
+ # ============================================================
+
+ - name: launch asg and wait for instances to be deemed healthy (no ELB)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ state: present
+ wait_for_instances: yes
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - "output.viable_instances == 1"
+
+ # - name: pause for a bit to make sure that the group can't be trivially deleted
+ # pause: seconds=30
+ - name: kill asg
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ state: absent
+ wait_timeout: 700
+ <<: *aws_connection_info
+ async: 300
+
+ # ============================================================
+
+ - name: launch asg and do not wait for instances to be deemed healthy (no ELB)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ wait_for_instances: no
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - "output.viable_instances == 0"
+
+ - name: kill asg
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ state: absent
+ <<: *aws_connection_info
+ async: 300
+
+ # ============================================================
+
+ - name: load balancer name has to be less than 32 characters
+ # the 8 digit identifier at the end of resource_prefix helps determine during which test something
+ # was created
+ set_fact:
+ load_balancer_name: "{{ item }}-lb"
+ with_items: "{{ resource_prefix | regex_findall('.{8}$') }}"
+
+ - name: launch load balancer
+ ec2_elb_lb:
+ name: "{{ load_balancer_name }}"
+ state: present
+ security_group_ids:
+ - "{{ sg.group_id }}"
+ subnets: "{{ testing_subnet.subnet.id }}"
+ connection_draining_timeout: 60
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ health_check:
+ ping_protocol: tcp
+ ping_port: 80
+ ping_path: "/"
+ response_timeout: 5
+ interval: 10
+ unhealthy_threshold: 4
+ healthy_threshold: 2
+ <<: *aws_connection_info
+ register: load_balancer
+
+ - name: launch asg and wait for instances to be deemed healthy (ELB)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ health_check_type: ELB
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ health_check_period: 300
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ load_balancers: "{{ load_balancer_name }}"
+ wait_for_instances: yes
+ wait_timeout: 900
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - "output.viable_instances == 1"
+
+ # ============================================================
+
+ # grow scaling group to 3
+
+    - name: add 2 more instances and wait for them to be deemed healthy (ELB)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ health_check_type: ELB
+ desired_capacity: 3
+ min_size: 3
+ max_size: 5
+ health_check_period: 600
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ load_balancers: "{{ load_balancer_name }}"
+ wait_for_instances: yes
+ wait_timeout: 1200
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ - assert:
+ that:
+ - "output.viable_instances == 3"
+
+ # ============================================================
+
+    # perform rolling replace with a different launch configuration
+
+ - name: perform rolling update to new AMI
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc-2"
+ health_check_type: ELB
+ desired_capacity: 3
+ min_size: 1
+ max_size: 5
+ health_check_period: 900
+ load_balancers: "{{ load_balancer_name }}"
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ wait_for_instances: yes
+ replace_all_instances: yes
+ wait_timeout: 1800
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ # ensure that all instances have new launch config
+ - assert:
+ that:
+ - "item.value.launch_config_name == '{{ resource_prefix }}-lc-2'"
+ with_dict: "{{ output.instance_facts }}"
+
+ # assert they are all healthy and that the rolling update resulted in the appropriate number of instances
+ - assert:
+ that:
+ - "output.viable_instances == 3"
+
+ # ============================================================
+
+ # perform rolling replace with the original launch configuration
+
+    - name: perform rolling update back to the original AMI while removing the load balancer
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ health_check_type: EC2
+ desired_capacity: 3
+ min_size: 1
+ max_size: 5
+ health_check_period: 900
+ load_balancers: []
+ vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
+ wait_for_instances: yes
+ replace_all_instances: yes
+ wait_timeout: 1800
+ state: present
+ <<: *aws_connection_info
+ register: output
+
+ # ensure that all instances have new launch config
+ - assert:
+ that:
+ - "item.value.launch_config_name == '{{ resource_prefix }}-lc'"
+ with_dict: "{{ output.instance_facts }}"
+
+ # assert they are all healthy and that the rolling update resulted in the appropriate number of instances
+ # there should be the same number of instances as there were before the rolling update was performed
+ - assert:
+ that:
+ - "output.viable_instances == 3"
+
+ # ============================================================
+
+ always:
+
+ - name: kill asg
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ state: absent
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ # Remove the testing dependencies
+
+ - name: remove the load balancer
+ ec2_elb_lb:
+ name: "{{ load_balancer_name }}"
+ state: absent
+ security_group_ids:
+ - "{{ sg.group_id }}"
+ subnets: "{{ testing_subnet.subnet.id }}"
+ wait: yes
+ connection_draining_timeout: 60
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ health_check:
+ ping_protocol: tcp
+ ping_port: 80
+ ping_path: "/"
+ response_timeout: 5
+ interval: 10
+ unhealthy_threshold: 4
+ healthy_threshold: 2
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove launch configs
+ ec2_lc:
+ name: "{{ resource_prefix }}-lc"
+ state: absent
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+ with_items:
+ - "{{ resource_prefix }}-lc"
+ - "{{ resource_prefix }}-lc-2"
+
+ - name: remove the security group
+ ec2_group:
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: absent
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove routing rules
+ ec2_vpc_route_table:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ testing_subnet.subnet.id }}"
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove internet gateway
+ ec2_vpc_igw:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ state: absent
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove the subnet
+ ec2_vpc_subnet:
+ tags: "{{ resource_prefix }}-subnet"
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: 10.55.77.0/24
+ az: "{{ aws_region }}a"
+ resource_tags:
+ Name: Ansible Testing Subnet
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: remove the VPC
+ ec2_vpc_net:
+ name: "{{ resource_prefix }}-vpc"
+ cidr_block: 10.55.77.0/24
+ state: absent
+ tags:
+ Name: Ansible Testing VPC
+ tenancy: default
+ <<: *aws_connection_info
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
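One subtlety in the tasks above: ELB names are limited to 32 characters, so the tests keep only the trailing 8-character job identifier from resource_prefix when building the load balancer name. The regex_findall('.{8}$') filter is equivalent to this Python:

import re

resource_prefix = "ansible-testing-20f9fd91"        # hypothetical CI prefix
suffix = re.findall(r".{8}$", resource_prefix)      # -> ['20f9fd91']
load_balancer_name = "%s-lb" % suffix[0]
assert len(load_balancer_name) <= 32
print(load_balancer_name)                           # 20f9fd91-lb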
diff --git a/test/integration/targets/ec2_asg/vars/main.yml b/test/integration/targets/ec2_asg/vars/main.yml
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/integration/targets/ec2_asg/vars/main.yml