summaryrefslogtreecommitdiff
path: root/test/support/integration/plugins/modules
diff options
context:
space:
mode:
Diffstat (limited to 'test/support/integration/plugins/modules')
l---------test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py1
l---------test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py1
l---------test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py1
l---------test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py1
l---------test/support/integration/plugins/modules/_azure_rm_resource_facts.py1
l---------test/support/integration/plugins/modules/_azure_rm_webapp_facts.py1
-rw-r--r--test/support/integration/plugins/modules/aws_az_info.py110
-rw-r--r--test/support/integration/plugins/modules/aws_codebuild.py408
-rw-r--r--test/support/integration/plugins/modules/aws_s3.py925
-rw-r--r--test/support/integration/plugins/modules/aws_step_functions_state_machine.py232
-rw-r--r--test/support/integration/plugins/modules/aws_step_functions_state_machine_execution.py197
-rw-r--r--test/support/integration/plugins/modules/azure_rm_appserviceplan.py379
-rw-r--r--test/support/integration/plugins/modules/azure_rm_functionapp.py421
-rw-r--r--test/support/integration/plugins/modules/azure_rm_functionapp_info.py206
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py241
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py216
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py304
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py211
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py277
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py207
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbserver.py388
-rw-r--r--test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py264
-rw-r--r--test/support/integration/plugins/modules/azure_rm_resource.py427
-rw-r--r--test/support/integration/plugins/modules/azure_rm_resource_info.py431
-rw-r--r--test/support/integration/plugins/modules/azure_rm_storageaccount.py684
-rw-r--r--test/support/integration/plugins/modules/azure_rm_webapp.py1070
-rw-r--r--test/support/integration/plugins/modules/azure_rm_webapp_info.py488
-rw-r--r--test/support/integration/plugins/modules/azure_rm_webappslot.py1058
-rw-r--r--test/support/integration/plugins/modules/cloudformation.py837
-rw-r--r--test/support/integration/plugins/modules/cloudformation_info.py354
-rw-r--r--test/support/integration/plugins/modules/cs_role.py211
-rw-r--r--test/support/integration/plugins/modules/cs_role_permission.py351
-rw-r--r--test/support/integration/plugins/modules/cs_service_offering.py583
-rw-r--r--test/support/integration/plugins/modules/ec2.py1766
-rw-r--r--test/support/integration/plugins/modules/ec2_ami_info.py281
-rw-r--r--test/support/integration/plugins/modules/ec2_eni.py633
-rw-r--r--test/support/integration/plugins/modules/ec2_eni_info.py275
-rw-r--r--test/support/integration/plugins/modules/ec2_group.py1345
-rw-r--r--test/support/integration/plugins/modules/ec2_instance.py1805
-rw-r--r--test/support/integration/plugins/modules/ec2_instance_info.py571
-rw-r--r--test/support/integration/plugins/modules/ec2_key.py271
-rw-r--r--test/support/integration/plugins/modules/ec2_vpc_igw.py283
-rw-r--r--test/support/integration/plugins/modules/ec2_vpc_net.py524
-rw-r--r--test/support/integration/plugins/modules/ec2_vpc_route_table.py750
-rw-r--r--test/support/integration/plugins/modules/ec2_vpc_subnet.py604
-rw-r--r--test/support/integration/plugins/modules/hcloud_server.py555
-rw-r--r--test/support/integration/plugins/modules/iam_role.py673
-rw-r--r--test/support/integration/plugins/modules/k8s.py274
-rw-r--r--test/support/integration/plugins/modules/k8s_info.py179
-rw-r--r--test/support/integration/plugins/modules/nios_txt_record.py134
-rw-r--r--test/support/integration/plugins/modules/nios_zone.py228
-rw-r--r--test/support/integration/plugins/modules/python_requirements_info.py175
-rw-r--r--test/support/integration/plugins/modules/s3_bucket.py740
-rw-r--r--test/support/integration/plugins/modules/sts_assume_role.py180
-rw-r--r--test/support/integration/plugins/modules/tower_credential_type.py174
-rw-r--r--test/support/integration/plugins/modules/tower_receive.py172
-rw-r--r--test/support/integration/plugins/modules/vmware_guest.py2914
-rw-r--r--test/support/integration/plugins/modules/vmware_guest_custom_attributes.py259
-rw-r--r--test/support/integration/plugins/modules/vmware_host_hyperthreading.py261
59 files changed, 27512 insertions, 0 deletions
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py
new file mode 120000
index 0000000000..f9993bfba7
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbconfiguration_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py
new file mode 120000
index 0000000000..b8293e64df
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbdatabase_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py
new file mode 120000
index 0000000000..4311a0c1cc
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbfirewallrule_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py
new file mode 120000
index 0000000000..5f76e0e932
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbserver_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_resource_facts.py b/test/support/integration/plugins/modules/_azure_rm_resource_facts.py
new file mode 120000
index 0000000000..710fda1074
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_resource_facts.py
@@ -0,0 +1 @@
+azure_rm_resource_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py b/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py
new file mode 120000
index 0000000000..ead87c850b
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py
@@ -0,0 +1 @@
+azure_rm_webapp_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/aws_az_info.py b/test/support/integration/plugins/modules/aws_az_info.py
new file mode 100644
index 0000000000..eccbf4d7d4
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_az_info.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'supported_by': 'community',
+ 'status': ['preview']
+}
+
+DOCUMENTATION = '''
+module: aws_az_info
+short_description: Gather information about availability zones in AWS.
+description:
+ - Gather information about availability zones in AWS.
+ - This module was called C(aws_az_facts) before Ansible 2.9. The usage did not change.
+version_added: '2.5'
+author: 'Henrique Rodrigues (@Sodki)'
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for
+ possible filters. Filter names and values are case sensitive. You can also use underscores
+ instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
+ required: false
+ default: {}
+ type: dict
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements: [botocore, boto3]
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all availability zones
+- aws_az_info:
+
+# Gather information about a single availability zone
+- aws_az_info:
+ filters:
+ zone-name: eu-west-1a
+'''
+
+RETURN = '''
+availability_zones:
+ returned: on success
+ description: >
+ Availability zones that match the provided filters. Each element consists of a dict with all the information
+ related to that available zone.
+ type: list
+ sample: "[
+ {
+ 'messages': [],
+ 'region_name': 'us-west-1',
+ 'state': 'available',
+ 'zone_name': 'us-west-1b'
+ },
+ {
+ 'messages': [],
+ 'region_name': 'us-west-1',
+ 'state': 'available',
+ 'zone_name': 'us-west-1c'
+ }
+ ]"
+'''
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ if module._name == 'aws_az_facts':
+ module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'", version='2.14')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility
+ sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items())
+
+ try:
+ availability_zones = connection.describe_availability_zones(
+ Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe availability zones.")
+
+ # Turn the boto3 result into ansible_friendly_snaked_names
+ snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]
+
+ module.exit_json(availability_zones=snaked_availability_zones)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/aws_codebuild.py b/test/support/integration/plugins/modules/aws_codebuild.py
new file mode 100644
index 0000000000..837e22e005
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_codebuild.py
@@ -0,0 +1,408 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_codebuild
+short_description: Create or delete an AWS CodeBuild project
+notes:
+ - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html).
+description:
+ - Create or delete a CodeBuild projects on AWS, used for building code artifacts from source code.
+version_added: "2.9"
+author:
+ - Stefan Horning (@stefanhorning) <horning@mediapeers.com>
+requirements: [ botocore, boto3 ]
+options:
+ name:
+ description:
+ - Name of the CodeBuild project.
+ required: true
+ type: str
+ description:
+ description:
+ - Descriptive text of the CodeBuild project.
+ type: str
+ source:
+ description:
+ - Configure service and location for the build input source.
+ required: true
+ suboptions:
+ type:
+ description:
+ - "The type of the source. Allows one of these: C(CODECOMMIT), C(CODEPIPELINE), C(GITHUB), C(S3), C(BITBUCKET), C(GITHUB_ENTERPRISE)."
+ required: true
+ type: str
+ location:
+ description:
+ - Information about the location of the source code to be built. For type CODEPIPELINE location should not be specified.
+ type: str
+ git_clone_depth:
+ description:
+ - When using git you can specify the clone depth as an integer here.
+ type: int
+ buildspec:
+ description:
+ - The build spec declaration to use for the builds in this build project. Leave empty if part of the code project.
+ type: str
+ insecure_ssl:
+ description:
+ - Enable this flag to ignore SSL warnings while connecting to the project source code.
+ type: bool
+ type: dict
+ artifacts:
+ description:
+ - Information about the build output artifacts for the build project.
+ required: true
+ suboptions:
+ type:
+ description:
+ - "The type of build output for artifacts. Can be one of the following: C(CODEPIPELINE), C(NO_ARTIFACTS), C(S3)."
+ required: true
+ location:
+ description:
+ - Information about the build output artifact location. When choosing type S3, set the bucket name here.
+ path:
+ description:
+ - Along with namespace_type and name, the pattern that AWS CodeBuild will use to name and store the output artifacts.
+ - Used for path in S3 bucket when type is C(S3).
+ namespace_type:
+ description:
+ - Along with path and name, the pattern that AWS CodeBuild will use to determine the name and location to store the output artifacts.
+ - Accepts C(BUILD_ID) and C(NONE).
+ - "See docs here: U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project)."
+ name:
+ description:
+ - Along with path and namespace_type, the pattern that AWS CodeBuild will use to name and store the output artifact.
+ packaging:
+ description:
+ - The type of build output artifact to create on S3, can be NONE for creating a folder or ZIP for a ZIP file.
+ type: dict
+ cache:
+ description:
+ - Caching params to speed up following builds.
+ suboptions:
+ type:
+ description:
+ - Cache type. Can be C(NO_CACHE) or C(S3).
+ required: true
+ location:
+ description:
+ - Caching location on S3.
+ required: true
+ type: dict
+ environment:
+ description:
+ - Information about the build environment for the build project.
+ suboptions:
+ type:
+ description:
+ - The type of build environment to use for the project. Usually C(LINUX_CONTAINER).
+ required: true
+ image:
+ description:
+ - The ID of the Docker image to use for this build project.
+ required: true
+ compute_type:
+ description:
+ - Information about the compute resources the build project will use.
+ - "Available values include: C(BUILD_GENERAL1_SMALL), C(BUILD_GENERAL1_MEDIUM), C(BUILD_GENERAL1_LARGE)."
+ required: true
+ environment_variables:
+ description:
+ - A set of environment variables to make available to builds for the build project. List of dictionaries with name and value fields.
+ - "Example: { name: 'MY_ENV_VARIABLE', value: 'test' }"
+ privileged_mode:
+ description:
+ - Enables running the Docker daemon inside a Docker container. Set to true only if the build project is be used to build Docker images.
+ type: dict
+ service_role:
+ description:
+ - The ARN of the AWS IAM role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
+ type: str
+ timeout_in_minutes:
+ description:
+ - How long CodeBuild should wait until timing out any build that has not been marked as completed.
+ default: 60
+ type: int
+ encryption_key:
+ description:
+ - The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
+ type: str
+ tags:
+ description:
+ - A set of tags for the build project.
+ type: list
+ elements: dict
+ suboptions:
+ key:
+ description: The name of the Tag.
+ type: str
+ value:
+ description: The value of the Tag.
+ type: str
+ vpc_config:
+ description:
+ - The VPC config enables AWS CodeBuild to access resources in an Amazon VPC.
+ type: dict
+ state:
+ description:
+ - Create or remove code build project.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- aws_codebuild:
+ name: my_project
+ description: My nice little project
+ service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role"
+ source:
+ # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3
+ type: CODEPIPELINE
+ buildspec: ''
+ artifacts:
+ namespaceType: NONE
+ packaging: NONE
+ type: CODEPIPELINE
+ name: my_project
+ environment:
+ computeType: BUILD_GENERAL1_SMALL
+ privilegedMode: "true"
+ image: "aws/codebuild/docker:17.09.0"
+ type: LINUX_CONTAINER
+ environmentVariables:
+ - { name: 'PROFILE', value: 'staging' }
+ encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3"
+ region: us-east-1
+ state: present
+'''
+
+RETURN = '''
+project:
+ description: Returns the dictionary describing the code project configuration.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: Name of the CodeBuild project
+ returned: always
+ type: str
+ sample: my_project
+ arn:
+ description: ARN of the CodeBuild project
+ returned: always
+ type: str
+ sample: arn:aws:codebuild:us-east-1:123123123:project/vod-api-app-builder
+ description:
+ description: A description of the build project
+ returned: always
+ type: str
+ sample: My nice little project
+ source:
+ description: Information about the build input source code.
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of the repository
+ returned: always
+ type: str
+ sample: CODEPIPELINE
+ location:
+ description: Location identifier, depending on the source type.
+ returned: when configured
+ type: str
+ git_clone_depth:
+ description: The git clone depth
+ returned: when configured
+ type: int
+ build_spec:
+ description: The build spec declaration to use for the builds in this build project.
+ returned: always
+ type: str
+ auth:
+ description: Information about the authorization settings for AWS CodeBuild to access the source code to be built.
+ returned: when configured
+ type: complex
+ insecure_ssl:
+ description: True if set to ignore SSL warnings.
+ returned: when configured
+ type: bool
+ artifacts:
+ description: Information about the output of build artifacts
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of build artifact.
+ returned: always
+ type: str
+ sample: CODEPIPELINE
+ location:
+ description: Output location for build artifacts
+ returned: when configured
+ type: str
+ # and more... see http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project
+ cache:
+ description: Cache settings for the build project.
+ returned: when configured
+ type: dict
+ environment:
+ description: Environment settings for the build
+ returned: always
+ type: dict
+ service_role:
+ description: IAM role to be used during build to access other AWS services.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123123123:role/codebuild-service-role
+ timeout_in_minutes:
+ description: The timeout of a build in minutes
+ returned: always
+ type: int
+ sample: 60
+ tags:
+ description: Tags added to the project
+ returned: when configured
+ type: list
+ created:
+ description: Timestamp of the create time of the project
+ returned: always
+ type: str
+ sample: "2018-04-17T16:56:03.245000+02:00"
+'''
+
+from ansible.module_utils.aws.core import AnsibleAWSModule, get_boto3_client_method_parameters
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def create_or_update_project(client, params, module):
+ resp = {}
+ name = params['name']
+ # clean up params
+ formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None))
+ permitted_create_params = get_boto3_client_method_parameters(client, 'create_project')
+ permitted_update_params = get_boto3_client_method_parameters(client, 'update_project')
+
+ formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params)
+ formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params)
+
+ # Check if project with that name already exists and if so update existing:
+ found = describe_project(client=client, name=name, module=module)
+ changed = False
+
+ if 'name' in found:
+ found_project = found
+ resp = update_project(client=client, params=formatted_update_params, module=module)
+ updated_project = resp['project']
+
+ # Prep both dicts for sensible change comparison:
+ found_project.pop('lastModified')
+ updated_project.pop('lastModified')
+ if 'tags' not in updated_project:
+ updated_project['tags'] = []
+
+ if updated_project != found_project:
+ changed = True
+ return resp, changed
+ # Or create new project:
+ try:
+ resp = client.create_project(**formatted_create_params)
+ changed = True
+ return resp, changed
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to create CodeBuild project")
+
+
+def update_project(client, params, module):
+ name = params['name']
+
+ try:
+ resp = client.update_project(**params)
+ return resp
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update CodeBuild project")
+
+
+def delete_project(client, name, module):
+ found = describe_project(client=client, name=name, module=module)
+ changed = False
+ if 'name' in found:
+ # Mark as changed when a project with that name existed before calling delete
+ changed = True
+ try:
+ resp = client.delete_project(name=name)
+ return resp, changed
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to delete CodeBuild project")
+
+
+def describe_project(client, name, module):
+ project = {}
+ try:
+ projects = client.batch_get_projects(names=[name])['projects']
+ if len(projects) > 0:
+ project = projects[0]
+ return project
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to describe CodeBuild projects")
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(),
+ source=dict(required=True, type='dict'),
+ artifacts=dict(required=True, type='dict'),
+ cache=dict(type='dict'),
+ environment=dict(type='dict'),
+ service_role=dict(),
+ timeout_in_minutes=dict(type='int', default=60),
+ encryption_key=dict(),
+ tags=dict(type='list'),
+ vpc_config=dict(type='dict'),
+ state=dict(choices=['present', 'absent'], default='present')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ client_conn = module.client('codebuild')
+
+ state = module.params.get('state')
+ changed = False
+
+ if state == 'present':
+ project_result, changed = create_or_update_project(
+ client=client_conn,
+ params=module.params,
+ module=module)
+ elif state == 'absent':
+ project_result, changed = delete_project(client=client_conn, name=module.params['name'], module=module)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(project_result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/aws_s3.py b/test/support/integration/plugins/modules/aws_s3.py
new file mode 100644
index 0000000000..54874f05ce
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_s3.py
@@ -0,0 +1,925 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_s3
+short_description: manage objects in S3.
+description:
+ - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and
+ deleting both objects and buckets, retrieving objects as files or strings and generating download links.
+ This module has a dependency on boto3 and botocore.
+notes:
+ - In 2.4, this module has been renamed from C(s3) into M(aws_s3).
+version_added: "1.1"
+options:
+ bucket:
+ description:
+ - Bucket name.
+ required: true
+ type: str
+ dest:
+ description:
+ - The destination file path when downloading an object/key with a GET operation.
+ version_added: "1.3"
+ type: path
+ encrypt:
+ description:
+ - When set for PUT mode, asks for server-side encryption.
+ default: true
+ version_added: "2.0"
+ type: bool
+ encryption_mode:
+ description:
+ - What encryption mode to use if I(encrypt=true).
+ default: AES256
+ choices:
+ - AES256
+ - aws:kms
+ version_added: "2.7"
+ type: str
+ expiry:
+ description:
+ - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation.
+ default: 600
+ aliases: ['expiration']
+ type: int
+ headers:
+ description:
+ - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
+ version_added: "2.0"
+ type: dict
+ marker:
+ description:
+ - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order.
+ version_added: "2.0"
+ type: str
+ max_keys:
+ description:
+ - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
+ default: 1000
+ version_added: "2.0"
+ type: int
+ metadata:
+ description:
+ - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
+ version_added: "1.6"
+ type: dict
+ mode:
+ description:
+ - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+),
+ getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket),
+ and delobj (delete object, Ansible 2.0+).
+ required: true
+ choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
+ type: str
+ object:
+ description:
+ - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
+ type: str
+ permission:
+ description:
+ - This option lets the user set the canned permissions on the object/bucket that are created.
+ The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or
+ C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read),
+ C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list.
+ default: ['private']
+ version_added: "2.0"
+ type: list
+ elements: str
+ prefix:
+ description:
+ - Limits the response to keys that begin with the specified prefix for list mode.
+ default: ""
+ version_added: "2.0"
+ type: str
+ version:
+ description:
+ - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
+ version_added: "2.0"
+ type: str
+ overwrite:
+ description:
+ - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0.
+ When this is set to 'different', the md5 sum of the local file is compared with the 'ETag' of the object/key in S3.
+ The ETag may or may not be an MD5 digest of the object data. See the ETag response header here
+ U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html)
+ default: 'always'
+ aliases: ['force']
+ version_added: "1.2"
+ type: str
+ retries:
+ description:
+ - On recoverable failure, how many times to retry before actually failing.
+ default: 0
+ version_added: "2.0"
+ type: int
+ aliases: ['retry']
+ s3_url:
+ description:
+ - S3 URL endpoint for usage with Ceph, Eucalyptus and fakes3 etc. Otherwise assumes AWS.
+ aliases: [ S3_URL ]
+ type: str
+ dualstack:
+ description:
+ - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
+ - Requires at least botocore version 1.4.45.
+ type: bool
+ default: false
+ version_added: "2.7"
+ rgw:
+ description:
+ - Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url).
+ default: false
+ version_added: "2.2"
+ type: bool
+ src:
+ description:
+ - The source file path when performing a PUT operation.
+ version_added: "1.3"
+ type: str
+ ignore_nonexistent_bucket:
+ description:
+ - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the
+ GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying
+ I(ignore_nonexistent_bucket=true)."
+ version_added: "2.3"
+ type: bool
+ encryption_kms_key_id:
+ description:
+ - KMS key id to use when encrypting objects using I(encrypting=aws:kms). Ignored if I(encryption) is not C(aws:kms)
+ version_added: "2.7"
+ type: str
+requirements: [ "boto3", "botocore" ]
+author:
+ - "Lester Wade (@lwade)"
+ - "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+- name: Simple PUT operation
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+
+- name: Simple PUT operation in Ceph RGW S3
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ rgw: true
+ s3_url: "http://localhost:8000"
+
+- name: Simple GET operation
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Get a specific version of an object.
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ version: 48c9ee5131af7a716edc22df9772aa6f
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: PUT/upload with metadata
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
+
+- name: PUT/upload with custom headers
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'
+
+- name: List keys simple
+ aws_s3:
+ bucket: mybucket
+ mode: list
+
+- name: List keys all options
+ aws_s3:
+ bucket: mybucket
+ mode: list
+ prefix: /my/desired/
+ marker: /my/desired/0023.txt
+ max_keys: 472
+
+- name: Create an empty bucket
+ aws_s3:
+ bucket: mybucket
+ mode: create
+ permission: public-read
+
+- name: Create a bucket with key as directory, in the EU region
+ aws_s3:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+ region: eu-west-1
+
+- name: Delete a bucket and all contents
+ aws_s3:
+ bucket: mybucket
+ mode: delete
+
+- name: GET an object but don't download if the file checksums match. New in 2.0
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+ overwrite: different
+
+- name: Delete an object from a bucket
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ mode: delobj
+'''
+
+RETURN = '''
+msg:
+ description: Message indicating the status of the operation.
+ returned: always
+ type: str
+ sample: PUT operation complete
+url:
+ description: URL of the object.
+ returned: (for put and geturl operations)
+ type: str
+ sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature>
+expiry:
+ description: Number of seconds the presigned url is valid for.
+ returned: (for geturl operation)
+ type: int
+ sample: 600
+contents:
+ description: Contents of the object as string.
+ returned: (for getstr operation)
+ type: str
+ sample: "Hello, world!"
+s3_keys:
+ description: List of object keys.
+ returned: (for list operation)
+ type: list
+ elements: str
+ sample:
+ - prefix1/
+ - prefix1/key1
+ - prefix1/key2
+'''
+
+import mimetypes
+import os
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ssl import SSLError
+from ansible.module_utils.basic import to_text, to_native
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.aws.s3 import calculate_etag, HAS_MD5
+from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
+
+try:
+ import botocore
+except ImportError:
+ pass # will be detected by imported AnsibleAWSModule
+
+IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented']
+
+
class Sigv4Required(Exception):
    """Raised when S3 rejects a GET because the request must be signed with
    AWS Signature Version 4; the caller rebuilds the client with sig_4=True
    and retries (see main()'s 'get'/'getstr' branches)."""
    pass
+
+
def key_check(module, s3, bucket, obj, version=None, validate=True):
    """Return True when *obj* exists in *bucket* (optionally at *version*).

    A 404 from HeadObject means the key is absent. A 403 is tolerated when
    validate is False (ignore_nonexistent_bucket); any other error is fatal.
    """
    head_kwargs = {'Bucket': bucket, 'Key': obj}
    if version:
        head_kwargs['VersionId'] = version
    try:
        s3.head_object(**head_kwargs)
    except botocore.exceptions.ClientError as e:
        error_code = int(e.response['Error']['Code'])
        if error_code == 404:
            return False
        if not (error_code == 403 and validate is False):
            module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
    return True
+
+
def etag_compare(module, local_file, s3, bucket, obj, version=None):
    """Return True when the local file's computed ETag equals the remote object's."""
    remote_etag = get_etag(s3, bucket, obj, version=version)
    return remote_etag == calculate_etag(module, local_file, remote_etag, s3, bucket, obj, version)
+
+
def get_etag(s3, bucket, obj, version=None):
    """Return the object's ETag from HeadObject, or None if nothing came back."""
    head_kwargs = {'Bucket': bucket, 'Key': obj}
    if version:
        head_kwargs['VersionId'] = version
    # Renamed local (was 'key_check') to avoid shadowing the module-level
    # key_check() helper.
    head_response = s3.head_object(**head_kwargs)
    if not head_response:
        return None
    return head_response['ETag']
+
+
def bucket_check(module, s3, bucket, validate=True):
    """Return True when *bucket* exists.

    404 -> absent. A 403 is tolerated when validate is False. Connection
    problems and any other error abort the module.
    """
    try:
        s3.head_bucket(Bucket=bucket)
    except botocore.exceptions.ClientError as e:
        error_code = int(e.response['Error']['Code'])
        if error_code == 404:
            return False
        if not (error_code == 403 and validate is False):
            module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
    except botocore.exceptions.EndpointConnectionError as e:
        module.fail_json_aws(e, msg="Invalid endpoint provided")
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
    return True
+
+
def create_bucket(module, s3, bucket, location=None):
    # Create *bucket* (respecting check mode) and apply any requested canned
    # bucket ACLs from module.params['permission'].
    if module.check_mode:
        module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True)
    configuration = {}
    # us-east-1 is the API default region and must NOT be sent as a
    # LocationConstraint; every other region has to be named explicitly.
    if location not in ('us-east-1', None):
        configuration['LocationConstraint'] = location
    try:
        if len(configuration) > 0:
            s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration)
        else:
            s3.create_bucket(Bucket=bucket)
        if module.params.get('permission'):
            # Wait for the bucket to exist before setting ACLs
            s3.get_waiter('bucket_exists').wait(Bucket=bucket)
        for acl in module.params.get('permission'):
            s3.put_bucket_acl(ACL=acl, Bucket=bucket)
    except botocore.exceptions.ClientError as e:
        # S3 drop-in providers (RGW, fakes3, ...) may not implement
        # PutBucketAcl; warn instead of failing so the operation continues.
        if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
            module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
        else:
            module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")

    # NOTE(review): returns True for any truthy bucket name, even when the
    # create was tolerated via the drop-in warning path; returns None for a
    # falsy name — confirm callers only rely on the truthy case.
    if bucket:
        return True
+
+
def paginated_list(s3, **pagination_params):
    """Yield one list of object key names per ListObjectsV2 result page."""
    paginator = s3.get_paginator('list_objects_v2')
    for result_page in paginator.paginate(**pagination_params):
        yield [entry['Key'] for entry in result_page.get('Contents', [])]
+
+
def paginated_versioned_list_with_fallback(s3, **pagination_params):
    # Yield batches of {'Key': ..., 'VersionId': ...} dicts covering every
    # object version AND delete marker, so delete_bucket() can fully empty a
    # versioned bucket.  When the backend does not implement
    # ListObjectVersions (S3 drop-ins) or denies it, fall back to the plain
    # key listing (batches of {'Key': ...} only).
    try:
        versioned_pg = s3.get_paginator('list_object_versions')
        for page in versioned_pg.paginate(**pagination_params):
            delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])]
            current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])]
            yield delete_markers + current_objects
    except botocore.exceptions.ClientError as e:
        # NOTE(review): if the error fires after some pages were already
        # yielded, the fallback re-lists from the beginning — confirm callers
        # tolerate duplicate keys in that case.
        if to_text(e.response['Error']['Code']) in IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']:
            for page in paginated_list(s3, **pagination_params):
                yield [{'Key': data['Key']} for data in page]
+
+
def list_keys(module, s3, bucket, prefix, marker, max_keys):
    """List object keys in *bucket* and exit the module with them as s3_keys.

    prefix/marker/max_keys map onto ListObjectsV2's Prefix/StartAfter/MaxKeys.
    Fix: only forward parameters that are not None — the previous version
    copied every value unconditionally, so a None prefix/marker/max_keys was
    sent to boto3, which rejects None parameter values.
    """
    pagination_params = {'Bucket': bucket}
    for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)):
        if param_value is not None:
            pagination_params[param_name] = param_value
    try:
        keys = sum(paginated_list(s3, **pagination_params), [])
        module.exit_json(msg="LIST operation complete", s3_keys=keys)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket))
+
+
def delete_bucket(module, s3, bucket):
    """Empty and delete *bucket*; True on success, False when it is absent."""
    if module.check_mode:
        module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
    try:
        if not bucket_check(module, s3, bucket):
            return False
        # Every object (and, on versioned buckets, every version and delete
        # marker) must be removed before the bucket itself can be deleted.
        for key_batch in paginated_versioned_list_with_fallback(s3, Bucket=bucket):
            if key_batch:
                s3.delete_objects(Bucket=bucket, Delete={'Objects': key_batch})
        s3.delete_bucket(Bucket=bucket)
        return True
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket)
+
+
def delete_key(module, s3, bucket, obj):
    """Delete a single object and exit the module reporting the change."""
    if module.check_mode:
        module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
    try:
        s3.delete_object(Bucket=bucket, Key=obj)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj)
    module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True)
+
+
def create_dirkey(module, s3, bucket, obj, encrypt):
    # Create a zero-byte object (key normally ends in '/') so S3 tooling
    # shows it as a "directory", apply any object ACLs, then exit.
    if module.check_mode:
        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
    try:
        params = {'Bucket': bucket, 'Key': obj, 'Body': b''}
        if encrypt:
            params['ServerSideEncryption'] = module.params['encryption_mode']
            # A KMS key id is only meaningful with SSE mode aws:kms.
            if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
                params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']

        s3.put_object(**params)
        for acl in module.params.get('permission'):
            s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
    except botocore.exceptions.ClientError as e:
        # S3 drop-in providers may not implement PutObjectAcl; warn instead
        # of failing so the directory key is still reported as created.
        if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
            module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning")
        else:
            module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
    module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True)
+
+
def path_check(path):
    """Return True when *path* exists on the local filesystem.

    Thin readability wrapper for the call sites in main(); delegates
    directly to os.path.exists instead of the old if/else-on-a-boolean.
    """
    return os.path.exists(path)
+
+
def option_in_extra_args(option):
    """Map a case- and dash-insensitive header name to its boto3 ExtraArgs key.

    Returns the canonical ExtraArgs key, or None when the header is not one
    of the upload arguments boto3 accepts.
    """
    allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition',
                          'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage',
                          'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl',
                          'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP',
                          'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption',
                          'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey',
                          'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'}
    return allowed_extra_args.get(option.replace('-', '').lower())
+
+
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
    # Upload local file *src* to bucket/obj, apply object ACLs, and exit the
    # module with a presigned PUT URL valid for *expiry* seconds.
    # NOTE(review): the 'headers' argument is accepted but never used in this
    # body — confirm whether custom upload headers are intentionally ignored.
    if module.check_mode:
        module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
    try:
        extra = {}
        if encrypt:
            extra['ServerSideEncryption'] = module.params['encryption_mode']
            # A KMS key id only applies with SSE mode aws:kms.
            if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
                extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
        if metadata:
            extra['Metadata'] = {}

            # determine object metadata and extra arguments: entries that
            # match a known boto3 ExtraArgs key become ExtraArgs; everything
            # else is stored as user metadata on the object.
            for option in metadata:
                extra_args_option = option_in_extra_args(option)
                if extra_args_option is not None:
                    extra[extra_args_option] = metadata[option]
                else:
                    extra['Metadata'][option] = metadata[option]

        if 'ContentType' not in extra:
            content_type = mimetypes.guess_type(src)[0]
            if content_type is None:
                # s3 default content type
                content_type = 'binary/octet-stream'
            extra['ContentType'] = content_type

        s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to complete PUT operation.")
    try:
        for acl in module.params.get('permission'):
            s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
    except botocore.exceptions.ClientError as e:
        # S3 drop-in providers may not implement PutObjectAcl; warn rather
        # than fail so the successful upload is still reported.
        if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
            module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
        else:
            module.fail_json_aws(e, msg="Unable to set object ACL")
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Unable to set object ACL")
    try:
        url = s3.generate_presigned_url(ClientMethod='put_object',
                                        Params={'Bucket': bucket, 'Key': obj},
                                        ExpiresIn=expiry)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to generate presigned URL")
    module.exit_json(msg="PUT operation complete", url=url, changed=True)
+
+
def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
    # Download bucket/obj to local path *dest*, retrying up to *retries*
    # extra times on transient errors, then exit the module.
    if module.check_mode:
        module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
    # retries is the number of loops; range/xrange needs to be one
    # more to get that count of loops.
    try:
        # Probe the object first so missing-key and SigV4 problems surface
        # with a clear message before the transfer starts.
        if version:
            key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
        else:
            key = s3.get_object(Bucket=bucket, Key=obj)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
            # main() catches this, rebuilds the client with sig_4=True and
            # calls this function again.
            raise Sigv4Required()
        elif e.response['Error']['Code'] not in ("403", "404"):
            # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
            # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
            module.fail_json_aws(e, msg="Could not find the key %s." % obj)
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Could not find the key %s." % obj)

    optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
    for x in range(0, retries + 1):
        try:
            s3.download_file(bucket, obj, dest, **optional_kwargs)
            module.exit_json(msg="GET operation complete", changed=True)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            # actually fail on last pass through the loop.
            if x >= retries:
                module.fail_json_aws(e, msg="Failed while downloading %s." % obj)
            # otherwise, try again, this may be a transient timeout.
        except SSLError as e:  # will ClientError catch SSLError?
            # actually fail on last pass through the loop.
            if x >= retries:
                module.fail_json_aws(e, msg="s3 download failed")
            # otherwise, try again, this may be a transient timeout.
+
+
def download_s3str(module, s3, bucket, obj, version=None, validate=True):
    """Fetch the object's body as text and exit the module with it in 'contents'."""
    if module.check_mode:
        module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
    get_kwargs = {'Bucket': bucket, 'Key': obj}
    if version:
        get_kwargs['VersionId'] = version
    try:
        contents = to_native(s3.get_object(**get_kwargs)["Body"].read())
        module.exit_json(msg="GET operation complete", contents=contents, changed=True)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
            # Caller reconnects with a SigV4-signed client and retries.
            raise Sigv4Required()
        module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
+
+
def get_download_url(module, s3, bucket, obj, expiry, changed=True):
    """Exit the module with a presigned GET URL for *obj* valid for *expiry* seconds."""
    try:
        url = s3.generate_presigned_url(
            ClientMethod='get_object',
            Params={'Bucket': bucket, 'Key': obj},
            ExpiresIn=expiry,
        )
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed while getting download url.")
    module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
+
+
def is_fakes3(s3_url):
    """Return True if s3_url has scheme fakes3:// or fakes3s://."""
    return s3_url is not None and urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+
+
def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False):
    # Build a boto3 S3 client for one of the three endpoint flavours this
    # module supports: Ceph RGW (explicit URL + rgw flag), fakes3
    # (fakes3:// or fakes3s:// scheme), or plain AWS/compatible endpoints.
    if s3_url and rgw:  # TODO - test this
        rgw = urlparse(s3_url)
        params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
    elif is_fakes3(s3_url):
        fakes3 = urlparse(s3_url)
        port = fakes3.port
        # fakes3s means TLS; fill in the scheme's default port when the URL
        # does not carry one explicitly.
        if fakes3.scheme == 'fakes3s':
            protocol = "https"
            if port is None:
                port = 443
        else:
            protocol = "http"
            if port is None:
                port = 80
        params = dict(module=module, conn_type='client', resource='s3', region=location,
                      endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
                      use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
    else:
        params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
        # SigV4 is mandatory for aws:kms-encrypted uploads, and is switched
        # on for downloads after Sigv4Required has been raised once.
        if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
            params['config'] = botocore.client.Config(signature_version='s3v4')
        elif module.params['mode'] in ('get', 'getstr') and sig_4:
            params['config'] = botocore.client.Config(signature_version='s3v4')
        # Dualstack (IPv4+IPv6) endpoints are merged into any config set
        # above rather than replacing it.
        if module.params['dualstack']:
            dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
            if 'config' in params:
                params['config'] = params['config'].merge(dualconf)
            else:
                params['config'] = dualconf
    return boto3_conn(**params)
+
+
def main():
    """Entry point for the aws_s3 module.

    Parses parameters, builds the boto3 S3 client (AWS, Ceph RGW or fakes3),
    then dispatches on ``mode`` to the helper implementing the requested
    operation.  Every branch terminates via module.exit_json/fail_json.

    Bug fix: in mode=put, ``keyrtn`` was never assigned when the target
    bucket had to be created first, so the subsequent ``if keyrtn`` check
    raised UnboundLocalError.  It is now initialised to False on that path
    (a freshly created bucket cannot already contain the key).
    """
    argument_spec = dict(
        bucket=dict(required=True),
        dest=dict(default=None, type='path'),
        encrypt=dict(default=True, type='bool'),
        encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
        expiry=dict(default=600, type='int', aliases=['expiration']),
        headers=dict(type='dict'),
        marker=dict(default=""),
        max_keys=dict(default=1000, type='int'),
        metadata=dict(type='dict'),
        mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
        object=dict(),
        permission=dict(type='list', default=['private']),
        version=dict(default=None),
        overwrite=dict(aliases=['force'], default='always'),
        prefix=dict(default=""),
        retries=dict(aliases=['retry'], type='int', default=0),
        s3_url=dict(aliases=['S3_URL']),
        dualstack=dict(default='no', type='bool'),
        rgw=dict(default='no', type='bool'),
        src=dict(),
        ignore_nonexistent_bucket=dict(default=False, type='bool'),
        encryption_kms_key_id=dict()
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[['mode', 'put', ['src', 'object']],
                     ['mode', 'get', ['dest', 'object']],
                     ['mode', 'getstr', ['object']],
                     ['mode', 'geturl', ['object']]],
    )

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = module.params.get('expiry')
    dest = module.params.get('dest', '')
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    dualstack = module.params.get('dualstack')
    rgw = module.params.get('rgw')
    src = module.params.get('src')
    ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')

    object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
    bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]

    # Legacy boolean values for overwrite map onto always/never.
    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    if overwrite == 'different' and not HAS_MD5:
        module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = module.params['object']
        # If there is a top level object, do nothing - if the object starts with /
        # remove the leading character to maintain compatibility with Ansible versions < 2.4
        if obj.startswith('/'):
            obj = obj[1:]

    # Bucket deletion does not require obj.  Prevents ambiguity with delobj.
    if obj and mode == "delete":
        module.fail_json(msg='Parameter obj cannot be used with mode=delete')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
        module.fail_json(msg='dualstack only applies to AWS S3')

    if dualstack and not module.botocore_at_least('1.4.45'):
        module.fail_json(msg='dualstack requires botocore >= 1.4.45')

    # rgw requires an explicit url
    if rgw and not s3_url:
        module.fail_json(msg='rgw flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to RGW, Walrus or fakes3
    if s3_url:
        for key in ['validate_certs', 'security_token', 'profile_name']:
            aws_connect_kwargs.pop(key, None)
    s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)

    validate = not ignore_nonexistent_bucket

    # separate types of ACLs
    bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
    object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
    error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
    if error_acl:
        module.fail_json(msg='Unknown permission specified: %s' % error_acl)

    # First, we check to see if the bucket exists, we get "bucket" returned.
    bucketrtn = bucket_check(module, s3, bucket, validate=validate)

    if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
        module.fail_json(msg="Source bucket cannot be found.")

    if mode == 'get':
        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        if keyrtn is False:
            if version:
                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

        if path_check(dest) and overwrite != 'always':
            if overwrite == 'never':
                module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
            if etag_compare(module, dest, s3, bucket, obj, version=version):
                module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)

        try:
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)
        except Sigv4Required:
            # Retry once with a SigV4-signed client.
            s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

    if mode == 'put':

        # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
        # these were separated into the variables bucket_acl and object_acl above

        if not path_check(src):
            module.fail_json(msg="Local object for PUT does not exist")

        if bucketrtn:
            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        else:
            # A bucket we are about to create cannot already hold the key;
            # initialising keyrtn here fixes the UnboundLocalError below.
            keyrtn = False
            # If the bucket doesn't exist we should create it.
            # only use valid bucket acls for create_bucket function
            module.params['permission'] = bucket_acl
            create_bucket(module, s3, bucket, location)

        if keyrtn and overwrite != 'always':
            if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj):
                # Return the download URL for the existing object
                get_download_url(module, s3, bucket, obj, expiry, changed=False)

        # only use valid object acls for the upload_s3file function
        module.params['permission'] = object_acl
        upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

    # Delete an object from a bucket, not the entire bucket
    if mode == 'delobj':
        if obj is None:
            module.fail_json(msg="object parameter is required")
        if bucket:
            deletertn = delete_key(module, s3, bucket, obj)
            if deletertn is True:
                module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Delete an entire bucket, including all objects in the bucket
    if mode == 'delete':
        if bucket:
            deletertn = delete_bucket(module, s3, bucket)
            if deletertn is True:
                module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Support for listing a set of keys
    if mode == 'list':
        exists = bucket_check(module, s3, bucket)

        # If the bucket does not exist then bail out
        if not exists:
            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)

        list_keys(module, s3, bucket, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':

        # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
        # these were separated above into the variables bucket_acl and object_acl

        if bucket and not obj:
            if bucketrtn:
                module.exit_json(msg="Bucket already exists.", changed=False)
            else:
                # only use valid bucket acls when creating the bucket
                module.params['permission'] = bucket_acl
                module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
        if bucket and obj:
            if obj.endswith('/'):
                dirobj = obj
            else:
                dirobj = obj + "/"
            if bucketrtn:
                if key_check(module, s3, bucket, dirobj):
                    module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
                else:
                    # setting valid object acls for the create_dirkey function
                    module.params['permission'] = object_acl
                    create_dirkey(module, s3, bucket, dirobj, encrypt)
            else:
                # only use valid bucket acls for the create_bucket function
                module.params['permission'] = bucket_acl
                create_bucket(module, s3, bucket, location)
                # only use valid object acls for the create_dirkey function
                module.params['permission'] = object_acl
                create_dirkey(module, s3, bucket, dirobj, encrypt)

    # Support for grabbing the time-expired URL for an object in S3/Walrus.
    if mode == 'geturl':
        if not bucket and not obj:
            module.fail_json(msg="Bucket and Object parameters must be set")

        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        if keyrtn:
            get_download_url(module, s3, bucket, obj, expiry)
        else:
            module.fail_json(msg="Key %s does not exist." % obj)

    if mode == 'getstr':
        if bucket and obj:
            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
            if keyrtn:
                try:
                    download_s3str(module, s3, bucket, obj, version=version)
                except Sigv4Required:
                    s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
                    download_s3str(module, s3, bucket, obj, version=version)
            elif version is not None:
                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

    module.exit_json(failed=False)
+
+
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/aws_step_functions_state_machine.py b/test/support/integration/plugins/modules/aws_step_functions_state_machine.py
new file mode 100644
index 0000000000..329ee4283d
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_step_functions_state_machine.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# Copyright (c) 2019, Tom De Keyser (@tdekeyser)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: aws_step_functions_state_machine
+
+short_description: Manage AWS Step Functions state machines
+
+version_added: "2.10"
+
+description:
+ - Create, update and delete state machines in AWS Step Functions.
+ - Calling the module in C(state=present) for an existing AWS Step Functions state machine
+ will attempt to update the state machine definition, IAM Role, or tags with the provided data.
+
+options:
+ name:
+ description:
+ - Name of the state machine
+ required: true
+ type: str
+ definition:
+ description:
+ - The Amazon States Language definition of the state machine. See
+ U(https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) for more
+ information on the Amazon States Language.
+ - "This parameter is required when C(state=present)."
+ type: json
+ role_arn:
+ description:
+ - The ARN of the IAM Role that will be used by the state machine for its executions.
+ - "This parameter is required when C(state=present)."
+ type: str
+ state:
+ description:
+ - Desired state for the state machine
+ default: present
+ choices: [ present, absent ]
+ type: str
+ tags:
+ description:
+ - A hash/dictionary of tags to add to the new state machine or to add/remove from an existing one.
+ type: dict
+ purge_tags:
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter.
+ If the I(tags) parameter is not set then tags will not be modified.
+ default: yes
+ type: bool
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+
+author:
+ - Tom De Keyser (@tdekeyser)
+'''
+
+EXAMPLES = '''
+# Create a new AWS Step Functions state machine
+- name: Setup HelloWorld state machine
+ aws_step_functions_state_machine:
+ name: "HelloWorldStateMachine"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: arn:aws:iam::987654321012:role/service-role/invokeLambdaStepFunctionsRole
+ tags:
+ project: helloWorld
+
+# Update an existing state machine
+- name: Change IAM Role and tags of HelloWorld state machine
+ aws_step_functions_state_machine:
+ name: HelloWorldStateMachine
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: arn:aws:iam::987654321012:role/service-role/anotherStepFunctionsRole
+ tags:
+ otherTag: aDifferentTag
+
+# Remove the AWS Step Functions state machine
+- name: Delete HelloWorld state machine
+ aws_step_functions_state_machine:
+ name: HelloWorldStateMachine
+ state: absent
+'''
+
+RETURN = '''
+state_machine_arn:
+ description: ARN of the AWS Step Functions state machine
+ type: str
+ returned: always
+'''
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, compare_aws_tags, boto3_tag_list_to_ansible_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
def manage_state_machine(state, sfn_client, module):
    """Reconcile the named state machine with the desired *state* and exit."""
    state_machine_arn = get_state_machine_arn(sfn_client, module)

    if state == 'present' and state_machine_arn is None:
        create(sfn_client, module)
    elif state == 'present':
        update(state_machine_arn, sfn_client, module)
    elif state == 'absent' and state_machine_arn is not None:
        remove(state_machine_arn, sfn_client, module)

    # Falling through means nothing needed doing: report no change (the
    # check_mode helper exits first when running in check mode).
    check_mode(module, msg='State is up-to-date.')
    module.exit_json(changed=False)
+
+
def create(sfn_client, module):
    """Create the state machine and exit with its new ARN (changed=True)."""
    check_mode(module, msg='State machine would be created.', changed=True)

    tags = module.params.get('tags')
    sfn_tags = []
    if tags:
        sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value')

    create_response = sfn_client.create_state_machine(
        name=module.params.get('name'),
        definition=module.params.get('definition'),
        roleArn=module.params.get('role_arn'),
        tags=sfn_tags
    )
    module.exit_json(changed=True, state_machine_arn=create_response.get('stateMachineArn'))
+
+
def remove(state_machine_arn, sfn_client, module):
    """Delete the state machine and exit reporting the change."""
    check_mode(module, msg='State machine would be deleted: {0}'.format(state_machine_arn), changed=True)
    sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
    module.exit_json(changed=True, state_machine_arn=state_machine_arn)
+
+
def update(state_machine_arn, sfn_client, module):
    # Reconcile definition, role and tags of an existing state machine.
    # Exits with changed=True when anything differed; otherwise returns so
    # the caller can report "up to date".
    tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module)

    if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove:
        check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True)

        # NOTE(review): all three API calls run whenever anything changed —
        # a pure tag change still re-sends the definition/role, and the
        # untag/tag calls are made even with empty lists.  Confirm the Step
        # Functions API tolerates this (or guard each call on its own diff).
        sfn_client.update_state_machine(
            stateMachineArn=state_machine_arn,
            definition=module.params.get('definition'),
            roleArn=module.params.get('role_arn')
        )
        # Remove stale tags before applying the new/changed ones.
        sfn_client.untag_resource(
            resourceArn=state_machine_arn,
            tagKeys=tags_to_remove
        )
        sfn_client.tag_resource(
            resourceArn=state_machine_arn,
            tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value')
        )

        module.exit_json(changed=True, state_machine_arn=state_machine_arn)
+
+
def compare_tags(state_machine_arn, sfn_client, module):
    """Return (tags_to_add, tags_to_remove) comparing desired tags to the live resource."""
    desired_tags = module.params.get('tags') or {}
    current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags')
    return compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), desired_tags, module.params.get('purge_tags'))
+
+
def params_changed(state_machine_arn, sfn_client, module):
    """Return True when the requested definition or IAM role ARN differs
    from what is currently deployed for the state machine."""
    deployed = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn)
    same_definition = deployed.get('definition') == module.params.get('definition')
    same_role = deployed.get('roleArn') == module.params.get('role_arn')
    return not (same_definition and same_role)
+
+
def get_state_machine_arn(sfn_client, module):
    """Return the ARN of the state machine whose name matches the module's
    'name' parameter, or None when no such state machine exists."""
    target_name = module.params.get('name')
    for candidate in sfn_client.list_state_machines(aws_retry=True).get('stateMachines'):
        if candidate.get('name') == target_name:
            return candidate.get('stateMachineArn')
+
+
def check_mode(module, msg='', changed=False):
    """When running in check mode, exit immediately reporting what would happen."""
    if not module.check_mode:
        return
    module.exit_json(changed=changed, output=msg)
+
+
def main():
    """Module entry point: build the argument spec, connect to Step
    Functions, and apply the requested state."""
    module_args = dict(
        name=dict(type='str', required=True),
        definition=dict(type='json'),
        role_arn=dict(type='str'),
        state=dict(choices=['present', 'absent'], default='present'),
        tags=dict(default=None, type='dict'),
        purge_tags=dict(default=True, type='bool'),
    )
    module = AnsibleAWSModule(
        argument_spec=module_args,
        # definition and role_arn are only mandatory when creating/updating.
        required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])],
        supports_check_mode=True
    )

    # Jittered backoff rides out Step Functions API throttling.
    sfn_client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5))
    state = module.params.get('state')

    try:
        manage_state_machine(state, sfn_client, module)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to manage state machine')
+
+
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/aws_step_functions_state_machine_execution.py b/test/support/integration/plugins/modules/aws_step_functions_state_machine_execution.py
new file mode 100644
index 0000000000..a6e0d7182d
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_step_functions_state_machine_execution.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# Copyright (c) 2019, Prasad Katti (@prasadkatti)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: aws_step_functions_state_machine_execution
+
+short_description: Start or stop execution of an AWS Step Functions state machine.
+
+version_added: "2.10"
+
+description:
+ - Start or stop execution of a state machine in AWS Step Functions.
+
+options:
+ action:
+ description: Desired action (start or stop) for a state machine execution.
+ default: start
+ choices: [ start, stop ]
+ type: str
+ name:
+ description: Name of the execution.
+ type: str
+ execution_input:
+ description: The JSON input data for the execution.
+ type: json
+ default: {}
+ state_machine_arn:
+ description: The ARN of the state machine that will be executed.
+ type: str
+ execution_arn:
+ description: The ARN of the execution you wish to stop.
+ type: str
+ cause:
+ description: A detailed explanation of the cause for stopping the execution.
+ type: str
+ default: ''
+ error:
+ description: The error code of the failure to pass in when stopping the execution.
+ type: str
+ default: ''
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+
+author:
+ - Prasad Katti (@prasadkatti)
+'''
+
+EXAMPLES = '''
+- name: Start an execution of a state machine
+ aws_step_functions_state_machine_execution:
+ name: an_execution_name
+ execution_input: '{ "IsHelloWorldExample": true }'
+ state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine"
+
+- name: Stop an execution of a state machine
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
+ cause: "cause of task failure"
+ error: "error code of the failure"
+'''
+
+RETURN = '''
+execution_arn:
+ description: ARN of the AWS Step Functions state machine execution.
+ type: str
+ returned: if action == start and changed == True
+ sample: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
+start_date:
+ description: The date the execution is started.
+ type: str
+ returned: if action == start and changed == True
+ sample: "2019-11-02T22:39:49.071000-07:00"
+stop_date:
+ description: The date the execution is stopped.
+ type: str
+ returned: if action == stop
+ sample: "2019-11-02T22:39:49.071000-07:00"
+'''
+
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
def start_execution(module, sfn_client):
    '''
    Start a state machine execution unless one with the same name already exists.

    Step Functions treats execution names as idempotency keys, so if an
    execution with the requested name is found the module exits unchanged
    instead of calling start_execution again.
    '''

    state_machine_arn = module.params.get('state_machine_arn')
    name = module.params.get('name')
    execution_input = module.params.get('execution_input')

    try:
        # list_executions is eventually consistent; a very recently started
        # execution may not show up yet, so the ExecutionAlreadyExists check
        # below also covers that race.
        page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn)

        for execution in page_iterators.build_full_result()['executions']:
            if name == execution['name']:
                check_mode(module, msg='State machine execution already exists.', changed=False)
                module.exit_json(changed=False)

        check_mode(module, msg='State machine execution would be started.', changed=True)
        res_execution = sfn_client.start_execution(
            stateMachineArn=state_machine_arn,
            name=name,
            input=execution_input
        )
    except (ClientError, BotoCoreError) as e:
        # Only ClientError carries a parsed error response; reading e.response
        # on a BotoCoreError would raise AttributeError and mask the real error.
        if isinstance(e, ClientError) and e.response['Error']['Code'] == 'ExecutionAlreadyExists':
            # The execution appeared between our list_executions check and the
            # start_execution call - treat it as unchanged.
            module.exit_json(changed=False)
        module.fail_json_aws(e, msg="Failed to start execution.")

    module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution))
+
+
def stop_execution(module, sfn_client):
    '''Stop a running execution; a no-op when the execution is not RUNNING.'''

    execution_arn = module.params.get('execution_arn')
    stop_kwargs = dict(
        executionArn=execution_arn,
        cause=module.params.get('cause'),
        error=module.params.get('error'),
    )

    try:
        # describe_execution is eventually consistent
        status = sfn_client.describe_execution(executionArn=execution_arn)['status']
        if status != 'RUNNING':
            check_mode(module, msg='State machine execution is not running.', changed=False)
            module.exit_json(changed=False)

        check_mode(module, msg='State machine execution would be stopped.', changed=True)
        stop_result = sfn_client.stop_execution(**stop_kwargs)
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to stop execution.")

    module.exit_json(changed=True, **camel_dict_to_snake_dict(stop_result))
+
+
def check_mode(module, msg='', changed=False):
    # When running in check mode, short-circuit with the predicted result.
    if not module.check_mode:
        return
    module.exit_json(changed=changed, output=msg)
+
+
def main():
    # Argument spec: 'start' requires name + state_machine_arn,
    # 'stop' requires execution_arn (enforced via required_if below).
    module_args = dict(
        action=dict(choices=['start', 'stop'], default='start'),
        name=dict(type='str'),
        execution_input=dict(type='json', default={}),
        state_machine_arn=dict(type='str'),
        cause=dict(type='str', default=''),
        error=dict(type='str', default=''),
        execution_arn=dict(type='str')
    )
    module = AnsibleAWSModule(
        argument_spec=module_args,
        required_if=[('action', 'start', ['name', 'state_machine_arn']),
                     ('action', 'stop', ['execution_arn']),
                     ],
        supports_check_mode=True
    )

    sfn_client = module.client('stepfunctions')

    # Dispatch to the requested operation; each helper exits the module itself.
    action = module.params.get('action')
    if action == "start":
        start_execution(module, sfn_client)
    else:
        stop_execution(module, sfn_client)


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_appserviceplan.py b/test/support/integration/plugins/modules/azure_rm_appserviceplan.py
new file mode 100644
index 0000000000..ee871c352b
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_appserviceplan.py
@@ -0,0 +1,379 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_appserviceplan
+version_added: "2.7"
+short_description: Manage App Service Plan
+description:
+ - Create, update and delete instance of App Service Plan.
+
+options:
+ resource_group:
+ description:
+ - Name of the resource group to which the resource belongs.
+ required: True
+
+ name:
+ description:
+ - Unique name of the app service plan to create or update.
+ required: True
+
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+
+ sku:
+ description:
+ - The pricing tiers, e.g., C(F1), C(D1), C(B1), C(B2), C(B3), C(S1), C(P1), C(P1V2) etc.
+ - Please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/plans/) for more detail.
+ - For Linux app service plan, please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/) for more detail.
+ is_linux:
+ description:
+ - Describe whether to host webapp on Linux worker.
+ type: bool
+ default: false
+
+ number_of_workers:
+ description:
+ - Describe number of workers to be allocated.
+
+ state:
+ description:
+ - Assert the state of the app service plan.
+ - Use C(present) to create or update an app service plan and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Yunge Zhu (@yungezz)
+
+'''
+
+EXAMPLES = '''
+ - name: Create a windows app service plan
+ azure_rm_appserviceplan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ location: eastus
+ sku: S1
+
+ - name: Create a linux app service plan
+ azure_rm_appserviceplan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ location: eastus
+ sku: S1
+ is_linux: true
+ number_of_workers: 1
+
+ - name: update sku of existing windows app service plan
+ azure_rm_appserviceplan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ location: eastus
+ sku: S2
+'''
+
+RETURN = '''
+azure_appserviceplan:
+ description: Facts about the current state of the app service plan.
+ returned: always
+ type: dict
+ sample: {
+ "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppPlan"
+ }
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrestazure.azure_operation import AzureOperationPoller
+ from msrest.serialization import Model
+ from azure.mgmt.web.models import (
+ app_service_plan, AppServicePlan, SkuDescription
+ )
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+def _normalize_sku(sku):
+ if sku is None:
+ return sku
+
+ sku = sku.upper()
+ if sku == 'FREE':
+ return 'F1'
+ elif sku == 'SHARED':
+ return 'D1'
+ return sku
+
+
def get_sku_name(tier):
    """Translate a pricing tier code (e.g. 'S1') into its sku family name, or None."""
    code = tier.upper()
    families = (
        ('FREE', ('F1', 'FREE')),
        ('SHARED', ('D1', 'SHARED')),
        ('BASIC', ('B1', 'B2', 'B3', 'BASIC')),
        ('STANDARD', ('S1', 'S2', 'S3')),
        ('PREMIUM', ('P1', 'P2', 'P3')),
        ('PREMIUMV2', ('P1V2', 'P2V2', 'P3V2')),
    )
    for family, codes in families:
        if code in codes:
            return family
    return None
+
+
def appserviceplan_to_dict(plan):
    """Flatten an AppServicePlan SDK object into a plain dictionary."""
    sku_info = dict(
        name=plan.sku.name,
        size=plan.sku.size,
        tier=plan.sku.tier,
        family=plan.sku.family,
        capacity=plan.sku.capacity,
    )
    return dict(
        id=plan.id,
        name=plan.name,
        kind=plan.kind,
        location=plan.location,
        reserved=plan.reserved,
        # the SDK's 'reserved' flag is how Linux plans are marked
        is_linux=plan.reserved,
        provisioning_state=plan.provisioning_state,
        status=plan.status,
        target_worker_count=plan.target_worker_count,
        sku=sku_info,
        resource_group=plan.resource_group,
        number_of_sites=plan.number_of_sites,
        tags=plan.tags if plan.tags else None,
    )
+
+
class AzureRMAppServicePlans(AzureRMModuleBase):
    """Configuration class for an Azure RM App Service Plan resource.

    Creates, updates or deletes an App Service Plan, comparing the requested
    sku / worker count / tags against the existing plan to decide whether a
    change is needed. Supports check mode.
    """

    def __init__(self):
        # Ansible argument spec; tag handling is added by AzureRMModuleBase.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            location=dict(
                type='str'
            ),
            sku=dict(
                type='str'
            ),
            is_linux=dict(
                type='bool',
                default=False
            ),
            # NOTE(review): declared as 'str' although it is numeric;
            # exec_module converts with int() before comparing capacities.
            number_of_workers=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # Instance mirrors of the argument spec, filled in exec_module.
        self.resource_group = None
        self.name = None
        self.location = None

        self.sku = None
        self.is_linux = None
        self.number_of_workers = 1

        self.tags = None

        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_appserviceplan=None)
        )
        self.state = None

        super(AzureRMAppServicePlans, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                     supports_check_mode=True,
                                                     supports_tags=True)

    def exec_module(self, **kwargs):
        """Main module execution method"""

        # Copy recognised parameters (plus tags) onto the instance.
        # NOTE(review): 'if kwargs[key]' skips falsy values (False, 0, ''),
        # so e.g. is_linux=False relies on the defaults set in __init__.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if kwargs[key]:
                setattr(self, key, kwargs[key])

        old_response = None
        response = None
        to_be_updated = False

        # set location: default to the resource group's location
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            self.location = resource_group.location

        # get app service plan (False when it does not exist)
        old_response = self.get_plan()

        # if not existing
        if not old_response:
            self.log("App Service plan doesn't exist")

            if self.state == "present":
                to_be_updated = True

                # sku is mandatory on creation only
                if not self.sku:
                    self.fail('Please specify sku in plan when creation')

        else:
            # existing app service plan, do update
            self.log("App Service Plan already exists")

            if self.state == 'present':
                self.log('Result: {0}'.format(old_response))

                update_tags, newtags = self.update_tags(old_response.get('tags', dict()))

                if update_tags:
                    to_be_updated = True
                    self.tags = newtags

                # check if sku changed
                if self.sku and _normalize_sku(self.sku) != old_response['sku']['size']:
                    to_be_updated = True

                # check if number_of_workers changed
                if self.number_of_workers and int(self.number_of_workers) != old_response['sku']['capacity']:
                    to_be_updated = True

                # 'reserved' (Linux vs Windows) cannot be changed in place
                if self.is_linux and self.is_linux != old_response['reserved']:
                    self.fail("Operation not allowed: cannot update reserved of app service plan.")

        if old_response:
            self.results['id'] = old_response['id']

        if to_be_updated:
            self.log('Need to Create/Update app service plan')
            self.results['changed'] = True

            # In check mode, report the pending change without touching Azure.
            if self.check_mode:
                return self.results

            response = self.create_or_update_plan()
            self.results['id'] = response['id']

        if self.state == 'absent' and old_response:
            self.log("Delete app service plan")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_plan()

            self.log('App service plan instance deleted')

        return self.results

    def get_plan(self):
        '''
        Gets app service plan
        :return: deserialized app service plan dictionary, or False when absent
        '''
        self.log("Get App Service Plan {0}".format(self.name))

        try:
            response = self.web_client.app_service_plans.get(self.resource_group, self.name)
            if response:
                self.log("Response : {0}".format(response))
                self.log("App Service Plan : {0} found".format(response.name))

                return appserviceplan_to_dict(response)
        except CloudError as ex:
            # Treat any CloudError (typically 404) as "plan does not exist".
            self.log("Didn't find app service plan {0} in resource group {1}".format(self.name, self.resource_group))

        return False

    def create_or_update_plan(self):
        '''
        Creates app service plan
        :return: deserialized app service plan dictionary
        '''
        self.log("Create App Service Plan {0}".format(self.name))

        try:
            # normalize sku
            sku = _normalize_sku(self.sku)

            sku_def = SkuDescription(tier=get_sku_name(
                sku), name=sku, capacity=self.number_of_workers)
            plan_def = AppServicePlan(
                location=self.location, app_service_plan_name=self.name, sku=sku_def, reserved=self.is_linux, tags=self.tags if self.tags else None)

            response = self.web_client.app_service_plans.create_or_update(self.resource_group, self.name, plan_def)

            # create_or_update may return a long-running-operation poller;
            # wait for it to complete before serializing the result.
            if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)

            self.log("Response : {0}".format(response))

            return appserviceplan_to_dict(response)
        except CloudError as ex:
            self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))

    def delete_plan(self):
        '''
        Deletes specified App service plan in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the App service plan {0}".format(self.name))
        try:
            response = self.web_client.app_service_plans.delete(resource_group_name=self.resource_group,
                                                                name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete App service plan.')
            self.fail(
                "Error deleting the App service plan : {0}".format(str(e)))

        return True
+
+
def main():
    """Main execution"""
    # Instantiating the class runs the module via AzureRMModuleBase.
    AzureRMAppServicePlans()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_functionapp.py b/test/support/integration/plugins/modules/azure_rm_functionapp.py
new file mode 100644
index 0000000000..0c372a88de
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_functionapp.py
@@ -0,0 +1,421 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Thomas Stringer <tomstr@microsoft.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_functionapp
+version_added: "2.4"
+short_description: Manage Azure Function Apps
+description:
+ - Create, update or delete an Azure Function App.
+options:
+ resource_group:
+ description:
+ - Name of resource group.
+ required: true
+ aliases:
+ - resource_group_name
+ name:
+ description:
+ - Name of the Azure Function App.
+ required: true
+ location:
+ description:
+ - Valid Azure location. Defaults to location of the resource group.
+ plan:
+ description:
+ - App service plan.
+ - It can be name of existing app service plan in same resource group as function app.
+ - It can be resource id of existing app service plan.
+ - Resource id. For example /subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>.
+ - It can be a dict which contains C(name), C(resource_group).
+ - C(name). Name of app service plan.
+ - C(resource_group). Resource group name of app service plan.
+ version_added: "2.8"
+ container_settings:
+ description: Web app container settings.
+ suboptions:
+ name:
+ description:
+ - Name of container. For example "imagename:tag".
+ registry_server_url:
+ description:
+ - Container registry server url. For example C(mydockerregistry.io).
+ registry_server_user:
+ description:
+ - The container registry server user name.
+ registry_server_password:
+ description:
+ - The container registry server password.
+ version_added: "2.8"
+ storage_account:
+ description:
+ - Name of the storage account to use.
+ required: true
+ aliases:
+ - storage
+ - storage_account_name
+ app_settings:
+ description:
+ - Dictionary containing application settings.
+ state:
+ description:
+ - Assert the state of the Function App. Use C(present) to create or update a Function App and C(absent) to delete.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Thomas Stringer (@trstringer)
+'''
+
+EXAMPLES = '''
+- name: Create a function app
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ storage_account: myStorageAccount
+
+- name: Create a function app with app settings
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ storage_account: myStorageAccount
+ app_settings:
+ setting1: value1
+ setting2: value2
+
+- name: Create container based function app
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ storage_account: myStorageAccount
+ plan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ container_settings:
+ name: httpd
+ registry_server_url: index.docker.io
+
+- name: Delete a function app
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ state: absent
+'''
+
+RETURN = '''
+state:
+ description:
+ - Current state of the Azure Function App.
+ returned: success
+ type: dict
+ example:
+ id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myFunctionApp
+ name: myfunctionapp
+ kind: functionapp
+ location: East US
+ type: Microsoft.Web/sites
+ state: Running
+ host_names:
+ - myfunctionapp.azurewebsites.net
+ repository_site_name: myfunctionapp
+ usage_state: Normal
+ enabled: true
+ enabled_host_names:
+ - myfunctionapp.azurewebsites.net
+ - myfunctionapp.scm.azurewebsites.net
+ availability_state: Normal
+ host_name_ssl_states:
+ - name: myfunctionapp.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Standard
+ - name: myfunctionapp.scm.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Repository
+ server_farm_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/EastUSPlan
+ reserved: false
+ last_modified_time_utc: 2017-08-22T18:54:01.190Z
+ scm_site_also_stopped: false
+ client_affinity_enabled: true
+ client_cert_enabled: false
+ host_names_disabled: false
+ outbound_ip_addresses: ............
+ container_size: 1536
+ daily_memory_time_quota: 0
+ resource_group: myResourceGroup
+ default_host_name: myfunctionapp.azurewebsites.net
+''' # NOQA
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.mgmt.web.models import (
+ site_config, app_service_plan, Site, SiteConfig, NameValuePair, SiteSourceControl,
+ AppServicePlan, SkuDescription
+ )
+ from azure.mgmt.resource.resources import ResourceManagementClient
+ from msrest.polling import LROPoller
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+container_settings_spec = dict(
+ name=dict(type='str', required=True),
+ registry_server_url=dict(type='str'),
+ registry_server_user=dict(type='str'),
+ registry_server_password=dict(type='str', no_log=True)
+)
+
+
class AzureRMFunctionApp(AzureRMModuleBase):
    """Create, update or delete an Azure Function App, optionally container-based."""

    def __init__(self):

        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True, aliases=['resource_group_name']),
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            location=dict(type='str'),
            storage_account=dict(
                type='str',
                aliases=['storage', 'storage_account_name']
            ),
            app_settings=dict(type='dict'),
            # 'raw' accepts either a plan resource id string or a dict with
            # name/resource_group keys (resolved in exec_module).
            plan=dict(
                type='raw'
            ),
            container_settings=dict(
                type='dict',
                options=container_settings_spec
            )
        )

        self.results = dict(
            changed=False,
            state=dict()
        )

        self.resource_group = None
        self.name = None
        self.state = None
        self.location = None
        self.storage_account = None
        self.app_settings = None
        self.plan = None
        self.container_settings = None

        # storage_account is only needed when creating/updating
        required_if = [('state', 'present', ['storage_account'])]

        super(AzureRMFunctionApp, self).__init__(
            self.module_arg_spec,
            supports_check_mode=True,
            required_if=required_if
        )

    def exec_module(self, **kwargs):
        """Create/update or delete the function app per the 'state' parameter."""

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        if self.app_settings is None:
            self.app_settings = dict()

        try:
            resource_group = self.rm_client.resource_groups.get(self.resource_group)
        except CloudError:
            self.fail('Unable to retrieve resource group')

        # Default the location to the resource group's location.
        self.location = self.location or resource_group.location

        try:
            function_app = self.web_client.web_apps.get(
                resource_group_name=self.resource_group,
                name=self.name
            )
            # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
            exists = function_app is not None
        except CloudError as exc:
            exists = False

        if self.state == 'absent':
            if exists:
                if self.check_mode:
                    self.results['changed'] = True
                    return self.results
                try:
                    self.web_client.web_apps.delete(
                        resource_group_name=self.resource_group,
                        name=self.name
                    )
                    self.results['changed'] = True
                except CloudError as exc:
                    self.fail('Failure while deleting web app: {0}'.format(exc))
            else:
                # Already absent - nothing to do.
                self.results['changed'] = False
        else:
            kind = 'functionapp'
            linux_fx_version = None
            if self.container_settings and self.container_settings.get('name'):
                # Container-based function apps run on Linux and encode the
                # image reference in the linux_fx_version 'DOCKER|...' string.
                kind = 'functionapp,linux,container'
                linux_fx_version = 'DOCKER|'
                if self.container_settings.get('registry_server_url'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
                    linux_fx_version += self.container_settings['registry_server_url'] + '/'
                linux_fx_version += self.container_settings['name']
                if self.container_settings.get('registry_server_user'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings.get('registry_server_user')

                if self.container_settings.get('registry_server_password'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings.get('registry_server_password')

            # Keep the existing plan when none was requested.
            # NOTE(review): if web_apps.get raised CloudError above,
            # function_app is unbound here and this line would raise
            # NameError - verify against the SDK version in use.
            if not self.plan and function_app:
                self.plan = function_app.server_farm_id

            if not exists:
                function_app = Site(
                    location=self.location,
                    kind=kind,
                    site_config=SiteConfig(
                        app_settings=self.aggregated_app_settings(),
                        scm_type='LocalGit'
                    )
                )
                self.results['changed'] = True
            else:
                self.results['changed'], function_app = self.update(function_app)

            # get app service plan
            if self.plan:
                if isinstance(self.plan, dict):
                    # Build the full resource id from the name/resource_group dict.
                    self.plan = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Web/serverfarms/{2}".format(
                        self.subscription_id,
                        self.plan.get('resource_group', self.resource_group),
                        self.plan.get('name')
                    )
                function_app.server_farm_id = self.plan

            # set linux fx version
            if linux_fx_version:
                function_app.site_config.linux_fx_version = linux_fx_version

            if self.check_mode:
                # Report the would-be state without calling Azure.
                self.results['state'] = function_app.as_dict()
            elif self.results['changed']:
                try:
                    new_function_app = self.web_client.web_apps.create_or_update(
                        resource_group_name=self.resource_group,
                        name=self.name,
                        site_envelope=function_app
                    ).result()
                    self.results['state'] = new_function_app.as_dict()
                except CloudError as exc:
                    self.fail('Error creating or updating web app: {0}'.format(exc))

        return self.results

    def update(self, source_function_app):
        """Update the Site object if there are any changes"""

        source_app_settings = self.web_client.web_apps.list_application_settings(
            resource_group_name=self.resource_group,
            name=self.name
        )

        changed, target_app_settings = self.update_app_settings(source_app_settings.properties)

        source_function_app.site_config = SiteConfig(
            app_settings=target_app_settings,
            scm_type='LocalGit'
        )

        return changed, source_function_app

    def update_app_settings(self, source_app_settings):
        """Update app settings.

        Returns (changed, target_app_settings) where 'changed' is True when the
        desired settings differ from the currently deployed ones.
        """

        target_app_settings = self.aggregated_app_settings()
        target_app_settings_dict = dict([(i.name, i.value) for i in target_app_settings])
        return target_app_settings_dict != source_app_settings, target_app_settings

    def necessary_functionapp_settings(self):
        """Construct the necessary app settings required for an Azure Function App"""

        function_app_settings = []

        if self.container_settings is None:
            # Classic (non-container) function app: v1 runtime plus the
            # storage/dashboard connection strings.
            for key in ['AzureWebJobsStorage', 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING', 'AzureWebJobsDashboard']:
                function_app_settings.append(NameValuePair(name=key, value=self.storage_connection_string))
            function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~1'))
            function_app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='6.5.0'))
            function_app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=self.name))
        else:
            # Container-based function app: v2 runtime, app-service storage off.
            function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
            function_app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE', value=False))
            function_app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=self.storage_connection_string))

        return function_app_settings

    def aggregated_app_settings(self):
        """Combine both system and user app settings; user values override system ones."""

        function_app_settings = self.necessary_functionapp_settings()
        for app_setting_key in self.app_settings:
            found_setting = None
            for s in function_app_settings:
                if s.name == app_setting_key:
                    found_setting = s
                    break
            if found_setting:
                found_setting.value = self.app_settings[app_setting_key]
            else:
                function_app_settings.append(NameValuePair(
                    name=app_setting_key,
                    value=self.app_settings[app_setting_key]
                ))
        return function_app_settings

    @property
    def storage_connection_string(self):
        """Construct the storage account connection string"""

        return 'DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}'.format(
            self.storage_account,
            self.storage_key
        )

    @property
    def storage_key(self):
        """Retrieve the storage account key (first key of the account)."""

        return self.storage_client.storage_accounts.list_keys(
            resource_group_name=self.resource_group,
            account_name=self.storage_account
        ).keys[0].value
+
+
def main():
    """Main function execution"""

    # Instantiating the class runs the module via AzureRMModuleBase.
    AzureRMFunctionApp()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_functionapp_info.py b/test/support/integration/plugins/modules/azure_rm_functionapp_info.py
new file mode 100644
index 0000000000..0cd5b6f60b
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_functionapp_info.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Thomas Stringer, <tomstr@microsoft.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_functionapp_info
+version_added: "2.9"
+short_description: Get Azure Function App facts
+description:
+ - Get facts for one Azure Function App or all Function Apps within a resource group.
+options:
+ name:
+ description:
+ - Only show results for a specific Function App.
+ resource_group:
+ description:
+ - Limit results to a resource group. Required when filtering by name.
+ aliases:
+ - resource_group_name
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Thomas Stringer (@trstringer)
+'''
+
+EXAMPLES = '''
+ - name: Get facts for one Function App
+ azure_rm_functionapp_info:
+ resource_group: myResourceGroup
+ name: myfunctionapp
+
+ - name: Get facts for all Function Apps in a resource group
+ azure_rm_functionapp_info:
+ resource_group: myResourceGroup
+
+ - name: Get facts for all Function Apps by tags
+ azure_rm_functionapp_info:
+ tags:
+ - testing
+'''
+
+RETURN = '''
+azure_functionapps:
+ description:
+ - List of Azure Function Apps dicts.
+ returned: always
+ type: list
+ example:
+ id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/sites/myfunctionapp
+ name: myfunctionapp
+ kind: functionapp
+ location: East US
+ type: Microsoft.Web/sites
+ state: Running
+ host_names:
+ - myfunctionapp.azurewebsites.net
+ repository_site_name: myfunctionapp
+ usage_state: Normal
+ enabled: true
+ enabled_host_names:
+ - myfunctionapp.azurewebsites.net
+ - myfunctionapp.scm.azurewebsites.net
+ availability_state: Normal
+ host_name_ssl_states:
+ - name: myfunctionapp.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Standard
+ - name: myfunctionapp.scm.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Repository
+ server_farm_id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/serverfarms/EastUSPlan
+ reserved: false
+ last_modified_time_utc: 2017-08-22T18:54:01.190Z
+ scm_site_also_stopped: false
+ client_affinity_enabled: true
+ client_cert_enabled: false
+ host_names_disabled: false
+ outbound_ip_addresses: ............
+ container_size: 1536
+ daily_memory_time_quota: 0
+ resource_group: myResourceGroup
+ default_host_name: myfunctionapp.azurewebsites.net
+'''
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+except Exception:
+ # This is handled in azure_rm_common
+ pass
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+
class AzureRMFunctionAppInfo(AzureRMModuleBase):
    """Facts module: collect Azure Function App properties as plain dicts."""

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str', aliases=['resource_group_name']),
            tags=dict(type='list'),
        )

        self.results = dict(
            changed=False,
            ansible_info=dict(azure_functionapps=[])
        )

        self.name = None
        self.resource_group = None
        self.tags = None

        super(AzureRMFunctionAppInfo, self).__init__(
            self.module_arg_spec,
            supports_tags=False,
            facts_module=True
        )

    def exec_module(self, **kwargs):
        """Dispatch to the narrowest listing that matches the given filters."""

        is_old_facts = self.module._name == 'azure_rm_functionapp_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_functionapp_facts' module has been renamed to 'azure_rm_functionapp_info'", version='2.13')

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")

        if self.name:
            self.results['ansible_info']['azure_functionapps'] = self.get_functionapp()
        elif self.resource_group:
            self.results['ansible_info']['azure_functionapps'] = self.list_resource_group()
        else:
            self.results['ansible_info']['azure_functionapps'] = self.list_all()

        return self.results

    def get_functionapp(self):
        """Return a single-item list for the named app, or [] when absent."""
        self.log('Get properties for Function App {0}'.format(self.name))
        function_app = None

        try:
            function_app = self.web_client.web_apps.get(
                self.resource_group,
                self.name
            )
        except CloudError:
            # Not found - fall through and return an empty result.
            pass

        if function_app and self.has_tags(function_app.tags, self.tags):
            return [function_app.as_dict()]

        # Bug fix: previously returned [result] with result == [], i.e. [[]],
        # which put a spurious empty list in the facts output.
        return []

    def list_resource_group(self):
        """List all Function Apps in self.resource_group, filtered by tags."""
        self.log('List items')
        try:
            response = self.web_client.web_apps.list_by_resource_group(self.resource_group)
        except Exception as exc:
            self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(item.as_dict())
        return results

    def list_all(self):
        """List all Function Apps in the subscription, filtered by tags."""
        self.log('List all items')
        try:
            # Bug fix: previously called list_by_resource_group(self.resource_group)
            # here, but resource_group is None on this code path, so the call
            # could not enumerate the whole subscription.
            response = self.web_client.web_apps.list()
        except Exception as exc:
            self.fail("Error listing all items - {0}".format(str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(item.as_dict())
        return results
+
+
def main():
    """Module entry point: instantiating the class triggers execution."""
    AzureRMFunctionAppInfo()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py
new file mode 100644
index 0000000000..212cf7959d
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbconfiguration
+version_added: "2.8"
+short_description: Manage Configuration instance
+description:
+ - Create, update and delete instance of Configuration.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ required: True
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ name:
+ description:
+ - The name of the server configuration.
+ required: True
+ value:
+ description:
+ - Value of the configuration.
+ state:
+ description:
+ - Assert the state of the MariaDB configuration. Use C(present) to update setting, or C(absent) to reset to default value.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+'''
+
+EXAMPLES = '''
+  - name: Update MariaDB Server setting
+ azure_rm_mariadbconfiguration:
+ resource_group: myResourceGroup
+ server_name: myServer
+ name: event_scheduler
+ value: "ON"
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myServer/confi
+ gurations/event_scheduler"
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
try:
    from msrestazure.azure_exceptions import CloudError
    from msrest.polling import LROPoller
    # Consistency fix: the original imported MariaDBManagementClient from
    # azure.mgmt.rdbms.mysql, which does not export that class (the import
    # failed silently via the except below). Every sibling MariaDB module in
    # this directory imports it from azure.mgmt.rdbms.mariadb.
    from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
    from msrest.serialization import Model
except ImportError:
    # This is handled in azure_rm_common
    pass
+
+
class Actions:
    """Enumeration of the operations exec_module may decide to perform."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
+
+
class AzureRMMariaDbConfiguration(AzureRMModuleBase):
    """Manage a single MariaDB server configuration setting.

    state=present sets the value (source 'user-override'); state=absent
    resets a user-overridden setting back to its system default.
    """

    def __init__(self):
        # Argument spec mirrors the DOCUMENTATION block above.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            value=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # Populated from the validated module parameters in exec_module().
        self.resource_group = None
        self.server_name = None
        self.name = None
        self.value = None

        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMMariaDbConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                          supports_check_mode=True,
                                                          supports_tags=False)

    def exec_module(self, **kwargs):
        """Decide whether the setting must be set, reset, or left alone.

        Returns the standard Ansible results dict; 'id' is included whenever
        the configuration exists after the operation.
        """

        # Copy validated parameters onto matching instance attributes.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])

        old_response = None
        response = None

        old_response = self.get_configuration()

        if not old_response:
            self.log("Configuration instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Configuration instance already exists")
            # Only a user-overridden setting gets "deleted" (reset); a setting
            # already at its system default needs no action for state=absent.
            if self.state == 'absent' and old_response['source'] == 'user-override':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if Configuration instance has to be deleted or may be updated")
                if self.value != old_response.get('value'):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the Configuration instance")

            # In check mode, report the pending change without touching Azure.
            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_configuration()

            self.results['changed'] = True
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Configuration instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_configuration()
        else:
            self.log("Configuration instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]

        return self.results

    def create_update_configuration(self):
        """Set the configuration value via the Azure API and return the
        resulting resource as a dict. Fails the module on CloudError."""
        self.log("Creating / Updating the Configuration instance {0}".format(self.name))

        try:
            response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
                                                                           server_name=self.server_name,
                                                                           configuration_name=self.name,
                                                                           value=self.value,
                                                                           source='user-override')
            # create_or_update may return a long-running-operation poller;
            # block until it completes.
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

        except CloudError as exc:
            self.log('Error attempting to create the Configuration instance.')
            self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_configuration(self):
        """Reset the configuration to its server default.

        There is no delete API for configurations; the reset is done by
        re-issuing create_or_update with source='system-default'.
        """
        self.log("Deleting the Configuration instance {0}".format(self.name))
        try:
            response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
                                                                           server_name=self.server_name,
                                                                           configuration_name=self.name,
                                                                           source='system-default')
        except CloudError as e:
            self.log('Error attempting to delete the Configuration instance.')
            self.fail("Error deleting the Configuration instance: {0}".format(str(e)))

        return True

    def get_configuration(self):
        """Return the current configuration as a dict, or False when the
        setting cannot be read (CloudError from the GET is swallowed)."""
        self.log("Checking if the Configuration instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mariadb_client.configurations.get(resource_group_name=self.resource_group,
                                                              server_name=self.server_name,
                                                              configuration_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Configuration instance : {0} found".format(response.name))
        except CloudError as e:
            self.log('Did not find the Configuration instance.')
        if found is True:
            return response.as_dict()

        return False
+
+
def main():
    """Module entry point; instantiating the class runs the module."""
    AzureRMMariaDbConfiguration()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py
new file mode 100644
index 0000000000..ad38f1255f
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbconfiguration_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Configuration facts
+description:
+ - Get facts of Azure MariaDB Configuration.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ type: str
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ type: str
+ name:
+ description:
+ - Setting name.
+ type: str
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get specific setting of MariaDB Server
+ azure_rm_mariadbconfiguration_info:
+ resource_group: myResourceGroup
+ server_name: testserver
+ name: deadlock_timeout
+
+ - name: Get all settings of MariaDB Server
+ azure_rm_mariadbconfiguration_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+'''
+
+RETURN = '''
+settings:
+ description:
+ - A list of dictionaries containing MariaDB Server settings.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Setting resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver
+ /configurations/deadlock_timeout"
+ name:
+ description:
+ - Setting name.
+ returned: always
+ type: str
+ sample: deadlock_timeout
+ value:
+ description:
+ - Setting value.
+ returned: always
+ type: raw
+ sample: 1000
+ description:
+ description:
+ - Description of the configuration.
+ returned: always
+ type: str
+ sample: Deadlock timeout.
+ source:
+ description:
+ - Source of the configuration.
+ returned: always
+ type: str
+ sample: system-default
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_operation import AzureOperationPoller
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
class AzureRMMariaDbConfigurationInfo(AzureRMModuleBase):
    """Read-only facts module: returns MariaDB server configuration settings."""

    def __init__(self):
        # User-facing argument spec; 'name' is optional and selects a single
        # setting instead of listing all of them.
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            server_name=dict(type='str', required=True),
            name=dict(type='str')
        )
        # Results of the module operation; facts modules never change state.
        self.results = dict(changed=False)
        self.mgmt_client = None
        self.resource_group = None
        self.server_name = None
        self.name = None
        super(AzureRMMariaDbConfigurationInfo, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Resolve parameters, build the management client, gather facts."""
        if self.module._name == 'azure_rm_mariadbconfiguration_facts':
            self.module.deprecate("The 'azure_rm_mariadbconfiguration_facts' module has been renamed to 'azure_rm_mariadbconfiguration_info'", version='2.13')

        for param in self.module_arg_spec:
            setattr(self, param, kwargs[param])
        self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        # A specific setting name narrows the query to one configuration.
        self.results['settings'] = self.get() if self.name is not None else self.list_by_server()
        return self.results

    def get(self):
        """Fetch one named configuration; returns [] when it cannot be read."""
        item = None
        try:
            item = self.mgmt_client.configurations.get(resource_group_name=self.resource_group,
                                                       server_name=self.server_name,
                                                       configuration_name=self.name)
            self.log("Response : {0}".format(item))
        except CloudError:
            self.log('Could not get facts for Configurations.')

        return [self.format_item(item)] if item is not None else []

    def list_by_server(self):
        """List every configuration on the server; returns [] on failure."""
        listing = None
        try:
            listing = self.mgmt_client.configurations.list_by_server(resource_group_name=self.resource_group,
                                                                     server_name=self.server_name)
            self.log("Response : {0}".format(listing))
        except CloudError:
            self.log('Could not get facts for Configurations.')

        if listing is None:
            return []
        return [self.format_item(entry) for entry in listing]

    def format_item(self, item):
        """Flatten an SDK configuration object into the documented dict."""
        raw = item.as_dict()
        return {
            'resource_group': self.resource_group,
            'server_name': self.server_name,
            'id': raw['id'],
            'name': raw['name'],
            'value': raw['value'],
            'description': raw['description'],
            'source': raw['source']
        }
+
+
def main():
    """Module entry point; instantiating the class runs the module."""
    AzureRMMariaDbConfigurationInfo()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py
new file mode 100644
index 0000000000..8492b96854
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbdatabase
+version_added: "2.8"
+short_description: Manage MariaDB Database instance
+description:
+ - Create, update and delete instance of MariaDB Database.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ name:
+ description:
+ - The name of the database.
+ required: True
+ charset:
+ description:
+ - The charset of the database. Check MariaDB documentation for possible values.
+ - This is only set on creation, use I(force_update) to recreate a database if the values don't match.
+ collation:
+ description:
+ - The collation of the database. Check MariaDB documentation for possible values.
+ - This is only set on creation, use I(force_update) to recreate a database if the values don't match.
+ force_update:
+ description:
+ - When set to C(true), will delete and recreate the existing MariaDB database if any of the properties don't match what is set.
+ - When set to C(false), no change will occur to the database even if any of the properties do not match.
+ type: bool
+ default: 'no'
+ state:
+ description:
+ - Assert the state of the MariaDB Database. Use C(present) to create or update a database and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Create (or update) MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: myResourceGroup
+ server_name: testserver
+ name: db1
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/databases/db1
+name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: db1
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
class Actions:
    """Enumeration of the operations exec_module may decide to perform."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
+
+
class AzureRMMariaDbDatabase(AzureRMModuleBase):
    """Configuration class for an Azure RM MariaDB Database resource.

    Creates, recreates (when force_update=true and charset/collation differ)
    or deletes a database on an existing MariaDB server.
    """

    def __init__(self):
        # Argument spec mirrors the DOCUMENTATION block above.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            charset=dict(
                type='str'
            ),
            collation=dict(
                type='str'
            ),
            force_update=dict(
                type='bool',
                default=False
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.server_name = None
        self.name = None
        self.force_update = None
        # charset/collation are collected into the API request body rather
        # than onto self (see exec_module).
        self.parameters = dict()

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMMariaDbDatabase, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                     supports_check_mode=True,
                                                     supports_tags=False)

    def exec_module(self, **kwargs):
        """Main module execution method.

        Compares the requested database against the existing one, decides on
        Create/Update/Delete/NoAction, honours check mode, and returns the
        standard Ansible results dict ('id' and 'name' set when the database
        exists afterwards).
        """

        # Parameters with a matching attribute go onto self; charset and
        # collation instead become the create_or_update request parameters.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                if key == "charset":
                    self.parameters["charset"] = kwargs[key]
                elif key == "collation":
                    self.parameters["collation"] = kwargs[key]

        old_response = None
        response = None

        self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        # NOTE(review): presumably validates that the resource group exists
        # (base-class helper); the return value is otherwise unused.
        resource_group = self.get_resource_group(self.resource_group)

        old_response = self.get_mariadbdatabase()

        if not old_response:
            self.log("MariaDB Database instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("MariaDB Database instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if MariaDB Database instance has to be deleted or may be updated")
                # charset/collation cannot be changed in place: an "update"
                # is a delete followed by a recreate, gated on force_update.
                if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']):
                    self.to_do = Actions.Update
                if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']):
                    self.to_do = Actions.Update
                if self.to_do == Actions.Update:
                    if self.force_update:
                        if not self.check_mode:
                            self.delete_mariadbdatabase()
                    else:
                        self.fail("Database properties cannot be updated without setting 'force_update' option")
                        self.to_do = Actions.NoAction

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the MariaDB Database instance")

            # In check mode, report the pending change without touching Azure.
            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_mariadbdatabase()
            self.results['changed'] = True
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("MariaDB Database instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_mariadbdatabase()
            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
            # for some time after deletion -- this should be really fixed in Azure
            while self.get_mariadbdatabase():
                time.sleep(20)
        else:
            self.log("MariaDB Database instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]
            self.results["name"] = response["name"]

        return self.results

    def create_update_mariadbdatabase(self):
        '''
        Creates or updates MariaDB Database with the specified configuration.

        Fails the module on CloudError; waits for long-running operations.

        :return: deserialized MariaDB Database instance state dictionary
        '''
        self.log("Creating / Updating the MariaDB Database instance {0}".format(self.name))

        try:
            response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group,
                                                                   server_name=self.server_name,
                                                                   database_name=self.name,
                                                                   parameters=self.parameters)
            # create_or_update may return a long-running-operation poller;
            # block until it completes.
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

        except CloudError as exc:
            self.log('Error attempting to create the MariaDB Database instance.')
            self.fail("Error creating the MariaDB Database instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_mariadbdatabase(self):
        '''
        Deletes specified MariaDB Database instance in the specified subscription and resource group.

        Fails the module on CloudError.

        :return: True
        '''
        self.log("Deleting the MariaDB Database instance {0}".format(self.name))
        try:
            response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group,
                                                         server_name=self.server_name,
                                                         database_name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the MariaDB Database instance.')
            self.fail("Error deleting the MariaDB Database instance: {0}".format(str(e)))

        return True

    def get_mariadbdatabase(self):
        '''
        Gets the properties of the specified MariaDB Database.

        A CloudError from the GET is swallowed and treated as "not found".

        :return: deserialized MariaDB Database instance state dictionary,
                 or False when the database does not exist
        '''
        self.log("Checking if the MariaDB Database instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mgmt_client.databases.get(resource_group_name=self.resource_group,
                                                      server_name=self.server_name,
                                                      database_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("MariaDB Database instance : {0} found".format(response.name))
        except CloudError as e:
            self.log('Did not find the MariaDB Database instance.')
        if found is True:
            return response.as_dict()

        return False
+
+
def main():
    """Module entry point; instantiating the class runs the module."""
    AzureRMMariaDbDatabase()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py
new file mode 100644
index 0000000000..61e33015b1
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbdatabase_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Database facts
+description:
+ - Get facts of MariaDB Database.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ type: str
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ type: str
+ name:
+ description:
+ - The name of the database.
+ type: str
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get instance of MariaDB Database
+ azure_rm_mariadbdatabase_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+ name: database_name
+
+ - name: List instances of MariaDB Database
+ azure_rm_mariadbdatabase_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+'''
+
+RETURN = '''
+databases:
+ description:
+ - A list of dictionaries containing facts for MariaDB Databases.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser
+ ver/databases/db1"
+ resource_group:
+ description:
+ - Resource group name.
+ returned: always
+ type: str
+ sample: testrg
+ server_name:
+ description:
+ - Server name.
+ returned: always
+ type: str
+ sample: testserver
+ name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: db1
+ charset:
+ description:
+ - The charset of the database.
+ returned: always
+ type: str
+ sample: UTF8
+ collation:
+ description:
+ - The collation of the database.
+ returned: always
+ type: str
+ sample: English_United States.1252
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase):
    """Read-only facts module: returns MariaDB databases on a server."""

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.resource_group = None
        self.server_name = None
        self.name = None
        super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Dispatch to a single-database lookup or a per-server listing."""
        is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", version='2.13')

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # resource_group and server_name are required, so the branch reduces
        # to whether an individual database name was supplied.
        if (self.resource_group is not None and
                self.server_name is not None and
                self.name is not None):
            self.results['databases'] = self.get()
        elif (self.resource_group is not None and
              self.server_name is not None):
            self.results['databases'] = self.list_by_server()
        return self.results

    def get(self):
        """Fetch one database by name; a CloudError yields an empty list."""
        response = None
        results = []
        try:
            response = self.mariadb_client.databases.get(resource_group_name=self.resource_group,
                                                         server_name=self.server_name,
                                                         database_name=self.name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Could not get facts for Databases.')

        if response is not None:
            results.append(self.format_item(response))

        return results

    def list_by_server(self):
        """List all databases on the server; a CloudError fails the module."""
        response = None
        results = []
        try:
            response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group,
                                                                    server_name=self.server_name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))

        if response is not None:
            for item in response:
                results.append(self.format_item(item))

        return results

    def format_item(self, item):
        """Flatten an SDK database object into the documented result dict."""
        d = item.as_dict()
        d = {
            'resource_group': self.resource_group,
            'server_name': self.server_name,
            # 'id' is documented in this module's RETURN block as always
            # returned, but the original implementation omitted it; include
            # it for consistency with the other MariaDB info modules.
            'id': d['id'],
            'name': d['name'],
            'charset': d['charset'],
            'collation': d['collation']
        }
        return d
+
+
def main():
    """Module entry point; instantiating the class runs the module."""
    AzureRMMariaDbDatabaseInfo()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py
new file mode 100644
index 0000000000..1fc8c5e79e
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbfirewallrule
+version_added: "2.8"
+short_description: Manage MariaDB firewall rule instance
+description:
+ - Create, update and delete instance of MariaDB firewall rule.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ name:
+ description:
+ - The name of the MariaDB firewall rule.
+ required: True
+ start_ip_address:
+ description:
+ - The start IP address of the MariaDB firewall rule. Must be IPv4 format.
+ end_ip_address:
+ description:
+ - The end IP address of the MariaDB firewall rule. Must be IPv4 format.
+ state:
+ description:
+ - Assert the state of the MariaDB firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Create (or update) MariaDB firewall rule
+ azure_rm_mariadbfirewallrule:
+ resource_group: myResourceGroup
+ server_name: testserver
+ name: rule1
+ start_ip_address: 10.0.0.17
+ end_ip_address: 10.0.0.20
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire
+ wallRules/rule1"
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
class Actions:
    """Enumeration of the operations exec_module may decide to perform."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
+
+
class AzureRMMariaDbFirewallRule(AzureRMModuleBase):
    """Configuration class for an Azure RM MariaDB firewall rule resource.

    Creates, updates or deletes a firewall rule (an IPv4 address range) on an
    existing MariaDB server.
    """

    def __init__(self):
        # Argument spec mirrors the DOCUMENTATION block above.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            start_ip_address=dict(
                type='str'
            ),
            end_ip_address=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # Populated from the validated module parameters in exec_module().
        self.resource_group = None
        self.server_name = None
        self.name = None
        self.start_ip_address = None
        self.end_ip_address = None

        self.results = dict(changed=False)
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMMariaDbFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                         supports_check_mode=True,
                                                         supports_tags=False)

    def exec_module(self, **kwargs):
        """Main module execution method.

        Compares the requested rule against the existing one, decides on
        Create/Update/Delete/NoAction, honours check mode, and returns the
        standard Ansible results dict ('id' set when the rule exists
        afterwards).
        """

        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])

        old_response = None
        response = None

        # NOTE(review): presumably validates that the resource group exists
        # (base-class helper); the return value is otherwise unused.
        resource_group = self.get_resource_group(self.resource_group)

        old_response = self.get_firewallrule()

        if not old_response:
            self.log("MariaDB firewall rule instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("MariaDB firewall rule instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if MariaDB firewall rule instance has to be deleted or may be updated")
                # Only compare addresses the user actually supplied.
                if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
                    self.to_do = Actions.Update
                if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the MariaDB firewall rule instance")

            # In check mode, report the pending change without touching Azure.
            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_firewallrule()

            if not old_response:
                self.results['changed'] = True
            else:
                # Bug fix: the original called old_response.__ne__(response)
                # directly; if __ne__ ever returned NotImplemented, that object
                # is truthy and 'changed' would be spuriously True. The !=
                # operator performs the full comparison protocol.
                self.results['changed'] = old_response != response
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("MariaDB firewall rule instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_firewallrule()
            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
            # for some time after deletion -- this should be really fixed in Azure
            while self.get_firewallrule():
                time.sleep(20)
        else:
            self.log("MariaDB firewall rule instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]

        return self.results

    def create_update_firewallrule(self):
        '''
        Creates or updates MariaDB firewall rule with the specified configuration.

        Fails the module on CloudError; waits for long-running operations.

        :return: deserialized MariaDB firewall rule instance state dictionary
        '''
        self.log("Creating / Updating the MariaDB firewall rule instance {0}".format(self.name))

        try:
            response = self.mariadb_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
                                                                           server_name=self.server_name,
                                                                           firewall_rule_name=self.name,
                                                                           start_ip_address=self.start_ip_address,
                                                                           end_ip_address=self.end_ip_address)
            # create_or_update may return a long-running-operation poller;
            # block until it completes.
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

        except CloudError as exc:
            self.log('Error attempting to create the MariaDB firewall rule instance.')
            self.fail("Error creating the MariaDB firewall rule instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_firewallrule(self):
        '''
        Deletes specified MariaDB firewall rule instance in the specified subscription and resource group.

        Fails the module on CloudError.

        :return: True
        '''
        self.log("Deleting the MariaDB firewall rule instance {0}".format(self.name))
        try:
            response = self.mariadb_client.firewall_rules.delete(resource_group_name=self.resource_group,
                                                                 server_name=self.server_name,
                                                                 firewall_rule_name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the MariaDB firewall rule instance.')
            self.fail("Error deleting the MariaDB firewall rule instance: {0}".format(str(e)))

        return True

    def get_firewallrule(self):
        '''
        Gets the properties of the specified MariaDB firewall rule.

        A CloudError from the GET is swallowed and treated as "not found".

        :return: deserialized MariaDB firewall rule instance state dictionary,
                 or False when the rule does not exist
        '''
        self.log("Checking if the MariaDB firewall rule instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mariadb_client.firewall_rules.get(resource_group_name=self.resource_group,
                                                              server_name=self.server_name,
                                                              firewall_rule_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("MariaDB firewall rule instance : {0} found".format(response.name))
        except CloudError as e:
            self.log('Did not find the MariaDB firewall rule instance.')
        if found is True:
            return response.as_dict()

        return False
+
+
def main():
    """Module entry point; instantiating the class runs the module."""
    AzureRMMariaDbFirewallRule()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py
new file mode 100644
index 0000000000..45557b5113
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbfirewallrule_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Firewall Rule facts
+description:
+ - Get facts of Azure MariaDB Firewall Rule.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group.
+ required: True
+ type: str
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ type: str
+ name:
+ description:
+ - The name of the server firewall rule.
+ type: str
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get instance of MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+ name: firewall_rule_name
+
+ - name: List instances of MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+'''
+
+RETURN = '''
+rules:
+ description:
+ - A list of dictionaries containing facts for MariaDB Firewall Rule.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire\
+ wallRules/rule1"
+ server_name:
+ description:
+ - The name of the server.
+ returned: always
+ type: str
+ sample: testserver
+ name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: rule1
+ start_ip_address:
+ description:
+ - The start IP address of the MariaDB firewall rule.
+ returned: always
+ type: str
+ sample: 10.0.0.16
+ end_ip_address:
+ description:
+ - The end IP address of the MariaDB firewall rule.
+ returned: always
+ type: str
+ sample: 10.0.0.18
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_operation import AzureOperationPoller
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
class AzureRMMariaDbFirewallRuleInfo(AzureRMModuleBase):
    """Info module returning facts about MariaDB server firewall rules."""

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        # populated with a MariaDBManagementClient in exec_module
        self.mgmt_client = None
        self.resource_group = None
        self.server_name = None
        self.name = None
        super(AzureRMMariaDbFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point called by the base class; fills and returns the results dict."""
        # warn users still invoking the module under its pre-2.9 "_facts" alias
        is_old_facts = self.module._name == 'azure_rm_mariadbfirewallrule_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_mariadbfirewallrule_facts' module has been renamed to 'azure_rm_mariadbfirewallrule_info'", version='2.13')

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        # a specific rule name means a single lookup; otherwise list every rule on the server
        if (self.name is not None):
            self.results['rules'] = self.get()
        else:
            self.results['rules'] = self.list_by_server()
        return self.results

    def get(self):
        """Fetch one firewall rule by name; returns a (possibly empty) list of fact dicts."""
        response = None
        results = []
        try:
            response = self.mgmt_client.firewall_rules.get(resource_group_name=self.resource_group,
                                                           server_name=self.server_name,
                                                           firewall_rule_name=self.name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # a missing rule is not an error for an info module -- just return no facts
            self.log('Could not get facts for FirewallRules.')

        if response is not None:
            results.append(self.format_item(response))

        return results

    def list_by_server(self):
        """List all firewall rules on the server; returns a list of fact dicts."""
        response = None
        results = []
        try:
            response = self.mgmt_client.firewall_rules.list_by_server(resource_group_name=self.resource_group,
                                                                      server_name=self.server_name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Could not get facts for FirewallRules.')

        if response is not None:
            for item in response:
                results.append(self.format_item(item))

        return results

    def format_item(self, item):
        """Flatten an SDK FirewallRule object into the documented fact dict."""
        d = item.as_dict()
        d = {
            'resource_group': self.resource_group,
            'id': d['id'],
            'server_name': self.server_name,
            'name': d['name'],
            'start_ip_address': d['start_ip_address'],
            'end_ip_address': d['end_ip_address']
        }
        return d
+
+
def main():
    """Module entry point."""
    # NOTE(review): instantiation alone appears to run the whole module via
    # the AzureRMModuleBase constructor; confirm against azure_rm_common.
    AzureRMMariaDbFirewallRuleInfo()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbserver.py b/test/support/integration/plugins/modules/azure_rm_mariadbserver.py
new file mode 100644
index 0000000000..30a2998844
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbserver.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbserver
+version_added: "2.8"
+short_description: Manage MariaDB Server instance
+description:
+ - Create, update and delete instance of MariaDB Server.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ name:
+ description:
+ - The name of the server.
+ required: True
+ sku:
+ description:
+ - The SKU (pricing tier) of the server.
+ suboptions:
+ name:
+ description:
+ - The name of the SKU, typically, tier + family + cores, for example C(B_Gen4_1), C(GP_Gen5_8).
+ tier:
+ description:
+ - The tier of the particular SKU, for example C(Basic).
+ choices:
+ - basic
+ - standard
+ capacity:
+ description:
+ - The scale up/out capacity, representing server's compute units.
+ type: int
+ size:
+ description:
+ - The size code, to be interpreted by resource as appropriate.
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+ storage_mb:
+ description:
+ - The maximum storage allowed for a server.
+ type: int
+ version:
+ description:
+ - Server version.
+ choices:
+ - 10.2
+ enforce_ssl:
+ description:
+ - Enable SSL enforcement.
+ type: bool
+ default: False
+ admin_username:
+ description:
+ - The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation).
+ admin_password:
+ description:
+ - The password of the administrator login.
+ create_mode:
+ description:
+ - Create mode of MariaDB Server.
+ default: Default
+ state:
+ description:
+ - Assert the state of the MariaDB Server. Use C(present) to create or update a server and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Create (or update) MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: myResourceGroup
+ name: testserver
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: eastus
+ storage_mb: 1024
+ enforce_ssl: True
+ version: 10.2
+ admin_username: cloudsa
+ admin_password: password
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/mariadbsrv1b6dd89593
+version:
+ description:
+ - Server version. Possible values include C(10.2).
+ returned: always
+ type: str
+ sample: 10.2
+state:
+ description:
+ - A state of a server that is visible to user. Possible values include C(Ready), C(Dropping), C(Disabled).
+ returned: always
+ type: str
+ sample: Ready
+fully_qualified_domain_name:
+ description:
+ - The fully qualified domain name of a server.
+ returned: always
+ type: str
+ sample: mariadbsrv1b6dd89593.mariadb.database.azure.com
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
class Actions:
    """Symbolic constants naming the operation the module decided to perform."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
+
+
class AzureRMMariaDbServers(AzureRMModuleBase):
    """Configuration class for an Azure RM MariaDB Server resource"""

    def __init__(self):
        # Ansible argument spec; option semantics are documented in DOCUMENTATION above.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            sku=dict(
                type='dict'
            ),
            location=dict(
                type='str'
            ),
            storage_mb=dict(
                type='int'
            ),
            version=dict(
                type='str',
                choices=['10.2']
            ),
            enforce_ssl=dict(
                type='bool',
                default=False
            ),
            create_mode=dict(
                type='str',
                default='Default'
            ),
            admin_username=dict(
                type='str'
            ),
            admin_password=dict(
                type='str',
                no_log=True
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.name = None
        # body sent to the Azure SDK create/update call, assembled in exec_module
        self.parameters = dict()
        self.tags = None

        self.results = dict(changed=False)
        self.state = None
        # which operation exec_module decided on (see the Actions class)
        self.to_do = Actions.NoAction

        super(AzureRMMariaDbServers, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                    supports_check_mode=True,
                                                    supports_tags=True)

    def exec_module(self, **kwargs):
        """Main module execution method"""

        # Copy plain attributes onto self; map the remaining options into the
        # SDK parameter structure (most live under the "properties" sub-dict).
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                if key == "sku":
                    ev = kwargs[key]
                    if 'tier' in ev:
                        # DOCUMENTATION advertises lower-case tiers; the API expects CamelCase
                        if ev['tier'] == 'basic':
                            ev['tier'] = 'Basic'
                        elif ev['tier'] == 'standard':
                            ev['tier'] = 'Standard'
                    self.parameters["sku"] = ev
                elif key == "location":
                    self.parameters["location"] = kwargs[key]
                elif key == "storage_mb":
                    self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})["storage_mb"] = kwargs[key]
                elif key == "version":
                    self.parameters.setdefault("properties", {})["version"] = kwargs[key]
                elif key == "enforce_ssl":
                    self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled'
                elif key == "create_mode":
                    self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key]
                elif key == "admin_username":
                    self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key]
                elif key == "admin_password":
                    self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key]

        old_response = None
        response = None

        resource_group = self.get_resource_group(self.resource_group)

        if "location" not in self.parameters:
            # default the server location to the resource group's location
            self.parameters["location"] = resource_group.location

        old_response = self.get_mariadbserver()

        # Decide which action to take by comparing current and desired state.
        if not old_response:
            self.log("MariaDB Server instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("MariaDB Server instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if MariaDB Server instance has to be deleted or may be updated")
                update_tags, newtags = self.update_tags(old_response.get('tags', {}))
                if update_tags:
                    self.tags = newtags
                # NOTE(review): an existing server is always re-submitted as an
                # Update; 'changed' is computed afterwards by comparing responses.
                self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the MariaDB Server instance")

            if self.check_mode:
                self.results['changed'] = True
                return self.results

            response = self.create_update_mariadbserver()

            if not old_response:
                self.results['changed'] = True
            else:
                # equivalent to (old_response != response); both are plain dicts here
                self.results['changed'] = old_response.__ne__(response)
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("MariaDB Server instance deleted")
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            self.delete_mariadbserver()
            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
            # for some time after deletion -- this should be really fixed in Azure
            while self.get_mariadbserver():
                time.sleep(20)
        else:
            self.log("MariaDB Server instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            # surface the documented RETURN fields
            self.results["id"] = response["id"]
            self.results["version"] = response["version"]
            self.results["state"] = response["user_visible_state"]
            self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"]

        return self.results

    def create_update_mariadbserver(self):
        '''
        Creates or updates MariaDB Server with the specified configuration.

        :return: deserialized MariaDB Server instance state dictionary
        '''
        self.log("Creating / Updating the MariaDB Server instance {0}".format(self.name))

        try:
            self.parameters['tags'] = self.tags
            if self.to_do == Actions.Create:
                response = self.mariadb_client.servers.create(resource_group_name=self.resource_group,
                                                              server_name=self.name,
                                                              parameters=self.parameters)
            else:
                # structure of parameters for update must be changed
                self.parameters.update(self.parameters.pop("properties", {}))
                response = self.mariadb_client.servers.update(resource_group_name=self.resource_group,
                                                              server_name=self.name,
                                                              parameters=self.parameters)
            # long-running operations come back as a poller; wait for the final result
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

        except CloudError as exc:
            self.log('Error attempting to create the MariaDB Server instance.')
            self.fail("Error creating the MariaDB Server instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_mariadbserver(self):
        '''
        Deletes specified MariaDB Server instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the MariaDB Server instance {0}".format(self.name))
        try:
            response = self.mariadb_client.servers.delete(resource_group_name=self.resource_group,
                                                          server_name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the MariaDB Server instance.')
            self.fail("Error deleting the MariaDB Server instance: {0}".format(str(e)))

        return True

    def get_mariadbserver(self):
        '''
        Gets the properties of the specified MariaDB Server.

        :return: deserialized MariaDB Server instance state dictionary
        '''
        self.log("Checking if the MariaDB Server instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mariadb_client.servers.get(resource_group_name=self.resource_group,
                                                       server_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("MariaDB Server instance : {0} found".format(response.name))
        except CloudError as e:
            # absence is reported as False rather than an error
            self.log('Did not find the MariaDB Server instance.')
        if found is True:
            return response.as_dict()

        return False
+
+
def main():
    """Main execution"""
    # NOTE(review): instantiation alone appears to run the whole module via
    # the AzureRMModuleBase constructor; confirm against azure_rm_common.
    AzureRMMariaDbServers()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py
new file mode 100644
index 0000000000..ffe52c5d37
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbserver_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Server facts
+description:
+ - Get facts of MariaDB Server.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ type: str
+ name:
+ description:
+ - The name of the server.
+ type: str
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+ type: list
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get instance of MariaDB Server
+ azure_rm_mariadbserver_info:
+ resource_group: myResourceGroup
+ name: server_name
+
+ - name: List instances of MariaDB Server
+ azure_rm_mariadbserver_info:
+ resource_group: myResourceGroup
+'''
+
+RETURN = '''
+servers:
+ description:
+ - A list of dictionaries containing facts for MariaDB servers.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myabdud1223
+ resource_group:
+ description:
+ - Resource group name.
+ returned: always
+ type: str
+ sample: myResourceGroup
+ name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: myabdud1223
+ location:
+ description:
+ - The location the resource resides in.
+ returned: always
+ type: str
+ sample: eastus
+ sku:
+ description:
+ - The SKU of the server.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description:
+ - The name of the SKU.
+ returned: always
+ type: str
+ sample: GP_Gen4_2
+ tier:
+ description:
+ - The tier of the particular SKU.
+ returned: always
+ type: str
+ sample: GeneralPurpose
+ capacity:
+ description:
+ - The scale capacity.
+ returned: always
+ type: int
+ sample: 2
+ storage_mb:
+ description:
+ - The maximum storage allowed for a server.
+ returned: always
+ type: int
+ sample: 128000
+ enforce_ssl:
+ description:
+ - Enable SSL enforcement.
+ returned: always
+ type: bool
+ sample: False
+ admin_username:
+ description:
+ - The administrator's login name of a server.
+ returned: always
+ type: str
+ sample: serveradmin
+ version:
+ description:
+ - Server version.
+ returned: always
+ type: str
+ sample: "9.6"
+ user_visible_state:
+ description:
+ - A state of a server that is visible to user.
+ returned: always
+ type: str
+ sample: Ready
+ fully_qualified_domain_name:
+ description:
+ - The fully qualified domain name of a server.
+ returned: always
+ type: str
+ sample: myabdud1223.mys.database.azure.com
+ tags:
+ description:
+ - Tags assigned to the resource. Dictionary of string:string pairs.
+ type: dict
+ sample: { tag1: abc }
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
class AzureRMMariaDbServerInfo(AzureRMModuleBase):
    """Info module collecting facts about MariaDB servers in a resource group."""

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            ),
            tags=dict(
                type='list'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.resource_group = None
        self.name = None
        self.tags = None
        super(AzureRMMariaDbServerInfo, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point called by the base class; fills results['servers']."""
        # warn users still invoking the module under its pre-2.9 "_facts" alias
        is_old_facts = self.module._name == 'azure_rm_mariadbserver_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_mariadbserver_facts' module has been renamed to 'azure_rm_mariadbserver_info'", version='2.13')

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # with a server name: single lookup; otherwise list the whole group
        if (self.resource_group is not None and
                self.name is not None):
            self.results['servers'] = self.get()
        elif (self.resource_group is not None):
            self.results['servers'] = self.list_by_resource_group()
        return self.results

    def get(self):
        """Fetch one server by name; returns a list with at most one fact dict."""
        response = None
        results = []
        try:
            # mariadb_client is presumably an SDK client exposed by the base class -- confirm
            response = self.mariadb_client.servers.get(resource_group_name=self.resource_group,
                                                       server_name=self.name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # a missing server is not an error for an info module
            self.log('Could not get facts for MariaDB Server.')

        # apply the optional tag filter before emitting facts
        if response and self.has_tags(response.tags, self.tags):
            results.append(self.format_item(response))

        return results

    def list_by_resource_group(self):
        """List every server in the resource group; returns a list of fact dicts."""
        response = None
        results = []
        try:
            response = self.mariadb_client.servers.list_by_resource_group(resource_group_name=self.resource_group)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Could not get facts for MariaDB Servers.')

        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_item(item))

        return results

    def format_item(self, item):
        """Flatten an SDK Server object into the documented fact dict."""
        d = item.as_dict()
        d = {
            'id': d['id'],
            'resource_group': self.resource_group,
            'name': d['name'],
            'sku': d['sku'],
            'location': d['location'],
            'storage_mb': d['storage_profile']['storage_mb'],
            'version': d['version'],
            'enforce_ssl': (d['ssl_enforcement'] == 'Enabled'),
            'admin_username': d['administrator_login'],
            'user_visible_state': d['user_visible_state'],
            'fully_qualified_domain_name': d['fully_qualified_domain_name'],
            'tags': d.get('tags')
        }

        return d
+
+
def main():
    """Module entry point."""
    # NOTE(review): instantiation alone appears to run the whole module via
    # the AzureRMModuleBase constructor; confirm against azure_rm_common.
    AzureRMMariaDbServerInfo()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_resource.py b/test/support/integration/plugins/modules/azure_rm_resource.py
new file mode 100644
index 0000000000..6ea3e3bb9b
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_resource.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_resource
+version_added: "2.6"
+short_description: Create any Azure resource
+description:
+ - Create, update or delete any Azure resource using Azure REST API.
+ - This module gives access to resources that are not supported via Ansible modules.
+ - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API.
+
+options:
+ url:
+ description:
+ - Azure RM Resource URL.
+ api_version:
+ description:
+ - Specific API version to be used.
+ provider:
+ description:
+ - Provider type.
+ - Required if URL is not specified.
+ resource_group:
+ description:
+ - Resource group to be used.
+ - Required if URL is not specified.
+ resource_type:
+ description:
+ - Resource type.
+ - Required if URL is not specified.
+ resource_name:
+ description:
+ - Resource name.
+ - Required if URL is not specified.
+ subresource:
+ description:
+ - List of subresources.
+ suboptions:
+ namespace:
+ description:
+ - Subresource namespace.
+ type:
+ description:
+ - Subresource type.
+ name:
+ description:
+ - Subresource name.
+ body:
+ description:
+ - The body of the HTTP request/response to the web service.
+ method:
+ description:
+ - The HTTP method of the request or response. It must be uppercase.
+ choices:
+ - GET
+ - PUT
+ - POST
+ - HEAD
+ - PATCH
+ - DELETE
+ - MERGE
+ default: "PUT"
+ status_code:
+ description:
+ - A valid, numeric, HTTP status code that signifies success of the request. Can also be comma separated list of status codes.
+ type: list
+ default: [ 200, 201, 202 ]
+ idempotency:
+ description:
+ - If enabled, idempotency check will be done by using I(method=GET) first and then comparing with I(body).
+ default: no
+ type: bool
+ polling_timeout:
+ description:
+ - Timeout in seconds to wait for a long-running operation to finish.
+ default: 0
+ type: int
+ version_added: "2.8"
+ polling_interval:
+ description:
+ - Interval in seconds between polls of a long-running operation.
+ default: 60
+ type: int
+ version_added: "2.8"
+ state:
+ description:
+ - Assert the state of the resource. Use C(present) to create or update resource or C(absent) to delete resource.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+
+'''
+
+EXAMPLES = '''
+ - name: Update scaleset info using azure_rm_resource
+ azure_rm_resource:
+ resource_group: myResourceGroup
+ provider: compute
+ resource_type: virtualmachinescalesets
+ resource_name: myVmss
+ api_version: "2017-12-01"
+ body: { body }
+'''
+
+RETURN = '''
+response:
+ description:
+ - Response specific to resource type.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Storage/storageAccounts/staccb57dc95183"
+ kind:
+ description:
+ - The kind of storage.
+ type: str
+ returned: always
+ sample: Storage
+ location:
+ description:
+ - The resource location, defaults to location of the resource group.
+ type: str
+ returned: always
+ sample: eastus
+ name:
+ description:
+ - The storage account name.
+ type: str
+ returned: always
+ sample: staccb57dc95183
+ properties:
+ description:
+ - The storage account's related properties.
+ type: dict
+ returned: always
+ sample: {
+ "creationTime": "2019-06-13T06:34:33.0996676Z",
+ "encryption": {
+ "keySource": "Microsoft.Storage",
+ "services": {
+ "blob": {
+ "enabled": true,
+ "lastEnabledTime": "2019-06-13T06:34:33.1934074Z"
+ },
+ "file": {
+ "enabled": true,
+ "lastEnabledTime": "2019-06-13T06:34:33.1934074Z"
+ }
+ }
+ },
+ "networkAcls": {
+ "bypass": "AzureServices",
+ "defaultAction": "Allow",
+ "ipRules": [],
+ "virtualNetworkRules": []
+ },
+ "primaryEndpoints": {
+ "blob": "https://staccb57dc95183.blob.core.windows.net/",
+ "file": "https://staccb57dc95183.file.core.windows.net/",
+ "queue": "https://staccb57dc95183.queue.core.windows.net/",
+ "table": "https://staccb57dc95183.table.core.windows.net/"
+ },
+ "primaryLocation": "eastus",
+ "provisioningState": "Succeeded",
+ "secondaryLocation": "westus",
+ "statusOfPrimary": "available",
+ "statusOfSecondary": "available",
+ "supportsHttpsTrafficOnly": false
+ }
+ sku:
+ description:
+ - The storage account SKU.
+ type: dict
+ returned: always
+ sample: {
+ "name": "Standard_GRS",
+ "tier": "Standard"
+ }
+ tags:
+ description:
+ - Resource tags.
+ type: dict
+ returned: always
+ sample: { 'key1': 'value1' }
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: "Microsoft.Storage/storageAccounts"
+
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible.module_utils.azure_rm_common_rest import GenericRestClient
+from ansible.module_utils.common.dict_transformations import dict_merge
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.service_client import ServiceClient
+ from msrestazure.tools import resource_id, is_valid_resource_id
+ import json
+
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
class AzureRMResource(AzureRMModuleBase):
    """Create, update, read or delete an arbitrary Azure resource via the REST API.

    The resource is addressed either by a full ARM I(url), or by the
    (provider, resource_group, resource_type, resource_name, subresource)
    tuple from which the URL is assembled with ``resource_id``.
    """

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            url=dict(
                type='str'
            ),
            provider=dict(
                type='str',
            ),
            resource_group=dict(
                type='str',
            ),
            resource_type=dict(
                type='str',
            ),
            resource_name=dict(
                type='str',
            ),
            subresource=dict(
                type='list',
                default=[]
            ),
            api_version=dict(
                type='str'
            ),
            method=dict(
                type='str',
                default='PUT',
                choices=["GET", "PUT", "POST", "HEAD", "PATCH", "DELETE", "MERGE"]
            ),
            body=dict(
                type='raw'
            ),
            status_code=dict(
                type='list',
                default=[200, 201, 202]
            ),
            idempotency=dict(
                type='bool',
                default=False
            ),
            polling_timeout=dict(
                type='int',
                default=0
            ),
            polling_interval=dict(
                type='int',
                default=60
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False,
            response=None
        )
        self.mgmt_client = None
        self.url = None
        self.api_version = None
        self.provider = None
        self.resource_group = None
        self.resource_type = None
        self.resource_name = None
        self.subresource_type = None
        self.subresource_name = None
        self.subresource = []
        self.method = None
        self.status_code = []
        self.idempotency = False
        self.polling_timeout = None
        self.polling_interval = None
        self.state = None
        self.body = None
        super(AzureRMResource, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Run the module: build the URL, resolve the api-version, issue the request.

        :return: the results dict with ``changed`` and the decoded ``response``
        """
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        if self.state == 'absent':
            self.method = 'DELETE'
            # deleting an already-absent resource answers 204 -- treat as success
            self.status_code.append(204)

        if self.url is None:
            # orphan holds a trailing type segment without an accompanying name
            # (e.g. listing a collection); it is appended to the URL verbatim
            orphan = None
            rargs = dict()
            rargs['subscription'] = self.subscription_id
            rargs['resource_group'] = self.resource_group
            # Prefix the provider with "Microsoft." unless the caller already
            # supplied a fully qualified namespace.
            # BUGFIX: this previously tested startswith('.microsoft'), which a
            # lowercased provider can never match, so fully qualified input like
            # "Microsoft.Compute" became "Microsoft.Microsoft.Compute".
            if not (self.provider is None or self.provider.lower().startswith('microsoft.')):
                rargs['namespace'] = "Microsoft." + self.provider
            else:
                rargs['namespace'] = self.provider

            if self.resource_type is not None and self.resource_name is not None:
                rargs['type'] = self.resource_type
                rargs['name'] = self.resource_name
                for i in range(len(self.subresource)):
                    resource_ns = self.subresource[i].get('namespace', None)
                    resource_type = self.subresource[i].get('type', None)
                    resource_name = self.subresource[i].get('name', None)
                    if resource_type is not None and resource_name is not None:
                        rargs['child_namespace_' + str(i + 1)] = resource_ns
                        rargs['child_type_' + str(i + 1)] = resource_type
                        rargs['child_name_' + str(i + 1)] = resource_name
                    else:
                        orphan = resource_type
            else:
                orphan = self.resource_type

            self.url = resource_id(**rargs)

            if orphan is not None:
                self.url += '/' + orphan

        # if api_version was not specified, get latest one
        if not self.api_version:
            try:
                # extract provider and resource type from the URL
                if "/providers/" in self.url:
                    provider = self.url.split("/providers/")[1].split("/")[0]
                    resourceType = self.url.split(provider + "/")[1].split("/")[0]
                    url = "/subscriptions/" + self.subscription_id + "/providers/" + provider
                    api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text)
                    for rt in api_versions['resourceTypes']:
                        if rt['resourceType'].lower() == resourceType.lower():
                            # the first entry is the most recent api-version
                            self.api_version = rt['apiVersions'][0]
                            break
                else:
                    # if there's no provider in API version, assume Microsoft.Resources
                    self.api_version = '2018-05-01'
                if not self.api_version:
                    self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType))
            except Exception as exc:
                self.fail("Failed to obtain API version: {0}".format(str(exc)))

        query_parameters = {}
        query_parameters['api-version'] = self.api_version

        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'

        needs_update = True
        response = None

        if self.idempotency:
            # probe the current state first; 404 is an acceptable answer here
            original = self.mgmt_client.query(self.url, "GET", query_parameters, None, None, [200, 404], 0, 0)

            if original.status_code == 404:
                if self.state == 'absent':
                    needs_update = False
            else:
                try:
                    response = json.loads(original.text)
                    # unchanged when merging the requested body into the current
                    # state yields the current state
                    needs_update = (dict_merge(response, self.body) != response)
                except Exception:
                    # non-JSON current state: fall back to issuing the request
                    pass

        if needs_update:
            response = self.mgmt_client.query(self.url,
                                              self.method,
                                              query_parameters,
                                              header_parameters,
                                              self.body,
                                              self.status_code,
                                              self.polling_timeout,
                                              self.polling_interval)
            if self.state == 'present':
                try:
                    response = json.loads(response.text)
                except Exception:
                    # keep the raw text when the service does not return JSON
                    response = response.text
            else:
                response = None

        self.results['response'] = response
        self.results['changed'] = needs_update

        return self.results
+
+
def main():
    """Module entry point."""
    # NOTE(review): instantiation alone appears to run the whole module via
    # the AzureRMModuleBase constructor; confirm against azure_rm_common.
    AzureRMResource()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_resource_info.py b/test/support/integration/plugins/modules/azure_rm_resource_info.py
new file mode 100644
index 0000000000..354cd79578
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_resource_info.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_resource_info
+version_added: "2.9"
+short_description: Generic facts of Azure resources
+description:
+ - Obtain facts of any resource using Azure REST API.
+ - This module gives access to resources that are not supported via Ansible modules.
+ - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API.
+
+options:
+ url:
+ description:
+ - Azure RM Resource URL.
+ api_version:
+ description:
+ - Specific API version to be used.
+ provider:
+ description:
+            - Provider type, should be specified if no URL is given.
+ resource_group:
+ description:
+ - Resource group to be used.
+ - Required if URL is not specified.
+ resource_type:
+ description:
+ - Resource type.
+ resource_name:
+ description:
+ - Resource name.
+ subresource:
+ description:
+ - List of subresources.
+ suboptions:
+ namespace:
+ description:
+ - Subresource namespace.
+ type:
+ description:
+ - Subresource type.
+ name:
+ description:
+ - Subresource name.
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+
+'''
+
+EXAMPLES = '''
+ - name: Get scaleset info
+ azure_rm_resource_info:
+ resource_group: myResourceGroup
+ provider: compute
+ resource_type: virtualmachinescalesets
+ resource_name: myVmss
+ api_version: "2017-12-01"
+
+ - name: Query all the resources in the resource group
+ azure_rm_resource_info:
+ resource_group: "{{ resource_group }}"
+ resource_type: resources
+'''
+
+RETURN = '''
+response:
+ description:
+ - Response specific to resource type.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Id of the Azure resource.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/virtualMachines/myVM"
+ location:
+ description:
+ - Resource location.
+ type: str
+ returned: always
+ sample: eastus
+ name:
+ description:
+ - Resource name.
+ type: str
+ returned: always
+ sample: myVM
+ properties:
+ description:
+ - Specifies the virtual machine's property.
+ type: complex
+ returned: always
+ contains:
+ diagnosticsProfile:
+ description:
+ - Specifies the boot diagnostic settings state.
+ type: complex
+ returned: always
+ contains:
+ bootDiagnostics:
+ description:
+ - A debugging feature, which to view Console Output and Screenshot to diagnose VM status.
+ type: dict
+ returned: always
+ sample: {
+ "enabled": true,
+ "storageUri": "https://vxisurgdiag.blob.core.windows.net/"
+ }
+ hardwareProfile:
+ description:
+ - Specifies the hardware settings for the virtual machine.
+ type: dict
+ returned: always
+ sample: {
+ "vmSize": "Standard_D2s_v3"
+ }
+ networkProfile:
+ description:
+ - Specifies the network interfaces of the virtual machine.
+ type: complex
+ returned: always
+ contains:
+ networkInterfaces:
+ description:
+ - Describes a network interface reference.
+ type: list
+ returned: always
+ sample:
+ - {
+ "id": "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/networkInterfaces/myvm441"
+ }
+ osProfile:
+ description:
+ - Specifies the operating system settings for the virtual machine.
+ type: complex
+ returned: always
+ contains:
+ adminUsername:
+ description:
+ - Specifies the name of the administrator account.
+ type: str
+ returned: always
+ sample: azureuser
+ allowExtensionOperations:
+ description:
+ - Specifies whether extension operations should be allowed on the virtual machine.
+ - This may only be set to False when no extensions are present on the virtual machine.
+ type: bool
+ returned: always
+ sample: true
+ computerName:
+ description:
+ - Specifies the host OS name of the virtual machine.
+ type: str
+ returned: always
+ sample: myVM
+ requireGuestProvisionSignale:
+ description:
+ - Specifies the host require guest provision signal or not.
+ type: bool
+ returned: always
+ sample: true
+ secrets:
+ description:
+ - Specifies set of certificates that should be installed onto the virtual machine.
+ type: list
+ returned: always
+ sample: []
+ linuxConfiguration:
+ description:
+ - Specifies the Linux operating system settings on the virtual machine.
+ type: dict
+ returned: when OS type is Linux
+ sample: {
+ "disablePasswordAuthentication": false,
+ "provisionVMAgent": true
+ }
+ provisioningState:
+ description:
+ - The provisioning state.
+ type: str
+ returned: always
+ sample: Succeeded
+ vmID:
+ description:
+ - Specifies the VM unique ID which is a 128-bits identifier that is encoded and stored in all Azure laaS VMs SMBIOS.
+ - It can be read using platform BIOS commands.
+ type: str
+ returned: always
+ sample: "eb86d9bb-6725-4787-a487-2e497d5b340c"
+ storageProfile:
+ description:
+ - Specifies the storage account type for the managed disk.
+ type: complex
+ returned: always
+ contains:
+ dataDisks:
+ description:
+ - Specifies the parameters that are used to add a data disk to virtual machine.
+ type: list
+ returned: always
+ sample:
+ - {
+ "caching": "None",
+ "createOption": "Attach",
+ "diskSizeGB": 1023,
+ "lun": 2,
+ "managedDisk": {
+ "id": "/subscriptions/xxxx....xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk2",
+ "storageAccountType": "StandardSSD_LRS"
+ },
+ "name": "testdisk2"
+ }
+ - {
+ "caching": "None",
+ "createOption": "Attach",
+ "diskSizeGB": 1023,
+ "lun": 1,
+ "managedDisk": {
+ "id": "/subscriptions/xxxx...xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk3",
+ "storageAccountType": "StandardSSD_LRS"
+ },
+ "name": "testdisk3"
+ }
+
+ imageReference:
+ description:
+ - Specifies information about the image to use.
+ type: dict
+ returned: always
+ sample: {
+ "offer": "UbuntuServer",
+ "publisher": "Canonical",
+ "sku": "18.04-LTS",
+ "version": "latest"
+ }
+ osDisk:
+ description:
+ - Specifies information about the operating system disk used by the virtual machine.
+ type: dict
+ returned: always
+ sample: {
+ "caching": "ReadWrite",
+ "createOption": "FromImage",
+ "diskSizeGB": 30,
+ "managedDisk": {
+ "id": "/subscriptions/xxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/disks/myVM_disk1_xxx",
+ "storageAccountType": "Premium_LRS"
+ },
+ "name": "myVM_disk1_xxx",
+ "osType": "Linux"
+ }
+ type:
+ description:
+ - The type of identity used for the virtual machine.
+ type: str
+ returned: always
+ sample: "Microsoft.Compute/virtualMachines"
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible.module_utils.azure_rm_common_rest import GenericRestClient
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.service_client import ServiceClient
+ from msrestazure.tools import resource_id, is_valid_resource_id
+ import json
+
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
class AzureRMResourceInfo(AzureRMModuleBase):
    """Query arbitrary Azure resources through the ARM REST API.

    Builds a resource URL either from an explicit I(url) option or from the
    provider/resource_group/resource_type/resource_name (+ subresource)
    options, resolves an API version when none was given, then GETs the URL,
    following pagination, and returns the collected objects in
    ``results['response']``.
    """

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            url=dict(
                type='str'
            ),
            provider=dict(
                type='str'
            ),
            resource_group=dict(
                type='str'
            ),
            resource_type=dict(
                type='str'
            ),
            resource_name=dict(
                type='str'
            ),
            subresource=dict(
                type='list',
                default=[]
            ),
            api_version=dict(
                type='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            response=[]
        )
        # All of these are overwritten from the module parameters in
        # exec_module(); the values here are only pre-declaration defaults.
        self.mgmt_client = None
        self.url = None
        self.api_version = None
        self.provider = None
        self.resource_group = None
        self.resource_type = None
        self.resource_name = None
        self.subresource = []
        super(AzureRMResourceInfo, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Resolve the resource URL and API version, then fetch all pages.

        Returns the ``results`` dict with keys ``url`` and ``response``
        (a list of the JSON objects returned by the service).
        """
        # Emit a deprecation warning when invoked under the old *_facts alias.
        is_old_facts = self.module._name == 'azure_rm_resource_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_resource_facts' module has been renamed to 'azure_rm_resource_info'", version='2.13')

        # Copy every declared module parameter onto self.
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        if self.url is None:
            # Build the URL from the individual parts via msrestazure's
            # resource_id(). A subresource entry missing a name (or a bare
            # resource_type without resource_name) is treated as an "orphan"
            # type segment appended to the URL, which yields a list endpoint.
            orphan = None
            rargs = dict()
            rargs['subscription'] = self.subscription_id
            rargs['resource_group'] = self.resource_group
            # NOTE(review): this prefixes "Microsoft." unless the provider
            # starts with ".microsoft" (leading dot) — looks like it was meant
            # to test for an already-qualified "microsoft." prefix; confirm
            # against the upstream module before changing.
            if not (self.provider is None or self.provider.lower().startswith('.microsoft')):
                rargs['namespace'] = "Microsoft." + self.provider
            else:
                rargs['namespace'] = self.provider

            if self.resource_type is not None and self.resource_name is not None:
                rargs['type'] = self.resource_type
                rargs['name'] = self.resource_name
                for i in range(len(self.subresource)):
                    resource_ns = self.subresource[i].get('namespace', None)
                    resource_type = self.subresource[i].get('type', None)
                    resource_name = self.subresource[i].get('name', None)
                    if resource_type is not None and resource_name is not None:
                        rargs['child_namespace_' + str(i + 1)] = resource_ns
                        rargs['child_type_' + str(i + 1)] = resource_type
                        rargs['child_name_' + str(i + 1)] = resource_name
                    else:
                        orphan = resource_type
            else:
                orphan = self.resource_type

            self.url = resource_id(**rargs)

            if orphan is not None:
                self.url += '/' + orphan

        # if api_version was not specified, get latest one
        if not self.api_version:
            try:
                # extract provider and resource type
                if "/providers/" in self.url:
                    provider = self.url.split("/providers/")[1].split("/")[0]
                    resourceType = self.url.split(provider + "/")[1].split("/")[0]
                    # Ask ARM which API versions the provider supports and
                    # pick the first (most recent) one for this resource type.
                    url = "/subscriptions/" + self.subscription_id + "/providers/" + provider
                    api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text)
                    for rt in api_versions['resourceTypes']:
                        if rt['resourceType'].lower() == resourceType.lower():
                            self.api_version = rt['apiVersions'][0]
                            break
                else:
                    # if there's no provider in API version, assume Microsoft.Resources
                    self.api_version = '2018-05-01'
                if not self.api_version:
                    self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType))
            except Exception as exc:
                self.fail("Failed to obtain API version: {0}".format(str(exc)))

        self.results['url'] = self.url

        query_parameters = {}
        query_parameters['api-version'] = self.api_version

        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        skiptoken = None

        # Follow pagination: list responses carry their items in 'value' and
        # a continuation link in 'nextLink'; single objects are wrapped in a
        # one-element list.
        while True:
            if skiptoken:
                # NOTE(review): the full 'nextLink' URL is passed as a
                # 'skiptoken' query parameter rather than being followed
                # directly — presumably relied on by GenericRestClient;
                # verify before touching.
                query_parameters['skiptoken'] = skiptoken
            response = self.mgmt_client.query(self.url, "GET", query_parameters, header_parameters, None, [200, 404], 0, 0)
            try:
                response = json.loads(response.text)
                if isinstance(response, dict):
                    if response.get('value'):
                        self.results['response'] = self.results['response'] + response['value']
                        skiptoken = response.get('nextLink')
                    else:
                        self.results['response'] = self.results['response'] + [response]
            except Exception as e:
                self.fail('Failed to parse response: ' + str(e))
            if not skiptoken:
                break
        return self.results
+
+
def main():
    # Instantiating the module class runs the whole module (its base-class
    # __init__ drives exec_module and exits the process with the results).
    AzureRMResourceInfo()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_storageaccount.py b/test/support/integration/plugins/modules/azure_rm_storageaccount.py
new file mode 100644
index 0000000000..d4158bbda8
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_storageaccount.py
@@ -0,0 +1,684 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
+# Chris Houseknecht, <house@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_storageaccount
+version_added: "2.1"
+short_description: Manage Azure storage accounts
+description:
+ - Create, update or delete a storage account.
+options:
+ resource_group:
+ description:
+ - Name of the resource group to use.
+ required: true
+ aliases:
+ - resource_group_name
+ name:
+ description:
+ - Name of the storage account to update or create.
+ state:
+ description:
+ - State of the storage account. Use C(present) to create or update a storage account and use C(absent) to delete an account.
+ default: present
+ choices:
+ - absent
+ - present
+ location:
+ description:
+ - Valid Azure location. Defaults to location of the resource group.
+ account_type:
+ description:
+ - Type of storage account. Required when creating a storage account.
+ - C(Standard_ZRS) and C(Premium_LRS) accounts cannot be changed to other account types.
+ - Other account types cannot be changed to C(Standard_ZRS) or C(Premium_LRS).
+ choices:
+ - Premium_LRS
+ - Standard_GRS
+ - Standard_LRS
+ - StandardSSD_LRS
+ - Standard_RAGRS
+ - Standard_ZRS
+ - Premium_ZRS
+ aliases:
+ - type
+ custom_domain:
+ description:
+ - User domain assigned to the storage account.
+ - Must be a dictionary with I(name) and I(use_sub_domain) keys where I(name) is the CNAME source.
+ - Only one custom domain is supported per storage account at this time.
+ - To clear the existing custom domain, use an empty string for the custom domain name property.
+ - Can be added to an existing storage account. Will be ignored during storage account creation.
+ aliases:
+ - custom_dns_domain_suffix
+ kind:
+ description:
+ - The kind of storage.
+ default: 'Storage'
+ choices:
+ - Storage
+ - StorageV2
+ - BlobStorage
+ version_added: "2.2"
+ access_tier:
+ description:
+ - The access tier for this storage account. Required when I(kind=BlobStorage).
+ choices:
+ - Hot
+ - Cool
+ version_added: "2.4"
+ force_delete_nonempty:
+ description:
+ - Attempt deletion if resource already exists and cannot be updated.
+ type: bool
+ aliases:
+ - force
+ https_only:
+ description:
+ - Allows https traffic only to storage service when set to C(true).
+ type: bool
+ version_added: "2.8"
+ blob_cors:
+ description:
+ - Specifies CORS rules for the Blob service.
+ - You can include up to five CorsRule elements in the request.
+ - If no blob_cors elements are included in the argument list, nothing about CORS will be changed.
+ - If you want to delete all CORS rules and disable CORS for the Blob service, explicitly set I(blob_cors=[]).
+ type: list
+ version_added: "2.8"
+ suboptions:
+ allowed_origins:
+ description:
+ - A list of origin domains that will be allowed via CORS, or "*" to allow all domains.
+ type: list
+ required: true
+ allowed_methods:
+ description:
+ - A list of HTTP methods that are allowed to be executed by the origin.
+ type: list
+ required: true
+ max_age_in_seconds:
+ description:
+ - The number of seconds that the client/browser should cache a preflight response.
+ type: int
+ required: true
+ exposed_headers:
+ description:
+ - A list of response headers to expose to CORS clients.
+ type: list
+ required: true
+ allowed_headers:
+ description:
+ - A list of headers allowed to be part of the cross-origin request.
+ type: list
+ required: true
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - Matt Davis (@nitzmahone)
+'''
+
+EXAMPLES = '''
+ - name: remove account, if it exists
+ azure_rm_storageaccount:
+ resource_group: myResourceGroup
+ name: clh0002
+ state: absent
+
+ - name: create an account
+ azure_rm_storageaccount:
+ resource_group: myResourceGroup
+ name: clh0002
+ type: Standard_RAGRS
+ tags:
+ testing: testing
+ delete: on-exit
+
+ - name: create an account with blob CORS
+ azure_rm_storageaccount:
+ resource_group: myResourceGroup
+ name: clh002
+ type: Standard_RAGRS
+ blob_cors:
+ - allowed_origins:
+ - http://www.example.com/
+ allowed_methods:
+ - GET
+ - POST
+ allowed_headers:
+ - x-ms-meta-data*
+ - x-ms-meta-target*
+ - x-ms-meta-abc
+ exposed_headers:
+ - x-ms-meta-*
+ max_age_in_seconds: 200
+'''
+
+
+RETURN = '''
+state:
+ description:
+ - Current state of the storage account.
+ returned: always
+ type: complex
+ contains:
+ account_type:
+ description:
+ - Type of storage account.
+ returned: always
+ type: str
+ sample: Standard_RAGRS
+ custom_domain:
+ description:
+ - User domain assigned to the storage account.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description:
+ - CNAME source.
+ returned: always
+ type: str
+ sample: testaccount
+ use_sub_domain:
+ description:
+ - Whether to use sub domain.
+ returned: always
+ type: bool
+ sample: true
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/clh0003"
+ location:
+ description:
+ - Valid Azure location. Defaults to location of the resource group.
+ returned: always
+ type: str
+ sample: eastus2
+ name:
+ description:
+ - Name of the storage account to update or create.
+ returned: always
+ type: str
+ sample: clh0003
+ primary_endpoints:
+ description:
+ - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the primary location.
+ returned: always
+ type: dict
+ sample: {
+ "blob": "https://clh0003.blob.core.windows.net/",
+ "queue": "https://clh0003.queue.core.windows.net/",
+ "table": "https://clh0003.table.core.windows.net/"
+ }
+ primary_location:
+ description:
+ - The location of the primary data center for the storage account.
+ returned: always
+ type: str
+ sample: eastus2
+ provisioning_state:
+ description:
+ - The status of the storage account.
+ - Possible values include C(Creating), C(ResolvingDNS), C(Succeeded).
+ returned: always
+ type: str
+ sample: Succeeded
+ resource_group:
+ description:
+ - The resource group's name.
+ returned: always
+ type: str
+ sample: Testing
+ secondary_endpoints:
+ description:
+ - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the secondary location.
+ returned: always
+ type: dict
+ sample: {
+ "blob": "https://clh0003-secondary.blob.core.windows.net/",
+ "queue": "https://clh0003-secondary.queue.core.windows.net/",
+ "table": "https://clh0003-secondary.table.core.windows.net/"
+ }
+ secondary_location:
+ description:
+ - The location of the geo-replicated secondary for the storage account.
+ returned: always
+ type: str
+ sample: centralus
+ status_of_primary:
+ description:
+ - The status of the primary location of the storage account; either C(available) or C(unavailable).
+ returned: always
+ type: str
+ sample: available
+ status_of_secondary:
+ description:
+ - The status of the secondary location of the storage account; either C(available) or C(unavailable).
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description:
+ - Resource tags.
+ returned: always
+ type: dict
+ sample: { 'tags1': 'value1' }
+ type:
+ description:
+ - The storage account type.
+ returned: always
+ type: str
+ sample: "Microsoft.Storage/storageAccounts"
+'''
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.storage.cloudstorageaccount import CloudStorageAccount
+ from azure.common import AzureMissingResourceHttpError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+import copy
+from ansible.module_utils.azure_rm_common import AZURE_SUCCESS_STATE, AzureRMModuleBase
+from ansible.module_utils._text import to_native
+
# Argument sub-spec for one entry of the blob_cors list option; field names
# mirror the storage SDK's CorsRule model so each validated dict can be
# splatted straight into CorsRule(**rule) (see set_blob_cors below).
cors_rule_spec = dict(
    allowed_origins=dict(type='list', elements='str', required=True),
    allowed_methods=dict(type='list', elements='str', required=True),
    max_age_in_seconds=dict(type='int', required=True),
    exposed_headers=dict(type='list', elements='str', required=True),
    allowed_headers=dict(type='list', elements='str', required=True),
)
+
+
def compare_cors(cors1, cors2):
    """Return True if two lists of CORS rule dicts are equal as multisets.

    Two rules match when their max_age_in_seconds are equal and their four
    list fields (allowed_methods/origins/headers, exposed_headers) are equal
    as sets (order and duplicates within a field are ignored). Rule order in
    the lists is ignored, but each rule in ``cors2`` may satisfy at most one
    rule in ``cors1``.

    :param cors1: list of rule dicts (e.g. the module's desired blob_cors)
    :param cors2: list of rule dicts (e.g. the rules currently on the account)
    :return: bool
    """
    if len(cors1) != len(cors2):
        return False
    copy2 = copy.copy(cors2)
    for rule1 in cors1:
        matched = False
        for rule2 in copy2:
            if (rule1['max_age_in_seconds'] == rule2['max_age_in_seconds']
                    and set(rule1['allowed_methods']) == set(rule2['allowed_methods'])
                    and set(rule1['allowed_origins']) == set(rule2['allowed_origins'])
                    and set(rule1['allowed_headers']) == set(rule2['allowed_headers'])
                    and set(rule1['exposed_headers']) == set(rule2['exposed_headers'])):
                matched = True
                copy2.remove(rule2)
                # Stop after the first match: continuing to iterate a list we
                # just mutated skips the following element and could consume
                # several duplicate rules for a single rule1, making
                # multiset-equal inputs compare unequal.
                break
        if not matched:
            return False
    return True
+
+
class AzureRMStorageAccount(AzureRMModuleBase):
    """Create, update or delete an Azure storage account.

    Drives the storage management SDK via the inherited ``storage_client`` /
    ``storage_models`` attributes. ``exec_module`` dispatches on I(state) and
    the account's current existence; check mode is supported throughout.
    """

    def __init__(self):

        self.module_arg_spec = dict(
            account_type=dict(type='str',
                              choices=['Premium_LRS', 'Standard_GRS', 'Standard_LRS', 'StandardSSD_LRS', 'Standard_RAGRS', 'Standard_ZRS', 'Premium_ZRS'],
                              aliases=['type']),
            custom_domain=dict(type='dict', aliases=['custom_dns_domain_suffix']),
            location=dict(type='str'),
            name=dict(type='str', required=True),
            resource_group=dict(required=True, type='str', aliases=['resource_group_name']),
            state=dict(default='present', choices=['present', 'absent']),
            force_delete_nonempty=dict(type='bool', default=False, aliases=['force']),
            tags=dict(type='dict'),
            kind=dict(type='str', default='Storage', choices=['Storage', 'StorageV2', 'BlobStorage']),
            access_tier=dict(type='str', choices=['Hot', 'Cool']),
            https_only=dict(type='bool', default=False),
            blob_cors=dict(type='list', options=cors_rule_spec, elements='dict')
        )

        self.results = dict(
            changed=False,
            state=dict()
        )

        # All of these are overwritten from the module parameters in
        # exec_module(); the values here are only pre-declaration defaults.
        self.account_dict = None
        self.resource_group = None
        self.name = None
        self.state = None
        self.location = None
        self.account_type = None
        self.custom_domain = None
        self.tags = None
        self.force_delete_nonempty = None
        self.kind = None
        self.access_tier = None
        self.https_only = None
        self.blob_cors = None

        super(AzureRMStorageAccount, self).__init__(self.module_arg_spec,
                                                    supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Validate parameters and create/update/delete the account.

        Returns the ``results`` dict with ``changed`` and the account's
        ``state`` (empty dict, account facts, or ``{'Status': 'Deleted'}``).
        """

        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location
            self.location = resource_group.location

        # Azure restricts storage account names to 3-24 characters.
        if len(self.name) < 3 or len(self.name) > 24:
            self.fail("Parameter error: name length must be between 3 and 24 characters.")

        if self.custom_domain:
            if self.custom_domain.get('name', None) is None:
                self.fail("Parameter error: expecting custom_domain to have a name attribute of type string.")
            if self.custom_domain.get('use_sub_domain', None) is None:
                self.fail("Parameter error: expecting custom_domain to have a use_sub_domain "
                          "attribute of type boolean.")

        self.account_dict = self.get_account()

        # Refuse to modify an account still mid-provisioning.
        if self.state == 'present' and self.account_dict and \
           self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE:
            self.fail("Error: storage account {0} has not completed provisioning. State is {1}. Expecting state "
                      "to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE))

        if self.account_dict is not None:
            self.results['state'] = self.account_dict
        else:
            self.results['state'] = dict()

        if self.state == 'present':
            if not self.account_dict:
                self.results['state'] = self.create_account()
            else:
                self.update_account()
        elif self.state == 'absent' and self.account_dict:
            self.delete_account()
            self.results['state'] = dict(Status='Deleted')

        return self.results

    def check_name_availability(self):
        """Fail the module if the account name is taken or invalid."""
        self.log('Checking name availability for {0}'.format(self.name))
        try:
            response = self.storage_client.storage_accounts.check_name_availability(self.name)
        except CloudError as e:
            self.log('Error attempting to validate name.')
            self.fail("Error checking name availability: {0}".format(str(e)))
        if not response.name_available:
            self.log('Error name not available.')
            self.fail("{0} - {1}".format(response.message, response.reason))

    def get_account(self):
        """Return the account's current facts as a dict, or None if absent."""
        self.log('Get properties for account {0}'.format(self.name))
        account_obj = None
        blob_service_props = None
        account_dict = None

        try:
            account_obj = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name)
            blob_service_props = self.storage_client.blob_services.get_service_properties(self.resource_group, self.name)
        except CloudError:
            # Treat "not found" (and other cloud errors) as a missing account.
            pass

        if account_obj:
            account_dict = self.account_obj_to_dict(account_obj, blob_service_props)

        return account_dict

    def account_obj_to_dict(self, account_obj, blob_service_props=None):
        """Flatten an SDK StorageAccount object (and optional blob service
        properties) into the plain dict returned under results['state']."""
        account_dict = dict(
            id=account_obj.id,
            name=account_obj.name,
            location=account_obj.location,
            resource_group=self.resource_group,
            type=account_obj.type,
            access_tier=(account_obj.access_tier.value
                         if account_obj.access_tier is not None else None),
            sku_tier=account_obj.sku.tier.value,
            sku_name=account_obj.sku.name.value,
            provisioning_state=account_obj.provisioning_state.value,
            secondary_location=account_obj.secondary_location,
            status_of_primary=(account_obj.status_of_primary.value
                               if account_obj.status_of_primary is not None else None),
            status_of_secondary=(account_obj.status_of_secondary.value
                                 if account_obj.status_of_secondary is not None else None),
            primary_location=account_obj.primary_location,
            https_only=account_obj.enable_https_traffic_only
        )
        account_dict['custom_domain'] = None
        if account_obj.custom_domain:
            account_dict['custom_domain'] = dict(
                name=account_obj.custom_domain.name,
                use_sub_domain=account_obj.custom_domain.use_sub_domain
            )

        account_dict['primary_endpoints'] = None
        if account_obj.primary_endpoints:
            account_dict['primary_endpoints'] = dict(
                blob=account_obj.primary_endpoints.blob,
                queue=account_obj.primary_endpoints.queue,
                table=account_obj.primary_endpoints.table
            )
        account_dict['secondary_endpoints'] = None
        if account_obj.secondary_endpoints:
            account_dict['secondary_endpoints'] = dict(
                blob=account_obj.secondary_endpoints.blob,
                queue=account_obj.secondary_endpoints.queue,
                table=account_obj.secondary_endpoints.table
            )
        account_dict['tags'] = None
        if account_obj.tags:
            account_dict['tags'] = account_obj.tags
        # Normalise CORS rules to plain-str dicts so they can be compared
        # against the module's blob_cors option with compare_cors().
        if blob_service_props and blob_service_props.cors and blob_service_props.cors.cors_rules:
            account_dict['blob_cors'] = [dict(
                allowed_origins=[to_native(y) for y in x.allowed_origins],
                allowed_methods=[to_native(y) for y in x.allowed_methods],
                max_age_in_seconds=x.max_age_in_seconds,
                exposed_headers=[to_native(y) for y in x.exposed_headers],
                allowed_headers=[to_native(y) for y in x.allowed_headers]
            ) for x in blob_service_props.cors.cors_rules]
        return account_dict

    def update_account(self):
        """Apply requested changes to an existing account, one attribute at a
        time (the update API only accepts one change per call)."""
        self.log('Update storage account {0}'.format(self.name))
        if bool(self.https_only) != bool(self.account_dict.get('https_only')):
            self.results['changed'] = True
            self.account_dict['https_only'] = self.https_only
            if not self.check_mode:
                try:
                    parameters = self.storage_models.StorageAccountUpdateParameters(enable_https_traffic_only=self.https_only)
                    self.storage_client.storage_accounts.update(self.resource_group,
                                                                self.name,
                                                                parameters)
                except Exception as exc:
                    self.fail("Failed to update account type: {0}".format(str(exc)))

        if self.account_type:
            if self.account_type != self.account_dict['sku_name']:
                # change the account type
                SkuName = self.storage_models.SkuName
                if self.account_dict['sku_name'] in [SkuName.premium_lrs, SkuName.standard_zrs]:
                    self.fail("Storage accounts of type {0} and {1} cannot be changed.".format(
                        SkuName.premium_lrs, SkuName.standard_zrs))
                if self.account_type in [SkuName.premium_lrs, SkuName.standard_zrs]:
                    self.fail("Storage account of type {0} cannot be changed to a type of {1} or {2}.".format(
                        self.account_dict['sku_name'], SkuName.premium_lrs, SkuName.standard_zrs))

                self.results['changed'] = True
                self.account_dict['sku_name'] = self.account_type

            if self.results['changed'] and not self.check_mode:
                # Perform the update. The API only allows changing one attribute per call.
                try:
                    self.log("sku_name: %s" % self.account_dict['sku_name'])
                    self.log("sku_tier: %s" % self.account_dict['sku_tier'])
                    # Use the fully-qualified SkuName here: the local alias
                    # above is only bound when the account type actually
                    # changes, but this block also runs when 'changed' was set
                    # by an earlier update (e.g. https_only), which previously
                    # raised a NameError.
                    sku = self.storage_models.Sku(name=self.storage_models.SkuName(self.account_dict['sku_name']))
                    sku.tier = self.storage_models.SkuTier(self.account_dict['sku_tier'])
                    parameters = self.storage_models.StorageAccountUpdateParameters(sku=sku)
                    self.storage_client.storage_accounts.update(self.resource_group,
                                                                self.name,
                                                                parameters)
                except Exception as exc:
                    self.fail("Failed to update account type: {0}".format(str(exc)))

        if self.custom_domain:
            if not self.account_dict['custom_domain'] or self.account_dict['custom_domain'] != self.custom_domain:
                self.results['changed'] = True
                self.account_dict['custom_domain'] = self.custom_domain

            if self.results['changed'] and not self.check_mode:
                new_domain = self.storage_models.CustomDomain(name=self.custom_domain['name'],
                                                              use_sub_domain=self.custom_domain['use_sub_domain'])
                parameters = self.storage_models.StorageAccountUpdateParameters(custom_domain=new_domain)
                try:
                    self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
                except Exception as exc:
                    self.fail("Failed to update custom domain: {0}".format(str(exc)))

        if self.access_tier:
            if not self.account_dict['access_tier'] or self.account_dict['access_tier'] != self.access_tier:
                self.results['changed'] = True
                self.account_dict['access_tier'] = self.access_tier

            if self.results['changed'] and not self.check_mode:
                parameters = self.storage_models.StorageAccountUpdateParameters(access_tier=self.access_tier)
                try:
                    self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
                except Exception as exc:
                    self.fail("Failed to update access tier: {0}".format(str(exc)))

        update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags'])
        if update_tags:
            self.results['changed'] = True
            if not self.check_mode:
                parameters = self.storage_models.StorageAccountUpdateParameters(tags=self.account_dict['tags'])
                try:
                    self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
                except Exception as exc:
                    self.fail("Failed to update tags: {0}".format(str(exc)))

        # blob_cors is compared as a multiset of rules; only differences
        # trigger a (destructive, full-replace) CORS update.
        if self.blob_cors and not compare_cors(self.account_dict.get('blob_cors', []), self.blob_cors):
            self.results['changed'] = True
            if not self.check_mode:
                self.set_blob_cors()

    def create_account(self):
        """Create the account and return its facts (a predicted dict in
        check mode)."""
        self.log("Creating account {0}".format(self.name))

        if not self.location:
            self.fail('Parameter error: location required when creating a storage account.')

        if not self.account_type:
            self.fail('Parameter error: account_type required when creating a storage account.')

        if not self.access_tier and self.kind == 'BlobStorage':
            self.fail('Parameter error: access_tier required when creating a storage account of type BlobStorage.')

        self.check_name_availability()
        self.results['changed'] = True

        if self.check_mode:
            # Return what the account would look like without creating it.
            account_dict = dict(
                location=self.location,
                account_type=self.account_type,
                name=self.name,
                resource_group=self.resource_group,
                enable_https_traffic_only=self.https_only,
                tags=dict()
            )
            if self.tags:
                account_dict['tags'] = self.tags
            if self.blob_cors:
                account_dict['blob_cors'] = self.blob_cors
            return account_dict
        sku = self.storage_models.Sku(name=self.storage_models.SkuName(self.account_type))
        sku.tier = self.storage_models.SkuTier.standard if 'Standard' in self.account_type else \
            self.storage_models.SkuTier.premium
        parameters = self.storage_models.StorageAccountCreateParameters(sku=sku,
                                                                        kind=self.kind,
                                                                        location=self.location,
                                                                        tags=self.tags,
                                                                        access_tier=self.access_tier)
        self.log(str(parameters))
        try:
            poller = self.storage_client.storage_accounts.create(self.resource_group, self.name, parameters)
            self.get_poller_result(poller)
        except CloudError as e:
            self.log('Error creating storage account.')
            self.fail("Failed to create account: {0}".format(str(e)))
        if self.blob_cors:
            self.set_blob_cors()
        # the poller doesn't actually return anything
        return self.get_account()

    def delete_account(self):
        """Delete the account, guarding against non-empty accounts unless
        force_delete_nonempty is set."""
        if self.account_dict['provisioning_state'] == self.storage_models.ProvisioningState.succeeded.value and \
           not self.force_delete_nonempty and self.account_has_blob_containers():
            self.fail("Account contains blob containers. Is it in use? Use the force_delete_nonempty option to attempt deletion.")

        self.log('Delete storage account {0}'.format(self.name))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                status = self.storage_client.storage_accounts.delete(self.resource_group, self.name)
                self.log("delete status: ")
                self.log(str(status))
            except CloudError as e:
                self.fail("Failed to delete the account: {0}".format(str(e)))
        return True

    def account_has_blob_containers(self):
        '''
        If there are blob containers, then there are likely VMs depending on this account and it should
        not be deleted.
        '''
        self.log('Checking for existing blob containers')
        blob_service = self.get_blob_client(self.resource_group, self.name)
        try:
            response = blob_service.list_containers()
        except AzureMissingResourceHttpError:
            # No blob storage available?
            return False

        if len(response.items) > 0:
            return True
        return False

    def set_blob_cors(self):
        """Replace the blob service's CORS rules with the blob_cors option."""
        try:
            cors_rules = self.storage_models.CorsRules(cors_rules=[self.storage_models.CorsRule(**x) for x in self.blob_cors])
            self.storage_client.blob_services.set_service_properties(self.resource_group,
                                                                     self.name,
                                                                     self.storage_models.BlobServiceProperties(cors=cors_rules))
        except Exception as exc:
            self.fail("Failed to set CORS rules: {0}".format(str(exc)))
+
+
def main():
    # Instantiating the module class runs the whole module (its base-class
    # __init__ drives exec_module and exits the process with the results).
    AzureRMStorageAccount()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_webapp.py b/test/support/integration/plugins/modules/azure_rm_webapp.py
new file mode 100644
index 0000000000..4f185f4580
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_webapp.py
@@ -0,0 +1,1070 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_webapp
+version_added: "2.7"
+short_description: Manage Web App instances
+description:
+ - Create, update and delete instance of Web App.
+
+options:
+ resource_group:
+ description:
+ - Name of the resource group to which the resource belongs.
+ required: True
+ name:
+ description:
+ - Unique name of the app to create or update. To create or update a deployment slot, use the {slot} parameter.
+ required: True
+
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+
+ plan:
+ description:
+ - App service plan. Required for creation.
+ - Can be name of existing app service plan in same resource group as web app.
+ - Can be the resource ID of an existing app service plan. For example
+ /subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>.
+ - Can be a dict containing five parameters, defined below.
+ - C(name), name of app service plan.
+ - C(resource_group), resource group of the app service plan.
+ - C(sku), SKU of app service plan, allowed values listed on U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/).
+ - C(is_linux), whether or not the app service plan is Linux. defaults to C(False).
+ - C(number_of_workers), number of workers for app service plan.
+
+ frameworks:
+ description:
+ - Set of run time framework settings. Each setting is a dictionary.
+ - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
+ suboptions:
+ name:
+ description:
+ - Name of the framework.
+ - Supported framework list for Windows web app and Linux web app is different.
+ - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
+ - Windows web apps support multiple framework at the same time.
+ - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
+ - Linux web apps support only one framework.
+ - Java framework is mutually exclusive with others.
+ choices:
+ - java
+ - net_framework
+ - php
+ - python
+ - ruby
+ - dotnetcore
+ - node
+ version:
+ description:
+ - Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info.
+ - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
+ - C(php) supported value sample, C(5.5), C(5.6), C(7.0).
+ - C(python) supported value sample, C(5.5), C(5.6), C(7.0).
+ - C(node) supported value sample, C(6.6), C(6.9).
+ - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
+ - C(ruby) supported value sample, C(2.3).
+ - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
+ settings:
+ description:
+ - List of settings of the framework.
+ suboptions:
+ java_container:
+ description:
+ - Name of Java container.
+ - Supported only when I(frameworks=java). Sample values C(Tomcat), C(Jetty).
+ java_container_version:
+ description:
+ - Version of Java container.
+ - Supported only when I(frameworks=java).
+ - Sample values for C(Tomcat), C(8.0), C(8.5), C(9.0). For C(Jetty,), C(9.1), C(9.3).
+
+ container_settings:
+ description:
+ - Web app container settings.
+ suboptions:
+ name:
+ description:
+ - Name of container, for example C(imagename:tag).
+ registry_server_url:
+ description:
+ - Container registry server URL, for example C(mydockerregistry.io).
+ registry_server_user:
+ description:
+ - The container registry server user name.
+ registry_server_password:
+ description:
+ - The container registry server password.
+
+ scm_type:
+ description:
+ - Repository type of deployment source, for example C(LocalGit), C(GitHub).
+ - List of supported values maintained at U(https://docs.microsoft.com/en-us/rest/api/appservice/webapps/createorupdate#scmtype).
+
+ deployment_source:
+ description:
+ - Deployment source for git.
+ suboptions:
+ url:
+ description:
+ - Repository url of deployment source.
+
+ branch:
+ description:
+ - The branch name of the repository.
+ startup_file:
+ description:
+ - The web's startup file.
+ - Used only for Linux web apps.
+
+ client_affinity_enabled:
+ description:
+ - Whether or not to send session affinity cookies, which route client requests in the same session to the same instance.
+ type: bool
+ default: True
+
+ https_only:
+ description:
+ - Configures web site to accept only https requests.
+ type: bool
+
+ dns_registration:
+ description:
+ - Whether or not the web app hostname is registered with DNS on creation. Set to C(false) to register.
+ type: bool
+
+ skip_custom_domain_verification:
+ description:
+ - Whether or not to skip verification of custom (non *.azurewebsites.net) domains associated with web app. Set to C(true) to skip.
+ type: bool
+
+ ttl_in_seconds:
+ description:
+ - Time to live in seconds for web app default domain name.
+
+ app_settings:
+ description:
+ - Configure web app application settings. Suboptions are in key value pair format.
+
+ purge_app_settings:
+ description:
+ - Purge any existing application settings. Replace web app application settings with app_settings.
+ type: bool
+
+ app_state:
+ description:
+ - Start/Stop/Restart the web app.
+ type: str
+ choices:
+ - started
+ - stopped
+ - restarted
+ default: started
+
+ state:
+ description:
+ - State of the Web App.
+ - Use C(present) to create or update a Web App and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Yunge Zhu (@yungezz)
+
+'''
+
# NOTE: fixed the stage-slot example below: `testkey:testvalue` was invalid
# YAML (missing space after the colon), inconsistent with every other example.
EXAMPLES = '''
    - name: Create a windows web app with non-exist app service plan
      azure_rm_webapp:
        resource_group: myResourceGroup
        name: myWinWebapp
        plan:
          resource_group: myAppServicePlan_rg
          name: myAppServicePlan
          is_linux: false
          sku: S1

    - name: Create a docker web app with some app settings, with docker image
      azure_rm_webapp:
        resource_group: myResourceGroup
        name: myDockerWebapp
        plan:
          resource_group: myAppServicePlan_rg
          name: myAppServicePlan
          is_linux: true
          sku: S1
          number_of_workers: 2
        app_settings:
          testkey: testvalue
          testkey2: testvalue2
        container_settings:
          name: ansible/ansible:ubuntu1404

    - name: Create a docker web app with private acr registry
      azure_rm_webapp:
        resource_group: myResourceGroup
        name: myDockerWebapp
        plan: myAppServicePlan
        app_settings:
          testkey: testvalue
        container_settings:
          name: ansible/ubuntu1404
          registry_server_url: myregistry.io
          registry_server_user: user
          registry_server_password: pass

    - name: Create a linux web app with Node 6.6 framework
      azure_rm_webapp:
        resource_group: myResourceGroup
        name: myLinuxWebapp
        plan:
          resource_group: myAppServicePlan_rg
          name: myAppServicePlan
        app_settings:
          testkey: testvalue
        frameworks:
          - name: "node"
            version: "6.6"

    - name: Create a windows web app with node, php
      azure_rm_webapp:
        resource_group: myResourceGroup
        name: myWinWebapp
        plan:
          resource_group: myAppServicePlan_rg
          name: myAppServicePlan
        app_settings:
          testkey: testvalue
        frameworks:
          - name: "node"
            version: 6.6
          - name: "php"
            version: "7.0"

    - name: Create a stage deployment slot for an existing web app
      azure_rm_webapp:
        resource_group: myResourceGroup
        name: myWebapp/slots/stage
        plan:
          resource_group: myAppServicePlan_rg
          name: myAppServicePlan
        app_settings:
          testkey: testvalue

    - name: Create a linux web app with java framework
      azure_rm_webapp:
        resource_group: myResourceGroup
        name: myLinuxWebapp
        plan:
          resource_group: myAppServicePlan_rg
          name: myAppServicePlan
        app_settings:
          testkey: testvalue
        frameworks:
          - name: "java"
            version: "8"
            settings:
              java_container: "Tomcat"
              java_container_version: "8.5"
'''
+
+RETURN = '''
+azure_webapp:
+ description:
+ - ID of current web app.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp"
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+ from azure.mgmt.web.models import (
+ site_config, app_service_plan, Site,
+ AppServicePlan, SkuDescription, NameValuePair
+ )
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
# Suboption schema for the container_settings module argument.
container_settings_spec = dict(
    name=dict(type='str', required=True),
    registry_server_url=dict(type='str'),
    registry_server_user=dict(type='str'),
    registry_server_password=dict(type='str', no_log=True)
)

# Suboption schema for the deployment_source module argument (git source).
deployment_source_spec = dict(
    url=dict(type='str'),
    branch=dict(type='str')
)


# Suboption schema for per-framework settings (Java container options).
framework_settings_spec = dict(
    java_container=dict(type='str', required=True),
    java_container_version=dict(type='str', required=True)
)


# Suboption schema for each entry of the frameworks list argument.
framework_spec = dict(
    name=dict(
        type='str',
        required=True,
        choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
    version=dict(type='str', required=True),
    settings=dict(type='dict', options=framework_settings_spec)
)
+
+
+def _normalize_sku(sku):
+ if sku is None:
+ return sku
+
+ sku = sku.upper()
+ if sku == 'FREE':
+ return 'F1'
+ elif sku == 'SHARED':
+ return 'D1'
+ return sku
+
+
def get_sku_name(tier):
    """Translate an app service plan SKU code (e.g. 'S1') to its tier name.

    Case-insensitive. Tier names themselves ('STANDARD', 'PREMIUM',
    'PREMIUMV2') are also accepted, mirroring the pre-existing handling of
    'FREE', 'SHARED' and 'BASIC' — previously only 'BASIC' had this
    treatment, which was inconsistent.

    :param tier: SKU code or tier name, any case.
    :return: canonical tier name, or None when unrecognized.
    """
    tier = tier.upper()
    if tier in ('F1', 'FREE'):
        return 'FREE'
    if tier in ('D1', 'SHARED'):
        return 'SHARED'
    if tier in ('B1', 'B2', 'B3', 'BASIC'):
        return 'BASIC'
    if tier in ('S1', 'S2', 'S3', 'STANDARD'):
        return 'STANDARD'
    if tier in ('P1', 'P2', 'P3', 'PREMIUM'):
        return 'PREMIUM'
    if tier in ('P1V2', 'P2V2', 'P3V2', 'PREMIUMV2'):
        return 'PREMIUMV2'
    return None
+
+
def appserviceplan_to_dict(plan):
    """Flatten an AppServicePlan SDK object into a plain dictionary."""
    return {
        'id': plan.id,
        'name': plan.name,
        'kind': plan.kind,
        'location': plan.location,
        'reserved': plan.reserved,
        # The SDK flags Linux plans via 'reserved'; expose it under both keys.
        'is_linux': plan.reserved,
        'provisioning_state': plan.provisioning_state,
        'tags': plan.tags if plan.tags else None,
    }
+
+
def webapp_to_dict(webapp):
    """Flatten a Site SDK object into a plain dictionary.

    Attributes that only exist on some SDK versions (https_only,
    skip_custom_domain_verification, ttl_in_seconds) are read defensively
    and reported as None when absent.
    """
    return {
        'id': webapp.id,
        'name': webapp.name,
        'location': webapp.location,
        'client_cert_enabled': webapp.client_cert_enabled,
        'enabled': webapp.enabled,
        'reserved': webapp.reserved,
        'client_affinity_enabled': webapp.client_affinity_enabled,
        'server_farm_id': webapp.server_farm_id,
        'host_names_disabled': webapp.host_names_disabled,
        'https_only': getattr(webapp, 'https_only', None),
        'skip_custom_domain_verification': getattr(webapp, 'skip_custom_domain_verification', None),
        'ttl_in_seconds': getattr(webapp, 'ttl_in_seconds', None),
        'state': webapp.state,
        'tags': webapp.tags if webapp.tags else None,
    }
+
+
class Actions:
    """Symbolic constants for the pending operations queued in to_do."""
    CreateOrUpdate = 0
    UpdateAppSettings = 1
    Delete = 2
+
+
class AzureRMWebApps(AzureRMModuleBase):
    """Configuration class for an Azure RM Web App resource"""

    def __init__(self):
        # Argument schema for the module; mirrors the DOCUMENTATION block.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            location=dict(
                type='str'
            ),
            # 'raw' because plan may be a name, a resource id string, or a dict.
            plan=dict(
                type='raw'
            ),
            frameworks=dict(
                type='list',
                elements='dict',
                options=framework_spec
            ),
            container_settings=dict(
                type='dict',
                options=container_settings_spec
            ),
            scm_type=dict(
                type='str',
            ),
            deployment_source=dict(
                type='dict',
                options=deployment_source_spec
            ),
            startup_file=dict(
                type='str'
            ),
            client_affinity_enabled=dict(
                type='bool',
                default=True
            ),
            dns_registration=dict(
                type='bool'
            ),
            https_only=dict(
                type='bool'
            ),
            skip_custom_domain_verification=dict(
                type='bool'
            ),
            ttl_in_seconds=dict(
                type='int'
            ),
            app_settings=dict(
                type='dict'
            ),
            purge_app_settings=dict(
                type='bool',
                default=False
            ),
            app_state=dict(
                type='str',
                choices=['started', 'stopped', 'restarted'],
                default='started'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # A web app is either container-based or framework-based, never both.
        mutually_exclusive = [['container_settings', 'frameworks']]

        self.resource_group = None
        self.name = None
        self.location = None

        # update in create_or_update as parameters
        self.client_affinity_enabled = True
        self.dns_registration = None
        self.skip_custom_domain_verification = None
        self.ttl_in_seconds = None
        self.https_only = None

        self.tags = None

        # site config, e.g app settings, ssl
        self.site_config = dict()
        self.app_settings = dict()
        # App settings currently stored on the service (name -> value),
        # populated lazily in exec_module via list_app_settings().
        self.app_settings_strDic = None

        # app service plan
        self.plan = None

        # siteSourceControl
        self.deployment_source = dict()

        # site, used at level creation, or update. e.g windows/linux, client_affinity etc first level args
        self.site = None

        # property for internal usage, not used for sdk
        self.container_settings = None

        self.purge_app_settings = False
        self.app_state = 'started'

        self.results = dict(
            changed=False,
            id=None,
        )
        self.state = None
        # Queue of Actions values describing the operations still to perform.
        self.to_do = []

        self.frameworks = None

        # set site_config value from kwargs
        self.site_config_updatable_properties = ["net_framework_version",
                                                 "java_version",
                                                 "php_version",
                                                 "python_version",
                                                 "scm_type"]

        # updatable_properties
        self.updatable_properties = ["client_affinity_enabled",
                                     "force_dns_registration",
                                     "https_only",
                                     "skip_custom_domain_verification",
                                     "ttl_in_seconds"]

        self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
        self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']

        super(AzureRMWebApps, self).__init__(derived_arg_spec=self.module_arg_spec,
                                             mutually_exclusive=mutually_exclusive,
                                             supports_check_mode=True,
                                             supports_tags=True)
+
    def exec_module(self, **kwargs):
        """Main module execution method"""

        # Copy module parameters onto matching instance attributes; scm_type
        # has no attribute and is routed straight into site_config instead.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                if key == "scm_type":
                    self.site_config[key] = kwargs[key]

        old_response = None
        response = None
        to_be_updated = False

        # set location
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            self.location = resource_group.location

        # get existing web app
        old_response = self.get_webapp()

        if old_response:
            self.results['id'] = old_response['id']

        if self.state == 'present':
            if not self.plan and not old_response:
                self.fail("Please specify plan for newly created web app.")

            # For an existing app, fall back to the plan it already uses.
            if not self.plan:
                self.plan = old_response['server_farm_id']

            self.plan = self.parse_resource_to_dict(self.plan)

            # get app service plan
            is_linux = False
            old_plan = self.get_app_service_plan()
            if old_plan:
                is_linux = old_plan['reserved']
            else:
                is_linux = self.plan['is_linux'] if 'is_linux' in self.plan else False

            if self.frameworks:
                # java is mutually exclusive with other frameworks
                if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
                    self.fail('Java is mutually exclusive with other frameworks.')

                if is_linux:
                    # Linux apps take exactly one framework, expressed as the
                    # linux_fx_version string "NAME|VERSION".
                    if len(self.frameworks) != 1:
                        self.fail('Can specify one framework only for Linux web app.')

                    if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
                        self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))

                    self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper()

                    if self.frameworks[0]['name'] == 'java':
                        if self.frameworks[0]['version'] != '8':
                            self.fail("Linux web app only supports java 8.")
                        if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() != 'tomcat':
                            self.fail("Linux web app only supports tomcat container.")

                        if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() == 'tomcat':
                            self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8'
                        else:
                            self.site_config['linux_fx_version'] = 'JAVA|8-jre8'
                else:
                    # Windows apps take several frameworks, each expressed as
                    # a "<name>_version" site_config entry.
                    for fx in self.frameworks:
                        if fx.get('name') not in self.supported_windows_frameworks:
                            self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name')))
                        else:
                            self.site_config[fx.get('name') + '_version'] = fx.get('version')

                        if 'settings' in fx and fx['settings'] is not None:
                            for key, value in fx['settings'].items():
                                self.site_config[key] = value

            if not self.app_settings:
                self.app_settings = dict()

            if self.container_settings:
                # Container apps encode image and registry as
                # "DOCKER|[registry/]image"; credentials go into app settings.
                linux_fx_version = 'DOCKER|'

                if self.container_settings.get('registry_server_url'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']

                    linux_fx_version += self.container_settings['registry_server_url'] + '/'

                linux_fx_version += self.container_settings['name']

                self.site_config['linux_fx_version'] = linux_fx_version

                if self.container_settings.get('registry_server_user'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user']

                if self.container_settings.get('registry_server_password'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password']

            # init site
            self.site = Site(location=self.location, site_config=self.site_config)

            if self.https_only is not None:
                self.site.https_only = self.https_only

            if self.client_affinity_enabled:
                self.site.client_affinity_enabled = self.client_affinity_enabled

            # check if the web app already present in the resource group
            if not old_response:
                self.log("Web App instance doesn't exist")

                to_be_updated = True
                self.to_do.append(Actions.CreateOrUpdate)
                self.site.tags = self.tags

                # service plan is required for creation
                if not self.plan:
                    self.fail("Please specify app service plan in plan parameter.")

                if not old_plan:
                    # no existing service plan, create one
                    if (not self.plan.get('name') or not self.plan.get('sku')):
                        self.fail('Please specify name, is_linux, sku in plan')

                    if 'location' not in self.plan:
                        plan_resource_group = self.get_resource_group(self.plan['resource_group'])
                        self.plan['location'] = plan_resource_group.location

                    old_plan = self.create_app_service_plan()

                self.site.server_farm_id = old_plan['id']

                # if linux, setup startup_file
                if old_plan['is_linux']:
                    if hasattr(self, 'startup_file'):
                        self.site_config['app_command_line'] = self.startup_file

                # set app setting
                if self.app_settings:
                    app_settings = []
                    for key in self.app_settings.keys():
                        app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))

                    self.site_config['app_settings'] = app_settings
            else:
                # existing web app, do update
                self.log("Web App instance already exists")

                self.log('Result: {0}'.format(old_response))

                update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))

                if update_tags:
                    to_be_updated = True

                # check if root level property changed
                if self.is_updatable_property_changed(old_response):
                    to_be_updated = True
                    self.to_do.append(Actions.CreateOrUpdate)

                # check if site_config changed
                old_config = self.get_webapp_configuration()

                if self.is_site_config_changed(old_config):
                    to_be_updated = True
                    self.to_do.append(Actions.CreateOrUpdate)

                # check if linux_fx_version changed
                if old_config.linux_fx_version != self.site_config.get('linux_fx_version', ''):
                    to_be_updated = True
                    self.to_do.append(Actions.CreateOrUpdate)

                self.app_settings_strDic = self.list_app_settings()

                # purge existing app_settings:
                if self.purge_app_settings:
                    to_be_updated = True
                    self.app_settings_strDic = dict()
                    self.to_do.append(Actions.UpdateAppSettings)

                # check if app settings changed
                if self.purge_app_settings or self.is_app_settings_changed():
                    to_be_updated = True
                    self.to_do.append(Actions.UpdateAppSettings)

                    if self.app_settings:
                        for key in self.app_settings.keys():
                            self.app_settings_strDic[key] = self.app_settings[key]

        elif self.state == 'absent':
            if old_response:
                self.log("Delete Web App instance")
                self.results['changed'] = True

                if self.check_mode:
                    return self.results

                self.delete_webapp()

                self.log('Web App instance deleted')

            else:
                self.fail("Web app {0} not exists.".format(self.name))

        if to_be_updated:
            self.log('Need to Create/Update web app')
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            if Actions.CreateOrUpdate in self.to_do:
                response = self.create_update_webapp()

                self.results['id'] = response['id']

            if Actions.UpdateAppSettings in self.to_do:
                update_response = self.update_app_settings()
                self.results['id'] = update_response.id

        # Finally reconcile the running state (started/stopped/restarted)
        # against whichever snapshot of the app we have.
        webapp = None
        if old_response:
            webapp = old_response
        if response:
            webapp = response

        if webapp:
            if (webapp['state'] != 'Stopped' and self.app_state == 'stopped') or \
               (webapp['state'] != 'Running' and self.app_state == 'started') or \
               self.app_state == 'restarted':

                self.results['changed'] = True
                if self.check_mode:
                    return self.results

                self.set_webapp_state(self.app_state)

        return self.results
+
+ # compare existing web app with input, determine weather it's update operation
+ def is_updatable_property_changed(self, existing_webapp):
+ for property_name in self.updatable_properties:
+ if hasattr(self, property_name) and getattr(self, property_name) is not None and \
+ getattr(self, property_name) != existing_webapp.get(property_name, None):
+ return True
+
+ return False
+
+ # compare xxx_version
+ def is_site_config_changed(self, existing_config):
+ for fx_version in self.site_config_updatable_properties:
+ if self.site_config.get(fx_version):
+ if not getattr(existing_config, fx_version) or \
+ getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper():
+ return True
+
+ return False
+
+ # comparing existing app setting with input, determine whether it's changed
+ def is_app_settings_changed(self):
+ if self.app_settings:
+ if self.app_settings_strDic:
+ for key in self.app_settings.keys():
+ if self.app_settings[key] != self.app_settings_strDic.get(key, None):
+ return True
+ else:
+ return True
+ return False
+
+ # comparing deployment source with input, determine wheather it's changed
+ def is_deployment_source_changed(self, existing_webapp):
+ if self.deployment_source:
+ if self.deployment_source.get('url') \
+ and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']:
+ return True
+
+ if self.deployment_source.get('branch') \
+ and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']:
+ return True
+
+ return False
+
    def create_update_webapp(self):
        '''
        Creates or updates Web App with the specified configuration.

        :return: deserialized Web App instance state dictionary
        '''
        self.log(
            "Creating / Updating the Web App instance {0}".format(self.name))

        try:
            # The single dns_registration option drives two inverse SDK flags.
            skip_dns_registration = self.dns_registration
            force_dns_registration = None if self.dns_registration is None else not self.dns_registration

            response = self.web_client.web_apps.create_or_update(resource_group_name=self.resource_group,
                                                                 name=self.name,
                                                                 site_envelope=self.site,
                                                                 skip_dns_registration=skip_dns_registration,
                                                                 skip_custom_domain_verification=self.skip_custom_domain_verification,
                                                                 force_dns_registration=force_dns_registration,
                                                                 ttl_in_seconds=self.ttl_in_seconds)
            # Long-running operations come back as a poller; wait for the result.
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)

        except CloudError as exc:
            self.log('Error attempting to create the Web App instance.')
            self.fail(
                "Error creating the Web App instance: {0}".format(str(exc)))
        return webapp_to_dict(response)
+
+ def delete_webapp(self):
+ '''
+ Deletes specified Web App instance in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the Web App instance {0}".format(self.name))
+ try:
+ response = self.web_client.web_apps.delete(resource_group_name=self.resource_group,
+ name=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete the Web App instance.')
+ self.fail(
+ "Error deleting the Web App instance: {0}".format(str(e)))
+
+ return True
+
    def get_webapp(self):
        '''
        Gets the properties of the specified Web App.

        :return: deserialized Web App instance state dictionary, or False when
                 the web app does not exist
        '''
        self.log(
            "Checking if the Web App instance {0} is present".format(self.name))

        response = None

        try:
            response = self.web_client.web_apps.get(resource_group_name=self.resource_group,
                                                    name=self.name)

            # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
            if response is not None:
                self.log("Response : {0}".format(response))
                self.log("Web App instance : {0} found".format(response.name))
                return webapp_to_dict(response)

        except CloudError as ex:
            # Older SDKs raise when the app is absent; fall through to the
            # not-found path below.
            pass

        self.log("Didn't find web app {0} in resource group {1}".format(
            self.name, self.resource_group))

        return False
+
    def get_app_service_plan(self):
        '''
        Gets app service plan

        :return: deserialized app service plan dictionary, or False when the
                 plan does not exist
        '''
        self.log("Get App Service Plan {0}".format(self.plan['name']))

        try:
            response = self.web_client.app_service_plans.get(
                resource_group_name=self.plan['resource_group'],
                name=self.plan['name'])

            # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
            if response is not None:
                self.log("Response : {0}".format(response))
                self.log("App Service Plan : {0} found".format(response.name))

                return appserviceplan_to_dict(response)
        except CloudError as ex:
            # Older SDKs raise when the plan is absent; fall through to the
            # not-found path below.
            pass

        self.log("Didn't find app service plan {0} in resource group {1}".format(
            self.plan['name'], self.plan['resource_group']))

        return False
+
+ def create_app_service_plan(self):
+ '''
+ Creates app service plan
+ :return: deserialized app service plan dictionary
+ '''
+ self.log("Create App Service Plan {0}".format(self.plan['name']))
+
+ try:
+ # normalize sku
+ sku = _normalize_sku(self.plan['sku'])
+
+ sku_def = SkuDescription(tier=get_sku_name(
+ sku), name=sku, capacity=(self.plan.get('number_of_workers', None)))
+ plan_def = AppServicePlan(
+ location=self.plan['location'], app_service_plan_name=self.plan['name'], sku=sku_def, reserved=(self.plan.get('is_linux', None)))
+
+ poller = self.web_client.app_service_plans.create_or_update(
+ self.plan['resource_group'], self.plan['name'], plan_def)
+
+ if isinstance(poller, LROPoller):
+ response = self.get_poller_result(poller)
+
+ self.log("Response : {0}".format(response))
+
+ return appserviceplan_to_dict(response)
+ except CloudError as ex:
+ self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(
+ self.plan['name'], self.plan['resource_group'], str(ex)))
+
+ def list_app_settings(self):
+ '''
+ List application settings
+ :return: deserialized list response
+ '''
+ self.log("List application setting")
+
+ try:
+
+ response = self.web_client.web_apps.list_application_settings(
+ resource_group_name=self.resource_group, name=self.name)
+ self.log("Response : {0}".format(response))
+
+ return response.properties
+ except CloudError as ex:
+ self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def update_app_settings(self):
+ '''
+ Update application settings
+ :return: deserialized updating response
+ '''
+ self.log("Update application setting")
+
+ try:
+ response = self.web_client.web_apps.update_application_settings(
+ resource_group_name=self.resource_group, name=self.name, properties=self.app_settings_strDic)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.fail("Failed to update application settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def create_or_update_source_control(self):
+ '''
+ Update site source control
+ :return: deserialized updating response
+ '''
+ self.log("Update site source control")
+
+ if self.deployment_source is None:
+ return False
+
+ self.deployment_source['is_manual_integration'] = False
+ self.deployment_source['is_mercurial'] = False
+
+ try:
+ response = self.web_client.web_client.create_or_update_source_control(
+ self.resource_group, self.name, self.deployment_source)
+ self.log("Response : {0}".format(response))
+
+ return response.as_dict()
+ except CloudError as ex:
+ self.fail("Failed to update site source control for web app {0} in resource group {1}".format(
+ self.name, self.resource_group))
+
+ def get_webapp_configuration(self):
+ '''
+ Get web app configuration
+ :return: deserialized web app configuration response
+ '''
+ self.log("Get web app configuration")
+
+ try:
+
+ response = self.web_client.web_apps.get_configuration(
+ resource_group_name=self.resource_group, name=self.name)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.log("Failed to get configuration for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ return False
+
+ def set_webapp_state(self, appstate):
+ '''
+ Start/stop/restart web app
+ :return: deserialized updating response
+ '''
+ try:
+ if appstate == 'started':
+ response = self.web_client.web_apps.start(resource_group_name=self.resource_group, name=self.name)
+ elif appstate == 'stopped':
+ response = self.web_client.web_apps.stop(resource_group_name=self.resource_group, name=self.name)
+ elif appstate == 'restarted':
+ response = self.web_client.web_apps.restart(resource_group_name=self.resource_group, name=self.name)
+ else:
+ self.fail("Invalid web app state {0}".format(appstate))
+
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ request_id = ex.request_id if ex.request_id else ''
+ self.log("Failed to {0} web app {1} in resource group {2}, request_id {3} - {4}".format(
+ appstate, self.name, self.resource_group, request_id, str(ex)))
+
+
def main():
    """Module entry point; constructing the module object runs it."""
    AzureRMWebApps()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_webapp_info.py b/test/support/integration/plugins/modules/azure_rm_webapp_info.py
new file mode 100644
index 0000000000..4a3b4cd484
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_webapp_info.py
@@ -0,0 +1,488 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_webapp_info
+
+version_added: "2.9"
+
+short_description: Get Azure web app facts
+
+description:
+ - Get facts for a specific web app or all web app in a resource group, or all web app in current subscription.
+
+options:
+ name:
+ description:
+ - Only show results for a specific web app.
+ resource_group:
+ description:
+ - Limit results by resource group.
+ return_publish_profile:
+ description:
+ - Indicate whether to return publishing profile of the web app.
+ default: False
+ type: bool
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Yunge Zhu (@yungezz)
+'''
+
+EXAMPLES = '''
+ - name: Get facts for web app by name
+ azure_rm_webapp_info:
+ resource_group: myResourceGroup
+ name: winwebapp1
+
+ - name: Get facts for web apps in resource group
+ azure_rm_webapp_info:
+ resource_group: myResourceGroup
+
+ - name: Get facts for web apps with tags
+ azure_rm_webapp_info:
+ tags:
+ - testtag
+ - foo:bar
+'''
+
+RETURN = '''
+webapps:
+ description:
+ - List of web apps.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - ID of the web app.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp
+ name:
+ description:
+ - Name of the web app.
+ returned: always
+ type: str
+ sample: winwebapp1
+ resource_group:
+ description:
+ - Resource group of the web app.
+ returned: always
+ type: str
+ sample: myResourceGroup
+ location:
+ description:
+ - Location of the web app.
+ returned: always
+ type: str
+ sample: eastus
+ plan:
+ description:
+ - ID of app service plan used by the web app.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppServicePlan
+ app_settings:
+ description:
+ - App settings of the application. Only returned when web app has app settings.
+ returned: always
+ type: dict
+ sample: {
+ "testkey": "testvalue",
+ "testkey2": "testvalue2"
+ }
+ frameworks:
+ description:
+ - Frameworks of the application. Only returned when web app has frameworks.
+ returned: always
+ type: list
+ sample: [
+ {
+ "name": "net_framework",
+ "version": "v4.0"
+ },
+ {
+ "name": "java",
+ "settings": {
+ "java_container": "tomcat",
+ "java_container_version": "8.5"
+ },
+ "version": "1.7"
+ },
+ {
+ "name": "php",
+ "version": "5.6"
+ }
+ ]
+ availability_state:
+ description:
+ - Availability of this web app.
+ returned: always
+ type: str
+ sample: Normal
+ default_host_name:
+ description:
+ - Host name of the web app.
+ returned: always
+ type: str
+ sample: vxxisurg397winapp4.azurewebsites.net
+ enabled:
+ description:
+ - Indicates the web app enabled or not.
+ returned: always
+ type: bool
+ sample: true
+ enabled_host_names:
+ description:
+ - Enabled host names of the web app.
+ returned: always
+ type: list
+ sample: [
+ "vxxisurg397winapp4.azurewebsites.net",
+ "vxxisurg397winapp4.scm.azurewebsites.net"
+ ]
+ host_name_ssl_states:
+ description:
+ - SSL state per host names of the web app.
+ returned: always
+ type: list
+ sample: [
+ {
+ "hostType": "Standard",
+ "name": "vxxisurg397winapp4.azurewebsites.net",
+ "sslState": "Disabled"
+ },
+ {
+ "hostType": "Repository",
+ "name": "vxxisurg397winapp4.scm.azurewebsites.net",
+ "sslState": "Disabled"
+ }
+ ]
+ host_names:
+ description:
+ - Host names of the web app.
+ returned: always
+ type: list
+ sample: [
+ "vxxisurg397winapp4.azurewebsites.net"
+ ]
+ outbound_ip_addresses:
+ description:
+ - Outbound IP address of the web app.
+ returned: always
+ type: str
+ sample: "40.71.11.131,40.85.166.200,168.62.166.67,137.135.126.248,137.135.121.45"
+ ftp_publish_url:
+ description:
+ - Publishing URL of the web app when deployment type is FTP.
+ returned: always
+ type: str
+ sample: ftp://xxxx.ftp.azurewebsites.windows.net
+ state:
+ description:
+ - State of the web app.
+ returned: always
+ type: str
+ sample: running
+ publishing_username:
+ description:
+ - Publishing profile user name.
+ returned: only when I(return_publish_profile=True).
+ type: str
+ sample: "$vxxisuRG397winapp4"
+ publishing_password:
+ description:
+ - Publishing profile password.
+ returned: only when I(return_publish_profile=True).
+ type: str
+ sample: "uvANsPQpGjWJmrFfm4Ssd5rpBSqGhjMk11pMSgW2vCsQtNx9tcgZ0xN26s9A"
+ tags:
+ description:
+ - Tags assigned to the resource. Dictionary of string:string pairs.
+ returned: always
+ type: dict
+ sample: { tag1: abc }
+'''
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from azure.common import AzureMissingResourceHttpError, AzureHttpError
+except Exception:
+ # This is handled in azure_rm_common
+ pass
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+AZURE_OBJECT_CLASS = 'WebApp'
+
+
class AzureRMWebAppInfo(AzureRMModuleBase):
    """Info module collecting facts about Azure web apps, filtered by name, resource group and/or tags."""

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list'),
            return_publish_profile=dict(type='bool', default=False),
        )

        self.results = dict(
            changed=False,
            webapps=[],
        )

        self.name = None
        self.resource_group = None
        self.tags = None
        self.return_publish_profile = False

        # Site-config keys named '<framework>_version' are surfaced as 'frameworks' facts.
        self.framework_names = ['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']

        super(AzureRMWebAppInfo, self).__init__(self.module_arg_spec,
                                                supports_tags=False,
                                                facts_module=True)

    def exec_module(self, **kwargs):
        """Dispatch to the narrowest listing that matches the provided filters."""
        is_old_facts = self.module._name == 'azure_rm_webapp_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_webapp_facts' module has been renamed to 'azure_rm_webapp_info'", version='2.13')

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.name:
            self.results['webapps'] = self.list_by_name()
        elif self.resource_group:
            self.results['webapps'] = self.list_by_resource_group()
        else:
            self.results['webapps'] = self.list_all()

        return self.results

    def list_by_name(self):
        """Return a single-element list with the named web app, or [] when absent or filtered out by tags."""
        self.log('Get web app {0}'.format(self.name))
        item = None
        result = []

        try:
            item = self.web_client.web_apps.get(self.resource_group, self.name)
        except CloudError:
            # A missing web app is not an error for an info module.
            pass

        if item and self.has_tags(item.tags, self.tags):
            curated_result = self.get_curated_webapp(self.resource_group, self.name, item)
            result = [curated_result]

        return result

    def list_by_resource_group(self):
        """Return curated facts for every web app in the resource group matching the tag filter."""
        self.log('List web apps in resource groups {0}'.format(self.resource_group))
        try:
            response = list(self.web_client.web_apps.list_by_resource_group(self.resource_group))
        except CloudError as exc:
            request_id = exc.request_id if exc.request_id else ''
            self.fail("Error listing web apps in resource groups {0}, request id: {1} - {2}".format(self.resource_group, request_id, str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                curated_output = self.get_curated_webapp(self.resource_group, item.name, item)
                results.append(curated_output)
        return results

    def list_all(self):
        """Return curated facts for every web app in the current subscription matching the tag filter."""
        self.log('List web apps in current subscription')
        try:
            response = list(self.web_client.web_apps.list())
        except CloudError as exc:
            request_id = exc.request_id if exc.request_id else ''
            self.fail("Error listing web apps, request id {0} - {1}".format(request_id, str(exc)))

        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                curated_output = self.get_curated_webapp(item.resource_group, item.name, item)
                results.append(curated_output)
        return results

    def list_webapp_configuration(self, resource_group, name):
        """Return the web app's site configuration as a dict; fail the module on error."""
        self.log('Get web app {0} configuration'.format(name))

        response = []

        try:
            response = self.web_client.web_apps.get_configuration(resource_group_name=resource_group, name=name)
        except CloudError as ex:
            request_id = ex.request_id if ex.request_id else ''
            self.fail('Error getting web app {0} configuration, request id {1} - {2}'.format(name, request_id, str(ex)))

        return response.as_dict()

    def list_webapp_appsettings(self, resource_group, name):
        """Return the web app's application settings as a dict; fail the module on error."""
        self.log('Get web app {0} app settings'.format(name))

        response = []

        try:
            response = self.web_client.web_apps.list_application_settings(resource_group_name=resource_group, name=name)
        except CloudError as ex:
            request_id = ex.request_id if ex.request_id else ''
            self.fail('Error getting web app {0} app settings, request id {1} - {2}'.format(name, request_id, str(ex)))

        return response.as_dict()

    def get_publish_credentials(self, resource_group, name):
        """Return the web app's publishing credentials, resolving the LRO poller when one is returned."""
        self.log('Get web app {0} publish credentials'.format(name))
        try:
            # BUGFIX: depending on the SDK version this call may return the result
            # directly instead of an LROPoller; the original left 'response' unbound
            # in that case (NameError at return).
            response = self.web_client.web_apps.list_publishing_credentials(resource_group, name)
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except CloudError as ex:
            request_id = ex.request_id if ex.request_id else ''
            # BUGFIX: the original formatted request_id into the app-name placeholder
            # and dropped the app name from the message entirely.
            self.fail('Error getting web app {0} publishing credentials, request id {1} - {2}'.format(name, request_id, str(ex)))
        return response

    def get_webapp_ftp_publish_url(self, resource_group, name):
        """Return the FTP publish URL from the web app's publishing profile, or None when absent."""
        import xmltodict

        self.log('Get web app {0} app publish profile'.format(name))

        url = None
        try:
            content = self.web_client.web_apps.list_publishing_profile_xml_with_secrets(resource_group_name=resource_group, name=name)
            if not content:
                return url

            # The SDK streams the profile XML in byte chunks; reassemble before parsing.
            full_xml = ''
            for f in content:
                full_xml += f.decode()
            profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']

            if not profiles:
                return url

            for profile in profiles:
                if profile['@publishMethod'] == 'FTP':
                    url = profile['@publishUrl']

        except CloudError as ex:
            # BUGFIX: the original message said "app settings" (copy-paste error)
            # and silently discarded the exception details.
            self.fail('Error getting web app {0} publish profile - {1}'.format(name, str(ex)))

        return url

    def get_curated_webapp(self, resource_group, name, webapp):
        """Assemble the curated fact dictionary for one web app."""
        pip = self.serialize_obj(webapp, AZURE_OBJECT_CLASS)

        # BUGFIX: pre-initialize so a swallowed CloudError cannot leave these
        # names unbound (NameError) in the construct call below.
        site_config = None
        app_settings = None
        publish_cred = None
        ftp_publish_url = None
        try:
            site_config = self.list_webapp_configuration(resource_group, name)
            app_settings = self.list_webapp_appsettings(resource_group, name)
            publish_cred = self.get_publish_credentials(resource_group, name)
            ftp_publish_url = self.get_webapp_ftp_publish_url(resource_group, name)
        except CloudError:
            # Partial facts are acceptable; missing pieces stay None.
            pass
        return self.construct_curated_webapp(webapp=pip,
                                             configuration=site_config,
                                             app_settings=app_settings,
                                             deployment_slot=None,
                                             ftp_publish_url=ftp_publish_url,
                                             publish_credentials=publish_cred)

    def construct_curated_webapp(self,
                                 webapp,
                                 configuration=None,
                                 app_settings=None,
                                 deployment_slot=None,
                                 ftp_publish_url=None,
                                 publish_credentials=None):
        """Build the output fact dict from the serialized web app and its auxiliary lookups."""
        curated_output = dict()
        curated_output['id'] = webapp['id']
        curated_output['name'] = webapp['name']
        curated_output['resource_group'] = webapp['properties']['resourceGroup']
        curated_output['location'] = webapp['location']
        curated_output['plan'] = webapp['properties']['serverFarmId']
        curated_output['tags'] = webapp.get('tags', None)

        # important properties from output. not match input arguments.
        curated_output['app_state'] = webapp['properties']['state']
        curated_output['availability_state'] = webapp['properties']['availabilityState']
        curated_output['default_host_name'] = webapp['properties']['defaultHostName']
        curated_output['host_names'] = webapp['properties']['hostNames']
        curated_output['enabled'] = webapp['properties']['enabled']
        curated_output['enabled_host_names'] = webapp['properties']['enabledHostNames']
        curated_output['host_name_ssl_states'] = webapp['properties']['hostNameSslStates']
        curated_output['outbound_ip_addresses'] = webapp['properties']['outboundIpAddresses']

        # curated site_config
        if configuration:
            curated_output['frameworks'] = []
            for fx_name in self.framework_names:
                fx_version = configuration.get(fx_name + '_version', None)
                if fx_version:
                    fx = {
                        'name': fx_name,
                        'version': fx_version
                    }
                    # java container setting
                    if fx_name == 'java':
                        if configuration['java_container'] and configuration['java_container_version']:
                            settings = {
                                'java_container': configuration['java_container'].lower(),
                                'java_container_version': configuration['java_container_version']
                            }
                            fx['settings'] = settings

                    curated_output['frameworks'].append(fx)

            # linux_fx_version is e.g. 'PHP|7.2'; split into name and version.
            if configuration.get('linux_fx_version', None):
                tmp = configuration.get('linux_fx_version').split("|")
                if len(tmp) == 2:
                    curated_output['frameworks'].append({'name': tmp[0].lower(), 'version': tmp[1]})

        # curated app_settings
        if app_settings and app_settings.get('properties', None):
            curated_output['app_settings'] = dict()
            for item in app_settings['properties']:
                curated_output['app_settings'][item] = app_settings['properties'][item]

        # curated deploymenet_slot
        if deployment_slot:
            curated_output['deployment_slot'] = deployment_slot

        # ftp_publish_url
        if ftp_publish_url:
            curated_output['ftp_publish_url'] = ftp_publish_url

        # publishing credentials are secrets; only expose when explicitly requested.
        if publish_credentials and self.return_publish_profile:
            curated_output['publishing_username'] = publish_credentials.publishing_user_name
            curated_output['publishing_password'] = publish_credentials.publishing_password
        return curated_output
+
+
def main():
    """Module entry point: instantiating AzureRMWebAppInfo runs the fact collection."""
    AzureRMWebAppInfo()


# Run only when invoked directly by Ansible, not on import.
if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/azure_rm_webappslot.py b/test/support/integration/plugins/modules/azure_rm_webappslot.py
new file mode 100644
index 0000000000..ddba710b9d
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_webappslot.py
@@ -0,0 +1,1058 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_webappslot
+version_added: "2.8"
+short_description: Manage Azure Web App slot
+description:
+ - Create, update and delete Azure Web App slot.
+
+options:
+ resource_group:
+ description:
+ - Name of the resource group to which the resource belongs.
+ required: True
+ name:
+ description:
+ - Unique name of the deployment slot to create or update.
+ required: True
+ webapp_name:
+ description:
+ - Web app name which this deployment slot belongs to.
+ required: True
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+ configuration_source:
+ description:
+ - Source slot to clone configurations from when creating slot. Use webapp's name to refer to the production slot.
+ auto_swap_slot_name:
+ description:
+ - Used to configure target slot name to auto swap, or disable auto swap.
+ - Set it target slot name to auto swap.
+ - Set it to False to disable auto slot swap.
+ swap:
+ description:
+ - Swap deployment slots of a web app.
+ suboptions:
+ action:
+ description:
+ - Swap types.
+ - C(preview) is to apply target slot settings on source slot first.
+ - C(swap) is to complete swapping.
+ - C(reset) is to reset the swap.
+ choices:
+ - preview
+ - swap
+ - reset
+ default: preview
+ target_slot:
+ description:
+ - Name of target slot to swap. If set to None, then swap with production slot.
+ preserve_vnet:
+ description:
+ - C(True) to preserve virtual network to the slot during swap. Otherwise C(False).
+ type: bool
+ default: True
+ frameworks:
+ description:
+ - Set of run time framework settings. Each setting is a dictionary.
+ - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
+ suboptions:
+ name:
+ description:
+ - Name of the framework.
+ - Supported framework list for Windows web app and Linux web app is different.
+ - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
+ - Windows web apps support multiple framework at same time.
+ - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
+ - Linux web apps support only one framework.
+ - Java framework is mutually exclusive with others.
+ choices:
+ - java
+ - net_framework
+ - php
+ - python
+ - ruby
+ - dotnetcore
+ - node
+ version:
+ description:
+ - Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info.
+ - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
+ - C(php) supported value sample, C(5.5), C(5.6), C(7.0).
+ - C(python) supported value sample, C(5.5), C(5.6), C(7.0).
+ - C(node) supported value sample, C(6.6), C(6.9).
+ - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
+ - C(ruby) supported value sample, 2.3.
+ - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
+ settings:
+ description:
+ - List of settings of the framework.
+ suboptions:
+ java_container:
+ description:
+ - Name of Java container. This is supported by specific framework C(java) onlys, for example C(Tomcat), C(Jetty).
+ java_container_version:
+ description:
+ - Version of Java container. This is supported by specific framework C(java) only.
+ - For C(Tomcat), for example C(8.0), C(8.5), C(9.0). For C(Jetty), for example C(9.1), C(9.3).
+ container_settings:
+ description:
+ - Web app slot container settings.
+ suboptions:
+ name:
+ description:
+ - Name of container, for example C(imagename:tag).
+ registry_server_url:
+ description:
+ - Container registry server URL, for example C(mydockerregistry.io).
+ registry_server_user:
+ description:
+ - The container registry server user name.
+ registry_server_password:
+ description:
+ - The container registry server password.
+ startup_file:
+ description:
+ - The slot startup file.
+ - This only applies for Linux web app slot.
+ app_settings:
+ description:
+ - Configure web app slot application settings. Suboptions are in key value pair format.
+ purge_app_settings:
+ description:
+ - Purge any existing application settings. Replace slot application settings with app_settings.
+ type: bool
+ deployment_source:
+ description:
+ - Deployment source for git.
+ suboptions:
+ url:
+ description:
+ - Repository URL of deployment source.
+ branch:
+ description:
+ - The branch name of the repository.
+ app_state:
+ description:
+ - Start/Stop/Restart the slot.
+ type: str
+ choices:
+ - started
+ - stopped
+ - restarted
+ default: started
+ state:
+ description:
+ - State of the Web App deployment slot.
+ - Use C(present) to create or update a slot and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Yunge Zhu(@yungezz)
+
+'''
+
# BUGFIX: corrected the "udpate" typo in two task names.
EXAMPLES = '''
  - name: Create a webapp slot
    azure_rm_webappslot:
      resource_group: myResourceGroup
      webapp_name: myJavaWebApp
      name: stage
      configuration_source: myJavaWebApp
      app_settings:
        testkey: testvalue

  - name: swap the slot with production slot
    azure_rm_webappslot:
      resource_group: myResourceGroup
      webapp_name: myJavaWebApp
      name: stage
      swap:
        action: swap

  - name: stop the slot
    azure_rm_webappslot:
      resource_group: myResourceGroup
      webapp_name: myJavaWebApp
      name: stage
      app_state: stopped

  - name: update a webapp slot app settings
    azure_rm_webappslot:
      resource_group: myResourceGroup
      webapp_name: myJavaWebApp
      name: stage
      app_settings:
        testkey: testvalue2

  - name: update a webapp slot frameworks
    azure_rm_webappslot:
      resource_group: myResourceGroup
      webapp_name: myJavaWebApp
      name: stage
      frameworks:
        - name: "node"
          version: "10.1"
'''
+
+RETURN = '''
+id:
+ description:
+ - ID of current slot.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/testapp/slots/stage1
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+ from azure.mgmt.web.models import (
+ site_config, app_service_plan, Site,
+ AppServicePlan, SkuDescription, NameValuePair
+ )
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
# Suboption spec for the 'swap' option: how to swap this slot with another slot.
swap_spec = dict(
    action=dict(
        type='str',
        choices=[
            'preview',
            'swap',
            'reset'
        ],
        default='preview'
    ),
    target_slot=dict(
        type='str'
    ),
    preserve_vnet=dict(
        type='bool',
        default=True
    )
)

# Suboption spec for 'container_settings': docker image name and registry credentials.
container_settings_spec = dict(
    name=dict(type='str', required=True),
    registry_server_url=dict(type='str'),
    registry_server_user=dict(type='str'),
    registry_server_password=dict(type='str', no_log=True)
)

# Suboption spec for 'deployment_source': git repository URL and branch.
deployment_source_spec = dict(
    url=dict(type='str'),
    branch=dict(type='str')
)


# Suboption spec for a framework's 'settings' (java container only).
framework_settings_spec = dict(
    java_container=dict(type='str', required=True),
    java_container_version=dict(type='str', required=True)
)


# Spec for one entry of the 'frameworks' list option.
framework_spec = dict(
    name=dict(
        type='str',
        required=True,
        choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
    version=dict(type='str', required=True),
    settings=dict(type='dict', options=framework_settings_spec)
)
+
+
def webapp_to_dict(webapp):
    """Serialize a web app SDK object into a plain dict of facts."""
    # Some attributes only exist on newer SDK models; default them to None.
    return {
        'id': webapp.id,
        'name': webapp.name,
        'location': webapp.location,
        'client_cert_enabled': webapp.client_cert_enabled,
        'enabled': webapp.enabled,
        'reserved': webapp.reserved,
        'client_affinity_enabled': webapp.client_affinity_enabled,
        'server_farm_id': webapp.server_farm_id,
        'host_names_disabled': webapp.host_names_disabled,
        'https_only': getattr(webapp, 'https_only', None),
        'skip_custom_domain_verification': getattr(webapp, 'skip_custom_domain_verification', None),
        'ttl_in_seconds': getattr(webapp, 'ttl_in_seconds', None),
        'state': webapp.state,
        'tags': webapp.tags or None,
    }
+
+
def slot_to_dict(slot):
    """Serialize a deployment-slot SDK object into a plain dict of facts."""
    return {
        'id': slot.id,
        'resource_group': slot.resource_group,
        'server_farm_id': slot.server_farm_id,
        'target_swap_slot': slot.target_swap_slot,
        'enabled_host_names': slot.enabled_host_names,
        'slot_swap_status': slot.slot_swap_status,
        'name': slot.name,
        'location': slot.location,
        'enabled': slot.enabled,
        'reserved': slot.reserved,
        'host_names_disabled': slot.host_names_disabled,
        'state': slot.state,
        'repository_site_name': slot.repository_site_name,
        'default_host_name': slot.default_host_name,
        'kind': slot.kind,
        'site_config': slot.site_config,
        'tags': slot.tags or None,
    }
+
+
class Actions:
    """Enumeration of the operations the module may perform on a slot."""
    NoAction = 0
    CreateOrUpdate = 1
    UpdateAppSettings = 2
    Delete = 3
+
+
+class AzureRMWebAppSlots(AzureRMModuleBase):
+ """Configuration class for an Azure RM Web App slot resource"""
+
    def __init__(self):
        """Declare the argument spec, initialize all state holders, then run the base class."""
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            webapp_name=dict(
                type='str',
                required=True
            ),
            location=dict(
                type='str'
            ),
            configuration_source=dict(
                type='str'
            ),
            # 'raw' because a slot name (str) or False (disable auto swap) are both valid.
            auto_swap_slot_name=dict(
                type='raw'
            ),
            swap=dict(
                type='dict',
                options=swap_spec
            ),
            frameworks=dict(
                type='list',
                elements='dict',
                options=framework_spec
            ),
            container_settings=dict(
                type='dict',
                options=container_settings_spec
            ),
            deployment_source=dict(
                type='dict',
                options=deployment_source_spec
            ),
            startup_file=dict(
                type='str'
            ),
            app_settings=dict(
                type='dict'
            ),
            purge_app_settings=dict(
                type='bool',
                default=False
            ),
            app_state=dict(
                type='str',
                choices=['started', 'stopped', 'restarted'],
                default='started'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        # Container settings and frameworks cannot be combined on one slot.
        mutually_exclusive = [['container_settings', 'frameworks']]

        self.resource_group = None
        self.name = None
        self.webapp_name = None
        self.location = None

        self.auto_swap_slot_name = None
        self.swap = None
        self.tags = None
        self.startup_file = None
        self.configuration_source = None
        self.clone = False

        # site config, e.g app settings, ssl
        self.site_config = dict()
        self.app_settings = dict()
        self.app_settings_strDic = None

        # siteSourceControl
        self.deployment_source = dict()

        # site, used at level creation, or update.
        self.site = None

        # property for internal usage, not used for sdk
        self.container_settings = None

        self.purge_app_settings = False
        self.app_state = 'started'

        self.results = dict(
            changed=False,
            id=None,
        )
        self.state = None
        self.to_do = Actions.NoAction

        self.frameworks = None

        # set site_config value from kwargs
        # site-config keys that exec_module may compare/update against the deployed config
        self.site_config_updatable_frameworks = ["net_framework_version",
                                                 "java_version",
                                                 "php_version",
                                                 "python_version",
                                                 "linux_fx_version"]

        self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
        self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']

        super(AzureRMWebAppSlots, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                 mutually_exclusive=mutually_exclusive,
                                                 supports_check_mode=True,
                                                 supports_tags=True)
+
    def exec_module(self, **kwargs):
        """Main module execution method: reconcile the requested slot state with Azure."""

        # Copy module parameters onto attributes; unknown non-None keys may flow
        # into site_config (only 'scm_type' here — looks copied from the webapp
        # module; this branch appears dead for this arg spec. NOTE(review): confirm).
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                if key == "scm_type":
                    self.site_config[key] = kwargs[key]

        old_response = None
        response = None
        to_be_updated = False

        # set location
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            self.location = resource_group.location

        # get web app
        webapp_response = self.get_webapp()

        if not webapp_response:
            self.fail("Web app {0} does not exist in resource group {1}.".format(self.webapp_name, self.resource_group))

        # get slot
        old_response = self.get_slot()

        # set is_linux
        is_linux = True if webapp_response['reserved'] else False

        if self.state == 'present':
            if self.frameworks:
                # java is mutually exclusive with other frameworks
                if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
                    self.fail('Java is mutually exclusive with other frameworks.')

                if is_linux:
                    if len(self.frameworks) != 1:
                        self.fail('Can specify one framework only for Linux web app.')

                    if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
                        self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))

                    self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper()

                    if self.frameworks[0]['name'] == 'java':
                        if self.frameworks[0]['version'] != '8':
                            self.fail("Linux web app only supports java 8.")

                        if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
                                self.frameworks[0]['settings']['java_container'].lower() != 'tomcat':
                            self.fail("Linux web app only supports tomcat container.")

                        if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
                                self.frameworks[0]['settings']['java_container'].lower() == 'tomcat':
                            self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8'
                        else:
                            self.site_config['linux_fx_version'] = 'JAVA|8-jre8'
                else:
                    # Windows: each framework contributes a '<name>_version' config key.
                    for fx in self.frameworks:
                        if fx.get('name') not in self.supported_windows_frameworks:
                            self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name')))
                        else:
                            self.site_config[fx.get('name') + '_version'] = fx.get('version')

                        if 'settings' in fx and fx['settings'] is not None:
                            for key, value in fx['settings'].items():
                                self.site_config[key] = value

            if not self.app_settings:
                self.app_settings = dict()

            if self.container_settings:
                # Container deployment is expressed via a DOCKER| linux_fx_version
                # plus DOCKER_REGISTRY_* app settings.
                linux_fx_version = 'DOCKER|'

                if self.container_settings.get('registry_server_url'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']

                    linux_fx_version += self.container_settings['registry_server_url'] + '/'

                linux_fx_version += self.container_settings['name']

                self.site_config['linux_fx_version'] = linux_fx_version

                if self.container_settings.get('registry_server_user'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user']

                if self.container_settings.get('registry_server_password'):
                    self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password']

            # set auto_swap_slot_name: a string enables auto swap to that slot,
            # False explicitly disables auto swap.
            if self.auto_swap_slot_name and isinstance(self.auto_swap_slot_name, str):
                self.site_config['auto_swap_slot_name'] = self.auto_swap_slot_name
            if self.auto_swap_slot_name is False:
                self.site_config['auto_swap_slot_name'] = None

            # init site
            self.site = Site(location=self.location, site_config=self.site_config)

            # check if the slot already present in the webapp
            if not old_response:
                self.log("Web App slot doesn't exist")

                to_be_updated = True
                self.to_do = Actions.CreateOrUpdate
                self.site.tags = self.tags

                # if linux, setup startup_file
                if self.startup_file:
                    self.site_config['app_command_line'] = self.startup_file

                # set app setting
                if self.app_settings:
                    app_settings = []
                    for key in self.app_settings.keys():
                        app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))

                    self.site_config['app_settings'] = app_settings

                # clone slot
                if self.configuration_source:
                    self.clone = True

            else:
                # existing slot, do update
                self.log("Web App slot already exists")

                self.log('Result: {0}'.format(old_response))

                update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))

                if update_tags:
                    to_be_updated = True

                # check if site_config changed
                old_config = self.get_configuration_slot(self.name)

                if self.is_site_config_changed(old_config):
                    to_be_updated = True
                    self.to_do = Actions.CreateOrUpdate

                self.app_settings_strDic = self.list_app_settings_slot(self.name)

                # purge existing app_settings:
                if self.purge_app_settings:
                    to_be_updated = True
                    self.to_do = Actions.UpdateAppSettings
                    self.app_settings_strDic = dict()

                # check if app settings changed
                if self.purge_app_settings or self.is_app_settings_changed():
                    to_be_updated = True
                    self.to_do = Actions.UpdateAppSettings

                    if self.app_settings:
                        for key in self.app_settings.keys():
                            self.app_settings_strDic[key] = self.app_settings[key]

        elif self.state == 'absent':
            if old_response:
                self.log("Delete Web App slot")
                self.results['changed'] = True

                if self.check_mode:
                    return self.results

                self.delete_slot()

                self.log('Web App slot deleted')

            else:
                self.log("Web app slot {0} not exists.".format(self.name))

        if to_be_updated:
            self.log('Need to Create/Update web app')
            self.results['changed'] = True

            if self.check_mode:
                return self.results

            if self.to_do == Actions.CreateOrUpdate:
                response = self.create_update_slot()

                self.results['id'] = response['id']

                if self.clone:
                    self.clone_slot()

            if self.to_do == Actions.UpdateAppSettings:
                self.update_app_settings_slot()

        # Prefer the freshly returned slot; fall back to the pre-existing one.
        slot = None
        if response:
            slot = response
        if old_response:
            slot = old_response

        if slot:
            # Reconcile the running state; 'restarted' always acts.
            if (slot['state'] != 'Stopped' and self.app_state == 'stopped') or \
                    (slot['state'] != 'Running' and self.app_state == 'started') or \
                    self.app_state == 'restarted':

                self.results['changed'] = True
                if self.check_mode:
                    return self.results

                self.set_state_slot(self.app_state)

            if self.swap:
                self.results['changed'] = True
                if self.check_mode:
                    return self.results

                self.swap_slot()

        return self.results
+
+ # compare site config
def is_site_config_changed(self, existing_config):
    """Return True when the desired site_config differs from *existing_config*."""
    # Framework versions are compared case-insensitively; a missing value on
    # the existing configuration counts as a difference.
    for framework in self.site_config_updatable_frameworks:
        wanted = self.site_config.get(framework)
        if not wanted:
            continue
        current = getattr(existing_config, framework)
        if not current or current.upper() != wanted.upper():
            return True

    # auto_swap_slot_name semantics: False means "must be unset", a non-empty
    # string means "must match exactly", None/empty means "don't care".
    if self.auto_swap_slot_name is False:
        if existing_config.auto_swap_slot_name is not None:
            return True
    elif self.auto_swap_slot_name:
        if self.auto_swap_slot_name != getattr(existing_config, 'auto_swap_slot_name', None):
            return True
    return False
+
+ # comparing existing app setting with input, determine whether it's changed
def is_app_settings_changed(self):
    """Return True when the requested app settings differ from the existing ones."""
    if not self.app_settings:
        # Nothing requested, so nothing can have changed.
        return False
    if len(self.app_settings_strDic) != len(self.app_settings):
        return True
    return self.app_settings_strDic != self.app_settings
+
+ # comparing deployment source with input, determine whether it's changed
def is_deployment_source_changed(self, existing_webapp):
    """Return True when the requested deployment source differs from the webapp's."""
    if not self.deployment_source:
        return False

    existing = existing_webapp.get('site_source_control')
    # Only attributes the caller actually supplied are compared.
    for attr in ('url', 'branch'):
        wanted = self.deployment_source.get(attr)
        if wanted and wanted != existing[attr]:
            return True

    return False
+
def create_update_slot(self):
    '''
    Creates or updates Web App slot with the specified configuration.

    Sends self.site as the site envelope for slot self.name of web app
    self.webapp_name.

    :return: deserialized Web App instance state dictionary
    '''
    self.log(
        "Creating / Updating the Web App slot {0}".format(self.name))

    try:
        response = self.web_client.web_apps.create_or_update_slot(resource_group_name=self.resource_group,
                                                                  slot=self.name,
                                                                  name=self.webapp_name,
                                                                  site_envelope=self.site)
        # This is a long-running operation; newer SDKs return an LROPoller
        # that must be waited on to obtain the final Site object.
        if isinstance(response, LROPoller):
            response = self.get_poller_result(response)

    except CloudError as exc:
        self.log('Error attempting to create the Web App slot instance.')
        self.fail("Error creating the Web App slot: {0}".format(str(exc)))
    # NOTE(review): if fail() ever returns instead of aborting, "response"
    # would be unbound here -- confirm fail() always raises/exits.
    return slot_to_dict(response)
+
def delete_slot(self):
    '''
    Deletes specified Web App slot in the specified subscription and resource group.

    :return: True
    '''
    self.log("Deleting the Web App slot {0}".format(self.name))
    try:
        response = self.web_client.web_apps.delete_slot(resource_group_name=self.resource_group,
                                                        name=self.webapp_name,
                                                        slot=self.name)
    except CloudError as e:
        self.log('Error attempting to delete the Web App slot.')
        # NOTE(review): fail() is assumed to abort the module run; otherwise
        # True would be returned below even after an error -- confirm.
        self.fail(
            "Error deleting the Web App slots: {0}".format(str(e)))

    return True
+
def get_webapp(self):
    '''
    Gets the properties of the specified Web App.

    :return: deserialized Web App instance state dictionary,
             or False when the web app does not exist
    '''
    self.log(
        "Checking if the Web App instance {0} is present".format(self.webapp_name))

    response = None

    try:
        response = self.web_client.web_apps.get(resource_group_name=self.resource_group,
                                                name=self.webapp_name)

        # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
        if response is not None:
            self.log("Response : {0}".format(response))
            self.log("Web App instance : {0} found".format(response.name))
            return webapp_to_dict(response)

    except CloudError as ex:
        # Any CloudError (including not-found on older SDKs) is treated as
        # "web app absent" and falls through to the False return below.
        pass

    self.log("Didn't find web app {0} in resource group {1}".format(
        self.webapp_name, self.resource_group))

    return False
+
def get_slot(self):
    '''
    Gets the properties of the specified Web App slot.

    :return: deserialized Web App slot state dictionary,
             or False when the slot does not exist
    '''
    self.log(
        "Checking if the Web App slot {0} is present".format(self.name))

    response = None

    try:
        response = self.web_client.web_apps.get_slot(resource_group_name=self.resource_group,
                                                     name=self.webapp_name,
                                                     slot=self.name)

        # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
        if response is not None:
            self.log("Response : {0}".format(response))
            self.log("Web App slot: {0} found".format(response.name))
            return slot_to_dict(response)

    except CloudError as ex:
        # Any CloudError (including not-found on older SDKs) is treated as
        # "slot absent" and falls through to the False return below.
        pass

    self.log("Does not find web app slot {0} in resource group {1}".format(self.name, self.resource_group))

    return False
+
def list_app_settings(self):
    '''
    List the application settings configured on the web app itself
    (the production slot), not on a deployment slot.

    :return: dict of application settings (name -> value)
    '''
    self.log("List webapp application setting")

    try:

        response = self.web_client.web_apps.list_application_settings(
            resource_group_name=self.resource_group, name=self.webapp_name)
        self.log("Response : {0}".format(response))

        return response.properties
    except CloudError as ex:
        # Bug fix: report the web app's name (webapp_name), not the slot name
        # (self.name) -- this call targets the web app itself.
        self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
            self.webapp_name, self.resource_group, str(ex)))
+
def list_app_settings_slot(self, slot_name):
    '''
    List application settings of the given deployment slot.

    :param slot_name: name of the slot to query
    :return: dict of application settings (name -> value)
    '''
    self.log("List application setting")

    try:

        response = self.web_client.web_apps.list_application_settings_slot(
            resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
        self.log("Response : {0}".format(response))

        return response.properties
    except CloudError as ex:
        # NOTE(review): the message reports self.name rather than the queried
        # slot_name; these are usually the same but may differ when cloning --
        # confirm which slot should be named here.
        self.fail("Failed to list application settings for web app slot {0} in resource group {1}: {2}".format(
            self.name, self.resource_group, str(ex)))
+
def update_app_settings_slot(self, slot_name=None, app_settings=None):
    '''
    Update application settings of a deployment slot.

    :param slot_name: slot to update; defaults to self.name
    :param app_settings: settings dict to apply; defaults to self.app_settings_strDic
    :return: updated application settings as a plain dictionary
    '''
    self.log("Update application setting")

    if slot_name is None:
        slot_name = self.name
    if app_settings is None:
        app_settings = self.app_settings_strDic
    try:
        response = self.web_client.web_apps.update_application_settings_slot(resource_group_name=self.resource_group,
                                                                             name=self.webapp_name,
                                                                             slot=slot_name,
                                                                             kind=None,
                                                                             properties=app_settings)
        self.log("Response : {0}".format(response))

        return response.as_dict()
    except CloudError as ex:
        self.fail("Failed to update application settings for web app slot {0} in resource group {1}: {2}".format(
            self.name, self.resource_group, str(ex)))
    # Bug fix: removed an unreachable trailing "return response" -- the try
    # path already returns and the except path calls fail(); "response" would
    # also be unbound at that point.
+
def create_or_update_source_control_slot(self):
    '''
    Update the site source control (deployment source) of the slot.

    :return: deserialized updating response dict, or False when no
             deployment source was requested
    '''
    self.log("Update site source control")

    if self.deployment_source is None:
        return False

    # Git-based deployment, not manual integration or Mercurial.
    self.deployment_source['is_manual_integration'] = False
    self.deployment_source['is_mercurial'] = False

    try:
        # Bug fix: the operations group is web_apps; the original called
        # self.web_client.web_client.create_or_update_source_control_slot,
        # which raises AttributeError at runtime.
        response = self.web_client.web_apps.create_or_update_source_control_slot(
            resource_group_name=self.resource_group,
            name=self.webapp_name,
            site_source_control=self.deployment_source,
            slot=self.name)
        self.log("Response : {0}".format(response))

        return response.as_dict()
    except CloudError as ex:
        self.fail("Failed to update site source control for web app slot {0} in resource group {1}: {2}".format(
            self.name, self.resource_group, str(ex)))
+
def get_configuration(self):
    '''
    Get web app configuration (the production slot's SiteConfig).

    :return: deserialized web app configuration response
    '''
    self.log("Get web app configuration")

    try:

        response = self.web_client.web_apps.get_configuration(
            resource_group_name=self.resource_group, name=self.webapp_name)
        self.log("Response : {0}".format(response))

        return response
    except CloudError as ex:
        self.fail("Failed to get configuration for web app {0} in resource group {1}: {2}".format(
            self.webapp_name, self.resource_group, str(ex)))
+
def get_configuration_slot(self, slot_name):
    '''
    Get slot configuration.

    :param slot_name: name of the slot to query
    :return: deserialized slot configuration response
    '''
    self.log("Get web app slot configuration")

    try:

        response = self.web_client.web_apps.get_configuration_slot(
            resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
        self.log("Response : {0}".format(response))

        return response
    except CloudError as ex:
        self.fail("Failed to get configuration for web app slot {0} in resource group {1}: {2}".format(
            slot_name, self.resource_group, str(ex)))
+
def update_configuration_slot(self, slot_name=None, site_config=None):
    '''
    Update slot configuration.

    :param slot_name: slot to update; defaults to self.name
    :param site_config: SiteConfig to apply; defaults to self.site_config
    :return: deserialized slot configuration response
    '''
    self.log("Update web app slot configuration")

    if slot_name is None:
        slot_name = self.name
    if site_config is None:
        site_config = self.site_config
    try:

        response = self.web_client.web_apps.update_configuration_slot(
            resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name, site_config=site_config)
        self.log("Response : {0}".format(response))

        return response
    except CloudError as ex:
        self.fail("Failed to update configuration for web app slot {0} in resource group {1}: {2}".format(
            slot_name, self.resource_group, str(ex)))
+
def set_state_slot(self, appstate):
    '''
    Start/stop/restart web app slot.

    :param appstate: one of 'started', 'stopped' or 'restarted'
    :return: deserialized updating response
    '''
    try:
        if appstate == 'started':
            response = self.web_client.web_apps.start_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
        elif appstate == 'stopped':
            response = self.web_client.web_apps.stop_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
        elif appstate == 'restarted':
            response = self.web_client.web_apps.restart_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
        else:
            self.fail("Invalid web app slot state {0}".format(appstate))

        self.log("Response : {0}".format(response))

        return response
    except CloudError as ex:
        # Surface the service request id (when present) to help support cases.
        request_id = ex.request_id if ex.request_id else ''
        self.fail("Failed to {0} web app slot {1} in resource group {2}, request_id {3} - {4}".format(
            appstate, self.name, self.resource_group, request_id, str(ex)))
+
def swap_slot(self):
    '''
    Swap slot.

    Dispatches on self.swap['action']:
      - 'swap':    actually swap this slot with production (target_slot None)
                   or with another slot.
      - 'preview': apply the target's configuration to this slot without
                   swapping (swap-with-preview).
      - 'reset':   undo a pending preview swap.

    :return: deserialized response
    '''
    self.log("Swap slot")

    try:
        if self.swap['action'] == 'swap':
            if self.swap['target_slot'] is None:
                response = self.web_client.web_apps.swap_slot_with_production(resource_group_name=self.resource_group,
                                                                              name=self.webapp_name,
                                                                              target_slot=self.name,
                                                                              preserve_vnet=self.swap['preserve_vnet'])
            else:
                response = self.web_client.web_apps.swap_slot_slot(resource_group_name=self.resource_group,
                                                                   name=self.webapp_name,
                                                                   slot=self.name,
                                                                   target_slot=self.swap['target_slot'],
                                                                   preserve_vnet=self.swap['preserve_vnet'])
        elif self.swap['action'] == 'preview':
            if self.swap['target_slot'] is None:
                response = self.web_client.web_apps.apply_slot_config_to_production(resource_group_name=self.resource_group,
                                                                                    name=self.webapp_name,
                                                                                    target_slot=self.name,
                                                                                    preserve_vnet=self.swap['preserve_vnet'])
            else:
                response = self.web_client.web_apps.apply_slot_configuration_slot(resource_group_name=self.resource_group,
                                                                                  name=self.webapp_name,
                                                                                  slot=self.name,
                                                                                  target_slot=self.swap['target_slot'],
                                                                                  preserve_vnet=self.swap['preserve_vnet'])
        elif self.swap['action'] == 'reset':
            if self.swap['target_slot'] is None:
                response = self.web_client.web_apps.reset_production_slot_config(resource_group_name=self.resource_group,
                                                                                 name=self.webapp_name)
            else:
                # NOTE(review): when a target_slot is given, BOTH the target
                # slot and this slot are reset (two calls); confirm the double
                # reset is intended.
                response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
                                                                                  name=self.webapp_name,
                                                                                  slot=self.swap['target_slot'])
                response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
                                                                                  name=self.webapp_name,
                                                                                  slot=self.name)

        self.log("Response : {0}".format(response))

        return response
    except CloudError as ex:
        self.fail("Failed to swap web app slot {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))
+
def clone_slot(self):
    # Copy site configuration and app settings from configuration_source
    # (either the production app or a sibling slot) into this slot.
    if self.configuration_source:
        # Source equal to the web app name means "clone from production"
        # (src_slot None); anything else names a sibling slot.
        src_slot = None if self.configuration_source.lower() == self.webapp_name.lower() else self.configuration_source

        if src_slot is None:
            site_config_clone_from = self.get_configuration()
        else:
            site_config_clone_from = self.get_configuration_slot(slot_name=src_slot)

        self.update_configuration_slot(site_config=site_config_clone_from)

        if src_slot is None:
            app_setting_clone_from = self.list_app_settings()
        else:
            app_setting_clone_from = self.list_app_settings_slot(src_slot)

        # Explicitly requested app_settings take precedence over cloned ones.
        if self.app_settings:
            app_setting_clone_from.update(self.app_settings)

        self.update_app_settings_slot(app_settings=app_setting_clone_from)
+
+
def main():
    """Main execution"""
    # NOTE(review): instantiating the module class appears to drive the whole
    # run (argument parsing and exec_module from its constructor, the usual
    # AzureRMModuleBase pattern) -- confirm against the class definition.
    AzureRMWebAppSlots()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/cloudformation.py b/test/support/integration/plugins/modules/cloudformation.py
new file mode 100644
index 0000000000..cd03146501
--- /dev/null
+++ b/test/support/integration/plugins/modules/cloudformation.py
@@ -0,0 +1,837 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation
+short_description: Create or delete an AWS CloudFormation stack
+description:
+ - Launches or updates an AWS CloudFormation stack and waits for it to complete.
+notes:
+ - CloudFormation features change often, and this module tries to keep up. That means your botocore version should be fresh.
+ The version listed in the requirements is the oldest version that works with the module as a whole.
+ Some features may require recent versions, and we do not pinpoint a minimum version for each feature.
+ Instead of relying on the minimum version, keep botocore up to date. AWS is always releasing features and fixing bugs.
+version_added: "1.1"
+options:
+ stack_name:
+ description:
+ - Name of the CloudFormation stack.
+ required: true
+ type: str
+ disable_rollback:
+ description:
+ - If a stack fails to form, rollback will remove the stack.
+ default: false
+ type: bool
+ on_create_failure:
+ description:
+ - Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option.
+ choices:
+ - DO_NOTHING
+ - ROLLBACK
+ - DELETE
+ version_added: "2.8"
+ type: str
+ create_timeout:
+ description:
+ - The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED
+ version_added: "2.6"
+ type: int
+ template_parameters:
+ description:
+ - A list of hashes of all the template variables for the stack. The value can be a string or a dict.
+ - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
+ default: {}
+ type: dict
+ state:
+ description:
+ - If I(state=present), stack will be created.
+ - If I(state=present) and if stack exists and template has changed, it will be updated.
+ - If I(state=absent), stack will be removed.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ template:
+ description:
+ - The local path of the CloudFormation template.
+ - This must be the full path to the file, relative to the working directory. If using roles this may look
+ like C(roles/cloudformation/files/cloudformation-example.json).
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template),
+ I(template_body) nor I(template_url) are specified, the previous template will be reused.
+ type: path
+ notification_arns:
+ description:
+ - A comma separated list of Simple Notification Service (SNS) topic ARNs to publish stack related events.
+ version_added: "2.0"
+ type: str
+ stack_policy:
+ description:
+ - The path of the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified.
+ for instance, allow all updates U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
+ version_added: "1.9"
+ type: str
+ tags:
+ description:
+ - Dictionary of tags to associate with stack and its resources during stack creation.
+ - Can be updated later, updating tags removes previous entries.
+ version_added: "1.4"
+ type: dict
+ template_url:
+ description:
+ - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an
+ S3 bucket in the same region as the stack.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified,
+ the previous template will be reused.
+ version_added: "2.0"
+ type: str
+ create_changeset:
+ description:
+ - "If stack already exists create a changeset instead of directly applying changes. See the AWS Change Sets docs
+ U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)."
+ - "WARNING: if the stack does not exist, it will be created without changeset. If I(state=absent), the stack will be
+ deleted immediately with no changeset."
+ type: bool
+ default: false
+ version_added: "2.4"
+ changeset_name:
+ description:
+ - Name given to the changeset when creating a changeset.
+ - Only used when I(create_changeset=true).
+ - By default a name prefixed with Ansible-STACKNAME is generated based on input parameters.
+ See the AWS Change Sets docs for more information
+ U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
+ version_added: "2.4"
+ type: str
+ template_format:
+ description:
+ - This parameter is ignored since Ansible 2.3 and will be removed in Ansible 2.14.
+ - Templates are now passed raw to CloudFormation regardless of format.
+ version_added: "2.0"
+ type: str
+ role_arn:
+ description:
+ - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
+ docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
+ version_added: "2.3"
+ type: str
+ termination_protection:
+ description:
+ - Enable or disable termination protection on the stack. Only works with botocore >= 1.7.18.
+ type: bool
+ version_added: "2.5"
+ template_body:
+ description:
+ - Template body. Use this to pass in the actual body of the CloudFormation template.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ version_added: "2.5"
+ type: str
+ events_limit:
+ description:
+ - Maximum number of CloudFormation events to fetch from a stack when creating or updating it.
+ default: 200
+ version_added: "2.7"
+ type: int
+ backoff_delay:
+ description:
+ - Number of seconds to wait for the next retry.
+ default: 3
+ version_added: "2.8"
+ type: int
+ required: False
+ backoff_max_delay:
+ description:
+ - Maximum amount of time to wait between retries.
+ default: 30
+ version_added: "2.8"
+ type: int
+ required: False
+ backoff_retries:
+ description:
+ - Number of times to retry operation.
+ - AWS API throttling mechanism fails CloudFormation module so we have to retry a couple of times.
+ default: 10
+ version_added: "2.8"
+ type: int
+ required: False
+ capabilities:
+ description:
+ - Specify capabilities that stack template contains.
+ - Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND).
+ type: list
+ elements: str
+ version_added: "2.8"
+ default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ]
+
+author: "James S. Martin (@jsmartin)"
+extends_documentation_fragment:
+- aws
+- ec2
+requirements: [ boto3, botocore>=1.5.45 ]
+'''
+
+EXAMPLES = '''
+- name: create a cloudformation stack
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ disable_rollback: true
+ template: "files/cloudformation-example.json"
+ template_parameters:
+ KeyName: "jmartin"
+ DiskType: "ephemeral"
+ InstanceType: "m1.small"
+ ClusterSize: 3
+ tags:
+ Stack: "ansible-cloudformation"
+
+# Basic role example
+- name: create a stack, specify role that cloudformation assumes
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ disable_rollback: true
+ template: "roles/cloudformation/files/cloudformation-example.json"
+ role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
+
+- name: delete a stack
+ cloudformation:
+ stack_name: "ansible-cloudformation-old"
+ state: "absent"
+
+# Create a stack, pass in template from a URL, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template via an URL
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ template_parameters:
+ KeyName: jmartin
+ DiskType: ephemeral
+ InstanceType: m1.small
+ ClusterSize: 3
+ tags:
+ Stack: ansible-cloudformation
+
+# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template body via lookup template
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_body: "{{ lookup('template', 'cloudformation.j2') }}"
+ template_parameters:
+ KeyName: jmartin
+ DiskType: ephemeral
+ InstanceType: m1.small
+ ClusterSize: 3
+ tags:
+ Stack: ansible-cloudformation
+
+# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute
+# When use_previous_value is set to True, the given value will be ignored and
+# CloudFormation will use the value from a previously submitted template.
+# If use_previous_value is set to False (default) the given value is used.
+- cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ template: "files/cloudformation-example.json"
+ template_parameters:
+ DBSnapshotIdentifier:
+ use_previous_value: True
+ value: arn:aws:rds:es-east-1:000000000000:snapshot:rds:my-db-snapshot
+ DBName:
+ use_previous_value: True
+ tags:
+ Stack: "ansible-cloudformation"
+
+# Enable termination protection on a stack.
+# If the stack already exists, this will update its termination protection
+- name: enable termination protection during stack creation
+ cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ termination_protection: yes
+
+# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED
+# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back.
+- name: enable termination protection during stack creation
+ cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ create_timeout: 5
+
+# Configure rollback behaviour on the unsuccessful creation of a stack allowing
+# CloudFormation to clean up, or do nothing in the event of an unsuccessful
+# deployment
+# In this case, if on_create_failure is set to "DELETE", it will clean up the stack if
+# it fails to create
+- name: create stack which will delete on creation failure
+ cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ on_create_failure: DELETE
+'''
+
+RETURN = '''
+events:
+ type: list
+ description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
+ returned: always
+ sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
+log:
+ description: Debugging logs. Useful when modifying or finding an error.
+ returned: always
+ type: list
+ sample: ["updating stack"]
+change_set_id:
+ description: The ID of the stack change set if one was created
+ returned: I(state=present) and I(create_changeset=true)
+ type: str
+ sample: "arn:aws:cloudformation:us-east-1:012345678901:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0"
+stack_resources:
+ description: AWS stack resources and their status. List of dictionaries, one dict per resource.
+ returned: state == present
+ type: list
+ sample: [
+ {
+ "last_updated_time": "2016-10-11T19:40:14.979000+00:00",
+ "logical_resource_id": "CFTestSg",
+ "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
+ "resource_type": "AWS::EC2::SecurityGroup",
+ "status": "UPDATE_COMPLETE",
+ "status_reason": null
+ }
+ ]
+stack_outputs:
+ type: dict
+ description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
+ returned: state == present
+ sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
+''' # NOQA
+
+import json
+import time
+import uuid
+import traceback
+from hashlib import sha1
+
+try:
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, boto3_conn, boto_exception, ec2_argument_spec, get_aws_connection_info
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
    '''This event data was never correct, it worked as a side effect. So the v2.3 format is different.'''
    result = {'events': [], 'log': []}

    try:
        paginator = cfn.get_paginator('describe_stack_events')
        pages = paginator.paginate(
            StackName=stack_name,
            PaginationConfig={'MaxItems': events_limit}
        )
        # Restrict to events from one operation when a client request token
        # is supplied; otherwise take every event.
        if token_filter is None:
            search_expr = "StackEvents[*]"
        else:
            search_expr = "StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
        events = list(pages.search(search_expr))
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
        error_msg = boto_exception(err)
        if 'does not exist' in error_msg:
            # missing stack, don't bail.
            result['log'].append('Stack does not exist.')
        else:
            result['log'].append('Unknown error: ' + str(error_msg))
        return result

    for event in events:
        result['events'].append(
            'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**event))
        # Failures additionally get a detailed entry in the log list.
        if event['ResourceStatus'].endswith('FAILED'):
            result['log'].append(
                '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**event))

    return result
+
+
def create_stack(module, stack_params, cfn, events_limit):
    """Create a new CloudFormation stack and follow it to completion.

    :param module: AnsibleModule (parameters, fail_json)
    :param stack_params: keyword arguments for cfn.create_stack
    :param cfn: boto3 CloudFormation client
    :param events_limit: maximum number of stack events to fetch
    :return: result dict produced by stack_operation()
    """
    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
        module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.")

    # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and
    # 'OnFailure' only apply on creation, not update.
    # OnFailure and DisableRollback are mutually exclusive, so only one is sent.
    if module.params.get('on_create_failure') is not None:
        stack_params['OnFailure'] = module.params['on_create_failure']
    else:
        stack_params['DisableRollback'] = module.params['disable_rollback']

    if module.params.get('create_timeout') is not None:
        stack_params['TimeoutInMinutes'] = module.params['create_timeout']
    if module.params.get('termination_protection') is not None:
        # Feature-detected: requires botocore >= 1.7.18.
        if boto_supports_termination_protection(cfn):
            stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
        else:
            module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")

    try:
        response = cfn.create_stack(**stack_params)
        # Use stack ID to follow stack state in case of on_create_failure = DELETE
        result = stack_operation(cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None))
    except Exception as err:
        error_msg = boto_exception(err)
        module.fail_json(msg="Failed to create stack {0}: {1}.".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
    if not result:
        module.fail_json(msg="empty result")
    return result
+
+
def list_changesets(cfn, stack_name):
    """Return the names of all change sets currently listed for *stack_name*."""
    summaries = cfn.list_change_sets(StackName=stack_name)['Summaries']
    names = []
    for summary in summaries:
        names.append(summary['ChangeSetName'])
    return names
+
+
def create_changeset(module, stack_params, cfn, events_limit):
    """Create a CloudFormation change set for an existing stack and wait for it.

    :param module: AnsibleModule (parameters, fail_json)
    :param stack_params: keyword arguments for cfn.create_change_set
    :param cfn: boto3 CloudFormation client
    :param events_limit: maximum number of stack events to fetch
    :return: result dict; never executes the change set, only creates it
    """
    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
        module.fail_json(msg="Either 'template' or 'template_url' is required.")
    if module.params['changeset_name'] is not None:
        stack_params['ChangeSetName'] = module.params['changeset_name']

    # changesets don't accept ClientRequestToken parameters
    stack_params.pop('ClientRequestToken', None)

    try:
        # Deterministic name derived from the input, so re-running with the
        # same parameters finds the already-pending change set.
        changeset_name = build_changeset_name(stack_params)
        stack_params['ChangeSetName'] = changeset_name

        # Determine if this changeset already exists
        pending_changesets = list_changesets(cfn, stack_params['StackName'])
        if changeset_name in pending_changesets:
            warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
            result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
        else:
            cs = cfn.create_change_set(**stack_params)
            # Make sure we don't enter an infinite loop
            time_end = time.time() + 600
            # Poll until the change set leaves its CREATE_* states (10 min cap).
            while time.time() < time_end:
                try:
                    newcs = cfn.describe_change_set(ChangeSetName=cs['Id'])
                except botocore.exceptions.BotoCoreError as err:
                    error_msg = boto_exception(err)
                    module.fail_json(msg=error_msg)
                if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
                    time.sleep(1)
                elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']:
                    # A no-op change set is deleted rather than left pending.
                    cfn.delete_change_set(ChangeSetName=cs['Id'])
                    result = dict(changed=False,
                                  output='The created Change Set did not contain any changes to this stack and was deleted.')
                    # a failed change set does not trigger any stack events so we just want to
                    # skip any further processing of result and just return it directly
                    return result
                else:
                    break
                # Lets not hog the cpu/spam the AWS API
                time.sleep(1)
            result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit)
            result['change_set_id'] = cs['Id']
            result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
                                  'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
                                  'NOTE that dependencies on this stack might fail due to pending changes!']
    except Exception as err:
        error_msg = boto_exception(err)
        if 'No updates are to be performed.' in error_msg:
            result = dict(changed=False, output='Stack is already up-to-date.')
        else:
            module.fail_json(msg="Failed to create change set: {0}".format(error_msg), exception=traceback.format_exc())

    if not result:
        module.fail_json(msg="empty result")
    return result
+
+
def update_stack(module, stack_params, cfn, events_limit):
    """Update an existing CloudFormation stack and follow it to completion.

    :param module: AnsibleModule (parameters, fail_json)
    :param stack_params: keyword arguments for cfn.update_stack
    :param cfn: boto3 CloudFormation client
    :param events_limit: maximum number of stack events to fetch
    :return: result dict from stack_operation(), or changed=False when AWS
             reports there is nothing to update
    """
    if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
        # No new template supplied: reuse the one already on the stack.
        stack_params['UsePreviousTemplate'] = True

    # if the state is present and the stack already exists, we try to update it.
    # AWS will tell us if the stack template and parameters are the same and
    # don't need to be updated.
    try:
        cfn.update_stack(**stack_params)
        result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None))
    except Exception as err:
        error_msg = boto_exception(err)
        if 'No updates are to be performed.' in error_msg:
            result = dict(changed=False, output='Stack is already up-to-date.')
        else:
            module.fail_json(msg="Failed to update stack {0}: {1}".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
    if not result:
        module.fail_json(msg="empty result")
    return result
+
+
def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state):
    '''Set the stack's termination protection flag to the desired boolean state.

    Fails the module when the installed botocore is too old for the feature.
    A missing stack or an already-matching flag is a no-op.
    '''
    if not boto_supports_termination_protection(cfn):
        module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
    stack = get_stack_facts(cfn, stack_name)
    if not stack:
        return
    # Identity comparison mirrors the boolean singletons returned by the API.
    if stack['EnableTerminationProtection'] is desired_termination_protection_state:
        return
    try:
        cfn.update_termination_protection(
            EnableTerminationProtection=desired_termination_protection_state,
            StackName=stack_name)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=boto_exception(e), exception=traceback.format_exc())
+
+
def boto_supports_termination_protection(cfn):
    '''Return True when the client exposes update_termination_protection
    (the API call was added in botocore 1.7.18).'''
    sentinel = object()
    return getattr(cfn, "update_termination_protection", sentinel) is not sentinel
+
+
def stack_operation(cfn, stack_name, operation, events_limit, op_token=None):
    '''Poll a stack until the given operation reaches a terminal state.

    :param cfn: boto3 CloudFormation client
    :param stack_name: name of the stack being watched
    :param operation: label such as CREATE, UPDATE, DELETE or CREATE_CHANGESET;
        used in output messages and to special-case delete/changeset handling
    :param events_limit: maximum number of stack events to collect
    :param op_token: optional ClientRequestToken used to filter stack events
    :returns: an Ansible-style result dict ('changed', 'output', optionally 'failed')
    '''
    # Records whether the stack was ever successfully described, so a later
    # lookup failure can be read as "deleted" rather than "never existed".
    existed = []
    while True:
        try:
            stack = get_stack_facts(cfn, stack_name)
            existed.append('yes')
        except Exception:
            # If the stack previously existed, and now can't be found then it's
            # been deleted successfully.
            if 'yes' in existed or operation == 'DELETE':  # stacks may delete fast, look in a few ways.
                ret = get_stack_events(cfn, stack_name, events_limit, op_token)
                ret.update({'changed': True, 'output': 'Stack Deleted'})
                return ret
            else:
                return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
        ret = get_stack_events(cfn, stack_name, events_limit, op_token)
        if not stack:
            if 'yes' in existed or operation == 'DELETE':  # stacks may delete fast, look in a few ways.
                ret = get_stack_events(cfn, stack_name, events_limit, op_token)
                ret.update({'changed': True, 'output': 'Stack Deleted'})
                return ret
            else:
                ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
                return ret
        # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
        # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
        elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
            ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
            return ret
        elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE':
            ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'})
            return ret
        # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases.
        elif stack['StackStatus'].endswith('_COMPLETE'):
            ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
            return ret
        elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
            ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
            return ret
        # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
        elif stack['StackStatus'].endswith('_FAILED'):
            ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
            return ret
        else:
            # this can loop forever :/
            time.sleep(5)
    # Unreachable in practice: every path above either returns or loops again.
    return {'failed': True, 'output': 'Failed for unknown reasons.'}
+
+
def build_changeset_name(stack_params):
    """Return the caller-supplied ChangeSetName if present; otherwise derive a
    deterministic name from the stack name plus a SHA-1 digest of the
    JSON-serialized (key-sorted) stack parameters."""
    if 'ChangeSetName' in stack_params:
        return stack_params['ChangeSetName']

    serialized = json.dumps(stack_params, sort_keys=True)
    digest = sha1(to_bytes(serialized, errors='surrogate_or_strict')).hexdigest()
    return 'Ansible-{0}-{1}'.format(stack_params['StackName'], digest)
+
+
def check_mode_changeset(module, stack_params, cfn):
    """Create a throwaway change set, poll it, delete it, and return the
    check-mode result describing what a real run would change."""
    stack_params['ChangeSetName'] = build_changeset_name(stack_params)
    # changesets don't accept ClientRequestToken parameters
    stack_params.pop('ClientRequestToken', None)

    try:
        change_set = cfn.create_change_set(**stack_params)
        # Poll until the change set settles; 60 attempts x 5s == 5 minutes max.
        for _attempt in range(60):
            description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
            if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
                break
            time.sleep(5)
        else:
            # The for-else triggers only when the loop never broke out,
            # i.e. the change set did not settle within the time budget.
            module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName'])

        # The change set is only a probe; always clean it up.
        cfn.delete_change_set(ChangeSetName=change_set['Id'])

        reason = description.get('StatusReason')
        is_noop_failure = (description['Status'] == 'FAILED'
                           and "didn't contain changes" in description['StatusReason'])
        if is_noop_failure:
            return {'changed': False, 'msg': reason, 'meta': description['StatusReason']}
        return {'changed': True, 'msg': reason, 'meta': description['Changes']}

    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
        module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())
+
+
def get_stack_facts(cfn, stack_name):
    """Return the description dict for stack_name, or None when the stack
    does not exist. Any other API error is re-raised to the caller."""
    try:
        stack_response = cfn.describe_stacks(StackName=stack_name)
        stack_info = stack_response['Stacks'][0]
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
        error_msg = boto_exception(err)
        if 'does not exist' in error_msg:
            # missing stack, don't bail.
            return None

        # other error, bail.
        raise err

    # NOTE(review): this re-derivation is redundant — stack_info was already
    # assigned above, and an empty 'Stacks' list would have raised IndexError
    # before reaching this point. Kept as-is to preserve behavior.
    if stack_response and stack_response.get('Stacks', None):
        stacks = stack_response['Stacks']
        if len(stacks):
            stack_info = stacks[0]

    return stack_info
+
+
def main():
    """Module entry point: create, update, delete, or stage a change set for a
    CloudFormation stack according to the ``state`` and ``create_changeset``
    parameters, then exit with the formatted stack facts."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        stack_name=dict(required=True),
        template_parameters=dict(required=False, type='dict', default={}),
        state=dict(default='present', choices=['present', 'absent']),
        template=dict(default=None, required=False, type='path'),
        notification_arns=dict(default=None, required=False),
        stack_policy=dict(default=None, required=False),
        disable_rollback=dict(default=False, type='bool'),
        on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
        create_timeout=dict(default=None, type='int'),
        template_url=dict(default=None, required=False),
        template_body=dict(default=None, required=False),
        template_format=dict(removed_in_version='2.14'),
        create_changeset=dict(default=False, type='bool'),
        changeset_name=dict(default=None, required=False),
        role_arn=dict(default=None, required=False),
        tags=dict(default=None, type='dict'),
        termination_protection=dict(default=None, type='bool'),
        events_limit=dict(default=200, type='int'),
        backoff_retries=dict(type='int', default=10, required=False),
        backoff_delay=dict(type='int', default=3, required=False),
        backoff_max_delay=dict(type='int', default=30, required=False),
        capabilities=dict(type='list', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body'],
                            ['disable_rollback', 'on_create_failure']],
        supports_check_mode=True
    )
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required for this module')

    # Reject any capability outside the set CloudFormation accepts.
    invalid_capabilities = []
    user_capabilities = module.params.get('capabilities')
    for user_cap in user_capabilities:
        if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
            invalid_capabilities.append(user_cap)

    if invalid_capabilities:
        module.fail_json(msg="Specified capabilities are invalid : %r,"
                             " please check documentation for valid capabilities" % invalid_capabilities)

    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {
        'Capabilities': user_capabilities,
        'ClientRequestToken': to_native(uuid.uuid4()),
    }
    state = module.params['state']
    stack_params['StackName'] = module.params['stack_name']

    # Template may come from a local file, an inline body, or a URL;
    # the three options are mutually exclusive (enforced above).
    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as template_fh:
            stack_params['TemplateBody'] = template_fh.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']

    if module.params.get('notification_arns'):
        stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
    else:
        stack_params['NotificationARNs'] = []

    # can't check the policy when verifying.
    if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
        with open(module.params['stack_policy'], 'r') as stack_policy_fh:
            stack_params['StackPolicyBody'] = stack_policy_fh.read()

    template_parameters = module.params['template_parameters']

    stack_params['Parameters'] = []
    for k, v in template_parameters.items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = str(v['value'])

            # UsePreviousValue and ParameterValue are mutually exclusive in the API,
            # so drop any explicit value when reuse is requested.
            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})

    if isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])

    if module.params.get('role_arn'):
        stack_params['RoleARN'] = module.params['role_arn']

    result = {}

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        cfn = boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=boto_exception(e))

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    backoff_wrapper = AWSRetry.jittered_backoff(
        retries=module.params.get('backoff_retries'),
        delay=module.params.get('backoff_delay'),
        max_delay=module.params.get('backoff_max_delay')
    )
    cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
    cfn.create_stack = backoff_wrapper(cfn.create_stack)
    cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
    cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
    cfn.update_stack = backoff_wrapper(cfn.update_stack)
    cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
    cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
    cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
    if boto_supports_termination_protection(cfn):
        cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)

    stack_info = get_stack_facts(cfn, stack_params['StackName'])

    # In check mode, report what would change without touching the stack
    # (check_mode_changeset uses a throwaway change set to compute the diff).
    if module.check_mode:
        if state == 'absent' and stack_info:
            module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
        elif state == 'absent' and not stack_info:
            module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
        elif state == 'present' and not stack_info:
            module.exit_json(changed=True, msg='New stack would be created', meta=[])
        else:
            module.exit_json(**check_mode_changeset(module, stack_params, cfn))

    if state == 'present':
        if not stack_info:
            result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
        elif module.params.get('create_changeset'):
            result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
        else:
            if module.params.get('termination_protection') is not None:
                update_termination_protection(module, cfn, stack_params['StackName'],
                                              bool(module.params.get('termination_protection')))
            result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))

        # format the stack output

        stack = get_stack_facts(cfn, stack_params['StackName'])
        if stack is not None:
            if result.get('stack_outputs') is None:
                # always define stack_outputs, but it may be empty
                result['stack_outputs'] = {}
            for output in stack.get('Outputs', []):
                result['stack_outputs'][output['OutputKey']] = output['OutputValue']
            stack_resources = []
            reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
            for res in reslist.get('StackResourceSummaries', []):
                stack_resources.append({
                    "logical_resource_id": res['LogicalResourceId'],
                    "physical_resource_id": res.get('PhysicalResourceId', ''),
                    "resource_type": res['ResourceType'],
                    "last_updated_time": res['LastUpdatedTimestamp'],
                    "status": res['ResourceStatus'],
                    "status_reason": res.get('ResourceStatusReason')  # can be blank, apparently
                })
            result['stack_resources'] = stack_resources

    elif state == 'absent':
        # absent state is different because of the way delete_stack works.
        # problem is it doesn't give an error if stack isn't found
        # so must describe the stack first

        try:
            stack = get_stack_facts(cfn, stack_params['StackName'])
            if not stack:
                result = {'changed': False, 'output': 'Stack not found.'}
            else:
                if stack_params.get('RoleARN') is None:
                    cfn.delete_stack(StackName=stack_params['StackName'])
                else:
                    cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
                result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
                                         stack_params.get('ClientRequestToken', None))
        except Exception as err:
            module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())

    module.exit_json(**result)
+
+
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/cloudformation_info.py b/test/support/integration/plugins/modules/cloudformation_info.py
new file mode 100644
index 0000000000..f62b80235d
--- /dev/null
+++ b/test/support/integration/plugins/modules/cloudformation_info.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation_info
+short_description: Obtain information about an AWS CloudFormation stack
+description:
+ - Gets information about an AWS CloudFormation stack.
+ - This module was called C(cloudformation_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(cloudformation_info) module no longer returns C(ansible_facts)!
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+version_added: "2.2"
+author:
+ - Justin Menga (@jmenga)
+ - Kevin Coming (@waffie1)
+options:
+ stack_name:
+ description:
+ - The name or id of the CloudFormation stack. Gathers information on all stacks by default.
+ type: str
+ all_facts:
+ description:
+ - Get all stack information for the stack.
+ type: bool
+ default: false
+ stack_events:
+ description:
+ - Get stack events for the stack.
+ type: bool
+ default: false
+ stack_template:
+ description:
+ - Get stack template body for the stack.
+ type: bool
+ default: false
+ stack_resources:
+ description:
+ - Get stack resources for the stack.
+ type: bool
+ default: false
+ stack_policy:
+ description:
+ - Get stack policy for the stack.
+ type: bool
+ default: false
+ stack_change_sets:
+ description:
+ - Get stack change sets for the stack
+ type: bool
+ default: false
+ version_added: '2.10'
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Get summary information about a stack
+- cloudformation_info:
+ stack_name: my-cloudformation-stack
+ register: output
+
+- debug:
+ msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}"
+
+# When the module is called as cloudformation_facts, return values are published
+# in ansible_facts['cloudformation'][<stack_name>] and can be used as follows.
+# Note that this is deprecated and will stop working in Ansible 2.13.
+
+- cloudformation_facts:
+ stack_name: my-cloudformation-stack
+
+- debug:
+ msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}"
+
+# Get stack outputs, when you have the stack name available as a fact
+- set_fact:
+ stack_name: my-awesome-stack
+
+- cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: my_stack
+
+- debug:
+ msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}"
+
+# Get all stack information about a stack
+- cloudformation_info:
+ stack_name: my-cloudformation-stack
+ all_facts: true
+
+# Get stack resource and stack policy information about a stack
+- cloudformation_info:
+ stack_name: my-cloudformation-stack
+ stack_resources: true
+ stack_policy: true
+
+# Fail if the stack doesn't exist
+- name: try to get facts about a stack but fail if it doesn't exist
+ cloudformation_info:
+ stack_name: nonexistent-stack
+ all_facts: yes
+ failed_when: cloudformation['nonexistent-stack'] is undefined
+'''
+
+RETURN = '''
+stack_description:
+ description: Summary facts about the stack
+ returned: if the stack exists
+ type: dict
+stack_outputs:
+ description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each
+ output 'OutputValue' parameter
+ returned: if the stack exists
+ type: dict
+ sample:
+ ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com
+stack_parameters:
+ description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of
+ each parameter 'ParameterValue' parameter
+ returned: if the stack exists
+ type: dict
+ sample:
+ DatabaseEngine: mysql
+ DatabasePassword: "***"
+stack_events:
+ description: All stack events for the stack
+ returned: only if all_facts or stack_events is true and the stack exists
+ type: list
+stack_policy:
+ description: Describes the stack policy for the stack
+ returned: only if all_facts or stack_policy is true and the stack exists
+ type: dict
+stack_template:
+ description: Describes the stack template for the stack
+ returned: only if all_facts or stack_template is true and the stack exists
+ type: dict
+stack_resource_list:
+ description: Describes stack resources for the stack
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: list
+stack_resources:
+ description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each
+ resource 'PhysicalResourceId' parameter
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: dict
+ sample:
+ AutoScalingGroup: "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7"
+ AutoScalingSecurityGroup: "sg-abcd1234"
+ ApplicationDatabase: "dazvlpr01xj55a"
+stack_change_sets:
+ description: A list of stack change sets. Each item in the list represents the details of a specific changeset.
+ returned: only if all_facts or stack_change_sets is true and the stack exists
+ type: list
+'''
+
+import json
+import traceback
+
+from functools import partial
+from ansible.module_utils._text import to_native
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry, boto3_tag_list_to_ansible_dict)
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
class CloudFormationServiceManager:
    """Handles CloudFormation Services: thin wrappers around the boto3
    CloudFormation client that add retry/backoff and pagination."""

    def __init__(self, module):
        self.module = module
        self.client = module.client('cloudformation')

    @AWSRetry.exponential_backoff(retries=5, delay=5)
    def describe_stacks_with_backoff(self, **kwargs):
        # Paginate so stacks beyond the first page are included.
        paginator = self.client.get_paginator('describe_stacks')
        return paginator.paginate(**kwargs).build_full_result()['Stacks']

    def describe_stacks(self, stack_name=None):
        """Return stack descriptions (all stacks when stack_name is falsy).

        A missing stack yields an empty result rather than a failure;
        any other API error fails the module.
        """
        try:
            kwargs = {'StackName': stack_name} if stack_name else {}
            response = self.describe_stacks_with_backoff(**kwargs)
            if response is not None:
                return response
            self.module.fail_json(msg="Error describing stack(s) - an empty response was returned")
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            # Only ClientError carries a parsed .response dict; accessing it on a
            # BotoCoreError would raise AttributeError and mask the real error.
            if isinstance(e, botocore.exceptions.ClientError) and 'does not exist' in e.response['Error']['Message']:
                # missing stack, don't bail.
                return {}
            # stack_name may be None when describing all stacks; avoid a
            # TypeError from concatenating None into the failure message.
            self.module.fail_json_aws(e, msg="Error describing stack " + (stack_name or ''))

    @AWSRetry.exponential_backoff(retries=5, delay=5)
    def list_stack_resources_with_backoff(self, stack_name):
        paginator = self.client.get_paginator('list_stack_resources')
        return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries']

    def list_stack_resources(self, stack_name):
        """Return every resource summary for the stack."""
        try:
            return self.list_stack_resources_with_backoff(stack_name)
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name)

    @AWSRetry.exponential_backoff(retries=5, delay=5)
    def describe_stack_events_with_backoff(self, stack_name):
        paginator = self.client.get_paginator('describe_stack_events')
        return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents']

    def describe_stack_events(self, stack_name):
        """Return all events recorded for the stack."""
        try:
            return self.describe_stack_events_with_backoff(stack_name)
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name)

    @AWSRetry.exponential_backoff(retries=5, delay=5)
    def list_stack_change_sets_with_backoff(self, stack_name):
        paginator = self.client.get_paginator('list_change_sets')
        return paginator.paginate(StackName=stack_name).build_full_result()['Summaries']

    @AWSRetry.exponential_backoff(retries=5, delay=5)
    def describe_stack_change_set_with_backoff(self, **kwargs):
        paginator = self.client.get_paginator('describe_change_set')
        return paginator.paginate(**kwargs).build_full_result()

    def describe_stack_change_sets(self, stack_name):
        """Return the full description of every change set attached to the stack."""
        changes = []
        try:
            change_sets = self.list_stack_change_sets_with_backoff(stack_name)
            for item in change_sets:
                changes.append(self.describe_stack_change_set_with_backoff(
                    StackName=stack_name,
                    ChangeSetName=item['ChangeSetName']))
            return changes
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name)

    @AWSRetry.exponential_backoff(retries=5, delay=5)
    def get_stack_policy_with_backoff(self, stack_name):
        return self.client.get_stack_policy(StackName=stack_name)

    def get_stack_policy(self, stack_name):
        """Return the stack policy parsed as a dict ({} when no policy is set)."""
        try:
            response = self.get_stack_policy_with_backoff(stack_name)
            stack_policy = response.get('StackPolicyBody')
            if stack_policy:
                return json.loads(stack_policy)
            return dict()
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name)

    @AWSRetry.exponential_backoff(retries=5, delay=5)
    def get_template_with_backoff(self, stack_name):
        return self.client.get_template(StackName=stack_name)

    def get_template(self, stack_name):
        """Return the template body for the stack."""
        try:
            response = self.get_template_with_backoff(stack_name)
            return response.get('TemplateBody')
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name)
+
+
def to_dict(items, key, value):
    '''Transform a list of mapping items into a Key/Value dictionary, pairing
    each item's *key* field with its *value* field. Falsy input yields {}.'''
    if not items:
        return {}
    return {entry.get(key): entry.get(value) for entry in items}
+
+
def main():
    """Module entry point: describe the requested CloudFormation stack(s) and
    return the selected categories of facts for each one found."""
    argument_spec = dict(
        stack_name=dict(),
        all_facts=dict(required=False, default=False, type='bool'),
        stack_policy=dict(required=False, default=False, type='bool'),
        stack_events=dict(required=False, default=False, type='bool'),
        stack_resources=dict(required=False, default=False, type='bool'),
        stack_template=dict(required=False, default=False, type='bool'),
        stack_change_sets=dict(required=False, default=False, type='bool'),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    # Keep the deprecated `cloudformation_facts` alias working until 2.13;
    # it publishes results under ansible_facts instead of the return value.
    is_old_facts = module._name == 'cloudformation_facts'
    if is_old_facts:
        module.deprecate("The 'cloudformation_facts' module has been renamed to 'cloudformation_info', "
                         "and the renamed one no longer returns ansible_facts", version='2.13')

    service_mgr = CloudFormationServiceManager(module)

    if is_old_facts:
        result = {'ansible_facts': {'cloudformation': {}}}
    else:
        result = {'cloudformation': {}}

    for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')):
        facts = {'stack_description': stack_description}
        stack_name = stack_description.get('StackName')

        # Create stack output and stack parameter dictionaries
        if facts['stack_description']:
            facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
            facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'),
                                                'ParameterKey', 'ParameterValue')
            facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags'))

        # Create optional stack outputs
        all_facts = module.params.get('all_facts')
        if all_facts or module.params.get('stack_resources'):
            facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
            facts['stack_resources'] = to_dict(facts.get('stack_resource_list'),
                                               'LogicalResourceId', 'PhysicalResourceId')
        if all_facts or module.params.get('stack_template'):
            facts['stack_template'] = service_mgr.get_template(stack_name)
        if all_facts or module.params.get('stack_policy'):
            facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
        if all_facts or module.params.get('stack_events'):
            facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
        if all_facts or module.params.get('stack_change_sets'):
            facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name)

        if is_old_facts:
            result['ansible_facts']['cloudformation'][stack_name] = facts
        else:
            # snake_case everything except the dicts keyed by user-defined names.
            result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs',
                                                                                                'stack_parameters',
                                                                                                'stack_policy',
                                                                                                'stack_resources',
                                                                                                'stack_tags',
                                                                                                'stack_template'))

    module.exit_json(changed=False, **result)
+
+
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/cs_role.py b/test/support/integration/plugins/modules/cs_role.py
new file mode 100644
index 0000000000..6db295bd81
--- /dev/null
+++ b/test/support/integration/plugins/modules/cs_role.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cs_role
+short_description: Manages user roles on Apache CloudStack based clouds.
+description:
+ - Create, update, delete user roles.
+version_added: '2.3'
+author: René Moser (@resmo)
+options:
+ name:
+ description:
+ - Name of the role.
+ type: str
+ required: true
+ uuid:
+ description:
+ - ID of the role.
+ - If provided, I(uuid) is used as key.
+ type: str
+ aliases: [ id ]
+ role_type:
+ description:
+ - Type of the role.
+ - Only considered for creation.
+ type: str
+ default: User
+ choices: [ User, DomainAdmin, ResourceAdmin, Admin ]
+ description:
+ description:
+ - Description of the role.
+ type: str
+ state:
+ description:
+ - State of the role.
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+- name: Ensure a user role is present
+ cs_role:
+ name: myrole_user
+ delegate_to: localhost
+
+- name: Ensure a role having particular ID is named as myrole_user
+ cs_role:
+ name: myrole_user
+ id: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+ delegate_to: localhost
+
+- name: Ensure a role is absent
+ cs_role:
+ name: myrole_user
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the role.
+ returned: success
+ type: str
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the role.
+ returned: success
+ type: str
+ sample: myrole
+description:
+ description: Description of the role.
+ returned: success
+ type: str
+ sample: "This is my role description"
+role_type:
+ description: Type of the role.
+ returned: success
+ type: str
+ sample: User
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.cloudstack import (
+ AnsibleCloudStack,
+ cs_argument_spec,
+ cs_required_together,
+)
+
+
class AnsibleCloudStackRole(AnsibleCloudStack):
    """Manage CloudStack user roles: lookup, create, update, and delete."""

    def __init__(self, module):
        super(AnsibleCloudStackRole, self).__init__(module)
        # Map CloudStack API field names to the module's return keys.
        self.returns = {
            'type': 'role_type',
        }

    def get_role(self):
        """Look the role up by uuid when given, otherwise by name; None if absent."""
        uuid = self.module.params.get('uuid')
        if uuid:
            lookup_args = {'id': uuid}
        else:
            lookup_args = {'name': self.module.params.get('name')}
        listing = self.query_api('listRoles', **lookup_args)
        if listing:
            return listing['role'][0]
        return None

    def present_role(self):
        """Ensure the role exists, creating or reconciling it as needed."""
        existing = self.get_role()
        if existing:
            return self._update_role(existing)
        return self._create_role(existing)

    def _create_role(self, role):
        # Creation always counts as a change, even in check mode.
        self.result['changed'] = True
        params = self.module.params
        create_args = {
            'name': params.get('name'),
            'type': params.get('role_type'),
            'description': params.get('description'),
        }
        if not self.module.check_mode:
            role = self.query_api('createRole', **create_args)['role']
        return role

    def _update_role(self, role):
        update_args = {
            'id': role['id'],
            'name': self.module.params.get('name'),
            'description': self.module.params.get('description'),
        }
        if not self.has_changed(update_args, role):
            return role
        self.result['changed'] = True
        if self.module.check_mode:
            return role
        res = self.query_api('updateRole', **update_args)
        # The API as in 4.9 does not return an updated role yet
        if 'role' in res:
            return res['role']
        return self.get_role()

    def absent_role(self):
        """Delete the role if it exists; return its last known state (or None)."""
        role = self.get_role()
        if not role:
            return role
        self.result['changed'] = True
        if not self.module.check_mode:
            self.query_api('deleteRole', id=role['id'])
        return role
+
+
def main():
    """Ansible entry point: build the argument spec, apply the requested
    state to the role, and exit with the resulting facts."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        uuid=dict(aliases=['id']),
        name=dict(required=True),
        description=dict(),
        role_type=dict(choices=['User', 'DomainAdmin', 'ResourceAdmin', 'Admin'], default='User'),
        state=dict(choices=['present', 'absent'], default='present'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    acs_role = AnsibleCloudStackRole(module)
    if module.params.get('state') == 'absent':
        role = acs_role.absent_role()
    else:
        role = acs_role.present_role()

    module.exit_json(**acs_role.get_result(role))
+
+
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/cs_role_permission.py b/test/support/integration/plugins/modules/cs_role_permission.py
new file mode 100644
index 0000000000..30392b2f87
--- /dev/null
+++ b/test/support/integration/plugins/modules/cs_role_permission.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017, David Passante (@dpassante)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cs_role_permission
+short_description: Manages role permissions on Apache CloudStack based clouds.
+description:
+ - Create, update and remove CloudStack role permissions.
+ - Managing role permissions only supported in CloudStack >= 4.9.
+version_added: '2.6'
+author: David Passante (@dpassante)
+options:
+ name:
+ description:
+ - The API name of the permission.
+ type: str
+ required: true
+ role:
+ description:
+ - Name or ID of the role.
+ type: str
+ required: true
+ permission:
+ description:
+      - The rule permission, allow or deny. Defaults to deny.
+ type: str
+ choices: [ allow, deny ]
+ default: deny
+ state:
+ description:
+ - State of the role permission.
+ type: str
+ choices: [ present, absent ]
+ default: present
+ description:
+ description:
+ - The description of the role permission.
+ type: str
+ parent:
+ description:
+      - The parent role permission UUID. Use 0 to move this rule to the top of the list.
+ type: str
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+- name: Create a role permission
+ cs_role_permission:
+ role: My_Custom_role
+ name: createVPC
+ permission: allow
+ description: My comments
+ delegate_to: localhost
+
+- name: Remove a role permission
+ cs_role_permission:
+ state: absent
+ role: My_Custom_role
+ name: createVPC
+ delegate_to: localhost
+
+- name: Update a system role permission
+ cs_role_permission:
+ role: Domain Admin
+ name: createVPC
+ permission: deny
+ delegate_to: localhost
+
+- name: Update rules order. Move the rule at the top of list
+ cs_role_permission:
+ role: Domain Admin
+ name: createVPC
+ parent: 0
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+ description: The ID of the role permission.
+ returned: success
+ type: str
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: The API name of the permission.
+ returned: success
+ type: str
+ sample: createVPC
+permission:
+ description: The permission type of the api name.
+ returned: success
+ type: str
+ sample: allow
+role_id:
+ description: The ID of the role to which the role permission belongs.
+ returned: success
+ type: str
+ sample: c6f7a5fc-43f8-11e5-a151-feff819cdc7f
+description:
+ description: The description of the role permission
+ returned: success
+ type: str
+ sample: Deny createVPC for users
+'''
+
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.cloudstack import (
+ AnsibleCloudStack,
+ cs_argument_spec,
+ cs_required_together,
+)
+
+
+class AnsibleCloudStackRolePermission(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackRolePermission, self).__init__(module)
+ cloudstack_min_version = LooseVersion('4.9.2')
+
+ self.returns = {
+ 'id': 'id',
+ 'roleid': 'role_id',
+ 'rule': 'name',
+ 'permission': 'permission',
+ 'description': 'description',
+ }
+ self.role_permission = None
+
+ self.cloudstack_version = self._cloudstack_ver()
+
+ if self.cloudstack_version < cloudstack_min_version:
+ self.fail_json(msg="This module requires CloudStack >= %s." % cloudstack_min_version)
+
+ def _cloudstack_ver(self):
+ capabilities = self.get_capabilities()
+ return LooseVersion(capabilities['cloudstackversion'])
+
+ def _get_role_id(self):
+ role = self.module.params.get('role')
+ if not role:
+ return None
+
+ res = self.query_api('listRoles')
+ roles = res['role']
+ if roles:
+ for r in roles:
+ if role in [r['name'], r['id']]:
+ return r['id']
+ self.fail_json(msg="Role '%s' not found" % role)
+
+ def _get_role_perm(self):
+ role_permission = self.role_permission
+
+ args = {
+ 'roleid': self._get_role_id(),
+ }
+
+ rp = self.query_api('listRolePermissions', **args)
+
+ if rp:
+ role_permission = rp['rolepermission']
+
+ return role_permission
+
+ def _get_rule(self, rule=None):
+ if not rule:
+ rule = self.module.params.get('name')
+
+ if self._get_role_perm():
+ for _rule in self._get_role_perm():
+ if rule == _rule['rule'] or rule == _rule['id']:
+ return _rule
+
+ return None
+
+ def _get_rule_order(self):
+ perms = self._get_role_perm()
+ rules = []
+
+ if perms:
+ for i, rule in enumerate(perms):
+ rules.append(rule['id'])
+
+ return rules
+
+ def replace_rule(self):
+ old_rule = self._get_rule()
+
+ if old_rule:
+ rules_order = self._get_rule_order()
+ old_pos = rules_order.index(old_rule['id'])
+
+ self.remove_role_perm()
+
+ new_rule = self.create_role_perm()
+
+ if new_rule:
+ perm_order = self.order_permissions(int(old_pos - 1), new_rule['id'])
+
+ return perm_order
+
+ return None
+
+ def order_permissions(self, parent, rule_id):
+ rules = self._get_rule_order()
+
+ if isinstance(parent, int):
+ parent_pos = parent
+ elif parent == '0':
+ parent_pos = -1
+ else:
+ parent_rule = self._get_rule(parent)
+ if not parent_rule:
+ self.fail_json(msg="Parent rule '%s' not found" % parent)
+
+ parent_pos = rules.index(parent_rule['id'])
+
+ r_id = rules.pop(rules.index(rule_id))
+
+ rules.insert((parent_pos + 1), r_id)
+ rules = ','.join(map(str, rules))
+
+ return rules
+
+ def create_or_update_role_perm(self):
+ role_permission = self._get_rule()
+
+ if not role_permission:
+ role_permission = self.create_role_perm()
+ else:
+ role_permission = self.update_role_perm(role_permission)
+
+ return role_permission
+
+ def create_role_perm(self):
+ role_permission = None
+
+ self.result['changed'] = True
+
+ args = {
+ 'rule': self.module.params.get('name'),
+ 'description': self.module.params.get('description'),
+ 'roleid': self._get_role_id(),
+ 'permission': self.module.params.get('permission'),
+ }
+
+ if not self.module.check_mode:
+ res = self.query_api('createRolePermission', **args)
+ role_permission = res['rolepermission']
+
+ return role_permission
+
+ def update_role_perm(self, role_perm):
+ perm_order = None
+
+ if not self.module.params.get('parent'):
+ args = {
+ 'ruleid': role_perm['id'],
+ 'roleid': role_perm['roleid'],
+ 'permission': self.module.params.get('permission'),
+ }
+
+ if self.has_changed(args, role_perm, only_keys=['permission']):
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ if self.cloudstack_version >= LooseVersion('4.11.0'):
+ self.query_api('updateRolePermission', **args)
+ role_perm = self._get_rule()
+ else:
+ perm_order = self.replace_rule()
+ else:
+ perm_order = self.order_permissions(self.module.params.get('parent'), role_perm['id'])
+
+ if perm_order:
+ args = {
+ 'roleid': role_perm['roleid'],
+ 'ruleorder': perm_order,
+ }
+
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ self.query_api('updateRolePermission', **args)
+ role_perm = self._get_rule()
+
+ return role_perm
+
+ def remove_role_perm(self):
+ role_permission = self._get_rule()
+
+ if role_permission:
+ self.result['changed'] = True
+
+ args = {
+ 'id': role_permission['id'],
+ }
+
+ if not self.module.check_mode:
+ self.query_api('deleteRolePermission', **args)
+
+ return role_permission
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ role=dict(required=True),
+ name=dict(required=True),
+ permission=dict(choices=['allow', 'deny'], default='deny'),
+ description=dict(),
+ state=dict(choices=['present', 'absent'], default='present'),
+ parent=dict(),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ mutually_exclusive=(
+ ['permission', 'parent'],
+ ),
+ supports_check_mode=True
+ )
+
+ acs_role_perm = AnsibleCloudStackRolePermission(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ role_permission = acs_role_perm.remove_role_perm()
+ else:
+ role_permission = acs_role_perm.create_or_update_role_perm()
+
+ result = acs_role_perm.get_result(role_permission)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/cs_service_offering.py b/test/support/integration/plugins/modules/cs_service_offering.py
new file mode 100644
index 0000000000..3b15fe7f1e
--- /dev/null
+++ b/test/support/integration/plugins/modules/cs_service_offering.py
@@ -0,0 +1,583 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cs_service_offering
+description:
+ - Create and delete service offerings for guest and system VMs.
+ - Update display_text of existing service offering.
+short_description: Manages service offerings on Apache CloudStack based clouds.
+version_added: '2.5'
+author: René Moser (@resmo)
+options:
+ disk_bytes_read_rate:
+ description:
+ - Bytes read rate of the disk offering.
+ type: int
+ aliases: [ bytes_read_rate ]
+ disk_bytes_write_rate:
+ description:
+ - Bytes write rate of the disk offering.
+ type: int
+ aliases: [ bytes_write_rate ]
+ cpu_number:
+ description:
+ - The number of CPUs of the service offering.
+ type: int
+ cpu_speed:
+ description:
+ - The CPU speed of the service offering in MHz.
+ type: int
+ limit_cpu_usage:
+ description:
+ - Restrict the CPU usage to committed service offering.
+ type: bool
+ deployment_planner:
+ description:
+ - The deployment planner heuristics used to deploy a VM of this offering.
+ - If not set, the value of global config I(vm.deployment.planner) is used.
+ type: str
+ display_text:
+ description:
+ - Display text of the service offering.
+ - If not set, I(name) will be used as I(display_text) while creating.
+ type: str
+ domain:
+ description:
+ - Domain the service offering is related to.
+ - Public for all domains and subdomains if not set.
+ type: str
+ host_tags:
+ description:
+ - The host tags for this service offering.
+ type: list
+ aliases:
+ - host_tag
+ hypervisor_snapshot_reserve:
+ description:
+ - Hypervisor snapshot reserve space as a percent of a volume.
+ - Only for managed storage using Xen or VMware.
+ type: int
+ is_iops_customized:
+ description:
+ - Whether compute offering iops is custom or not.
+ type: bool
+ aliases: [ disk_iops_customized ]
+ disk_iops_read_rate:
+ description:
+ - IO requests read rate of the disk offering.
+ type: int
+ disk_iops_write_rate:
+ description:
+ - IO requests write rate of the disk offering.
+ type: int
+ disk_iops_max:
+ description:
+ - Max. iops of the compute offering.
+ type: int
+ disk_iops_min:
+ description:
+ - Min. iops of the compute offering.
+ type: int
+ is_system:
+ description:
+ - Whether it is a system VM offering or not.
+ type: bool
+ default: no
+ is_volatile:
+ description:
+ - Whether the virtual machine needs to be volatile or not.
+      - On every reboot of the VM, the root disk is detached and destroyed, and a fresh root disk is created and attached to the VM.
+ type: bool
+ memory:
+ description:
+ - The total memory of the service offering in MB.
+ type: int
+ name:
+ description:
+ - Name of the service offering.
+ type: str
+ required: true
+ network_rate:
+ description:
+ - Data transfer rate in Mb/s allowed.
+ - Supported only for non-system offering and system offerings having I(system_vm_type=domainrouter).
+ type: int
+ offer_ha:
+ description:
+ - Whether HA is set for the service offering.
+ type: bool
+ default: no
+ provisioning_type:
+ description:
+ - Provisioning type used to create volumes.
+ type: str
+ choices:
+ - thin
+ - sparse
+ - fat
+ service_offering_details:
+ description:
+ - Details for planner, used to store specific parameters.
+ - A list of dictionaries having keys C(key) and C(value).
+ type: list
+ state:
+ description:
+ - State of the service offering.
+ type: str
+ choices:
+ - present
+ - absent
+ default: present
+ storage_type:
+ description:
+ - The storage type of the service offering.
+ type: str
+ choices:
+ - local
+ - shared
+ system_vm_type:
+ description:
+ - The system VM type.
+ - Required if I(is_system=yes).
+ type: str
+ choices:
+ - domainrouter
+ - consoleproxy
+ - secondarystoragevm
+ storage_tags:
+ description:
+ - The storage tags for this service offering.
+ type: list
+ aliases:
+ - storage_tag
+ is_customized:
+ description:
+ - Whether the offering is customizable or not.
+ type: bool
+ version_added: '2.8'
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+- name: Create a non-volatile compute service offering with local storage
+ cs_service_offering:
+ name: Micro
+ display_text: Micro 512mb 1cpu
+ cpu_number: 1
+ cpu_speed: 2198
+ memory: 512
+ host_tags: eco
+ storage_type: local
+ delegate_to: localhost
+
+- name: Create a volatile compute service offering with shared storage
+ cs_service_offering:
+ name: Tiny
+ display_text: Tiny 1gb 1cpu
+ cpu_number: 1
+ cpu_speed: 2198
+ memory: 1024
+ storage_type: shared
+ is_volatile: yes
+ host_tags: eco
+ storage_tags: eco
+ delegate_to: localhost
+
+- name: Create or update a volatile compute service offering with shared storage
+ cs_service_offering:
+ name: Tiny
+ display_text: Tiny 1gb 1cpu
+ cpu_number: 1
+ cpu_speed: 2198
+ memory: 1024
+ storage_type: shared
+ is_volatile: yes
+ host_tags: eco
+ storage_tags: eco
+ delegate_to: localhost
+
+- name: Create or update a custom compute service offering
+ cs_service_offering:
+ name: custom
+ display_text: custom compute offer
+ is_customized: yes
+ storage_type: shared
+ host_tags: eco
+ storage_tags: eco
+ delegate_to: localhost
+
+- name: Remove a compute service offering
+ cs_service_offering:
+ name: Tiny
+ state: absent
+ delegate_to: localhost
+
+- name: Create or update a system offering for the console proxy
+ cs_service_offering:
+ name: System Offering for Console Proxy 2GB
+ display_text: System Offering for Console Proxy 2GB RAM
+ is_system: yes
+ system_vm_type: consoleproxy
+ cpu_number: 1
+ cpu_speed: 2198
+ memory: 2048
+ storage_type: shared
+ storage_tags: perf
+ delegate_to: localhost
+
+- name: Remove a system offering
+ cs_service_offering:
+ name: System Offering for Console Proxy 2GB
+ is_system: yes
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the service offering
+ returned: success
+ type: str
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+cpu_number:
+ description: Number of CPUs in the service offering
+ returned: success
+ type: int
+ sample: 4
+cpu_speed:
+ description: Speed of CPUs in MHz in the service offering
+ returned: success
+ type: int
+ sample: 2198
+disk_iops_max:
+ description: Max iops of the disk offering
+ returned: success
+ type: int
+ sample: 1000
+disk_iops_min:
+ description: Min iops of the disk offering
+ returned: success
+ type: int
+ sample: 500
+disk_bytes_read_rate:
+ description: Bytes read rate of the service offering
+ returned: success
+ type: int
+ sample: 1000
+disk_bytes_write_rate:
+ description: Bytes write rate of the service offering
+ returned: success
+ type: int
+ sample: 1000
+disk_iops_read_rate:
+ description: IO requests per second read rate of the service offering
+ returned: success
+ type: int
+ sample: 1000
+disk_iops_write_rate:
+ description: IO requests per second write rate of the service offering
+ returned: success
+ type: int
+ sample: 1000
+created:
+ description: Date the offering was created
+ returned: success
+ type: str
+ sample: 2017-11-19T10:48:59+0000
+display_text:
+ description: Display text of the offering
+ returned: success
+ type: str
+ sample: Micro 512mb 1cpu
+domain:
+  description: Domain the offering is in
+ returned: success
+ type: str
+ sample: ROOT
+host_tags:
+ description: List of host tags
+ returned: success
+ type: list
+ sample: [ 'eco' ]
+storage_tags:
+ description: List of storage tags
+ returned: success
+ type: list
+ sample: [ 'eco' ]
+is_system:
+ description: Whether the offering is for system VMs or not
+ returned: success
+ type: bool
+ sample: false
+is_iops_customized:
+ description: Whether the offering uses custom IOPS or not
+ returned: success
+ type: bool
+ sample: false
+is_volatile:
+ description: Whether the offering is volatile or not
+ returned: success
+ type: bool
+ sample: false
+limit_cpu_usage:
+ description: Whether the CPU usage is restricted to committed service offering
+ returned: success
+ type: bool
+ sample: false
+memory:
+ description: Memory of the system offering
+ returned: success
+ type: int
+ sample: 512
+name:
+ description: Name of the system offering
+ returned: success
+ type: str
+ sample: Micro
+offer_ha:
+ description: Whether HA support is enabled in the offering or not
+ returned: success
+ type: bool
+ sample: false
+provisioning_type:
+ description: Provisioning type used to create volumes
+ returned: success
+ type: str
+ sample: thin
+storage_type:
+ description: Storage type used to create volumes
+ returned: success
+ type: str
+ sample: shared
+system_vm_type:
+ description: System VM type of this offering
+ returned: success
+ type: str
+ sample: consoleproxy
+service_offering_details:
+  description: Additional service offering details
+ returned: success
+ type: dict
+ sample: "{'vgpuType': 'GRID K180Q','pciDevice':'Group of NVIDIA Corporation GK107GL [GRID K1] GPUs'}"
+network_rate:
+ description: Data transfer rate in megabits per second allowed
+ returned: success
+ type: int
+ sample: 1000
+is_customized:
+ description: Whether the offering is customizable or not
+ returned: success
+ type: bool
+ sample: false
+ version_added: '2.8'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.cloudstack import (
+ AnsibleCloudStack,
+ cs_argument_spec,
+ cs_required_together,
+)
+
+
+class AnsibleCloudStackServiceOffering(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackServiceOffering, self).__init__(module)
+ self.returns = {
+ 'cpunumber': 'cpu_number',
+ 'cpuspeed': 'cpu_speed',
+ 'deploymentplanner': 'deployment_planner',
+ 'diskBytesReadRate': 'disk_bytes_read_rate',
+ 'diskBytesWriteRate': 'disk_bytes_write_rate',
+ 'diskIopsReadRate': 'disk_iops_read_rate',
+ 'diskIopsWriteRate': 'disk_iops_write_rate',
+ 'maxiops': 'disk_iops_max',
+ 'miniops': 'disk_iops_min',
+ 'hypervisorsnapshotreserve': 'hypervisor_snapshot_reserve',
+ 'iscustomized': 'is_customized',
+ 'iscustomizediops': 'is_iops_customized',
+ 'issystem': 'is_system',
+ 'isvolatile': 'is_volatile',
+ 'limitcpuuse': 'limit_cpu_usage',
+ 'memory': 'memory',
+ 'networkrate': 'network_rate',
+ 'offerha': 'offer_ha',
+ 'provisioningtype': 'provisioning_type',
+ 'serviceofferingdetails': 'service_offering_details',
+ 'storagetype': 'storage_type',
+ 'systemvmtype': 'system_vm_type',
+ 'tags': 'storage_tags',
+ }
+
+ def get_service_offering(self):
+ args = {
+ 'name': self.module.params.get('name'),
+ 'domainid': self.get_domain(key='id'),
+ 'issystem': self.module.params.get('is_system'),
+ 'systemvmtype': self.module.params.get('system_vm_type'),
+ }
+ service_offerings = self.query_api('listServiceOfferings', **args)
+ if service_offerings:
+ return service_offerings['serviceoffering'][0]
+
+ def present_service_offering(self):
+ service_offering = self.get_service_offering()
+ if not service_offering:
+ service_offering = self._create_offering(service_offering)
+ else:
+ service_offering = self._update_offering(service_offering)
+
+ return service_offering
+
+ def absent_service_offering(self):
+ service_offering = self.get_service_offering()
+ if service_offering:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ args = {
+ 'id': service_offering['id'],
+ }
+ self.query_api('deleteServiceOffering', **args)
+ return service_offering
+
+ def _create_offering(self, service_offering):
+ self.result['changed'] = True
+
+ system_vm_type = self.module.params.get('system_vm_type')
+ is_system = self.module.params.get('is_system')
+
+ required_params = []
+ if is_system and not system_vm_type:
+ required_params.append('system_vm_type')
+ self.module.fail_on_missing_params(required_params=required_params)
+
+ args = {
+ 'name': self.module.params.get('name'),
+ 'displaytext': self.get_or_fallback('display_text', 'name'),
+ 'bytesreadrate': self.module.params.get('disk_bytes_read_rate'),
+ 'byteswriterate': self.module.params.get('disk_bytes_write_rate'),
+ 'cpunumber': self.module.params.get('cpu_number'),
+ 'cpuspeed': self.module.params.get('cpu_speed'),
+ 'customizediops': self.module.params.get('is_iops_customized'),
+ 'deploymentplanner': self.module.params.get('deployment_planner'),
+ 'domainid': self.get_domain(key='id'),
+ 'hosttags': self.module.params.get('host_tags'),
+ 'hypervisorsnapshotreserve': self.module.params.get('hypervisor_snapshot_reserve'),
+ 'iopsreadrate': self.module.params.get('disk_iops_read_rate'),
+ 'iopswriterate': self.module.params.get('disk_iops_write_rate'),
+ 'maxiops': self.module.params.get('disk_iops_max'),
+ 'miniops': self.module.params.get('disk_iops_min'),
+ 'issystem': is_system,
+ 'isvolatile': self.module.params.get('is_volatile'),
+ 'memory': self.module.params.get('memory'),
+ 'networkrate': self.module.params.get('network_rate'),
+ 'offerha': self.module.params.get('offer_ha'),
+ 'provisioningtype': self.module.params.get('provisioning_type'),
+ 'serviceofferingdetails': self.module.params.get('service_offering_details'),
+ 'storagetype': self.module.params.get('storage_type'),
+ 'systemvmtype': system_vm_type,
+ 'tags': self.module.params.get('storage_tags'),
+ 'limitcpuuse': self.module.params.get('limit_cpu_usage'),
+ 'customized': self.module.params.get('is_customized')
+ }
+ if not self.module.check_mode:
+ res = self.query_api('createServiceOffering', **args)
+ service_offering = res['serviceoffering']
+ return service_offering
+
+ def _update_offering(self, service_offering):
+ args = {
+ 'id': service_offering['id'],
+ 'name': self.module.params.get('name'),
+ 'displaytext': self.get_or_fallback('display_text', 'name'),
+ }
+ if self.has_changed(args, service_offering):
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ res = self.query_api('updateServiceOffering', **args)
+ service_offering = res['serviceoffering']
+ return service_offering
+
+ def get_result(self, service_offering):
+ super(AnsibleCloudStackServiceOffering, self).get_result(service_offering)
+ if service_offering:
+ if 'hosttags' in service_offering:
+ self.result['host_tags'] = service_offering['hosttags'].split(',') or [service_offering['hosttags']]
+
+ # Prevent confusion, the api returns a tags key for storage tags.
+ if 'tags' in service_offering:
+ self.result['storage_tags'] = service_offering['tags'].split(',') or [service_offering['tags']]
+ if 'tags' in self.result:
+ del self.result['tags']
+
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ display_text=dict(),
+ cpu_number=dict(type='int'),
+ cpu_speed=dict(type='int'),
+ limit_cpu_usage=dict(type='bool'),
+ deployment_planner=dict(),
+ domain=dict(),
+ host_tags=dict(type='list', aliases=['host_tag']),
+ hypervisor_snapshot_reserve=dict(type='int'),
+ disk_bytes_read_rate=dict(type='int', aliases=['bytes_read_rate']),
+ disk_bytes_write_rate=dict(type='int', aliases=['bytes_write_rate']),
+ disk_iops_read_rate=dict(type='int'),
+ disk_iops_write_rate=dict(type='int'),
+ disk_iops_max=dict(type='int'),
+ disk_iops_min=dict(type='int'),
+ is_system=dict(type='bool', default=False),
+ is_volatile=dict(type='bool'),
+ is_iops_customized=dict(type='bool', aliases=['disk_iops_customized']),
+ memory=dict(type='int'),
+ network_rate=dict(type='int'),
+ offer_ha=dict(type='bool'),
+ provisioning_type=dict(choices=['thin', 'sparse', 'fat']),
+ service_offering_details=dict(type='list'),
+ storage_type=dict(choices=['local', 'shared']),
+ system_vm_type=dict(choices=['domainrouter', 'consoleproxy', 'secondarystoragevm']),
+ storage_tags=dict(type='list', aliases=['storage_tag']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ is_customized=dict(type='bool'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ acs_so = AnsibleCloudStackServiceOffering(module)
+
+ state = module.params.get('state')
+ if state == "absent":
+ service_offering = acs_so.absent_service_offering()
+ else:
+ service_offering = acs_so.present_service_offering()
+
+ result = acs_so.get_result(service_offering)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2.py b/test/support/integration/plugins/modules/ec2.py
new file mode 100644
index 0000000000..91503bbf8e
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2.py
@@ -0,0 +1,1766 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2
+short_description: create, terminate, start or stop an instance in ec2
+description:
+ - Creates or terminates ec2 instances.
+ - >
+ Note: This module uses the older boto Python module to interact with the EC2 API.
+ M(ec2) will still receive bug fixes, but no new features.
+ Consider using the M(ec2_instance) module instead.
+ If M(ec2_instance) does not support a feature you need that is available in M(ec2), please
+ file a feature request.
+version_added: "0.9"
+options:
+ key_name:
+ description:
+ - Key pair to use on the instance.
+ - The SSH key must already exist in AWS in order to use this argument.
+ - Keys can be created / deleted using the M(ec2_key) module.
+ aliases: ['keypair']
+ type: str
+ id:
+ version_added: "1.1"
+ description:
+ - Identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances.
+ - This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on.
+ - For details, see the description of client token at U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ type: str
+ group:
+ description:
+ - Security group (or list of groups) to use with the instance.
+ aliases: [ 'groups' ]
+ type: list
+ elements: str
+ group_id:
+ version_added: "1.1"
+ description:
+ - Security group id (or list of ids) to use with the instance.
+ type: list
+ elements: str
+ zone:
+ version_added: "1.2"
+ description:
+ - AWS availability zone in which to launch the instance.
+ aliases: [ 'aws_zone', 'ec2_zone' ]
+ type: str
+ instance_type:
+ description:
+ - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ - Required when creating a new instance.
+ type: str
+ aliases: ['type']
+ tenancy:
+ version_added: "1.9"
+ description:
+ - An instance with a tenancy of C(dedicated) runs on single-tenant hardware and can only be launched into a VPC.
+ - Note that to use dedicated tenancy you MUST specify a I(vpc_subnet_id) as well.
+ - Dedicated tenancy is not available for EC2 "micro" instances.
+ default: default
+ choices: [ "default", "dedicated" ]
+ type: str
+ spot_price:
+ version_added: "1.5"
+ description:
+ - Maximum spot price to bid. If not set, a regular on-demand instance is requested.
+ - A spot request is made with this maximum bid. When it is filled, the instance is started.
+ type: str
+ spot_type:
+ version_added: "2.0"
+ description:
+ - The type of spot request.
+ - After being interrupted a C(persistent) spot instance will be started once there is capacity to fill the request again.
+ default: "one-time"
+ choices: [ "one-time", "persistent" ]
+ type: str
+ image:
+ description:
+ - I(ami) ID to use for the instance.
+ - Required when I(state=present).
+ type: str
+ kernel:
+ description:
+ - Kernel eki to use for the instance.
+ type: str
+ ramdisk:
+ description:
+ - Ramdisk eri to use for the instance.
+ type: str
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ - Does not wait for SSH, see the 'wait_for_connection' example for details.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 300
+ type: int
+ spot_wait_timeout:
+ version_added: "1.5"
+ description:
+ - How long to wait for the spot instance request to be fulfilled. Affects 'Request valid until' for setting spot request lifespan.
+ default: 600
+ type: int
+ count:
+ description:
+ - Number of instances to launch.
+ default: 1
+ type: int
+ monitoring:
+ version_added: "1.1"
+ description:
+ - Enable detailed monitoring (CloudWatch) for instance.
+ type: bool
+ default: false
+ user_data:
+ version_added: "0.9"
+ description:
+ - Opaque blob of data which is made available to the EC2 instance.
+ type: str
+ instance_tags:
+ version_added: "1.0"
+ description:
+ - A hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'.
+ type: dict
+ placement_group:
+ version_added: "1.3"
+ description:
+ - Placement group for the instance when using EC2 Clustered Compute.
+ type: str
+ vpc_subnet_id:
+ version_added: "1.1"
+ description:
+ - the subnet ID in which to launch the instance (VPC).
+ type: str
+ assign_public_ip:
+ version_added: "1.5"
+ description:
+ - When provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+.
+ type: bool
+ private_ip:
+ version_added: "1.2"
+ description:
+ - The private ip address to assign the instance (from the vpc subnet).
+ type: str
+ instance_profile_name:
+ version_added: "1.3"
+ description:
+ - Name of the IAM instance profile (i.e. what the EC2 console refers to as an "IAM Role") to use. Boto library must be 2.5.0+.
+ type: str
+ instance_ids:
+ version_added: "1.3"
+ description:
+ - "list of instance ids, currently used for states: absent, running, stopped"
+ aliases: ['instance_id']
+ type: list
+ elements: str
+ source_dest_check:
+ version_added: "1.6"
+ description:
+ - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers).
+ When initially creating an instance the EC2 API defaults this to C(True).
+ type: bool
+ termination_protection:
+ version_added: "2.0"
+ description:
+ - Enable or Disable the Termination Protection.
+ type: bool
+ default: false
+ instance_initiated_shutdown_behavior:
+ version_added: "2.2"
+ description:
+ - Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store.
+ images (which require termination on shutdown).
+ default: 'stop'
+ choices: [ "stop", "terminate" ]
+ type: str
+ state:
+ version_added: "1.3"
+ description:
+ - Create, terminate, start, stop or restart instances. The state 'restarted' was added in Ansible 2.2.
+ - When I(state=absent), I(instance_ids) is required.
+ - When I(state=running), I(state=stopped) or I(state=restarted) then either I(instance_ids) or I(instance_tags) is required.
+ default: 'present'
+ choices: ['absent', 'present', 'restarted', 'running', 'stopped']
+ type: str
+ volumes:
+ version_added: "1.5"
+ description:
+ - A list of hash/dictionaries of volumes to add to the new instance.
+ type: list
+ elements: dict
+ suboptions:
+ device_name:
+ type: str
+ required: true
+ description:
+ - A name for the device (For example C(/dev/sda)).
+ delete_on_termination:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be automatically deleted when the instance is terminated.
+ ephemeral:
+ type: str
+ description:
+ - Whether the volume should be ephemeral.
+ - Data on ephemeral volumes is lost when the instance is stopped.
+ - Mutually exclusive with the I(snapshot) parameter.
+ encrypted:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK.
+ snapshot:
+ type: str
+ description:
+ - The ID of an EBS snapshot to copy when creating the volume.
+ - Mutually exclusive with the I(ephemeral) parameter.
+ volume_type:
+ type: str
+ description:
+ - The type of volume to create.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types.
+ volume_size:
+ type: int
+ description:
+ - The size of the volume (in GiB).
+ iops:
+ type: int
+ description:
+ - The number of IOPS per second to provision for the volume.
+ - Required when I(volume_type=io1).
+ ebs_optimized:
+ version_added: "1.6"
+ description:
+ - Whether instance is using optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+ default: false
+ type: bool
+ exact_count:
+ version_added: "1.5"
+ description:
+ - An integer value which indicates how many instances that match the 'count_tag' parameter should be running.
+ Instances are either created or terminated based on this value.
+ type: int
+ count_tag:
+ version_added: "1.5"
+ description:
+ - Used with I(exact_count) to determine how many nodes based on a specific tag criteria should be running.
+ This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers
+ that are tagged with "class=webserver". The specified tag must already exist or be passed in as the I(instance_tags) option.
+ type: raw
+ network_interfaces:
+ version_added: "2.0"
+ description:
+ - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces,
+ none of the I(assign_public_ip), I(private_ip), I(vpc_subnet_id), I(group), or I(group_id) parameters may be used. (Those parameters are
+ for creating a new network interface at launch.)
+ aliases: ['network_interface']
+ type: list
+ elements: str
+ spot_launch_group:
+ version_added: "2.1"
+ description:
+ - Launch group for spot requests, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group).
+ type: str
+author:
+ - "Tim Gerla (@tgerla)"
+ - "Lester Wade (@lwade)"
+ - "Seth Vidal (@skvidal)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic provisioning example
+- ec2:
+ key_name: mykey
+ instance_type: t2.micro
+ image: ami-123456
+ wait: yes
+ group: webserver
+ count: 3
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Advanced example with tagging and CloudWatch
+- ec2:
+ key_name: mykey
+ group: databases
+ instance_type: t2.micro
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ instance_tags:
+ db: postgres
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Single instance with additional IOPS volume from snapshot and volume delete on termination
+- ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: c3.medium
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ volumes:
+ - device_name: /dev/sdb
+ snapshot: snap-abcdef12
+ volume_type: io1
+ iops: 1000
+ volume_size: 100
+ delete_on_termination: true
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Single instance with ssd gp2 root volume
+- ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: c3.medium
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ volumes:
+ - device_name: /dev/xvda
+ volume_type: gp2
+ volume_size: 8
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ count_tag:
+ Name: dbserver
+ exact_count: 1
+
+# Multiple groups example
+- ec2:
+ key_name: mykey
+ group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
+ instance_type: m1.large
+ image: ami-6e649707
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ instance_tags:
+ db: postgres
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Multiple instances with additional volume from snapshot
+- ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: m1.large
+ image: ami-6e649707
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ volumes:
+ - device_name: /dev/sdb
+ snapshot: snap-abcdef12
+ volume_size: 10
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Dedicated tenancy example
+- local_action:
+ module: ec2
+ assign_public_ip: yes
+ group_id: sg-1dc53f72
+ key_name: mykey
+ image: ami-6e649707
+ instance_type: m1.small
+ tenancy: dedicated
+ vpc_subnet_id: subnet-29e63245
+ wait: yes
+
+# Spot instance example
+- ec2:
+ spot_price: 0.24
+ spot_wait_timeout: 600
+ keypair: mykey
+ group_id: sg-1dc53f72
+ instance_type: m1.small
+ image: ami-6e649707
+ wait: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ spot_launch_group: report_generators
+ instance_initiated_shutdown_behavior: terminate
+
+# Examples using pre-existing network interfaces
+- ec2:
+ key_name: mykey
+ instance_type: t2.small
+ image: ami-f005ba11
+ network_interface: eni-deadbeef
+
+- ec2:
+ key_name: mykey
+ instance_type: t2.small
+ image: ami-f005ba11
+ network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']
+
+# Launch instances, runs some tasks
+# and then terminate them
+
+- name: Create a sandbox instance
+ hosts: localhost
+ gather_facts: False
+ vars:
+ keypair: my_keypair
+ instance_type: m1.small
+ security_group: my_securitygroup
+ image: my_ami_id
+ region: us-east-1
+ tasks:
+ - name: Launch instance
+ ec2:
+ key_name: "{{ keypair }}"
+ group: "{{ security_group }}"
+ instance_type: "{{ instance_type }}"
+ image: "{{ image }}"
+ wait: true
+ region: "{{ region }}"
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ register: ec2
+
+ - name: Add new instance to host group
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: launched
+ loop: "{{ ec2.instances }}"
+
+ - name: Wait for SSH to come up
+ delegate_to: "{{ item.public_dns_name }}"
+ wait_for_connection:
+ delay: 60
+ timeout: 320
+ loop: "{{ ec2.instances }}"
+
+- name: Configure instance(s)
+ hosts: launched
+ become: True
+ gather_facts: True
+ roles:
+ - my_awesome_role
+ - my_awesome_test
+
+- name: Terminate instances
+ hosts: localhost
+ tasks:
+ - name: Terminate instances that were previously launched
+ ec2:
+ state: 'absent'
+ instance_ids: '{{ ec2.instance_ids }}'
+
+# Start a few existing instances, run some tasks
+# and stop the instances
+
+- name: Start sandbox instances
+ hosts: localhost
+ gather_facts: false
+ vars:
+ instance_ids:
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ region: us-east-1
+ tasks:
+ - name: Start the sandbox instances
+ ec2:
+ instance_ids: '{{ instance_ids }}'
+ region: '{{ region }}'
+ state: running
+ wait: True
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ roles:
+ - do_neat_stuff
+ - do_more_neat_stuff
+
+- name: Stop sandbox instances
+ hosts: localhost
+ gather_facts: false
+ vars:
+ instance_ids:
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ region: us-east-1
+ tasks:
+ - name: Stop the sandbox instances
+ ec2:
+ instance_ids: '{{ instance_ids }}'
+ region: '{{ region }}'
+ state: stopped
+ wait: True
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# Start stopped instances specified by tag
+#
+- local_action:
+ module: ec2
+ instance_tags:
+ Name: ExtraPower
+ state: running
+
+#
+# Restart instances specified by tag
+#
+- local_action:
+ module: ec2
+ instance_tags:
+ Name: ExtraPower
+ state: restarted
+
+#
+# Enforce that 5 instances with a tag "foo" are running
+# (Highly recommended!)
+#
+
+- ec2:
+ key_name: mykey
+ instance_type: c1.medium
+ image: ami-40603AD1
+ wait: yes
+ group: webserver
+ instance_tags:
+ foo: bar
+ exact_count: 5
+ count_tag: foo
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# Enforce that 5 running instances named "database" with a "dbtype" of "postgres" exist
+#
+
+- ec2:
+ key_name: mykey
+ instance_type: c1.medium
+ image: ami-40603AD1
+ wait: yes
+ group: webserver
+ instance_tags:
+ Name: database
+ dbtype: postgres
+ exact_count: 5
+ count_tag:
+ Name: database
+ dbtype: postgres
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# count_tag complex argument examples
+#
+
+ # instances with tag foo
+- ec2:
+ count_tag:
+ foo:
+
+ # instances with tag foo=bar
+- ec2:
+ count_tag:
+ foo: bar
+
+ # instances with tags foo=bar & baz
+- ec2:
+ count_tag:
+ foo: bar
+ baz:
+
+ # instances with tags foo & bar & baz=bang
+- ec2:
+ count_tag:
+ - foo
+ - bar
+ - baz: bang
+
+'''
+
+import time
+import datetime
+import traceback
+from ast import literal_eval
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, ec2_connect
+from ansible.module_utils.six import get_function_code, string_types
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+ import boto.ec2
+ from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
+ from boto.exception import EC2ResponseError
+ from boto import connect_ec2_endpoint
+ from boto import connect_vpc
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None):
    """Look up instances matching *count_tag* for exact_count enforcement.

    Filters on the module's 'state' parameter only when it is 'running' or
    'stopped'; any other state is ignored for the lookup. Instances that are
    already terminated or shutting down are excluded from the returned list.

    Returns a (reservations, instances) tuple.
    """
    desired_state = module.params.get('state')
    if desired_state not in ('running', 'stopped'):
        desired_state = None

    reservations = get_reservations(module, ec2, vpc, tags=count_tag,
                                    state=desired_state, zone=zone)

    # Flatten the reservations, dropping instances on their way out.
    instances = [
        inst
        for res in reservations if hasattr(res, 'instances')
        for inst in res.instances
        if inst.state not in ('terminated', 'shutting-down')
    ]

    return reservations, instances
+
+
+def _set_none_to_blank(dictionary):
+ result = dictionary
+ for k in result:
+ if isinstance(result[k], dict):
+ result[k] = _set_none_to_blank(result[k])
+ elif not result[k]:
+ result[k] = ""
+ return result
+
+
def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None):
    """Query EC2 for reservations matching the given tag/state/zone criteria.

    Builds an EC2 'describe instances' filter dict from:
      - the module's vpc_subnet_id (and the VPC it belongs to, when resolvable),
      - *tags*, which may be a string, int, list, or dict (see below),
      - *state* (instance-state-name) and *zone* (availability-zone),
      - the module's 'id' parameter, matched against the client-token.

    Returns the list of boto Reservation objects from ec2.get_all_instances().
    """
    # TODO: filters do not work with tags that have underscores
    filters = dict()

    vpc_subnet_id = module.params.get('vpc_subnet_id')
    vpc_id = None
    if vpc_subnet_id:
        filters.update({"subnet-id": vpc_subnet_id})
        if vpc:
            # Resolve the subnet's parent VPC so we can also filter on it.
            vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id

    if vpc_id:
        filters.update({"vpc-id": vpc_id})

    if tags is not None:

        # A string may actually be a serialized list/dict (e.g. from YAML);
        # try to evaluate it first, falling back to treating it as a plain
        # tag name below.
        if isinstance(tags, str):
            try:
                tags = literal_eval(tags)
            except Exception:
                pass

        # if not a string type, convert and make sure it's a text string
        if isinstance(tags, int):
            tags = to_text(tags)

        # if string, we only care that a tag of that name exists
        if isinstance(tags, str):
            filters.update({"tag-key": tags})

        # if list, append each item to filters; list items may themselves be
        # dicts (tag name -> value) or bare tag names
        if isinstance(tags, list):
            for x in tags:
                if isinstance(x, dict):
                    x = _set_none_to_blank(x)
                    filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items()))
                else:
                    filters.update({"tag-key": x})

        # if dict, add the key and value to the filter
        if isinstance(tags, dict):
            tags = _set_none_to_blank(tags)
            filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items()))

        # stop here if no usable filter could be built from the tags
        if not filters:
            module.fail_json(msg="Filters based on tag is empty => tags: %s" % (tags))

    if state:
        # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
        filters.update({'instance-state-name': state})

    if zone:
        filters.update({'availability-zone': zone})

    if module.params.get('id'):
        filters['client-token'] = module.params['id']

    results = ec2.get_all_instances(filters=filters)

    return results
+
+
def get_instance_info(inst):
    """
    Build a plain dictionary describing a boto Instance object,
    suitable for returning to Ansible as module output.
    """
    instance_info = {
        'id': inst.id,
        'ami_launch_index': inst.ami_launch_index,
        'private_ip': inst.private_ip_address,
        'private_dns_name': inst.private_dns_name,
        'public_ip': inst.ip_address,
        'dns_name': inst.dns_name,
        'public_dns_name': inst.public_dns_name,
        'state_code': inst.state_code,
        'architecture': inst.architecture,
        'image_id': inst.image_id,
        'key_name': inst.key_name,
        'placement': inst.placement,
        # Region is the availability zone minus its trailing letter.
        'region': inst.placement[:-1],
        'kernel': inst.kernel,
        'ramdisk': inst.ramdisk,
        'launch_time': inst.launch_time,
        'instance_type': inst.instance_type,
        'root_device_type': inst.root_device_type,
        'root_device_name': inst.root_device_name,
        'state': inst.state,
        'hypervisor': inst.hypervisor,
        'tags': inst.tags,
        'groups': dict((sg.id, sg.name) for sg in inst.groups),
    }

    # These attributes are absent on older boto versions or some instance
    # types; fall back to a sensible default instead of blowing up.
    instance_info['virtualization_type'] = getattr(inst, 'virtualization_type', None)
    instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized', False)

    try:
        # Flatten the block device mapping; any missing attribute along the
        # way (including a missing/None mapping) degrades to False.
        instance_info['block_device_mapping'] = dict(
            (name, {'status': dev.status,
                    'volume_id': dev.volume_id,
                    'delete_on_termination': dev.delete_on_termination})
            for name, dev in getattr(inst, 'block_device_mapping').items())
    except AttributeError:
        instance_info['block_device_mapping'] = False

    instance_info['tenancy'] = getattr(inst, 'placement_tenancy', 'default')

    return instance_info
+
+
def boto_supports_associate_public_ip_address(ec2):
    """
    Probe whether the installed Boto library supports the
    associate_public_ip_address attribute on NetworkInterfaceSpecification
    (added in Boto 2.13.0).

    ec2: authenticated ec2 connection object (unused; kept for API symmetry)

    Returns:
        True when the attribute is available, False otherwise.
    """
    try:
        spec = boto.ec2.networkinterface.NetworkInterfaceSpecification()
        spec.associate_public_ip_address  # raises AttributeError on old boto
    except AttributeError:
        return False
    return True
+
+
def boto_supports_profile_name_arg(ec2):
    """
    Probe whether ec2.run_instances() accepts the instance_profile_name
    argument (added in Boto 2.5.0).

    ec2: authenticated ec2 connection object

    Returns:
        True when the argument is accepted, False otherwise.
    """
    varnames = get_function_code(ec2.run_instances).co_varnames
    return 'instance_profile_name' in varnames
+
+
def boto_supports_volume_encryption():
    """
    Probe whether the installed Boto library supports EBS volume
    encryption (added in Boto 2.29.0).

    Returns:
        True when boto is new enough, False otherwise.
    """
    if not hasattr(boto, 'Version'):
        return False
    return LooseVersion(boto.Version) >= LooseVersion('2.29.0')
+
+
def create_block_device(module, ec2, volume):
    """
    Translate one entry of the module's ``volumes`` option into a boto
    BlockDeviceType, validating size/iops/snapshot/ephemeral constraints.
    Fails the module on invalid combinations.
    """
    # Not aware of a way to determine this programmatically
    # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
    MAX_IOPS_TO_SIZE_RATIO = 30

    volume_type = volume.get('volume_type')

    # A brand-new volume (no snapshot, not ephemeral) must carry a size.
    if 'snapshot' not in volume and 'ephemeral' not in volume and 'volume_size' not in volume:
        module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')

    if 'snapshot' in volume:
        if volume_type == 'io1' and 'iops' not in volume:
            module.fail_json(msg='io1 volumes must have an iops value set')
        if 'iops' in volume:
            # AWS caps provisioned IOPS at a fixed multiple of volume size.
            snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
            size = volume.get('volume_size', snapshot.volume_size)
            if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
                module.fail_json(msg='IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)

    if 'ephemeral' in volume and 'snapshot' in volume:
        module.fail_json(msg='Cannot set both ephemeral and snapshot')

    device_args = dict(
        snapshot_id=volume.get('snapshot'),
        ephemeral_name=volume.get('ephemeral'),
        size=volume.get('volume_size'),
        volume_type=volume_type,
        delete_on_termination=volume.get('delete_on_termination', False),
        iops=volume.get('iops'),
    )
    # Only newer boto (>= 2.29.0) understands the 'encrypted' keyword.
    if boto_supports_volume_encryption():
        device_args['encrypted'] = volume.get('encrypted', None)

    return BlockDeviceType(**device_args)
+
+
def boto_supports_param_in_spot_request(ec2, param):
    """
    Probe whether ec2.request_spot_instances() accepts *param* as an
    argument (e.g. placement_group was only added in Boto 2.3.0).

    ec2: authenticated ec2 connection object
    param: argument name to look for

    Returns:
        True when the named argument is accepted, False otherwise.
    """
    varnames = get_function_code(ec2.request_spot_instances).co_varnames
    return param in varnames
+
+
def await_spot_requests(module, ec2, spot_requests, count):
    """
    Wait for a group of spot requests to be fulfilled, or fail.

    Polls every 5 seconds until either *count* instance IDs have been
    assigned or the module's 'spot_wait_timeout' elapses. Fails the module
    (and exits) on any request reaching a terminal failure state.

    module: Ansible module object
    ec2: authenticated ec2 connection object
    spot_requests: boto.ec2.spotinstancerequest.SpotInstanceRequest object returned by ec2.request_spot_instances
    count: Total number of instances to be created by the spot requests

    Returns:
        list of instance ID's created by the spot request(s)
    """
    spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
    wait_complete = time.time() + spot_wait_timeout

    # Maps spot request id -> fulfilled instance id; a request is only
    # polled again while it has no entry here.
    spot_req_inst_ids = dict()
    while time.time() < wait_complete:
        reqs = ec2.get_all_spot_instance_requests()
        for sirb in spot_requests:
            if sirb.id in spot_req_inst_ids:
                continue
            for sir in reqs:
                if sir.id != sirb.id:
                    continue  # this is not our spot instance
                if sir.instance_id is not None:
                    spot_req_inst_ids[sirb.id] = sir.instance_id
                elif sir.state == 'open':
                    continue  # still waiting, nothing to do here
                elif sir.state == 'active':
                    continue  # Instance is created already, nothing to do here
                elif sir.state == 'failed':
                    module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
                        sir.id, sir.status.code, sir.fault.code, sir.fault.message))
                elif sir.state == 'cancelled':
                    module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
                elif sir.state == 'closed':
                    # instance is terminating or marked for termination
                    # this may be intentional on the part of the operator,
                    # or it may have been terminated by AWS due to capacity,
                    # price, or group constraints in this case, we'll fail
                    # the module if the reason for the state is anything
                    # other than termination by user. Codes are documented at
                    # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
                    if sir.status.code == 'instance-terminated-by-user':
                        # do nothing, since the user likely did this on purpose
                        pass
                    else:
                        spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
                        module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))

        if len(spot_req_inst_ids) < count:
            time.sleep(5)
        else:
            # All requests fulfilled; hand back the instance ids.
            return list(spot_req_inst_ids.values())
    module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime())
+
+
def enforce_count(module, ec2, vpc):
    """
    Converge the number of instances matching 'count_tag' to 'exact_count'
    by creating or terminating instances as needed.

    Returns (all_instances, changed_instance_dicts, changed_instance_ids,
    changed), where all_instances are dicts describing every matching
    instance after convergence.
    """
    exact_count = module.params.get('exact_count')
    count_tag = module.params.get('count_tag')
    zone = module.params.get('zone')

    # fail here if the exact count was specified without filtering
    # on a tag, as this may lead to a undesired removal of instances
    if exact_count and count_tag is None:
        module.fail_json(msg="you must use the 'count_tag' option with exact_count")

    reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone)

    changed = None
    # NOTE(review): checkmode is hardcoded False here, so the create/terminate
    # branches below always execute — presumably check-mode support was never
    # wired up for this path; confirm against module.check_mode.
    checkmode = False
    instance_dict_array = []
    changed_instance_ids = None

    if len(instances) == exact_count:
        changed = False
    elif len(instances) < exact_count:
        # Too few: launch the difference.
        changed = True
        to_create = exact_count - len(instances)
        if not checkmode:
            (instance_dict_array, changed_instance_ids, changed) \
                = create_instances(module, ec2, vpc, override_count=to_create)

            for inst in instance_dict_array:
                instances.append(inst)
    elif len(instances) > exact_count:
        # Too many: terminate the surplus, lowest instance ids first.
        changed = True
        to_remove = len(instances) - exact_count
        if not checkmode:
            all_instance_ids = sorted([x.id for x in instances])
            remove_ids = all_instance_ids[0:to_remove]

            instances = [x for x in instances if x.id not in remove_ids]

            (changed, instance_dict_array, changed_instance_ids) \
                = terminate_instances(module, ec2, remove_ids)
            # Mark the terminated instances' state explicitly in the output.
            terminated_list = []
            for inst in instance_dict_array:
                inst['state'] = "terminated"
                terminated_list.append(inst)
            instance_dict_array = terminated_list

    # ensure all instances are dictionaries
    all_instances = []
    for inst in instances:

        if not isinstance(inst, dict):
            warn_if_public_ip_assignment_changed(module, inst)
            inst = get_instance_info(inst)
        all_instances.append(inst)

    return (all_instances, instance_dict_array, changed_instance_ids, changed)
+
+
def create_instances(module, ec2, vpc, override_count=None):
    """
    Creates new instances

    module : AnsibleModule object
    ec2: authenticated ec2 connection object
    vpc: authenticated VPC connection object (or None when no region/VPC)
    override_count: when set, launch this many instances instead of the
        module's 'count' parameter (used by enforce_count)

    Returns:
        A list of dictionaries with instance information
        about the instances that were launched
    """

    # --- Pull every relevant module parameter into locals ---
    key_name = module.params.get('key_name')
    id = module.params.get('id')
    group_name = module.params.get('group')
    group_id = module.params.get('group_id')
    zone = module.params.get('zone')
    instance_type = module.params.get('instance_type')
    tenancy = module.params.get('tenancy')
    spot_price = module.params.get('spot_price')
    spot_type = module.params.get('spot_type')
    image = module.params.get('image')
    if override_count:
        count = override_count
    else:
        count = module.params.get('count')
    monitoring = module.params.get('monitoring')
    kernel = module.params.get('kernel')
    ramdisk = module.params.get('ramdisk')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
    placement_group = module.params.get('placement_group')
    user_data = module.params.get('user_data')
    instance_tags = module.params.get('instance_tags')
    vpc_subnet_id = module.params.get('vpc_subnet_id')
    assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
    private_ip = module.params.get('private_ip')
    instance_profile_name = module.params.get('instance_profile_name')
    volumes = module.params.get('volumes')
    ebs_optimized = module.params.get('ebs_optimized')
    exact_count = module.params.get('exact_count')
    count_tag = module.params.get('count_tag')
    source_dest_check = module.boolean(module.params.get('source_dest_check'))
    termination_protection = module.boolean(module.params.get('termination_protection'))
    network_interfaces = module.params.get('network_interfaces')
    spot_launch_group = module.params.get('spot_launch_group')
    instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')

    # Resolve the subnet's parent VPC id when launching into a subnet.
    vpc_id = None
    if vpc_subnet_id:
        if not vpc:
            module.fail_json(msg="region must be specified")
        else:
            vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
    else:
        vpc_id = None

    try:
        # Here we try to lookup the group id from the security group name - if group is set.
        if group_name:
            if vpc_id:
                grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
            else:
                grp_details = ec2.get_all_security_groups()
            if isinstance(group_name, string_types):
                group_name = [group_name]
            unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
            if len(unmatched) > 0:
                module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
            group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
        # Now we try to lookup the group id testing if group exists.
        elif group_id:
            # wrap the group_id in a list if it's not one already
            if isinstance(group_id, string_types):
                group_id = [group_id]
            grp_details = ec2.get_all_security_groups(group_ids=group_id)
            group_name = [grp_item.name for grp_item in grp_details]
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    # Lookup any instances that match our run id.

    running_instances = []
    count_remaining = int(count)

    # The 'id' parameter is passed to EC2 as an idempotency client-token:
    # instances already launched with it count against the requested total.
    if id is not None:
        filter_dict = {'client-token': id, 'instance-state-name': 'running'}
        previous_reservations = ec2.get_all_instances(None, filter_dict)
        for res in previous_reservations:
            for prev_instance in res.instances:
                running_instances.append(prev_instance)
        count_remaining = count_remaining - len(running_instances)

    # Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want.

    if count_remaining == 0:
        changed = False
    else:
        changed = True
        try:
            params = {'image_id': image,
                      'key_name': key_name,
                      'monitoring_enabled': monitoring,
                      'placement': zone,
                      'instance_type': instance_type,
                      'kernel_id': kernel,
                      'ramdisk_id': ramdisk}
            if user_data is not None:
                params['user_data'] = to_bytes(user_data, errors='surrogate_or_strict')

            if ebs_optimized:
                params['ebs_optimized'] = ebs_optimized

            # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
            if not spot_price:
                params['tenancy'] = tenancy

            if boto_supports_profile_name_arg(ec2):
                params['instance_profile_name'] = instance_profile_name
            else:
                if instance_profile_name is not None:
                    module.fail_json(
                        msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")

            # Public-IP assignment requires building an explicit network
            # interface specification instead of plain subnet parameters.
            if assign_public_ip is not None:
                if not boto_supports_associate_public_ip_address(ec2):
                    module.fail_json(
                        msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
                elif not vpc_subnet_id:
                    module.fail_json(
                        msg="assign_public_ip only available with vpc_subnet_id")

                else:
                    if private_ip:
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            subnet_id=vpc_subnet_id,
                            private_ip_address=private_ip,
                            groups=group_id,
                            associate_public_ip_address=assign_public_ip)
                    else:
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            subnet_id=vpc_subnet_id,
                            groups=group_id,
                            associate_public_ip_address=assign_public_ip)
                    interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
                    params['network_interfaces'] = interfaces
            else:
                # Pre-existing ENIs, if given, replace subnet/group parameters.
                if network_interfaces:
                    if isinstance(network_interfaces, string_types):
                        network_interfaces = [network_interfaces]
                    interfaces = []
                    for i, network_interface_id in enumerate(network_interfaces):
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            network_interface_id=network_interface_id,
                            device_index=i)
                        interfaces.append(interface)
                    params['network_interfaces'] = \
                        boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
                else:
                    params['subnet_id'] = vpc_subnet_id
                    if vpc_subnet_id:
                        params['security_group_ids'] = group_id
                    else:
                        params['security_groups'] = group_name

            if volumes:
                bdm = BlockDeviceMapping()
                for volume in volumes:
                    if 'device_name' not in volume:
                        module.fail_json(msg='Device name must be set for volume')
                    # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0
                    # to be a signal not to create this volume
                    if 'volume_size' not in volume or int(volume['volume_size']) > 0:
                        bdm[volume['device_name']] = create_block_device(module, ec2, volume)

                params['block_device_map'] = bdm

            # check to see if we're using spot pricing first before starting instances
            if not spot_price:
                # When assign_public_ip is used with private_ip, the private
                # address was already placed on the network interface above,
                # so it must not be passed again at the top level.
                if assign_public_ip is not None and private_ip:
                    params.update(
                        dict(
                            min_count=count_remaining,
                            max_count=count_remaining,
                            client_token=id,
                            placement_group=placement_group,
                        )
                    )
                else:
                    params.update(
                        dict(
                            min_count=count_remaining,
                            max_count=count_remaining,
                            client_token=id,
                            placement_group=placement_group,
                            private_ip_address=private_ip,
                        )
                    )

                # For ordinary (not spot) instances, we can select 'stop'
                # (the default) or 'terminate' here.
                params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'

                try:
                    res = ec2.run_instances(**params)
                except boto.exception.EC2ResponseError as e:
                    # Instance-store images reject 'stop'; retry once with
                    # 'terminate' before giving up.
                    if (params['instance_initiated_shutdown_behavior'] != 'terminate' and
                            "InvalidParameterCombination" == e.error_code):
                        params['instance_initiated_shutdown_behavior'] = 'terminate'
                        res = ec2.run_instances(**params)
                    else:
                        raise

                instids = [i.id for i in res.instances]
                while True:
                    try:
                        ec2.get_all_instances(instids)
                        break
                    except boto.exception.EC2ResponseError as e:
                        if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
                            # there's a race between start and get an instance
                            continue
                        else:
                            module.fail_json(msg=str(e))

                # The instances returned through ec2.run_instances above can be in
                # terminated state due to idempotency. See commit 7f11c3d for a complete
                # explanation.
                terminated_instances = [
                    str(instance.id) for instance in res.instances if instance.state == 'terminated'
                ]
                if terminated_instances:
                    module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
                                         "were created previously but have since been terminated - " +
                                         "use a (possibly different) 'instanceid' parameter")

            else:
                # --- Spot-request path ---
                if private_ip:
                    module.fail_json(
                        msg='private_ip only available with on-demand (non-spot) instances')
                if boto_supports_param_in_spot_request(ec2, 'placement_group'):
                    params['placement_group'] = placement_group
                elif placement_group:
                    module.fail_json(
                        msg="placement_group parameter requires Boto version 2.3.0 or higher.")

                # You can't tell spot instances to 'stop'; they will always be
                # 'terminate'd. For convenience, we'll ignore the latter value.
                if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
                    module.fail_json(
                        msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")

                if spot_launch_group and isinstance(spot_launch_group, string_types):
                    params['launch_group'] = spot_launch_group

                params.update(dict(
                    count=count_remaining,
                    type=spot_type,
                ))

                # Set spot ValidUntil
                # ValidUntil -> (timestamp). The end date of the request, in
                # UTC format (for example, YYYY -MM -DD T*HH* :MM :SS Z).
                utc_valid_until = (
                    datetime.datetime.utcnow()
                    + datetime.timedelta(seconds=spot_wait_timeout))
                params['valid_until'] = utc_valid_until.strftime('%Y-%m-%dT%H:%M:%S.000Z')

                res = ec2.request_spot_instances(spot_price, **params)

                # Now we have to do the intermediate waiting
                if wait:
                    instids = await_spot_requests(module, ec2, res, count)
                else:
                    instids = []
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message))

        # wait here until the instances are up
        num_running = 0
        wait_timeout = time.time() + wait_timeout
        res_list = ()
        while wait_timeout > time.time() and num_running < len(instids):
            try:
                res_list = ec2.get_all_instances(instids)
            except boto.exception.BotoServerError as e:
                if e.error_code == 'InvalidInstanceID.NotFound':
                    time.sleep(1)
                    continue
                else:
                    raise

            num_running = 0
            for res in res_list:
                num_running += len([i for i in res.instances if i.state == 'running'])
            if len(res_list) <= 0:
                # got a bad response of some sort, possibly due to
                # stale/cached data. Wait a second and then try again
                time.sleep(1)
                continue
            if wait and num_running < len(instids):
                time.sleep(5)
            else:
                break

        if wait and wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())

        # We do this after the loop ends so that we end up with one list
        for res in res_list:
            running_instances.extend(res.instances)

        # Enabled by default by AWS
        # NOTE(review): the two loops below iterate res.instances — 'res'
        # here is whatever the preceding loop left bound (the last
        # reservation), not the full running_instances list; on the spot
        # path 'res' may not be a reservation at all. Looks like an upstream
        # quirk — confirm before relying on these attributes being applied
        # to every launched instance.
        if source_dest_check is False:
            for inst in res.instances:
                inst.modify_attribute('sourceDestCheck', False)

        # Disabled by default by AWS
        if termination_protection is True:
            for inst in res.instances:
                inst.modify_attribute('disableApiTermination', True)

        # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
        if instance_tags and instids:
            try:
                ec2.create_tags(instids, instance_tags)
            except boto.exception.EC2ResponseError as e:
                module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message))

    # Convert every launched/previously-running instance into a plain dict
    # for the module's return payload.
    instance_dict_array = []
    created_instance_ids = []
    for inst in running_instances:
        inst.update()
        d = get_instance_info(inst)
        created_instance_ids.append(inst.id)
        instance_dict_array.append(d)

    return (instance_dict_array, created_instance_ids, changed)
+
+
+def terminate_instances(module, ec2, instance_ids):
+    """
+    Terminates a list of instances
+
+    module: Ansible module object
+    ec2: authenticated ec2 connection object
+    instance_ids: a list of instance ids (strings) of the instances
+        to terminate
+
+    Returns a tuple of (changed, instance_dict_array, terminated_instance_ids)
+    describing the instances terminated.
+
+    "changed" is True only when at least one instance was actually in a
+    terminatable state ('running' or 'stopped'); instances already
+    terminated (or in any other state) are skipped, keeping the call
+    idempotent.
+    """
+
+    # Whether to wait for termination to complete before returning
+    wait = module.params.get('wait')
+    wait_timeout = int(module.params.get('wait_timeout'))
+
+    changed = False
+    instance_dict_array = []
+
+    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+        module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+    terminated_instance_ids = []
+    for res in ec2.get_all_instances(instance_ids):
+        for inst in res.instances:
+            # Only 'running' or 'stopped' instances are acted on; anything
+            # else (pending, shutting-down, terminated) is silently ignored.
+            if inst.state == 'running' or inst.state == 'stopped':
+                terminated_instance_ids.append(inst.id)
+                # Snapshot instance info *before* terminating so callers can
+                # see the pre-termination state in the result.
+                instance_dict_array.append(get_instance_info(inst))
+                try:
+                    ec2.terminate_instances([inst.id])
+                except EC2ResponseError as e:
+                    module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
+                changed = True
+
+    # wait here until the instances are 'terminated'
+    if wait:
+        num_terminated = 0
+        wait_timeout = time.time() + wait_timeout
+        while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
+            # Poll with a state filter so only fully terminated instances count.
+            response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
+                                             filters={'instance-state-name': 'terminated'})
+            try:
+                num_terminated = sum([len(res.instances) for res in response])
+            except Exception as e:
+                # got a bad response of some sort, possibly due to
+                # stale/cached data. Wait a second and then try again
+                time.sleep(1)
+                continue
+
+            if num_terminated < len(terminated_instance_ids):
+                time.sleep(5)
+
+        # waiting took too long
+        if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
+            module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
+        # Lets get the current state of the instances after terminating - issue600
+        instance_dict_array = []
+        for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}):
+            for inst in res.instances:
+                instance_dict_array.append(get_instance_info(inst))
+
+    return (changed, instance_dict_array, terminated_instance_ids)
+
+
+def startstop_instances(module, ec2, instance_ids, state, instance_tags):
+ """
+ Starts or stops a list of existing instances
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ instance_ids: The list of instances to start in the form of
+ [ {id: <inst-id>}, ..]
+ instance_tags: A dict of tag keys and values in the form of
+ {key: value, ... }
+ state: Intended state ("running" or "stopped")
+
+ Returns a dictionary of instance information
+ about the instances started/stopped.
+
+ If the instance was not able to change state,
+ "changed" will be set to False.
+
+ Note that if instance_ids and instance_tags are both non-empty,
+ this method will process the intersection of the two
+ """
+
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ group_id = module.params.get('group_id')
+ group_name = module.params.get('group')
+ changed = False
+ instance_dict_array = []
+
+ if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ # Fail unless the user defined instance tags
+ if not instance_tags:
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
+ # An empty filter does no filtering, so it's safe to pass it to the
+ # get_all_instances method even if the user did not specify instance_tags
+ filters = {}
+ if instance_tags:
+ for key, value in instance_tags.items():
+ filters["tag:" + key] = value
+
+ if module.params.get('id'):
+ filters['client-token'] = module.params['id']
+ # Check that our instances are not in the state we want to take
+
+ # Check (and eventually change) instances attributes and instances state
+ existing_instances_array = []
+ for res in ec2.get_all_instances(instance_ids, filters=filters):
+ for inst in res.instances:
+
+ warn_if_public_ip_assignment_changed(module, inst)
+
+ changed = (check_source_dest_attr(module, inst, ec2) or
+ check_termination_protection(module, inst) or changed)
+
+ # Check security groups and if we're using ec2-vpc; ec2-classic security groups may not be modified
+ if inst.vpc_id and group_name:
+ grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id})
+ if isinstance(group_name, string_types):
+ group_name = [group_name]
+ unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details)
+ if unmatched:
+ module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
+ group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name]
+ elif inst.vpc_id and group_id:
+ if isinstance(group_id, string_types):
+ group_id = [group_id]
+ grp_details = ec2.get_all_security_groups(group_ids=group_id)
+ group_ids = [grp_item.id for grp_item in grp_details]
+ if inst.vpc_id and (group_name or group_id):
+ if set(sg.id for sg in inst.groups) != set(group_ids):
+ changed = inst.modify_attribute('groupSet', group_ids)
+
+ # Check instance state
+ if inst.state != state:
+ instance_dict_array.append(get_instance_info(inst))
+ try:
+ if state == 'running':
+ inst.start()
+ else:
+ inst.stop()
+ except EC2ResponseError as e:
+ module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
+ changed = True
+ existing_instances_array.append(inst.id)
+
+ instance_ids = list(set(existing_instances_array + (instance_ids or [])))
+ # Wait for all the instances to finish starting or stopping
+ wait_timeout = time.time() + wait_timeout
+ while wait and wait_timeout > time.time():
+ instance_dict_array = []
+ matched_instances = []
+ for res in ec2.get_all_instances(instance_ids):
+ for i in res.instances:
+ if i.state == state:
+ instance_dict_array.append(get_instance_info(i))
+ matched_instances.append(i)
+ if len(matched_instances) < len(instance_ids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
+
+ return (changed, instance_dict_array, instance_ids)
+
+
+def restart_instances(module, ec2, instance_ids, state, instance_tags):
+ """
+ Restarts a list of existing instances
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ instance_ids: The list of instances to start in the form of
+ [ {id: <inst-id>}, ..]
+ instance_tags: A dict of tag keys and values in the form of
+ {key: value, ... }
+ state: Intended state ("restarted")
+
+ Returns a dictionary of instance information
+ about the instances.
+
+ If the instance was not able to change state,
+ "changed" will be set to False.
+
+ Wait will not apply here as this is a OS level operation.
+
+ Note that if instance_ids and instance_tags are both non-empty,
+ this method will process the intersection of the two.
+ """
+
+ changed = False
+ instance_dict_array = []
+
+ if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ # Fail unless the user defined instance tags
+ if not instance_tags:
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
+ # An empty filter does no filtering, so it's safe to pass it to the
+ # get_all_instances method even if the user did not specify instance_tags
+ filters = {}
+ if instance_tags:
+ for key, value in instance_tags.items():
+ filters["tag:" + key] = value
+ if module.params.get('id'):
+ filters['client-token'] = module.params['id']
+
+ # Check that our instances are not in the state we want to take
+
+ # Check (and eventually change) instances attributes and instances state
+ for res in ec2.get_all_instances(instance_ids, filters=filters):
+ for inst in res.instances:
+
+ warn_if_public_ip_assignment_changed(module, inst)
+
+ changed = (check_source_dest_attr(module, inst, ec2) or
+ check_termination_protection(module, inst) or changed)
+
+ # Check instance state
+ if inst.state != state:
+ instance_dict_array.append(get_instance_info(inst))
+ try:
+ inst.reboot()
+ except EC2ResponseError as e:
+ module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
+ changed = True
+
+ return (changed, instance_dict_array, instance_ids)
+
+
+def check_termination_protection(module, inst):
+ """
+ Check the instance disableApiTermination attribute.
+
+ module: Ansible module object
+ inst: EC2 instance object
+
+ returns: True if state changed None otherwise
+ """
+
+ termination_protection = module.params.get('termination_protection')
+
+ if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
+ inst.modify_attribute('disableApiTermination', termination_protection)
+ return True
+
+
+def check_source_dest_attr(module, inst, ec2):
+    """
+    Check the instance sourceDestCheck attribute.
+
+    module: Ansible module object
+    inst: EC2 instance object
+    ec2: authenticated ec2 connection object (used for the per-interface
+        fallback below)
+
+    returns: True if state changed, None otherwise
+    """
+
+    source_dest_check = module.params.get('source_dest_check')
+
+    if source_dest_check is not None:
+        try:
+            # sourceDestCheck is only meaningful for VPC instances.
+            if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
+                inst.modify_attribute('sourceDestCheck', source_dest_check)
+                return True
+        except boto.exception.EC2ResponseError as exc:
+            # instances with more than one Elastic Network Interface will
+            # fail, because they have the sourceDestCheck attribute defined
+            # per-interface
+            if exc.code == 'InvalidInstanceID':
+                for interface in inst.interfaces:
+                    if interface.source_dest_check != source_dest_check:
+                        ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
+                # NOTE(review): returns True even when no interface actually
+                # needed a change — may over-report "changed"; confirm intent.
+                return True
+            else:
+                module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
+                                 exception=traceback.format_exc())
+
+
+def warn_if_public_ip_assignment_changed(module, instance):
+ # This is a non-modifiable attribute.
+ assign_public_ip = module.params.get('assign_public_ip')
+
+ # Check that public ip assignment is the same and warn if not
+ public_dns_name = getattr(instance, 'public_dns_name', None)
+ if (assign_public_ip or public_dns_name) and (not public_dns_name or assign_public_ip is False):
+ module.warn("Unable to modify public ip assignment to {0} for instance {1}. "
+ "Whether or not to assign a public IP is determined during instance creation.".format(assign_public_ip, instance.id))
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ key_name=dict(aliases=['keypair']),
+ id=dict(),
+ group=dict(type='list', aliases=['groups']),
+ group_id=dict(type='list'),
+ zone=dict(aliases=['aws_zone', 'ec2_zone']),
+ instance_type=dict(aliases=['type']),
+ spot_price=dict(),
+ spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
+ spot_launch_group=dict(),
+ image=dict(),
+ kernel=dict(),
+ count=dict(type='int', default='1'),
+ monitoring=dict(type='bool', default=False),
+ ramdisk=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ spot_wait_timeout=dict(type='int', default=600),
+ placement_group=dict(),
+ user_data=dict(),
+ instance_tags=dict(type='dict'),
+ vpc_subnet_id=dict(),
+ assign_public_ip=dict(type='bool'),
+ private_ip=dict(),
+ instance_profile_name=dict(),
+ instance_ids=dict(type='list', aliases=['instance_id']),
+ source_dest_check=dict(type='bool', default=None),
+ termination_protection=dict(type='bool', default=None),
+ state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
+ instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']),
+ exact_count=dict(type='int', default=None),
+ count_tag=dict(type='raw'),
+ volumes=dict(type='list'),
+ ebs_optimized=dict(type='bool', default=False),
+ tenancy=dict(default='default', choices=['default', 'dedicated']),
+ network_interfaces=dict(type='list', aliases=['network_interface'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ # Can be uncommented when we finish the deprecation cycle.
+ # ['group', 'group_id'],
+ ['exact_count', 'count'],
+ ['exact_count', 'state'],
+ ['exact_count', 'instance_ids'],
+ ['network_interfaces', 'assign_public_ip'],
+ ['network_interfaces', 'group'],
+ ['network_interfaces', 'group_id'],
+ ['network_interfaces', 'private_ip'],
+ ['network_interfaces', 'vpc_subnet_id'],
+ ],
+ )
+
+ if module.params.get('group') and module.params.get('group_id'):
+ module.deprecate(
+ msg='Support for passing both group and group_id has been deprecated. '
+ 'Currently group_id is ignored, in future passing both will result in an error',
+ version='2.14')
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+ if module.params.get('region') or not module.params.get('ec2_url'):
+ ec2 = ec2_connect(module)
+ elif module.params.get('ec2_url'):
+ ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
+
+ if 'region' not in aws_connect_kwargs:
+ aws_connect_kwargs['region'] = ec2.region
+
+ vpc = connect_vpc(**aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc())
+
+ tagged_instances = []
+
+ state = module.params['state']
+
+ if state == 'absent':
+ instance_ids = module.params['instance_ids']
+ if not instance_ids:
+ module.fail_json(msg='instance_ids list is required for absent state')
+
+ (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
+
+ elif state in ('running', 'stopped'):
+ instance_ids = module.params.get('instance_ids')
+ instance_tags = module.params.get('instance_tags')
+ if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+ module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
+
+ (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
+
+ elif state in ('restarted'):
+ instance_ids = module.params.get('instance_ids')
+ instance_tags = module.params.get('instance_tags')
+ if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+ module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
+
+ (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
+
+ elif state == 'present':
+ # Changed is always set to true when provisioning new instances
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+
+ if module.params.get('exact_count') is None:
+ (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
+ else:
+ (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
+
+ # Always return instances in the same order
+ if new_instance_ids:
+ new_instance_ids.sort()
+ if instance_dict_array:
+ instance_dict_array.sort(key=lambda x: x['id'])
+ if tagged_instances:
+ tagged_instances.sort(key=lambda x: x['id'])
+
+ module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_ami_info.py b/test/support/integration/plugins/modules/ec2_ami_info.py
new file mode 100644
index 0000000000..41e1aa83f9
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_ami_info.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_info
+version_added: '2.5'
+short_description: Gather information about ec2 AMIs
+description:
+ - Gather information about ec2 AMIs
+ - This module was called C(ec2_ami_facts) before Ansible 2.9. The usage did not change.
+author:
+ - Prasad Katti (@prasadkatti)
+requirements: [ boto3 ]
+options:
+ image_ids:
+ description: One or more image IDs.
+ aliases: [image_id]
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters.
+ - Filter names and values are case sensitive.
+ type: dict
+ owners:
+ description:
+ - Filter the images by the owner. Valid options are an AWS account ID, self,
+ or an AWS owner alias ( amazon | aws-marketplace | microsoft ).
+ aliases: [owner]
+ type: list
+ elements: str
+ executable_users:
+ description:
+ - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs).
+ aliases: [executable_user]
+ type: list
+ elements: str
+ describe_image_attributes:
+ description:
+ - Describe attributes (like launchPermission) of the images found.
+ default: no
+ type: bool
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: gather information about an AMI using ami-id
+ ec2_ami_info:
+ image_ids: ami-5b488823
+
+- name: gather information about all AMIs with tag key Name and value webapp
+ ec2_ami_info:
+ filters:
+ "tag:Name": webapp
+
+- name: gather information about an AMI with 'AMI Name' equal to foobar
+ ec2_ami_info:
+ filters:
+ name: foobar
+
+- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477)
+ ec2_ami_info:
+ owners: 099720109477
+ filters:
+ name: "ubuntu/images/ubuntu-zesty-17.04-*"
+'''
+
+RETURN = '''
+images:
+ description: A list of images.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ architecture:
+ description: The architecture of the image.
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ device_name:
+ description: The device name exposed to the instance.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ ebs:
+ description: EBS volumes
+ returned: always
+ type: complex
+ creation_date:
+ description: The date and time the image was created.
+ returned: always
+ type: str
+ sample: '2017-10-16T19:22:13.000Z'
+ description:
+ description: The description of the AMI.
+ returned: always
+ type: str
+ sample: ''
+ ena_support:
+ description: Whether enhanced networking with ENA is enabled.
+ returned: always
+ type: bool
+ sample: true
+ hypervisor:
+ description: The hypervisor type of the image.
+ returned: always
+ type: str
+ sample: xen
+ image_id:
+ description: The ID of the AMI.
+ returned: always
+ type: str
+ sample: ami-5b466623
+ image_location:
+ description: The location of the AMI.
+ returned: always
+ type: str
+ sample: 408466080000/Webapp
+ image_type:
+ description: The type of image.
+ returned: always
+ type: str
+ sample: machine
+ launch_permissions:
+      description: A list of AWS accounts that may launch the AMI.
+ returned: When image is owned by calling account and I(describe_image_attributes) is yes.
+ type: list
+ elements: dict
+ contains:
+ group:
+ description: A value of 'all' means the AMI is public.
+ type: str
+ user_id:
+ description: An AWS account ID with permissions to launch the AMI.
+ type: str
+ sample: [{"group": "all"}, {"user_id": "408466080000"}]
+ name:
+ description: The name of the AMI that was provided during image creation.
+ returned: always
+ type: str
+ sample: Webapp
+ owner_id:
+ description: The AWS account ID of the image owner.
+ returned: always
+ type: str
+ sample: '408466080000'
+ public:
+ description: Whether the image has public launch permissions.
+ returned: always
+ type: bool
+ sample: true
+ root_device_name:
+ description: The device name of the root device.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ sriov_net_support:
+ description: Whether enhanced networking is enabled.
+ returned: always
+ type: str
+ sample: simple
+ state:
+ description: The current state of the AMI.
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: Any tags assigned to the image.
+ returned: always
+ type: dict
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
+
+
+def list_ec2_images(ec2_client, module):
+
+ image_ids = module.params.get("image_ids")
+ owners = module.params.get("owners")
+ executable_users = module.params.get("executable_users")
+ filters = module.params.get("filters")
+ owner_param = []
+
+ # describe_images is *very* slow if you pass the `Owners`
+ # param (unless it's self), for some reason.
+ # Converting the owners to filters and removing from the
+ # owners param greatly speeds things up.
+ # Implementation based on aioue's suggestion in #24886
+ for owner in owners:
+ if owner.isdigit():
+ if 'owner-id' not in filters:
+ filters['owner-id'] = list()
+ filters['owner-id'].append(owner)
+ elif owner == 'self':
+ # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ owner_param.append(owner)
+ else:
+ if 'owner-alias' not in filters:
+ filters['owner-alias'] = list()
+ filters['owner-alias'].append(owner)
+
+ filters = ansible_dict_to_boto3_filter_list(filters)
+
+ try:
+ images = ec2_client.describe_images(ImageIds=image_ids, Filters=filters, Owners=owner_param, ExecutableUsers=executable_users)
+ images = [camel_dict_to_snake_dict(image) for image in images["Images"]]
+ except (ClientError, BotoCoreError) as err:
+ module.fail_json_aws(err, msg="error describing images")
+ for image in images:
+ try:
+ image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))
+ if module.params.get("describe_image_attributes"):
+ launch_permissions = ec2_client.describe_image_attribute(Attribute='launchPermission', ImageId=image['image_id'])['LaunchPermissions']
+ image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]
+ except (ClientError, BotoCoreError) as err:
+ # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures
+ pass
+
+ images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist
+ module.exit_json(images=images)
+
+
+def main():
+
+ argument_spec = dict(
+ image_ids=dict(default=[], type='list', aliases=['image_id']),
+ filters=dict(default={}, type='dict'),
+ owners=dict(default=[], type='list', aliases=['owner']),
+ executable_users=dict(default=[], type='list', aliases=['executable_user']),
+ describe_image_attributes=dict(default=False, type='bool')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._module._name == 'ec2_ami_facts':
+ module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'", version='2.13')
+
+ ec2_client = module.client('ec2')
+
+ list_ec2_images(ec2_client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_eni.py b/test/support/integration/plugins/modules/ec2_eni.py
new file mode 100644
index 0000000000..8b6dbd1c32
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_eni.py
@@ -0,0 +1,633 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eni
+short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
+description:
+ - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is
+ provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status
+ of the network interface.
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+options:
+ eni_id:
+ description:
+ - The ID of the ENI (to modify).
+ - If I(eni_id=None) and I(state=present), a new eni will be created.
+ type: str
+ instance_id:
+ description:
+ - Instance ID that you wish to attach ENI to.
+ - Since version 2.2, use the I(attached) parameter to attach or detach an ENI. Prior to 2.2, to detach an ENI from an instance, use C(None).
+ type: str
+ private_ip_address:
+ description:
+ - Private IP address.
+ type: str
+ subnet_id:
+ description:
+ - ID of subnet in which to create the ENI.
+ type: str
+ description:
+ description:
+ - Optional description of the ENI.
+ type: str
+ security_groups:
+ description:
+ - List of security groups associated with the interface. Only used when I(state=present).
+ - Since version 2.2, you can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.
+ type: list
+ elements: str
+ state:
+ description:
+ - Create or delete ENI.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ device_index:
+ description:
+ - The index of the device for the network interface attachment on the instance.
+ default: 0
+ type: int
+ attached:
+ description:
+ - Specifies if network interface should be attached or detached from instance. If omitted, attachment status
+ won't change
+ version_added: 2.2
+ type: bool
+ force_detach:
+ description:
+ - Force detachment of the interface. This applies either when explicitly detaching the interface by setting I(instance_id=None)
+ or when deleting an interface with I(state=absent).
+ default: false
+ type: bool
+ delete_on_termination:
+ description:
+ - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the
+ interface is being modified, not on creation.
+ required: false
+ type: bool
+ source_dest_check:
+ description:
+ - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled.
+ You can only specify this flag when the interface is being modified, not on creation.
+ required: false
+ type: bool
+ secondary_private_ip_addresses:
+ description:
+ - A list of IP addresses to assign as secondary IP addresses to the network interface.
+ This option is mutually exclusive of I(secondary_private_ip_address_count)
+ required: false
+ version_added: 2.2
+ type: list
+ elements: str
+ purge_secondary_private_ip_addresses:
+ description:
+ - To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.
+ - Set I(secondary_private_ip_addresses=[]) to purge all secondary addresses.
+ default: false
+ type: bool
+ version_added: 2.5
+ secondary_private_ip_address_count:
+ description:
+ - The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive of I(secondary_private_ip_addresses)
+ required: false
+ version_added: 2.2
+ type: int
+ allow_reassignment:
+ description:
+ - Indicates whether to allow an IP address that is already assigned to another network interface or instance
+ to be reassigned to the specified network interface.
+ required: false
+ default: false
+ type: bool
+ version_added: 2.7
+extends_documentation_fragment:
+ - aws
+ - ec2
+notes:
  - This module identifies an ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id),
    or a combination of I(instance_id) and I(device_index). Any of these options will let you specify a particular ENI.
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an ENI. As no security group is defined, ENI will be created in default security group
+- ec2_eni:
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI and attach it to an instance
+- ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI with two secondary addresses
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ secondary_private_ip_address_count: 2
+
+# Assign a secondary IP address to an existing ENI
+# This will purge any existing IPs
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+ secondary_private_ip_addresses:
+ - 172.16.1.1
+
+# Remove any secondary IP addresses from an existing ENI
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+ secondary_private_ip_address_count: 0
+
+# Destroy an ENI, detaching it from any instance if necessary
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ force_detach: true
+ state: absent
+
+# Update an ENI
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ description: "My new description"
+ state: present
+
+# Update an ENI identifying it by private_ip_address and subnet_id
+- ec2_eni:
+ subnet_id: subnet-xxxxxxx
+ private_ip_address: 172.16.1.1
+ description: "My new description"
+
+# Detach an ENI from an instance
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ instance_id: None
+ state: present
+
+### Delete an interface on termination
+# First create the interface
+- ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ register: eni
+
# Modify the interface to enable the delete_on_termination flag
+- ec2_eni:
+ eni_id: "{{ eni.interface.id }}"
+ delete_on_termination: true
+
+'''
+
+
+RETURN = '''
+interface:
+ description: Network interface attributes
+ returned: when state != absent
+ type: complex
+ contains:
+ description:
+ description: interface description
+ type: str
+ sample: Firewall network interface
+ groups:
+ description: list of security groups
+ type: list
+ elements: dict
+ sample: [ { "sg-f8a8a9da": "default" } ]
+ id:
+ description: network interface id
+ type: str
+ sample: "eni-1d889198"
+ mac_address:
+ description: interface's physical address
+ type: str
+ sample: "00:00:5E:00:53:23"
+ owner_id:
+ description: aws account id
+ type: str
+ sample: 812381371
+ private_ip_address:
+ description: primary ip address of this interface
+ type: str
+ sample: 10.20.30.40
+ private_ip_addresses:
+ description: list of all private ip addresses associated to this interface
+ type: list
+ elements: dict
+ sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
+ source_dest_check:
+ description: value of source/dest check flag
+ type: bool
+ sample: True
+ status:
+ description: network interface status
+ type: str
+ sample: "pending"
+ subnet_id:
+ description: which vpc subnet the interface is bound
+ type: str
+ sample: subnet-b0a0393c
+ vpc_id:
+ description: which vpc this network interface is bound
+ type: str
+ sample: vpc-9a9a9da
+
+'''
+
+import time
+import re
+
+try:
+ import boto.ec2
+ import boto.vpc
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
+ ec2_argument_spec, get_aws_connection_info,
+ get_ec2_security_group_ids_from_names)
+
+
def get_eni_info(interface):
    """Build a plain dict of facts for a boto ENI object.

    Includes an 'attachment' sub-dict only when the interface is attached
    to an instance.
    """
    info = {
        'id': interface.id,
        'subnet_id': interface.subnet_id,
        'vpc_id': interface.vpc_id,
        'description': interface.description,
        'owner_id': interface.owner_id,
        'status': interface.status,
        'mac_address': interface.mac_address,
        'private_ip_address': interface.private_ip_address,
        'source_dest_check': interface.source_dest_check,
        'groups': {group.id: group.name for group in interface.groups},
        'private_ip_addresses': [
            {'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary}
            for ip in interface.private_ip_addresses
        ],
    }

    if interface.attachment is not None:
        info['attachment'] = {
            'attachment_id': interface.attachment.id,
            'instance_id': interface.attachment.instance_id,
            'device_index': interface.attachment.device_index,
            'status': interface.attachment.status,
            'attach_time': interface.attachment.attach_time,
            'delete_on_termination': interface.attachment.delete_on_termination,
        }

    return info
+
+
def wait_for_eni(eni, status):
    """Poll every 3 seconds until the ENI reaches the given attachment state.

    status is "attached" or "detached". NOTE(review): there is no timeout,
    so this can spin forever if AWS never reaches the requested state.
    """
    while True:
        time.sleep(3)
        eni.update()
        attachment = eni.attachment
        # "detached" just means the attachment record has disappeared.
        if attachment is None and status == "detached":
            break
        if attachment is not None and status == "attached" and attachment.status == "attached":
            break
+
+
def create_eni(connection, vpc_id, module):
    """Create a new ENI in the requested subnet and optionally attach it.

    If attachment or secondary-IP assignment fails after the interface was
    created, the freshly created ENI is deleted again so a failed run does
    not leak interfaces. Exits the module with the change status and the
    new interface's facts; never returns normally.
    """
    instance_id = module.params.get("instance_id")
    attached = module.params.get("attached")
    # Legacy (pre-2.2) callers passed the literal string 'None' to mean "no instance".
    if instance_id == 'None':
        instance_id = None
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    # Security groups may be given by name; resolve them all to ids within this VPC.
    security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection, vpc_id=vpc_id, boto3=False)
    secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
    secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
    changed = False

    try:
        eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
        if attached and instance_id is not None:
            try:
                eni.attach(instance_id, device_index)
            except BotoServerError:
                # Roll back the just-created interface before re-raising.
                eni.delete()
                raise
            # Wait to allow creation / attachment to finish
            wait_for_eni(eni, "attached")
            eni.update()

        if secondary_private_ip_address_count is not None:
            try:
                connection.assign_private_ip_addresses(network_interface_id=eni.id, secondary_private_ip_address_count=secondary_private_ip_address_count)
            except BotoServerError:
                # Roll back the just-created interface before re-raising.
                eni.delete()
                raise

        if secondary_private_ip_addresses is not None:
            try:
                connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses)
            except BotoServerError:
                # Roll back the just-created interface before re-raising.
                eni.delete()
                raise

        changed = True

    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
def modify_eni(connection, vpc_id, module, eni):
    """Bring an existing ENI in line with the requested module parameters.

    Each attribute (description, security groups, source/dest check,
    delete-on-termination, secondary private IPs, attachment state) is
    compared against the interface's current state and modified only when
    it differs. Exits the module with the change status and the updated
    interface's facts; never returns normally.
    """
    instance_id = module.params.get("instance_id")
    attached = module.params.get("attached")
    device_index = module.params.get("device_index")
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    source_dest_check = module.params.get("source_dest_check")
    delete_on_termination = module.params.get("delete_on_termination")
    secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
    purge_secondary_private_ip_addresses = module.params.get("purge_secondary_private_ip_addresses")
    secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
    allow_reassignment = module.params.get("allow_reassignment")
    changed = False

    try:
        if description is not None:
            if eni.description != description:
                connection.modify_network_interface_attribute(eni.id, "description", description)
                changed = True
        if len(security_groups) > 0:
            groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False)
            if sorted(get_sec_group_list(eni.groups)) != sorted(groups):
                connection.modify_network_interface_attribute(eni.id, "groupSet", groups)
                changed = True
        if source_dest_check is not None:
            if eni.source_dest_check != source_dest_check:
                connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
                changed = True
        # delete_on_termination lives on the attachment, so it can only be
        # modified while the ENI is attached to an instance.
        if delete_on_termination is not None and eni.attachment is not None:
            if eni.attachment.delete_on_termination is not delete_on_termination:
                connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
                changed = True

        current_secondary_addresses = [i.private_ip_address for i in eni.private_ip_addresses if not i.primary]
        if secondary_private_ip_addresses is not None:
            secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))
            if secondary_addresses_to_remove and purge_secondary_private_ip_addresses:
                connection.unassign_private_ip_addresses(network_interface_id=eni.id,
                                                         private_ip_addresses=secondary_addresses_to_remove,
                                                         dry_run=False)
                changed = True

            secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses))
            if secondary_addresses_to_add:
                connection.assign_private_ip_addresses(network_interface_id=eni.id,
                                                       private_ip_addresses=secondary_addresses_to_add,
                                                       secondary_private_ip_address_count=None,
                                                       allow_reassignment=allow_reassignment, dry_run=False)
                changed = True
        if secondary_private_ip_address_count is not None:
            current_secondary_address_count = len(current_secondary_addresses)

            if secondary_private_ip_address_count > current_secondary_address_count:
                connection.assign_private_ip_addresses(network_interface_id=eni.id,
                                                       private_ip_addresses=None,
                                                       secondary_private_ip_address_count=(secondary_private_ip_address_count -
                                                                                           current_secondary_address_count),
                                                       allow_reassignment=allow_reassignment, dry_run=False)
                changed = True
            elif secondary_private_ip_address_count < current_secondary_address_count:
                # How many of these addresses do we want to remove
                secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
                connection.unassign_private_ip_addresses(network_interface_id=eni.id,
                                                         private_ip_addresses=current_secondary_addresses[:secondary_addresses_to_remove_count],
                                                         dry_run=False)
                # BUGFIX: unassigning addresses modifies the interface; this
                # branch previously reported changed=False.
                changed = True

        if attached is True:
            if eni.attachment and eni.attachment.instance_id != instance_id:
                # Attached to the wrong instance: move it.
                detach_eni(eni, module)
                eni.attach(instance_id, device_index)
                wait_for_eni(eni, "attached")
                changed = True
            if eni.attachment is None:
                eni.attach(instance_id, device_index)
                wait_for_eni(eni, "attached")
                changed = True
        elif attached is False:
            detach_eni(eni, module)

    except BotoServerError as e:
        module.fail_json(msg=e.message)

    eni.update()
    module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
def delete_eni(connection, module):
    """Delete the ENI named by eni_id, detaching it first if force_detach is set.

    A "does not exist" error from AWS is treated as success (changed=False)
    so deletion is idempotent; any other error fails the module.
    """
    eni_id = module.params.get("eni_id")
    force_detach = module.params.get("force_detach")

    try:
        eni = connection.get_all_network_interfaces(eni_id)[0]

        if force_detach is True and eni.attachment is not None:
            eni.detach(force_detach)
            # Wait to allow detachment to finish
            wait_for_eni(eni, "detached")
            eni.update()
        eni.delete()
        module.exit_json(changed=True)
    except BotoServerError as e:
        # An already-deleted interface is not an error for state=absent.
        if re.search(r"The networkInterface ID '.*' does not exist", e.message) is not None:
            module.exit_json(changed=False)
        else:
            module.fail_json(msg=e.message)
+
+
def detach_eni(eni, module):
    """Detach the ENI from its instance (if attached) and exit the module.

    When the 'attached' parameter is set the caller intends to re-attach
    immediately, so control is returned to it instead of exiting.
    """
    if eni.attachment is None:
        module.exit_json(changed=False, interface=get_eni_info(eni))
        return

    eni.detach(module.params.get("force_detach"))
    wait_for_eni(eni, "detached")
    if module.params.get("attached"):
        # Caller (modify_eni) will re-attach and report the result itself.
        return
    eni.update()
    module.exit_json(changed=True, interface=get_eni_info(eni))
+
+
def uniquely_find_eni(connection, module):
    """Return the single ENI matching the module's identifying parameters.

    An ENI can be pinned down by eni_id, by private_ip_address+subnet_id,
    or by instance_id+device_index. Returns None when the parameters do not
    uniquely identify exactly one interface.
    """
    eni_id = module.params.get("eni_id")
    private_ip_address = module.params.get('private_ip_address')
    subnet_id = module.params.get('subnet_id')
    instance_id = module.params.get('instance_id')
    device_index = module.params.get('device_index')
    attached = module.params.get('attached')

    try:
        filters = {}

        # proceed only if we're univocally specifying an ENI
        if eni_id is None and private_ip_address is None and instance_id is None:
            return None

        if private_ip_address and subnet_id:
            filters['private-ip-address'] = private_ip_address
            filters['subnet-id'] = subnet_id

        # BUGFIX: device_index may legitimately be 0 (the argument default),
        # so test for None explicitly instead of truthiness — otherwise an
        # ENI at device index 0 can never be found by instance_id.
        if not attached and instance_id and device_index is not None:
            filters['attachment.instance-id'] = instance_id
            filters['attachment.device-index'] = device_index

        if eni_id is None and len(filters) == 0:
            return None

        eni_result = connection.get_all_network_interfaces(eni_id, filters=filters)
        if len(eni_result) == 1:
            return eni_result[0]
        return None

    except BotoServerError as e:
        module.fail_json(msg=e.message)

    return None
+
+
def get_sec_group_list(groups):
    """Return the list of security-group ids for a boto group collection.

    BUGFIX: the ids are returned as text. The previous .encode() produced
    bytes on Python 3, which never compare equal to the str group ids from
    get_ec2_security_group_ids_from_names, so modify_eni re-applied the
    group set on every run (broken idempotency).
    """
    return [group.id for group in groups]
+
+
+def _get_vpc_id(connection, module, subnet_id):
+
+ try:
+ return connection.get_all_subnets(subnet_ids=[subnet_id])[0].vpc_id
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+
def main():
    """Module entry point: dispatch to create/modify/delete based on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            eni_id=dict(default=None, type='str'),
            instance_id=dict(default=None, type='str'),
            private_ip_address=dict(type='str'),
            subnet_id=dict(type='str'),
            description=dict(type='str'),
            security_groups=dict(default=[], type='list'),
            device_index=dict(default=0, type='int'),
            state=dict(default='present', choices=['present', 'absent']),
            # default='no' is coerced to False by type='bool'
            force_detach=dict(default='no', type='bool'),
            source_dest_check=dict(default=None, type='bool'),
            delete_on_termination=dict(default=None, type='bool'),
            secondary_private_ip_addresses=dict(default=None, type='list'),
            purge_secondary_private_ip_addresses=dict(default=False, type='bool'),
            secondary_private_ip_address_count=dict(default=None, type='int'),
            allow_reassignment=dict(default=False, type='bool'),
            attached=dict(default=None, type='bool')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[
                               ['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
                           ],
                           required_if=([
                               ('state', 'absent', ['eni_id']),
                               ('attached', True, ['instance_id']),
                               ('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses'])
                           ])
                           )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            # Two boto connections: ec2 for the ENI itself, vpc for subnet lookups.
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
            vpc_connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    state = module.params.get("state")

    if state == 'present':
        # If the parameters identify an existing ENI, modify it; otherwise create one.
        eni = uniquely_find_eni(connection, module)
        if eni is None:
            subnet_id = module.params.get("subnet_id")
            if subnet_id is None:
                module.fail_json(msg="subnet_id is required when creating a new ENI")

            vpc_id = _get_vpc_id(vpc_connection, module, subnet_id)
            create_eni(connection, vpc_id, module)
        else:
            vpc_id = eni.vpc_id
            modify_eni(connection, vpc_id, module, eni)

    elif state == 'absent':
        delete_eni(connection, module)


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/ec2_eni_info.py b/test/support/integration/plugins/modules/ec2_eni_info.py
new file mode 100644
index 0000000000..99922a84d1
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_eni_info.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eni_info
+short_description: Gather information about ec2 ENI interfaces in AWS
+description:
+ - Gather information about ec2 ENI interfaces in AWS.
+ - This module was called C(ec2_eni_facts) before Ansible 2.9. The usage did not change.
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+requirements: [ boto3 ]
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
+ type: dict
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all ENIs
+- ec2_eni_info:
+
+# Gather information about a particular ENI
+- ec2_eni_info:
+ filters:
+ network-interface-id: eni-xxxxxxx
+
+'''
+
+RETURN = '''
+network_interfaces:
+ description: List of matching elastic network interfaces
+ returned: always
+ type: complex
+ contains:
+ association:
+ description: Info of associated elastic IP (EIP)
+ returned: always, empty dict if no association exists
+ type: dict
+ sample: {
+ allocation_id: "eipalloc-5sdf123",
+ association_id: "eipassoc-8sdf123",
+ ip_owner_id: "4415120123456",
+ public_dns_name: "ec2-52-1-0-63.compute-1.amazonaws.com",
+ public_ip: "52.1.0.63"
+ }
+ attachment:
+ description: Info about attached ec2 instance
+ returned: always, empty dict if ENI is not attached
+ type: dict
+ sample: {
+ attach_time: "2017-08-05T15:25:47+00:00",
+ attachment_id: "eni-attach-149d21234",
+ delete_on_termination: false,
+ device_index: 1,
+ instance_id: "i-15b8d3cadbafa1234",
+ instance_owner_id: "4415120123456",
+ status: "attached"
+ }
+ availability_zone:
+ description: Availability zone of ENI
+ returned: always
+ type: str
+ sample: "us-east-1b"
+ description:
+ description: Description text for ENI
+ returned: always
+ type: str
+ sample: "My favourite network interface"
+ groups:
+ description: List of attached security groups
+ returned: always
+ type: list
+ sample: [
+ {
+ group_id: "sg-26d0f1234",
+ group_name: "my_ec2_security_group"
+ }
+ ]
+ id:
+ description: The id of the ENI (alias for network_interface_id)
+ returned: always
+ type: str
+ sample: "eni-392fsdf"
+ interface_type:
+ description: Type of the network interface
+ returned: always
+ type: str
+ sample: "interface"
+ ipv6_addresses:
+ description: List of IPv6 addresses for this interface
+ returned: always
+ type: list
+ sample: []
+ mac_address:
+ description: MAC address of the network interface
+ returned: always
+ type: str
+ sample: "0a:f8:10:2f:ab:a1"
+ network_interface_id:
+ description: The id of the ENI
+ returned: always
+ type: str
+ sample: "eni-392fsdf"
+ owner_id:
+ description: AWS account id of the owner of the ENI
+ returned: always
+ type: str
+ sample: "4415120123456"
+ private_dns_name:
+ description: Private DNS name for the ENI
+ returned: always
+ type: str
+ sample: "ip-172-16-1-180.ec2.internal"
+ private_ip_address:
+ description: Private IP address for the ENI
+ returned: always
+ type: str
+ sample: "172.16.1.180"
+ private_ip_addresses:
+ description: List of private IP addresses attached to the ENI
+ returned: always
+ type: list
+ sample: []
+ requester_id:
+ description: The ID of the entity that launched the ENI
+ returned: always
+ type: str
+ sample: "AIDAIONYVJQNIAZFT3ABC"
+ requester_managed:
+ description: Indicates whether the network interface is being managed by an AWS service.
+ returned: always
+ type: bool
+ sample: false
+ source_dest_check:
+ description: Indicates whether the network interface performs source/destination checking.
+ returned: always
+ type: bool
+ sample: false
+ status:
+ description: Indicates if the network interface is attached to an instance or not
+ returned: always
+ type: str
+ sample: "in-use"
+ subnet_id:
+ description: Subnet ID the ENI is in
+ returned: always
+ type: str
+ sample: "subnet-7bbf01234"
+ tag_set:
+ description: Dictionary of tags added to the ENI
+ returned: always
+ type: dict
+ sample: {}
+ vpc_id:
      description: ID of the VPC the network interface is part of
+ returned: always
+ type: str
+ sample: "vpc-b3f1f123"
+'''
+
+try:
+ from botocore.exceptions import ClientError, NoCredentialsError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_conn
+from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
+from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
+
+
def list_eni(connection, module):
    """Describe ENIs matching the optional filters and exit the module.

    Converts the user-supplied filter dict to boto3 filter-list form,
    queries EC2, normalises tags and key casing to be Ansible friendly,
    and calls module.exit_json with the result.
    """
    if module.params.get("filters") is None:
        filters = []
    else:
        filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))

    try:
        network_interfaces_result = connection.describe_network_interfaces(Filters=filters)['NetworkInterfaces']
    except (ClientError, NoCredentialsError) as e:
        # BUGFIX: botocore exceptions have no .message attribute on Python 3;
        # the previous msg=e.message raised AttributeError and masked the
        # real AWS error. str(e) renders the full error message.
        module.fail_json(msg=str(e))

    # Modify boto3 tags list to be ansible friendly dict and then camel_case
    camel_network_interfaces = []
    for network_interface in network_interfaces_result:
        network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet'])
        # Added id to interface info to be compatible with return values of ec2_eni module:
        network_interface['Id'] = network_interface['NetworkInterfaceId']
        camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface))

    module.exit_json(network_interfaces=camel_network_interfaces)
+
+
def get_eni_info(interface):
    """Build a plain dict of facts for a boto ENI object.

    Adds an 'association' sub-dict only when the interface carries EIP
    association attributes, and an 'attachment' sub-dict only when it is
    attached to an instance.
    """
    info = {
        'id': interface.id,
        'subnet_id': interface.subnet_id,
        'vpc_id': interface.vpc_id,
        'description': interface.description,
        'owner_id': interface.owner_id,
        'status': interface.status,
        'mac_address': interface.mac_address,
        'private_ip_address': interface.private_ip_address,
        'source_dest_check': interface.source_dest_check,
        'groups': {group.id: group.name for group in interface.groups},
        'private_ip_addresses': [
            {'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary}
            for ip in interface.private_ip_addresses
        ],
    }

    # Only interfaces associated with an EIP carry the public* attributes.
    if hasattr(interface, 'publicDnsName'):
        info['association'] = {
            'public_ip_address': interface.publicIp,
            'public_dns_name': interface.publicDnsName,
            'ip_owner_id': interface.ipOwnerId,
        }

    if interface.attachment is not None:
        info['attachment'] = {
            'attachment_id': interface.attachment.id,
            'instance_id': interface.attachment.instance_id,
            'device_index': interface.attachment.device_index,
            'status': interface.attachment.status,
            'attach_time': interface.attachment.attach_time,
            'delete_on_termination': interface.attachment.delete_on_termination,
        }

    return info
+
+
def main():
    """Module entry point: connect to EC2 and list matching ENIs."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default=None, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)
    # Warn callers still using the pre-2.9 '_facts' name of this module.
    if module._name == 'ec2_eni_facts':
        module.deprecate("The 'ec2_eni_facts' module has been renamed to 'ec2_eni_info'", version='2.13')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)

    list_eni(connection, module)


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/ec2_group.py b/test/support/integration/plugins/modules/ec2_group.py
new file mode 100644
index 0000000000..bc416f66b5
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_group.py
@@ -0,0 +1,1345 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
# Standard Ansible module metadata: declared interface stability and support tier.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: ec2_group
+author: "Andrew de Quincey (@adq)"
+version_added: "1.3"
+requirements: [ boto3 ]
+short_description: maintain an ec2 VPC security group.
+description:
+  - Maintains ec2 security groups. This module has a dependency on boto3.
+options:
+ name:
+ description:
+ - Name of the security group.
+ - One of and only one of I(name) or I(group_id) is required.
+ - Required if I(state=present).
+ required: false
+ type: str
+ group_id:
+ description:
+ - Id of group to delete (works only with absent).
+ - One of and only one of I(name) or I(group_id) is required.
+ required: false
+ version_added: "2.4"
+ type: str
+ description:
+ description:
+ - Description of the security group. Required when C(state) is C(present).
+ required: false
+ type: str
+ vpc_id:
+ description:
+ - ID of the VPC to create the group in.
+ required: false
+ type: str
+ rules:
+ description:
+ - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+ no inbound rules will be enabled. Rules list may include its own name in `group_name`.
+ This allows idempotent loopback additions (e.g. allow group to access itself).
+ Rule sources list support was added in version 2.4. This allows to define multiple sources per
+ source type as well as multiple source types per rule. Prior to 2.4 an individual source is allowed.
+ In version 2.5 support for rule descriptions was added.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ cidr_ipv6:
+ type: str
+ description:
+ - The IPv6 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ ip_prefix:
+ type: str
+ description:
+ - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
+ that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_id:
+ type: str
+ description:
+ - The ID of the Security Group that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_name:
+ type: str
+ description:
+ - Name of the Security Group that traffic is coming from.
+ - If the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_desc:
+ type: str
+ description:
+ - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ proto:
+ type: str
+ description:
+ - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
+ from_port:
+ type: int
+ description: The start of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
+ to_port:
+ type: int
+ description: The end of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
+ rule_desc:
+ type: str
+ description: A description for the rule.
+ rules_egress:
+ description:
+ - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
+ a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
+ Rule Egress sources list support was added in version 2.4. In version 2.5 support for rule descriptions
+ was added.
+ required: false
+ version_added: "1.6"
+ type: list
+ elements: dict
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ cidr_ipv6:
+ type: str
+ description:
+ - The IPv6 CIDR range traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ ip_prefix:
+ type: str
+ description:
+ - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
+ that traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_id:
+ type: str
+ description:
+ - The ID of the Security Group that traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_name:
+ type: str
+ description:
+ - Name of the Security Group that traffic is going to.
+ - If the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_desc:
+ type: str
+ description:
+ - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ proto:
+ type: str
+ description:
+ - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
+ from_port:
+ type: int
+ description: The start of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
+ to_port:
+ type: int
+ description: The end of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
+ rule_desc:
+ type: str
+ description: A description for the rule.
+ state:
+ version_added: "1.4"
+ description:
+ - Create or delete a security group.
+ required: false
+ default: 'present'
+ choices: [ "present", "absent" ]
+ aliases: []
+ type: str
+ purge_rules:
+ version_added: "1.8"
+ description:
+ - Purge existing rules on security group that are not found in rules.
+ required: false
+ default: 'true'
+ aliases: []
+ type: bool
+ purge_rules_egress:
+ version_added: "1.8"
+ description:
+ - Purge existing rules_egress on security group that are not found in rules_egress.
+ required: false
+ default: 'true'
+ aliases: []
+ type: bool
+ tags:
+ version_added: "2.4"
+ description:
+ - A dictionary of one or more tags to assign to the security group.
+ required: false
+ type: dict
+ aliases: ['resource_tags']
+ purge_tags:
+ version_added: "2.4"
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
+ tags will not be modified.
+ required: false
+ default: yes
+ type: bool
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+
+notes:
+ - If a rule declares a group_name and that group doesn't exist, it will be
+ automatically created. In that case, group_desc should be provided as well.
+ The module will refuse to create a depended-on group without a description.
+  - Preview diff mode support was added in version 2.7.
+'''
+
+EXAMPLES = '''
+- name: example using security group rule descriptions
+ ec2_group:
+ name: "{{ name }}"
+ description: sg with rule descriptions
+ vpc_id: vpc-xxxxxxxx
+ profile: "{{ aws_profile }}"
+ region: us-east-1
+ rules:
+ - proto: tcp
+ ports:
+ - 80
+ cidr_ip: 0.0.0.0/0
+ rule_desc: allow all on port 80
+
+- name: example ec2 group
+ ec2_group:
+ name: example
+ description: an example EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ aws_secret_key: SECRET
+ aws_access_key: ACCESS
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ # this should only be needed for EC2 Classic security group rules
+ # because in a VPC an ELB will use a user-account security group
+ group_id: amazon-elb/sg-87654321/amazon-elb-sg
+ - proto: tcp
+ from_port: 3306
+ to_port: 3306
+ group_id: 123412341234/sg-87654321/exact-name-of-sg
+ - proto: udp
+ from_port: 10050
+ to_port: 10050
+ cidr_ip: 10.0.0.0/8
+ - proto: udp
+ from_port: 10051
+ to_port: 10051
+ group_id: sg-12345678
+ - proto: icmp
+ from_port: 8 # icmp type, -1 = any type
+ to_port: -1 # icmp subtype, -1 = any subtype
+ cidr_ip: 10.0.0.0/8
+ - proto: all
+ # the containing group name may be specified here
+ group_name: example
+ - proto: all
+ # in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6),
+ # traffic on all ports is allowed, regardless of any ports you specify
+ from_port: 10050 # this value is ignored
+ to_port: 10050 # this value is ignored
+ cidr_ip: 10.0.0.0/8
+
+ rules_egress:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ cidr_ipv6: 64:ff9b::/96
+ group_name: example-other
+ # description to use if example-other needs to be created
+ group_desc: other example EC2 group
+
+- name: example2 ec2 group
+ ec2_group:
+ name: example2
+ description: an example2 EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ rules:
+ # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
+ - proto: tcp
+ ports: 22
+ group_name: example-vpn
+ - proto: tcp
+ ports:
+ - 80
+ - 443
+ - 8080-8099
+ cidr_ip: 0.0.0.0/0
+ # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule.
+ - proto: tcp
+ ports:
+ - 6379
+ - 26379
+ group_name:
+ - example-vpn
+ - example-redis
+ - proto: tcp
+ ports: 5665
+ group_name: example-vpn
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ cidr_ipv6:
+ - 2607:F8B0::/32
+ - 64:ff9b::/96
+ group_id:
+ - sg-edcd9784
+ diff: True
+
+- name: "Delete group by its id"
+ ec2_group:
+ region: eu-west-1
+ group_id: sg-33b4ee5b
+ state: absent
+'''
+
+RETURN = '''
+group_name:
+ description: Security group name
+ sample: My Security Group
+ type: str
+ returned: on create/update
+group_id:
+ description: Security group id
+ sample: sg-abcd1234
+ type: str
+ returned: on create/update
+description:
+ description: Description of security group
+ sample: My Security Group
+ type: str
+ returned: on create/update
+tags:
+ description: Tags associated with the security group
+ sample:
+ Name: My Security Group
+ Purpose: protecting stuff
+ type: dict
+ returned: on create/update
+vpc_id:
+ description: ID of VPC to which the security group belongs
+ sample: vpc-abcd1234
+ type: str
+ returned: on create/update
+ip_permissions:
+ description: Inbound rules associated with the security group.
+ sample:
+ - from_port: 8182
+ ip_protocol: tcp
+ ip_ranges:
+ - cidr_ip: "1.1.1.1/32"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ to_port: 8182
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+ip_permissions_egress:
+ description: Outbound rules associated with the security group.
+ sample:
+ - ip_protocol: -1
+ ip_ranges:
+ - cidr_ip: "0.0.0.0/0"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+owner_id:
+ description: AWS Account ID of the security group
+ sample: 123456789012
+ type: int
+ returned: on create/update
+'''
+
+import json
+import re
+import itertools
+from copy import deepcopy
+from time import sleep
+from collections import namedtuple
+from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
+from ansible.module_utils.aws.iam import get_aws_account_id
+from ansible.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
+from ansible.module_utils.common.network import to_ipv6_subnet, to_subnet
+from ansible.module_utils.compat.ipaddress import ip_network, IPv6Network
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import string_types
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
# Internal normalized form of a single security-group rule.
# port_range is a (from_port, to_port) tuple; target is a CIDR/prefix string
# or, for group targets, an (owner_id, group_id, group_name) tuple;
# target_type is one of valid_targets below.
Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description'])
# The only values Rule.target_type may take.
valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix'])
# AWS account id of the caller; None until initialized elsewhere in this module.
current_account_id = None
+
+
def rule_cmp(a, b):
    """Return True if two Rules are equivalent, ignoring their descriptions."""
    wildcard_ranges = ((None, None), (-1, -1))
    for prop in ('port_range', 'protocol', 'target', 'target_type'):
        if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol):
            # With matching protocols, (-1, -1) and (None, None) both mean
            # "all ports" and are interchangeable.
            if a.port_range in wildcard_ranges and b.port_range in wildcard_ranges:
                continue
        if getattr(a, prop) != getattr(b, prop):
            return False
    return True
+
+
+def rules_to_permissions(rules):
+ return [to_permission(rule) for rule in rules]
+
+
def to_permission(rule):
    """Serialize a Rule namedtuple into the boto3 IpPermission dict shape.

    Raises ValueError when rule.target_type is not one of valid_targets.
    """
    perm = {'IpProtocol': rule.protocol}
    perm['FromPort'], perm['ToPort'] = rule.port_range

    if rule.target_type == 'ipv4':
        grant = {'CidrIp': rule.target}
        if rule.description:
            grant['Description'] = rule.description
        perm['IpRanges'] = [grant]
    elif rule.target_type == 'ipv6':
        grant = {'CidrIpv6': rule.target}
        if rule.description:
            grant['Description'] = rule.description
        perm['Ipv6Ranges'] = [grant]
    elif rule.target_type == 'group':
        if isinstance(rule.target, tuple):
            owner, grp_id, grp_name = rule.target[0], rule.target[1], rule.target[2]
            pair = {}
            if owner:
                pair['UserId'] = owner
            # group_id/group_name are mutually exclusive - give group_id more
            # precedence as it is more specific
            if grp_id:
                pair['GroupId'] = grp_id
            elif grp_name:
                pair['GroupName'] = grp_name
        else:
            pair = {'GroupId': rule.target}
        if rule.description:
            pair['Description'] = rule.description
        perm['UserIdGroupPairs'] = [pair]
    elif rule.target_type == 'ip_prefix':
        grant = {'PrefixListId': rule.target}
        if rule.description:
            grant['Description'] = rule.description
        perm['PrefixListIds'] = [grant]
    elif rule.target_type not in valid_targets:
        raise ValueError('Invalid target type for rule {0}'.format(rule))
    return fix_port_and_protocol(perm)
+
+
def rule_from_group_permission(perm):
    """Yield one Rule per grant inside a boto3 IpPermission dict.

    Emits a Rule for every entry of IpRanges / Ipv6Ranges / PrefixListIds,
    then one per UserIdGroupPairs entry (with a tuple target of
    (owner_id, group_id, group_name)).
    """
    def ports_from_permission(p):
        # Missing FromPort/ToPort means "all ports" for this protocol.
        if 'FromPort' not in p and 'ToPort' not in p:
            return (None, None)
        # BUGFIX(consistency): index p, not the enclosing perm, so the helper
        # honors its own argument (behavior unchanged at the single call site).
        return (int(p['FromPort']), int(p['ToPort']))

    # CIDR / prefix-list grants first.
    for target_key, target_subkey, target_type in [
            ('IpRanges', 'CidrIp', 'ipv4'),
            ('Ipv6Ranges', 'CidrIpv6', 'ipv6'),
            ('PrefixListIds', 'PrefixListId', 'ip_prefix'),
    ]:
        if target_key not in perm:
            continue
        for r in perm[target_key]:
            # there may be several IP ranges here, which is ok
            yield Rule(
                ports_from_permission(perm),
                to_text(perm['IpProtocol']),
                r[target_subkey],
                target_type,
                r.get('Description')
            )
    if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']:
        for pair in perm['UserIdGroupPairs']:
            target = (
                pair.get('UserId', None),
                pair.get('GroupId', None),
                pair.get('GroupName', None),
            )
            if pair.get('UserId', '').startswith('amazon-'):
                # amazon-elb and amazon-prefix rules don't need
                # group-id specified, so remove it when querying
                # from permission
                target = (
                    target[0],
                    None,
                    target[2],
                )
            elif 'VpcPeeringConnectionId' in pair or pair.get('UserId') != current_account_id:
                # BUGFIX: pair['UserId'] raised KeyError for pairs without a
                # UserId key (every other access here uses .get()).
                target = (
                    pair.get('UserId', None),
                    pair.get('GroupId', None),
                    pair.get('GroupName', None),
                )

            yield Rule(
                ports_from_permission(perm),
                to_text(perm['IpProtocol']),
                target,
                'group',
                pair.get('Description')
            )
+
+
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['InvalidGroup.NotFound'])
def get_security_groups_with_backoff(connection, **kwargs):
    """describe_security_groups with retries; InvalidGroup.NotFound is also
    retried because describe calls are eventually consistent after creation."""
    return connection.describe_security_groups(**kwargs)
+
+
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def sg_exists_with_backoff(connection, **kwargs):
    """describe_security_groups with retries, mapping a missing group to an
    empty result set instead of an error."""
    try:
        return connection.describe_security_groups(**kwargs)
    except is_boto3_error_code('InvalidGroup.NotFound'):
        return {'SecurityGroups': []}
+
+
def deduplicate_rules_args(rules):
    """Drop exact-duplicate rule dicts, keeping input order; None passes through.

    Two rules are duplicates when their sorted-key JSON serializations match,
    so key order inside a rule dict does not matter.
    """
    if rules is None:
        return None
    unique = {}
    for rule in rules:
        unique[json.dumps(rule, sort_keys=True)] = rule
    return list(unique.values())
+
+
def validate_rule(module, rule):
    """Fail the module if *rule* has unknown keys or conflicting source keys."""
    VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', 'ip_prefix',
                    'group_id', 'group_name', 'group_desc',
                    'proto', 'from_port', 'to_port', 'rule_desc')
    if not isinstance(rule, dict):
        module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
    for key in rule:
        if key not in VALID_PARAMS:
            module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(key, rule))

    # Mutually exclusive key pairs, checked in the historical order so the
    # first conflict reports the same message as before.
    conflicting_pairs = (
        ('group_id', 'cidr_ip'),
        ('group_name', 'cidr_ip'),
        ('group_id', 'cidr_ipv6'),
        ('group_name', 'cidr_ipv6'),
        ('cidr_ip', 'cidr_ipv6'),
        ('group_id', 'group_name'),
    )
    for first, second in conflicting_pairs:
        if first in rule and second in rule:
            module.fail_json(msg='Specify %s OR %s, not both' % (first, second))
            break
+
+
def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
    """
    Returns tuple of (target_type, target, group_created) after validating rule params.

    rule: Dict describing a rule.
    name: Name of the security group being managed.
    groups: Dict of all available security groups, keyed by both id and name;
        updated in place when a referenced group is resolved or created.

    AWS accepts an ip range or a security group as target of a rule. This
    function validate the rule specification and return either a non-None
    group_id or a non-None ip range.
    """
    # Matches "owner/sg-id/name" style references to groups in other accounts
    # (e.g. amazon-elb/sg-87654321/amazon-elb-sg).
    FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
    group_id = None
    group_name = None
    target_group_created = False

    validate_rule(module, rule)
    if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
        # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
        owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
        group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
        groups[group_id] = group_instance
        groups[group_name] = group_instance
        # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
        if group_id and group_name:
            group_name = None
        return 'group', (owner_id, group_id, group_name), False
    elif 'group_id' in rule:
        return 'group', rule['group_id'], False
    elif 'group_name' in rule:
        group_name = rule['group_name']
        if group_name == name:
            # Self-reference: the rule targets the group being managed.
            group_id = group['GroupId']
            groups[group_id] = group
            groups[group_name] = group
        elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
            # both are VPC groups, this is ok
            group_id = groups[group_name]['GroupId']
        elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
            # both are EC2 classic, this is ok
            group_id = groups[group_name]['GroupId']
        else:
            auto_group = None
            filters = {'group-name': group_name}
            if vpc_id:
                filters['vpc-id'] = vpc_id
            # if we got here, either the target group does not exist, or there
            # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
            # is bad, so we have to create a new SG because no compatible group
            # exists
            if not rule.get('group_desc', '').strip():
                # retry describing the group once
                try:
                    auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
                except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError):
                    module.fail_json(msg="group %s will be automatically created by rule %s but "
                                         "no description was provided" % (group_name, rule))
                except ClientError as e:  # pylint: disable=duplicate-except
                    module.fail_json_aws(e)
            elif not module.check_mode:
                params = dict(GroupName=group_name, Description=rule['group_desc'])
                if vpc_id:
                    params['VpcId'] = vpc_id
                try:
                    auto_group = client.create_security_group(**params)
                    get_waiter(
                        client, 'security_group_exists',
                    ).wait(
                        GroupIds=[auto_group['GroupId']],
                    )
                except is_boto3_error_code('InvalidGroup.Duplicate'):
                    # The group exists, but didn't show up in any of our describe-security-groups calls
                    # Try searching on a filter for the name, and allow a retry window for AWS to update
                    # the model on their end.
                    try:
                        auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
                    except IndexError as e:
                        module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
                    except ClientError as e:
                        module.fail_json_aws(
                            e,
                            msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
            if auto_group is not None:
                group_id = auto_group['GroupId']
                groups[group_id] = auto_group
                groups[group_name] = auto_group
                target_group_created = True
        return 'group', group_id, target_group_created
    elif 'cidr_ip' in rule:
        return 'ipv4', validate_ip(module, rule['cidr_ip']), False
    elif 'cidr_ipv6' in rule:
        return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False
    elif 'ip_prefix' in rule:
        return 'ip_prefix', rule['ip_prefix'], False

    module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule)
+
+
def ports_expand(ports):
    """Expand a list of port specs into (from_port, to_port) tuples.

    Non-string entries map to (port, port); strings may be a single port
    or a 'from-to' range.
    """
    expanded = []
    for port_spec in ports:
        if not isinstance(port_spec, string_types):
            expanded.append((port_spec, port_spec))
        elif '-' in port_spec:
            low, high = port_spec.split('-', 1)
            expanded.append((int(low.strip()), int(high.strip())))
        else:
            single = int(port_spec.strip())
            expanded.append((single, single))

    return expanded
+
+
def rule_expand_ports(rule):
    """Expand a rule's 'ports' entry into one rule dict per port/range."""
    if 'ports' not in rule:
        # Legacy from_port/to_port form: coerce string ports to int in place.
        for key in ('from_port', 'to_port'):
            if isinstance(rule.get(key), string_types):
                rule[key] = int(rule.get(key))
        return [rule]

    port_specs = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]

    expanded = []
    for bounds in ports_expand(port_specs):
        new_rule = rule.copy()
        del new_rule['ports']
        new_rule['from_port'], new_rule['to_port'] = sorted(bounds)
        expanded.append(new_rule)

    return expanded
+
+
def rules_expand_ports(rules):
    """Expand every rule's 'ports' list; falsy input is returned unchanged."""
    if not rules:
        return rules

    expanded = []
    for compound_rule in rules:
        expanded.extend(rule_expand_ports(compound_rule))
    return expanded
+
+
def rule_expand_source(rule, source_type):
    """Expand rule[source_type] (scalar or list) into one rule per source.

    Every other source-type key is stripped from the expanded copies.
    """
    raw = rule[source_type]
    sources = raw if isinstance(raw, list) else [raw]
    all_source_keys = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')

    expanded = []
    for source in sources:
        new_rule = {k: v for k, v in rule.items() if k not in all_source_keys}
        new_rule[source_type] = source
        expanded.append(new_rule)

    return expanded
+
+
def rule_expand_sources(rule):
    # takes a rule dict and returns a list of expanded rule dicts,
    # one per (source type, source) combination present in the rule
    source_keys = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')

    expanded = []
    for stype in source_keys:
        if stype in rule:
            expanded.extend(rule_expand_source(rule, stype))
    return expanded
+
+
def rules_expand_sources(rules):
    """Expand each rule by its source keys (cidr_ip, group_id, group_name, ...);
    falsy input is returned unchanged."""
    if not rules:
        return rules

    expanded = []
    for compound_rule in rules:
        expanded.extend(rule_expand_sources(compound_rule))
    return expanded
+
+
def update_rules_description(module, client, rule_type, group_id, ip_permissions):
    """Push new rule descriptions for the given direction ('in' or 'out').

    No-op in check mode; fails the module on AWS errors.
    """
    if module.check_mode:
        return
    try:
        if rule_type == "in":
            client.update_security_group_rule_descriptions_ingress(GroupId=group_id, IpPermissions=ip_permissions)
        if rule_type == "out":
            client.update_security_group_rule_descriptions_egress(GroupId=group_id, IpPermissions=ip_permissions)
    except (ClientError, BotoCoreError) as err:
        module.fail_json_aws(err, msg="Unable to update rule description for group %s" % group_id)
+
+
def fix_port_and_protocol(permission):
    """Normalize a permission dict in place: drop null ports, coerce types.

    Returns the same dict for call chaining.
    """
    for port_key in ('FromPort', 'ToPort'):
        if port_key not in permission:
            continue
        if permission[port_key] is None:
            # The API rejects explicit nulls, so remove the key entirely.
            del permission[port_key]
        else:
            permission[port_key] = int(permission[port_key])

    permission['IpProtocol'] = to_text(permission['IpProtocol'])

    return permission
+
+
def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id):
    """Revoke stale ingress/egress permissions; True if anything was revoked."""
    for permissions, direction in ((revoke_ingress, 'in'), (revoke_egress, 'out')):
        if permissions:
            revoke(client, module, permissions, group_id, direction)
    return bool(revoke_ingress or revoke_egress)
+
+
def revoke(client, module, ip_permissions, group_id, rule_type):
    """Revoke the given permissions in one direction; no-op in check mode."""
    if module.check_mode:
        return
    try:
        if rule_type == 'in':
            client.revoke_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
        elif rule_type == 'out':
            client.revoke_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
    except (BotoCoreError, ClientError) as err:
        direction = 'ingress rules' if rule_type == 'in' else 'egress rules'
        module.fail_json_aws(err, "Unable to revoke {0}: {1}".format(direction, ip_permissions))
+
+
def add_new_permissions(client, module, new_ingress, new_egress, group_id):
    """Authorize new ingress/egress permissions; True if anything was added."""
    for permissions, direction in ((new_ingress, 'in'), (new_egress, 'out')):
        if permissions:
            authorize(client, module, permissions, group_id, direction)
    return bool(new_ingress or new_egress)
+
+
def authorize(client, module, ip_permissions, group_id, rule_type):
    """Authorize the given permissions in one direction; no-op in check mode."""
    if module.check_mode:
        return
    try:
        if rule_type == 'in':
            client.authorize_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
        elif rule_type == 'out':
            client.authorize_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
    except (BotoCoreError, ClientError) as err:
        direction = 'ingress rules' if rule_type == 'in' else 'egress rules'
        module.fail_json_aws(err, "Unable to authorize {0}: {1}".format(direction, ip_permissions))
+
+
def validate_ip(module, cidr_ip):
    """Validate a CIDR string, warning and returning the masked network form
    when host bits are set; anything without exactly one '/' passes through."""
    split_addr = cidr_ip.split('/')
    if len(split_addr) == 2:
        # this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set
        # Get the network bits if IPv4, and validate if IPv6.
        try:
            ip = to_subnet(split_addr[0], split_addr[1])
            if ip != cidr_ip:
                module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
                            "check the network mask and make sure that only network bits are set: {1}.".format(
                                cidr_ip, ip))
        except ValueError:
            # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here
            try:
                # The isinstance() result is unused; the validation here is
                # ip_network() raising ValueError on a bad address.
                isinstance(ip_network(to_text(cidr_ip)), IPv6Network)
                ip = cidr_ip
            except ValueError:
                # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError
                # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits
                ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1]
                if ip6 != cidr_ip:
                    module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, "
                                "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6))
                return ip6
        return ip
    return cidr_ip
+
+
def update_tags(client, module, group_id, current_tags, tags, purge_tags):
    """Reconcile the group's tags with the requested set.

    Deletes tags not in *tags* (when purge_tags) and creates/updates the
    rest. No AWS calls are made in check mode. Returns True if the tag set
    would change.
    """
    tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)

    if not module.check_mode:
        if tags_to_delete:
            try:
                client.delete_tags(Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))

        # Add/update tags
        if tags_need_modify:
            try:
                client.create_tags(Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
            except (BotoCoreError, ClientError) as e:
                # BUGFIX: was module.fail_json(e, msg=...) — fail_json takes no
                # positional args, so the error path itself raised TypeError.
                # Use fail_json_aws like the delete branch above.
                module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify))

    return bool(tags_need_modify or tags_to_delete)
+
+
def update_rule_descriptions(module, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
    """Sync descriptions of rules that already exist on the group.

    Pending rules that match an existing rule except for their description
    are removed from the pending lists (in place) and pushed through the
    update-description API. Returns True if any description changed.
    """
    client = module.client('ec2')

    def extract_desc_updates(present_rules, pending_rules):
        # Collect pending rules equal to an existing one apart from the
        # description, removing them from the pending list as we go.
        stale = []
        for existing in present_rules:
            matches = [r for r in pending_rules
                       if rule_cmp(r, existing) and r.description != existing.description]
            for r in matches:
                pending_rules.remove(r)
            stale.extend(matches)
        return stale

    egress_updates = extract_desc_updates(present_egress, named_tuple_egress_list)
    ingress_updates = extract_desc_updates(present_ingress, named_tuple_ingress_list)

    changed = False
    if ingress_updates:
        update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_updates))
        changed = True
    if egress_updates:
        update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_updates))
        changed = True
    return changed
+
+
def create_security_group(client, module, name, description, vpc_id):
    """Create the security group and wait until AWS reports it consistently.

    Returns the freshly-described group dict, or None in check mode.
    """
    if module.check_mode:
        return None

    params = dict(GroupName=name, Description=description)
    if vpc_id:
        params['VpcId'] = vpc_id
    try:
        group = client.create_security_group(**params)
    except (BotoCoreError, ClientError) as err:
        module.fail_json_aws(err, msg="Unable to create security group")
    # A new VPC group implicitly gains an allow-all egress rule that the
    # create response does not include; poll until the describe output
    # reflects it, since AWS can take a few seconds to converge.
    while True:
        sleep(3)
        group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
        if not (group.get('VpcId') and not group.get('IpPermissionsEgress')):
            break
    return group
+
+
def wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
    """Poll the group until its rules converge on the submitted state.

    Re-describes the group up to `tries` times, 10s apart, for egress (VPC
    groups only, and only when rules_egress was given) and then ingress.
    Warns and returns the last-seen group if time runs out.
    """
    group_id = group['GroupId']
    tries = 6

    def await_rules(group, desired_rules, purge, rule_key):
        for i in range(tries):
            current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
            if purge and len(current_rules ^ set(desired_rules)) == 0:
                return group
            elif purge:
                conflicts = current_rules ^ set(desired_rules)
                # For cases where set comparison is equivalent, but invalid port/proto exist
                for a, b in itertools.combinations(conflicts, 2):
                    if rule_cmp(a, b):
                        conflicts.discard(a)
                        conflicts.discard(b)
                if not len(conflicts):
                    return group
            elif current_rules.issuperset(desired_rules) and not purge:
                # Without purging we only require that every desired rule is present.
                return group
            sleep(10)
            group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
        module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
        return group

    group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
    if 'VpcId' in group and module.params.get('rules_egress') is not None:
        group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
    return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
+
+
def group_exists(client, module, vpc_id, group_id, name):
    """Look up an existing security group by ID and/or name.

    Returns a tuple ``(group, groups)``: ``group`` is the matching group dict
    (or None when nothing matches) and ``groups`` is a lookup table of every
    group in the account keyed by both GroupId and GroupName, used later to
    resolve rule targets given as group names.
    """
    params = {'Filters': []}
    if group_id:
        params['GroupIds'] = [group_id]
    if name:
        # Add name to filters rather than params['GroupNames']
        # because params['GroupNames'] only checks the default vpc if no vpc is provided
        params['Filters'].append({'Name': 'group-name', 'Values': [name]})
    if vpc_id:
        params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
    # Don't filter by description to maintain backwards compatibility

    try:
        security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
        all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
    except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Error in describe_security_groups")

    if security_groups:
        groups = dict((group['GroupId'], group) for group in all_groups)
        groups.update(dict((group['GroupName'], group) for group in all_groups))
        if vpc_id:
            # On GroupName collisions, prefer groups in the requested VPC.
            vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id)
            groups.update(vpc_wins)
        # maintain backwards compatibility by using the last matching group
        return security_groups[-1], groups
    return None, {}
+
+
def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress):
    """Fail the module if rule descriptions are requested but unsupported.

    Rule descriptions require botocore >= 1.7.2, detected by the presence of
    the ``update_security_group_rule_descriptions_egress`` client method.

    :param client: boto3 EC2 client (only probed with hasattr).
    :param module: AnsibleAWSModule used to report the failure.
    :param rules: list of ingress rule dicts, or None.
    :param rules_egress: list of egress rule dicts, or None.
    """
    if not hasattr(client, "update_security_group_rule_descriptions_egress"):
        # Bug fix: the original expression
        #   rules if rules else [] + rules_egress if rules_egress else []
        # parsed as `rules if rules else (...)`, so egress rules were
        # silently ignored whenever any ingress rules were supplied.
        all_rules = (rules or []) + (rules_egress or [])
        if any('rule_desc' in rule for rule in all_rules):
            module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.")
+
+
def get_diff_final_resource(client, module, security_group):
    """Predict the group state after this run, for check/diff mode output.

    Merges the current (snake_cased) security group state with the rules and
    tags requested in module params, honoring the purge flags, and returns a
    dict shaped like the module's normal return value.
    """
    def get_account_id(security_group, module):
        # Fall back to an STS lookup when the group doesn't carry owner_id.
        try:
            owner_id = security_group.get('owner_id', module.client('sts').get_caller_identity()['Account'])
        except (BotoCoreError, ClientError) as e:
            owner_id = "Unable to determine owner_id: {0}".format(to_text(e))
        return owner_id

    def get_final_tags(security_group_tags, specified_tags, purge_tags):
        # Compute the tag set that would exist after applying specified_tags.
        if specified_tags is None:
            return security_group_tags
        tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags)
        end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete)
        end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete))
        end_result_tags.update(tags_need_modify)
        return end_result_tags

    def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules):
        # Convert user-specified rules into describe()-style dicts and merge
        # them with the existing rules (unless purging).
        if specified_rules is None:
            return security_group_rules
        if purge_rules:
            final_rules = []
        else:
            final_rules = list(security_group_rules)
        specified_rules = flatten_nested_targets(module, deepcopy(specified_rules))
        for rule in specified_rules:
            format_rule = {
                'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'),
                'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': []
            }
            if rule.get('proto', 'tcp') in ('all', '-1', -1):
                # 'all traffic' rules carry no port range.
                format_rule['ip_protocol'] = '-1'
                format_rule.pop('from_port')
                format_rule.pop('to_port')
            elif rule.get('ports'):
                if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)):
                    rule['ports'] = [rule['ports']]
                for port in rule.get('ports'):
                    if isinstance(port, string_types) and '-' in port:
                        # 'from-to' range shorthand, e.g. '8000-8100'.
                        format_rule['from_port'], format_rule['to_port'] = port.split('-')
                    else:
                        format_rule['from_port'] = format_rule['to_port'] = port
            elif rule.get('from_port') or rule.get('to_port'):
                format_rule['from_port'] = rule.get('from_port', rule.get('to_port'))
                format_rule['to_port'] = rule.get('to_port', rule.get('from_port'))
            for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'):
                if rule.get(source_type):
                    rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type)
                    if rule.get('rule_desc'):
                        format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}]
                    else:
                        if not isinstance(rule[source_type], list):
                            rule[source_type] = [rule[source_type]]
                        format_rule[rule_key] = [{source_type: target} for target in rule[source_type]]
            if rule.get('group_id') or rule.get('group_name'):
                rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0])
                format_rule['user_id_group_pairs'] = [{
                    'description': rule_sg.get('description', rule_sg.get('group_desc')),
                    'group_id': rule_sg.get('group_id', rule.get('group_id')),
                    'group_name': rule_sg.get('group_name', rule.get('group_name')),
                    'peering_status': rule_sg.get('peering_status'),
                    'user_id': rule_sg.get('user_id', get_account_id(security_group, module)),
                    'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']),
                    'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id')
                }]
                # Drop keys with no value so the diff matches describe() output.
                for k, v in list(format_rule['user_id_group_pairs'][0].items()):
                    if v is None:
                        format_rule['user_id_group_pairs'][0].pop(k)
            final_rules.append(format_rule)
        # Order final rules consistently
        final_rules.sort(key=get_ip_permissions_sort_key)
        return final_rules
    security_group_ingress = security_group.get('ip_permissions', [])
    specified_ingress = module.params['rules']
    purge_ingress = module.params['purge_rules']
    security_group_egress = security_group.get('ip_permissions_egress', [])
    specified_egress = module.params['rules_egress']
    purge_egress = module.params['purge_rules_egress']
    return {
        'description': module.params['description'],
        'group_id': security_group.get('group_id', 'sg-xxxxxxxx'),
        'group_name': security_group.get('group_name', module.params['name']),
        'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress),
        'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress),
        'owner_id': get_account_id(security_group, module),
        'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']),
        'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])}
+
+
def flatten_nested_targets(module, rules):
    """Collapse arbitrarily nested lists of CIDR targets into flat lists.

    Mutates each rule's 'cidr_ip' or 'cidr_ipv6' entry in place when it is a
    list, keeping only string leaves. Returns the rules unchanged otherwise
    (including when rules is None).
    """
    def _walk(items):
        for item in items:
            if isinstance(item, list):
                # Recurse into nested lists and re-yield their string leaves.
                for leaf in _walk(item):
                    yield leaf
            elif isinstance(item, string_types):
                yield item

    if rules is None:
        return rules
    for rule in rules:
        # Only one target key is flattened per rule; cidr_ip takes priority.
        for target_key in ('cidr_ip', 'cidr_ipv6'):
            if isinstance(rule.get(target_key), list):
                rule[target_key] = list(_walk(rule[target_key]))
                break
    return rules
+
+
def get_rule_sort_key(dicts):
    """Return the first truthy target value of a rule-target dict.

    Checked in priority order: cidr_ip, cidr_ipv6, prefix_list_id, group_id.
    Returns None when none of them is present (or all are falsy).
    """
    for key in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id', 'group_id'):
        value = dicts.get(key)
        if value:
            return value
    return None
+
+
def get_ip_permissions_sort_key(rule):
    """Sort a permission's target lists in place and return its first target.

    Each non-empty target list is ordered with get_rule_sort_key; the value
    of the first entry of the first non-empty list (in priority order) is
    returned, or None when the rule has no targets.
    """
    for list_key, value_key in (
        ('ip_ranges', 'cidr_ip'),
        ('ipv6_ranges', 'cidr_ipv6'),
        ('prefix_list_ids', 'prefix_list_id'),
        ('user_id_group_pairs', 'group_id'),
    ):
        targets = rule.get(list_key)
        if targets:
            targets.sort(key=get_rule_sort_key)
            return targets[0][value_key]
    return None
+
+
def main():
    """Create, update, or delete an EC2 security group and its rules.

    Reconciles the requested ingress/egress rules, tags and description
    against the existing group (if any), optionally purging anything not
    specified, then exits with the resulting group facts.
    """
    argument_spec = dict(
        name=dict(),
        group_id=dict(),
        description=dict(),
        vpc_id=dict(),
        rules=dict(type='list'),
        rules_egress=dict(type='list'),
        state=dict(default='present', type='str', choices=['present', 'absent']),
        purge_rules=dict(default=True, required=False, type='bool'),
        purge_rules_egress=dict(default=True, required=False, type='bool'),
        tags=dict(required=False, type='dict', aliases=['resource_tags']),
        purge_tags=dict(default=True, required=False, type='bool')
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['name', 'group_id']],
        required_if=[['state', 'present', ['name']]],
    )

    name = module.params['name']
    group_id = module.params['group_id']
    description = module.params['description']
    vpc_id = module.params['vpc_id']
    # Normalize the requested rules: flatten nested CIDR lists, expand port
    # and source shorthand notations, then drop exact duplicates.
    rules = flatten_nested_targets(module, deepcopy(module.params['rules']))
    rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress']))
    rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules)))
    rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress)))
    state = module.params.get('state')
    purge_rules = module.params['purge_rules']
    purge_rules_egress = module.params['purge_rules_egress']
    tags = module.params['tags']
    purge_tags = module.params['purge_tags']

    if state == 'present' and not description:
        module.fail_json(msg='Must provide description when state is present.')

    changed = False
    client = module.client('ec2')

    verify_rules_with_descriptions_permitted(client, module, rules, rules_egress)
    group, groups = group_exists(client, module, vpc_id, group_id, name)
    group_created_new = not bool(group)

    global current_account_id
    current_account_id = get_aws_account_id(module)

    before = {}
    after = {}

    # Ensure requested group is absent
    if state == 'absent':
        if group:
            # found a match, delete it
            before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
            before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
            try:
                if not module.check_mode:
                    client.delete_security_group(GroupId=group['GroupId'])
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group)
            else:
                group = None
                changed = True
        else:
            # no match found, no changes required
            pass

    # Ensure requested group is present
    elif state == 'present':
        if group:
            # existing group
            before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
            before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
            if group['Description'] != description:
                module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
                            "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
        else:
            # no match found, create it
            group = create_security_group(client, module, name, description, vpc_id)
            changed = True

        if tags is not None and group is not None:
            current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
            changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)

    if group:
        named_tuple_ingress_list = []
        named_tuple_egress_list = []
        current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
        current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])

        for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
                                                            (rules_egress, 'out', named_tuple_egress_list)]:
            if new_rules is None:
                continue
            for rule in new_rules:
                target_type, target, target_group_created = get_target_from_rule(
                    module, client, rule, name, group, groups, vpc_id)
                changed |= target_group_created

                if rule.get('proto', 'tcp') in ('all', '-1', -1):
                    rule['proto'] = '-1'
                    rule['from_port'] = None
                    rule['to_port'] = None
                try:
                    # Numeric protocol numbers are normalized to strings and
                    # carry no port range.
                    int(rule.get('proto', 'tcp'))
                    rule['proto'] = to_text(rule.get('proto', 'tcp'))
                    rule['from_port'] = None
                    rule['to_port'] = None
                except ValueError:
                    # rule does not use numeric protocol spec
                    pass

                named_tuple_rule_list.append(
                    Rule(
                        port_range=(rule['from_port'], rule['to_port']),
                        protocol=to_text(rule.get('proto', 'tcp')),
                        target=target, target_type=target_type,
                        description=rule.get('rule_desc'),
                    )
                )

        if module.params.get('rules_egress') is None and 'VpcId' in group:
            # when no egress rules are specified and we're in a VPC,
            # we add in a default allow all out rule, which was the
            # default behavior before egress rules were added
            rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
            if rule in current_egress:
                named_tuple_egress_list.append(rule)
            if rule not in current_egress:
                current_egress.append(rule)

        # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
        present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
        present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))

        if purge_rules:
            revoke_ingress = []
            for p in present_ingress:
                if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]):
                    revoke_ingress.append(to_permission(p))
        else:
            revoke_ingress = []
        if purge_rules_egress and module.params.get('rules_egress') is not None:
            # Bug fix: this comparison used `is []`, which is always False
            # (identity against a fresh list literal), so an explicitly empty
            # rules_egress never took the branch that preserves the default
            # allow-all-out rule.
            if module.params.get('rules_egress') == []:
                revoke_egress = [
                    to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
                    if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
                ]
            else:
                revoke_egress = []
                for p in present_egress:
                    if not any([rule_cmp(p, b) for b in named_tuple_egress_list]):
                        revoke_egress.append(to_permission(p))
        else:
            revoke_egress = []

        # named_tuple_ingress_list and named_tuple_egress_list got updated by
        # method update_rule_descriptions, deep copy these two lists to new
        # variables for the record of the 'desired' ingress and egress sg permissions
        desired_ingress = deepcopy(named_tuple_ingress_list)
        desired_egress = deepcopy(named_tuple_egress_list)

        changed |= update_rule_descriptions(module, group['GroupId'], present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list)

        # Revoke old rules
        changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])

        # Compute the permissions to authorize only now: the desired rule
        # lists may have been amended above (default egress rule, rule
        # descriptions). An earlier duplicate computation was dead code.
        new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
        new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
        # Authorize new rules
        changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])

        if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
            # A new group with no rules provided is already being awaited.
            # When it is created we wait for the default egress rule to be added by AWS
            security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
        elif changed and not module.check_mode:
            # keep pulling until current security group rules match the desired ingress and egress rules
            security_group = wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
        else:
            security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
        security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
        security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))

    else:
        security_group = {'group_id': None}

    if module._diff:
        if module.params['state'] == 'present':
            after = get_diff_final_resource(client, module, security_group)
            if before.get('ip_permissions'):
                before['ip_permissions'].sort(key=get_ip_permissions_sort_key)

        security_group['diff'] = [{'before': before, 'after': after}]

    module.exit_json(changed=changed, **security_group)


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/ec2_instance.py b/test/support/integration/plugins/modules/ec2_instance.py
new file mode 100644
index 0000000000..7a587fb941
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_instance.py
@@ -0,0 +1,1805 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
# Module metadata consumed by Ansible's documentation tooling, not at runtime.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ec2_instance
+short_description: Create & manage EC2 instances
+description:
+ - Create and manage AWS EC2 instances.
+ - >
+ Note: This module does not support creating
+ L(EC2 Spot instances,https://aws.amazon.com/ec2/spot/). The M(ec2) module
+ can create and manage spot instances.
+version_added: "2.5"
+author:
+ - Ryan Scott Brown (@ryansb)
+requirements: [ "boto3", "botocore" ]
+options:
+ instance_ids:
+ description:
+ - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
+ type: list
+ state:
+ description:
+ - Goal state for the instances.
+ choices: [present, terminated, running, started, stopped, restarted, rebooted, absent]
+ default: present
+ type: str
+ wait:
+ description:
+ - Whether or not to wait for the desired state (use wait_timeout to customize this).
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - How long to wait (in seconds) for the instance to finish booting/terminating.
+ default: 600
+ type: int
+ instance_type:
+ description:
+ - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
+ Only required when instance is not already present.
+ default: t2.micro
+ type: str
+ user_data:
+ description:
+ - Opaque blob of data which is made available to the ec2 instance
+ type: str
+ tower_callback:
+ description:
+ - Preconfigured user-data to enable an instance to perform a Tower callback (Linux only).
+ - Mutually exclusive with I(user_data).
+ - For Windows instances, to enable remote access via Ansible set I(tower_callback.windows) to true, and optionally set an admin password.
+ - If using 'windows' and 'set_password', callback to Tower will not be performed but the instance will be ready to receive winrm connections from Ansible.
+ type: dict
+ suboptions:
+ tower_address:
+ description:
+ - IP address or DNS name of Tower server. Must be accessible via this address from the VPC that this instance will be launched in.
+ type: str
+ job_template_id:
+ description:
+ - Either the integer ID of the Tower Job Template, or the name (name supported only for Tower 3.2+).
+ type: str
+ host_config_key:
+ description:
+ - Host configuration secret key generated by the Tower job template.
+ type: str
+ tags:
+ description:
+ - A hash/dictionary of tags to add to the new instance or to add/remove from an existing one.
+ type: dict
+ purge_tags:
+ description:
+ - Delete any tags not specified in the task that are on the instance.
+ This means you have to specify all the desired tags on each task affecting an instance.
+ default: false
+ type: bool
+ image:
+ description:
+ - An image to use for the instance. The M(ec2_ami_info) module may be used to retrieve images.
+ One of I(image) or I(image_id) are required when instance is not already present.
+ type: dict
+ suboptions:
+ id:
+ description:
+ - The AMI ID.
+ type: str
+ ramdisk:
+ description:
+ - Overrides the AMI's default ramdisk ID.
+ type: str
+ kernel:
+ description:
+ - a string AKI to override the AMI kernel.
+ image_id:
+ description:
+ - I(ami) ID to use for the instance. One of I(image) or I(image_id) are required when instance is not already present.
+ - This is an alias for I(image.id).
+ type: str
+ security_groups:
+ description:
+ - A list of security group IDs or names (strings). Mutually exclusive with I(security_group).
+ type: list
+ security_group:
+ description:
+ - A security group ID or name. Mutually exclusive with I(security_groups).
+ type: str
+ name:
+ description:
+ - The Name tag for the instance.
+ type: str
+ vpc_subnet_id:
+ description:
+ - The subnet ID in which to launch the instance (VPC)
+ If none is provided, ec2_instance will choose the default zone of the default VPC.
+ aliases: ['subnet_id']
+ type: str
+ network:
+ description:
+ - Either a dictionary containing the key 'interfaces' corresponding to a list of network interface IDs or
+ containing specifications for a single network interface.
+ - Use the ec2_eni module to create ENIs with special settings.
+ type: dict
+ suboptions:
+ interfaces:
+ description:
+ - a list of ENI IDs (strings) or a list of objects containing the key I(id).
+ type: list
+ assign_public_ip:
+ description:
+ - when true assigns a public IP address to the interface
+ type: bool
+ private_ip_address:
+ description:
+ - an IPv4 address to assign to the interface
+ type: str
+ ipv6_addresses:
+ description:
+ - a list of IPv6 addresses to assign to the network interface
+ type: list
+ source_dest_check:
+ description:
+ - controls whether source/destination checking is enabled on the interface
+ type: bool
+ description:
+ description:
+ - a description for the network interface
+ type: str
+ private_ip_addresses:
+ description:
+ - a list of IPv4 addresses to assign to the network interface
+ type: list
+ subnet_id:
+ description:
+ - the subnet to connect the network interface to
+ type: str
+ delete_on_termination:
+ description:
+ - Delete the interface when the instance it is attached to is
+ terminated.
+ type: bool
+ device_index:
+ description:
+ - The index of the interface to modify
+ type: int
+ groups:
+ description:
+ - a list of security group IDs to attach to the interface
+ type: list
+ volumes:
+ description:
+ - A list of block device mappings, by default this will always use the AMI root device so the volumes option is primarily for adding more storage.
+ - A mapping contains the (optional) keys device_name, virtual_name, ebs.volume_type, ebs.volume_size, ebs.kms_key_id,
+ ebs.iops, and ebs.delete_on_termination.
+ - For more information about each parameter, see U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html).
+ type: list
+ launch_template:
+ description:
+ - The EC2 launch template to base instance configuration on.
+ type: dict
+ suboptions:
+ id:
+ description:
+ - the ID of the launch template (optional if name is specified).
+ type: str
+ name:
+ description:
+ - the pretty name of the launch template (optional if id is specified).
+ type: str
+ version:
+ description:
+ - the specific version of the launch template to use. If unspecified, the template default is chosen.
+ key_name:
+ description:
+ - Name of the SSH access key to assign to the instance - must exist in the region the instance is created.
+ type: str
+ availability_zone:
+ description:
+ - Specify an availability zone to use the default subnet in it. Useful if not specifying the I(vpc_subnet_id) parameter.
+ - If no subnet, ENI, or availability zone is provided, the default subnet in the default VPC will be used in the first AZ (alphabetically sorted).
+ type: str
+ instance_initiated_shutdown_behavior:
+ description:
+ - Whether to stop or terminate an instance upon shutdown.
+ choices: ['stop', 'terminate']
+ type: str
+ tenancy:
+ description:
+ - What type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
+ choices: ['dedicated', 'default']
+ type: str
+ termination_protection:
+ description:
+ - Whether to enable termination protection.
+ This module will not terminate an instance with termination protection active, it must be turned off first.
+ type: bool
+ cpu_credit_specification:
+ description:
+ - For T series instances, choose whether to allow increased charges to buy CPU credits if the default pool is depleted.
+ - Choose I(unlimited) to enable buying additional CPU credits.
+ choices: ['unlimited', 'standard']
+ type: str
+ cpu_options:
+ description:
+ - Reduce the number of vCPU exposed to the instance.
+ - Those parameters can only be set at instance launch. The two suboptions threads_per_core and core_count are mandatory.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for combinations available.
+ - Requires botocore >= 1.10.16
+ version_added: 2.7
+ type: dict
+ suboptions:
+ threads_per_core:
+ description:
+ - Select the number of threads per core to enable. Disable or Enable Intel HT.
+ choices: [1, 2]
+ required: true
+ type: int
+ core_count:
+ description:
+ - Set the number of core to enable.
+ required: true
+ type: int
+ detailed_monitoring:
+ description:
+ - Whether to allow detailed cloudwatch metrics to be collected, enabling more detailed alerting.
+ type: bool
+ ebs_optimized:
+ description:
+ - Whether the instance should use optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+ type: bool
+ filters:
+ description:
+ - A dict of filters to apply when deciding whether existing instances match and should be altered. Each dict item
+ consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html).
+ for possible filters. Filter names and values are case sensitive.
+ - By default, instances are filtered for counting by their "Name" tag, base AMI, state (running, by default), and
+ subnet ID. Any queryable filter can be used. Good candidates are specific tags, SSH keys, or security groups.
+ type: dict
+ instance_role:
+ description:
+ - The ARN or name of an EC2-enabled instance role to be used. If a name is not provided in arn format
+ then the ListInstanceProfiles permission must also be granted.
+ U(https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListInstanceProfiles.html) If no full ARN is provided,
+ the role with a matching name will be used from the active AWS account.
+ type: str
+ placement_group:
+ description:
+ - The placement group that needs to be assigned to the instance
+ version_added: 2.8
+ type: str
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Terminate every running instance in a region. Use with EXTREME caution.
+- ec2_instance:
+ state: absent
+ filters:
+ instance-state-name: running
+
+# restart a particular instance by its ID
+- ec2_instance:
+ state: restarted
+ instance_ids:
+ - i-12345678
+
+# start an instance with a public IP address
+- ec2_instance:
+ name: "public-compute-instance"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ instance_type: c5.large
+ security_group: default
+ network:
+ assign_public_ip: true
+ image_id: ami-123456
+ tags:
+ Environment: Testing
+
+# start an instance and Add EBS
+- ec2_instance:
+ name: "public-withebs-instance"
+ vpc_subnet_id: subnet-5ca1ab1e
+ instance_type: t2.micro
+ key_name: "prod-ssh-key"
+ security_group: default
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ volume_size: 16
+ delete_on_termination: true
+
+# start an instance with a cpu_options
+- ec2_instance:
+ name: "public-cpuoption-instance"
+ vpc_subnet_id: subnet-5ca1ab1e
+ tags:
+ Environment: Testing
+ instance_type: c4.large
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+
+# start an instance and have it begin a Tower callback on boot
+- ec2_instance:
+ name: "tower-callback-test"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ security_group: default
+ tower_callback:
+ # IP or hostname of tower server
+ tower_address: 1.2.3.4
+ job_template_id: 876
+ host_config_key: '[secret config key goes here]'
+ network:
+ assign_public_ip: true
+ image_id: ami-123456
+ cpu_credit_specification: unlimited
+ tags:
+ SomeThing: "A value"
+
+# start an instance with ENI (An existing ENI ID is required)
+- ec2_instance:
+ name: "public-eni-instance"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ network:
+ interfaces:
+ - id: "eni-12345"
+ tags:
+ Env: "eni_on"
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ instance_type: t2.micro
+ image_id: ami-123456
+
+# add second ENI interface
+- ec2_instance:
+ name: "public-eni-instance"
+ network:
+ interfaces:
+ - id: "eni-12345"
+ - id: "eni-67890"
+ image_id: ami-123456
+ tags:
+ Env: "eni_on"
+ instance_type: t2.micro
+'''
+
+RETURN = '''
+instances:
+ description: a list of ec2 instances
+ returned: when wait == true
+ type: complex
+ contains:
+ ami_launch_index:
+ description: The AMI launch index, which can be used to find this instance in the launch group.
+ returned: always
+ type: int
+ sample: 0
+ architecture:
+ description: The architecture of the image
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ returned: always
+ type: str
+ sample: /dev/sdh
+ ebs:
+ description: Parameters used to automatically set up EBS volumes when the instance is launched.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ volume_id:
+ description: The ID of the EBS volume
+ returned: always
+ type: str
+ sample: vol-12345678
+ client_token:
+ description: The idempotency token you provided when you launched the instance, if applicable.
+ returned: always
+ type: str
+ sample: mytoken
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ hypervisor:
+ description: The hypervisor type of the instance.
+ returned: always
+ type: str
+ sample: xen
+ iam_instance_profile:
+ description: The IAM instance profile associated with the instance, if applicable.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The Amazon Resource Name (ARN) of the instance profile.
+ returned: always
+ type: str
+ sample: "arn:aws:iam::000012345678:instance-profile/myprofile"
+ id:
+ description: The ID of the instance profile
+ returned: always
+ type: str
+ sample: JFJ397FDG400FG9FD1N
+ image_id:
+ description: The ID of the AMI used to launch the instance.
+ returned: always
+ type: str
+ sample: ami-0011223344
+ instance_id:
+ description: The ID of the instance.
+ returned: always
+ type: str
+ sample: i-012345678
+ instance_type:
+ description: The instance type size of the running instance.
+ returned: always
+ type: str
+ sample: t2.micro
+ key_name:
+ description: The name of the key pair, if this instance was launched with an associated key pair.
+ returned: always
+ type: str
+ sample: my-key
+ launch_time:
+ description: The time the instance was launched.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ monitoring:
+ description: The monitoring for the instance.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+ returned: always
+ type: str
+ sample: disabled
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ association:
+ description: The association information for an Elastic IPv4 associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ attachment:
+ description: The network interface attachment.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ attachment_id:
+ description: The ID of the network interface attachment.
+ returned: always
+ type: str
+ sample: eni-attach-3aff3f
+ delete_on_termination:
+ description: Indicates whether the network interface is deleted when the instance is terminated.
+ returned: always
+ type: bool
+ sample: true
+ device_index:
+ description: The index of the device on the instance for the network interface attachment.
+ returned: always
+ type: int
+ sample: 0
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ description:
+ description: The description.
+ returned: always
+ type: str
+ sample: My interface
+ groups:
+ description: One or more security groups.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-abcdef12
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: mygroup
+ ipv6_addresses:
+ description: One or more IPv6 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ipv6_address:
+ description: The IPv6 address.
+ returned: always
+ type: str
+ sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ owner_id:
+ description: The AWS account ID of the owner of the network interface.
+ returned: always
+ type: str
+ sample: 01234567890
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ private_ip_addresses:
+ description: The private IPv4 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ association:
+ description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ primary:
+ description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
+ returned: always
+ type: bool
+ sample: true
+ private_ip_address:
+ description: The private IPv4 address of the network interface.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The status of the network interface.
+ returned: always
+ type: str
+ sample: in-use
+ subnet_id:
+ description: The ID of the subnet for the network interface.
+ returned: always
+ type: str
+ sample: subnet-0123456
+ vpc_id:
+ description: The ID of the VPC for the network interface.
+ returned: always
+ type: str
+ sample: vpc-0123456
+ placement:
+ description: The location where the instance launched, if applicable.
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The Availability Zone of the instance.
+ returned: always
+ type: str
+ sample: ap-southeast-2a
+ group_name:
+ description: The name of the placement group the instance is in (for cluster compute instances).
+ returned: always
+ type: str
+ sample: ""
+ tenancy:
+ description: The tenancy of the instance (if the instance is running in a VPC).
+ returned: always
+ type: str
+ sample: default
+ private_dns_name:
+ description: The private DNS name.
+ returned: always
+ type: str
+ sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ product_codes:
+ description: One or more product codes.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ product_code_id:
+ description: The product code.
+ returned: always
+ type: str
+ sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
+ product_code_type:
+ description: The type of product code.
+ returned: always
+ type: str
+ sample: marketplace
+ public_dns_name:
+ description: The public DNS name assigned to the instance.
+ returned: always
+ type: str
+ sample:
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance
+ returned: always
+ type: str
+ sample: 52.0.0.1
+ root_device_name:
+ description: The device name of the root device
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ network.source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ state:
+ description: The current state of the instance.
+ returned: always
+ type: complex
+ contains:
+ code:
+ description: The low byte represents the state.
+ returned: always
+ type: int
+ sample: 16
+ name:
+ description: The name of the state.
+ returned: always
+ type: str
+ sample: running
+ state_transition_reason:
+ description: The reason for the most recent state transition.
+ returned: always
+ type: str
+ sample:
+ subnet_id:
+ description: The ID of the subnet in which the instance is running.
+ returned: always
+ type: str
+ sample: subnet-00abcdef
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: dict
+ sample: vpc-0011223344
+'''
+
+import re
+import uuid
+import string
+import textwrap
+import time
+from collections import namedtuple
+
+try:
+ import boto3
+ import botocore.exceptions
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.six import text_type, string_types
+from ansible.module_utils.six.moves.urllib import parse as urlparse
+from ansible.module_utils._text import to_bytes, to_native
+import ansible.module_utils.ec2 as ec2_utils
+from ansible.module_utils.ec2 import (AWSRetry,
+ ansible_dict_to_boto3_filter_list,
+ compare_aws_tags,
+ boto3_tag_list_to_ansible_dict,
+ ansible_dict_to_boto3_tag_list,
+ camel_dict_to_snake_dict)
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+
# Global AnsibleAWSModule handle; populated by main() so the helper functions
# below can reach module.params / module.client() / fail_json() without the
# module object being threaded through every call.
module = None
+
+
def tower_callback_script(tower_conf, windows=False, passwd=None):
    """Build a user-data payload that phones home to an Ansible Tower job-template callback.

    :param tower_conf: dict with 'tower_address', 'job_template_id' and
        'host_config_key' keys (all three required on the non-Windows path).
    :param windows: when True, emit a PowerShell ``<powershell>`` payload
        instead of a bash callback script.
    :param passwd: optional administrator password to set on Windows before
        the remoting-prep script is run.
    :return: native string suitable for the EC2 UserData field.
    :raises NotImplementedError: for unsupported combinations of arguments.
    """
    # Remoting-prep script published in the Ansible repo; downloaded and
    # executed by the generated PowerShell payload on first boot.
    script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'
    if windows and passwd is not None:
        script_tpl = """<powershell>
        $admin = [adsi]("WinNT://./administrator, user")
        $admin.PSBase.Invoke("SetPassword", "{PASS}")
        Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
        </powershell>
        """
        return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url))
    elif windows and passwd is None:
        # Same payload minus the SetPassword step. PASS is unused by this
        # template but kept in the format() call so both branches share shape.
        script_tpl = """<powershell>
        $admin = [adsi]("WinNT://./administrator, user")
        Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
        </powershell>
        """
        return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url))
    elif not windows:
        for p in ['tower_address', 'job_template_id', 'host_config_key']:
            if p not in tower_conf:
                module.fail_json(msg="Incomplete tower_callback configuration. tower_callback.{0} not set.".format(p))

        if isinstance(tower_conf['job_template_id'], string_types):
            # The template may be referenced by name; URL-quote it so it is
            # safe to embed in the callback path.
            tower_conf['job_template_id'] = urlparse.quote(tower_conf['job_template_id'])
        # string.Template (not str.format) so only ${tower_address},
        # ${template_id} and ${host_config_key} are substituted;
        # safe_substitute leaves the bash variables ($attempt, $status_code,
        # awk's $2, ...) untouched and `$$` renders a literal `$`.
        tpl = string.Template(textwrap.dedent("""#!/bin/bash
        set -x

        retry_attempts=10
        attempt=0
        while [[ $attempt -lt $retry_attempts ]]
        do
          status_code=`curl --max-time 10 -v -k -s -i \
            --data "host_config_key=${host_config_key}" \
            'https://${tower_address}/api/v2/job_templates/${template_id}/callback/' \
            | head -n 1 \
            | awk '{print $2}'`
          if [[ $status_code == 404 ]]
            then
            status_code=`curl --max-time 10 -v -k -s -i \
              --data "host_config_key=${host_config_key}" \
              'https://${tower_address}/api/v1/job_templates/${template_id}/callback/' \
              | head -n 1 \
              | awk '{print $2}'`
            # fall back to using V1 API for Tower 3.1 and below, since v2 API will always 404
          fi
          if [[ $status_code == 201 ]]
            then
            exit 0
          fi
          attempt=$(( attempt + 1 ))
          echo "$${status_code} received... retrying in 1 minute. (Attempt $${attempt})"
          sleep 60
        done
        exit 1
        """))
        return tpl.safe_substitute(tower_address=tower_conf['tower_address'],
                                   template_id=tower_conf['job_template_id'],
                                   host_config_key=tower_conf['host_config_key'])
    raise NotImplementedError("Only windows with remote-prep or non-windows with tower job callback supported so far.")
+
+
@AWSRetry.jittered_backoff()
def manage_tags(match, new_tags, purge_tags, ec2):
    """Reconcile the tags on an instance with the requested tag set.

    :param match: boto3 instance dict (reads 'Tags' and 'InstanceId').
    :param new_tags: desired tags as a plain dict.
    :param purge_tags: when True, tags not in new_tags are removed.
    :param ec2: boto3 EC2 client.
    :return: True when any tag was created or deleted.
    """
    current = boto3_tag_list_to_ansible_dict(match['Tags'])
    to_set, to_delete = compare_aws_tags(
        current, new_tags,
        purge_tags=purge_tags,
    )

    if to_set:
        ec2.create_tags(
            Resources=[match['InstanceId']],
            Tags=ansible_dict_to_boto3_tag_list(to_set),
        )
    if to_delete:
        # DeleteTags only removes a tag when the supplied value matches, so
        # send the current values alongside the keys being purged.
        stale = {key: current.get(key) for key in to_delete}
        ec2.delete_tags(
            Resources=[match['InstanceId']],
            Tags=ansible_dict_to_boto3_tag_list(stale),
        )

    return bool(to_set) or bool(to_delete)
+
+
def build_volume_spec(params):
    """Translate the module's ``volumes`` parameter into BlockDeviceMappings.

    Integer-valued EBS fields are coerced to real ints in place (users may
    supply them as strings) before the snake_case -> CamelCase conversion.
    """
    volumes = params.get('volumes') or []
    for vol in volumes:
        if 'ebs' not in vol:
            continue
        for key in ('volume_size', 'iops'):
            if key in vol['ebs']:
                vol['ebs'][key] = int(vol['ebs'][key])
    return [ec2_utils.snake_dict_to_camel_dict(vol, capitalize_first=True) for vol in volumes]
+
+
def add_or_update_instance_profile(instance, desired_profile_name):
    """Ensure the instance's IAM instance profile matches the requested one.

    :param instance: boto3 instance dict (camel-cased keys).
    :param desired_profile_name: IAM instance profile name or ARN.
    :return: True when an association was created or replaced, False when no
        change was needed. Fails the module (fail_json_aws) on AWS errors.
    """
    instance_profile_setting = instance.get('IamInstanceProfile')
    if instance_profile_setting and desired_profile_name:
        if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')):
            # great, the profile we asked for is what's there
            return False

        # Resolve the requested name to an ARN once and reuse it below; the
        # previous code issued a second, identical IAM lookup for the replace call.
        desired_arn = determine_iam_role(desired_profile_name)
        if instance_profile_setting.get('Arn') == desired_arn:
            return False

        # update association
        ec2 = module.client('ec2')
        try:
            association = ec2.describe_iam_instance_profile_associations(
                Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}])
        except botocore.exceptions.ClientError as e:
            # check for InvalidAssociationID.NotFound
            module.fail_json_aws(e, "Could not find instance profile association")
        try:
            ec2.replace_iam_instance_profile_association(
                AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'],
                IamInstanceProfile={'Arn': desired_arn}
            )
            return True
        except botocore.exceptions.ClientError as e:
            module.fail_json_aws(e, "Could not associate instance profile")

    if not instance_profile_setting and desired_profile_name:
        # create association
        ec2 = module.client('ec2')
        try:
            ec2.associate_iam_instance_profile(
                IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)},
                InstanceId=instance['InstanceId']
            )
            return True
        except botocore.exceptions.ClientError as e:
            module.fail_json_aws(e, "Could not associate new instance profile")

    return False
+
+
+def build_network_spec(params, ec2=None):
+ """
+ Returns list of interfaces [complex]
+ Interface type: {
+ 'AssociatePublicIpAddress': True|False,
+ 'DeleteOnTermination': True|False,
+ 'Description': 'string',
+ 'DeviceIndex': 123,
+ 'Groups': [
+ 'string',
+ ],
+ 'Ipv6AddressCount': 123,
+ 'Ipv6Addresses': [
+ {
+ 'Ipv6Address': 'string'
+ },
+ ],
+ 'NetworkInterfaceId': 'string',
+ 'PrivateIpAddress': 'string',
+ 'PrivateIpAddresses': [
+ {
+ 'Primary': True|False,
+ 'PrivateIpAddress': 'string'
+ },
+ ],
+ 'SecondaryPrivateIpAddressCount': 123,
+ 'SubnetId': 'string'
+ },
+ """
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ interfaces = []
+ network = params.get('network') or {}
+ if not network.get('interfaces'):
+ # they only specified one interface
+ spec = {
+ 'DeviceIndex': 0,
+ }
+ if network.get('assign_public_ip') is not None:
+ spec['AssociatePublicIpAddress'] = network['assign_public_ip']
+
+ if params.get('vpc_subnet_id'):
+ spec['SubnetId'] = params['vpc_subnet_id']
+ else:
+ default_vpc = get_default_vpc(ec2)
+ if default_vpc is None:
+ raise module.fail_json(
+ msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance")
+ else:
+ sub = get_default_subnet(ec2, default_vpc)
+ spec['SubnetId'] = sub['SubnetId']
+
+ if network.get('private_ip_address'):
+ spec['PrivateIpAddress'] = network['private_ip_address']
+
+ if params.get('security_group') or params.get('security_groups'):
+ groups = discover_security_groups(
+ group=params.get('security_group'),
+ groups=params.get('security_groups'),
+ subnet_id=spec['SubnetId'],
+ ec2=ec2
+ )
+ spec['Groups'] = [g['GroupId'] for g in groups]
+ if network.get('description') is not None:
+ spec['Description'] = network['description']
+ # TODO more special snowflake network things
+
+ return [spec]
+
+ # handle list of `network.interfaces` options
+ for idx, interface_params in enumerate(network.get('interfaces', [])):
+ spec = {
+ 'DeviceIndex': idx,
+ }
+
+ if isinstance(interface_params, string_types):
+ # naive case where user gave
+ # network_interfaces: [eni-1234, eni-4567, ....]
+ # put into normal data structure so we don't dupe code
+ interface_params = {'id': interface_params}
+
+ if interface_params.get('id') is not None:
+ # if an ID is provided, we don't want to set any other parameters.
+ spec['NetworkInterfaceId'] = interface_params['id']
+ interfaces.append(spec)
+ continue
+
+ spec['DeleteOnTermination'] = interface_params.get('delete_on_termination', True)
+
+ if interface_params.get('ipv6_addresses'):
+ spec['Ipv6Addresses'] = [{'Ipv6Address': a} for a in interface_params.get('ipv6_addresses', [])]
+
+ if interface_params.get('private_ip_address'):
+ spec['PrivateIpAddress'] = interface_params.get('private_ip_address')
+
+ if interface_params.get('description'):
+ spec['Description'] = interface_params.get('description')
+
+ if interface_params.get('subnet_id', params.get('vpc_subnet_id')):
+ spec['SubnetId'] = interface_params.get('subnet_id', params.get('vpc_subnet_id'))
+ elif not spec.get('SubnetId') and not interface_params['id']:
+ # TODO grab a subnet from default VPC
+ raise ValueError('Failed to assign subnet to interface {0}'.format(interface_params))
+
+ interfaces.append(spec)
+ return interfaces
+
+
def warn_if_public_ip_assignment_changed(instance):
    """Warn when the requested public-IP setting differs from the instance's.

    Public IP assignment is fixed at launch time (a non-modifiable
    attribute), so a mismatch can only be reported, never corrected.
    """
    network_params = module.params.get('network') or {}
    assign_public_ip = network_params.get('assign_public_ip')
    if assign_public_ip is None:
        return

    # A populated PublicDnsName is the observable sign of an assigned public IP.
    has_public_dns = bool(instance.get('PublicDnsName'))
    if has_public_dns != bool(assign_public_ip):
        module.warn(
            "Unable to modify public ip assignment to {0} for instance {1}. "
            "Whether or not to assign a public IP is determined during instance creation.".format(
                assign_public_ip, instance['InstanceId']))
+
+
def warn_if_cpu_options_changed(instance):
    """Warn when requested cpu_options differ from the instance's actual ones.

    CpuOptions (core count / threads per core) is a non-modifiable attribute,
    so differences can only be reported, never corrected.
    """
    cpu_options = module.params.get('cpu_options')
    if cpu_options is None:
        return

    # Check that the CpuOptions set are the same and warn if not
    core_count_curr = instance['CpuOptions'].get('CoreCount')
    core_count = cpu_options.get('core_count')
    threads_per_core_curr = instance['CpuOptions'].get('ThreadsPerCore')
    threads_per_core = cpu_options.get('threads_per_core')
    if core_count_curr != core_count:
        # BUG FIX: corrected the typos in this user-facing warning
        # ("a number of core is determinted" -> "the number of cores is determined")
        module.warn(
            "Unable to modify core_count from {0} to {1}. "
            "Assigning the number of cores is determined during instance creation".format(
                core_count_curr, core_count))

    if threads_per_core_curr != threads_per_core:
        module.warn(
            "Unable to modify threads_per_core from {0} to {1}. "
            "Assigning a number of threads per core is determined during instance creation.".format(
                threads_per_core_curr, threads_per_core))
+
+
def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, ec2=None):
    """Resolve security group names/IDs to full group dicts within one VPC.

    :param group: a single group name or sg-* ID (or None).
    :param groups: a list of group names/IDs (or None).
    :param parent_vpc_id: VPC that scopes the search; derived from subnet_id
        when that is supplied instead.
    :param subnet_id: subnet whose parent VPC scopes the search.
    :param ec2: optional boto3 EC2 client (created from the module if omitted).
    :return: de-duplicated list of security group dicts.
    """
    if ec2 is None:
        ec2 = module.client('ec2')

    if subnet_id is not None:
        try:
            sub = ec2.describe_subnets(SubnetIds=[subnet_id])
        except botocore.exceptions.ClientError as e:
            # NOTE(review): DescribeSubnets signals a missing subnet with
            # InvalidSubnetID.NotFound; this InvalidGroup.NotFound check looks
            # inherited from a group lookup - confirm which code is intended.
            if e.response['Error']['Code'] == 'InvalidGroup.NotFound':
                # BUG FIX: fail_json() takes the message as the msg= keyword;
                # the old positional call raised TypeError instead of failing cleanly.
                module.fail_json(
                    msg="Could not find subnet {0} to associate security groups. Please check the vpc_subnet_id and security_groups parameters.".format(
                        subnet_id
                    )
                )
            module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
        except botocore.exceptions.BotoCoreError as e:
            module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
        parent_vpc_id = sub['Subnets'][0]['VpcId']

    vpc = {
        'Name': 'vpc-id',
        'Values': [parent_vpc_id]
    }

    # because filter lists are AND in the security groups API,
    # make two separate requests for groups by ID and by name
    id_filters = [vpc]
    name_filters = [vpc]

    if group:
        name_filters.append(
            dict(
                Name='group-name',
                Values=[group]
            )
        )
        if group.startswith('sg-'):
            id_filters.append(
                dict(
                    Name='group-id',
                    Values=[group]
                )
            )
    if groups:
        name_filters.append(
            dict(
                Name='group-name',
                Values=groups
            )
        )
        if [g for g in groups if g.startswith('sg-')]:
            id_filters.append(
                dict(
                    Name='group-id',
                    Values=[g for g in groups if g.startswith('sg-')]
                )
            )

    found_groups = []
    # only query with a filter set that narrows beyond the bare vpc-id filter
    for f_set in (id_filters, name_filters):
        if len(f_set) > 1:
            found_groups.extend(ec2.get_paginator(
                'describe_security_groups'
            ).paginate(
                Filters=f_set
            ).search('SecurityGroups[]'))
    # de-duplicate groups matched by both the id and the name queries
    return list(dict((g['GroupId'], g) for g in found_groups).values())
+
+
def build_top_level_options(params):
    """Build the scalar/top-level arguments of the RunInstances request.

    Handles AMI selection, key pair, user data (or Tower callback script),
    launch template, monitoring, placement, CPU options and instance-level
    flags. Fails the module when neither an AMI nor a launch template is
    supplied, or when a launch_template dict carries neither id nor name.
    """
    spec = {}
    if params.get('image_id'):
        spec['ImageId'] = params['image_id']
    elif isinstance(params.get('image'), dict):
        image = params.get('image', {})
        spec['ImageId'] = image.get('id')
        if 'ramdisk' in image:
            spec['RamdiskId'] = image['ramdisk']
        if 'kernel' in image:
            spec['KernelId'] = image['kernel']
    if not spec.get('ImageId') and not params.get('launch_template'):
        module.fail_json(msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template.")

    if params.get('key_name') is not None:
        spec['KeyName'] = params.get('key_name')
    if params.get('user_data') is not None:
        spec['UserData'] = to_native(params.get('user_data'))
    elif params.get('tower_callback') is not None:
        spec['UserData'] = tower_callback_script(
            tower_conf=params.get('tower_callback'),
            windows=params.get('tower_callback').get('windows', False),
            passwd=params.get('tower_callback').get('set_password'),
        )

    if params.get('launch_template') is not None:
        spec['LaunchTemplate'] = {}
        # BUG FIX: the old condition `not id or name` failed the module
        # whenever a template *name* was supplied; fail only when NEITHER an
        # id nor a name is given.
        if not (params.get('launch_template').get('id') or params.get('launch_template').get('name')):
            module.fail_json(msg="Could not create instance with launch template. Either launch_template.name or launch_template.id parameters are required")

        if params.get('launch_template').get('id') is not None:
            spec['LaunchTemplate']['LaunchTemplateId'] = params.get('launch_template').get('id')
        if params.get('launch_template').get('name') is not None:
            spec['LaunchTemplate']['LaunchTemplateName'] = params.get('launch_template').get('name')
        if params.get('launch_template').get('version') is not None:
            # versions may arrive as ints from YAML; the API wants a string
            spec['LaunchTemplate']['Version'] = to_native(params.get('launch_template').get('version'))

    if params.get('detailed_monitoring', False):
        spec['Monitoring'] = {'Enabled': True}
    if params.get('cpu_credit_specification') is not None:
        spec['CreditSpecification'] = {'CpuCredits': params.get('cpu_credit_specification')}
    if params.get('tenancy') is not None:
        spec['Placement'] = {'Tenancy': params.get('tenancy')}
    if params.get('placement_group'):
        if 'Placement' in spec:
            spec['Placement']['GroupName'] = str(params.get('placement_group'))
        else:
            spec['Placement'] = {'GroupName': str(params.get('placement_group'))}
    if params.get('ebs_optimized') is not None:
        spec['EbsOptimized'] = params.get('ebs_optimized')
    if params.get('instance_initiated_shutdown_behavior'):
        spec['InstanceInitiatedShutdownBehavior'] = params.get('instance_initiated_shutdown_behavior')
    if params.get('termination_protection') is not None:
        spec['DisableApiTermination'] = params.get('termination_protection')
    if params.get('cpu_options') is not None:
        spec['CpuOptions'] = {
            'ThreadsPerCore': params.get('cpu_options').get('threads_per_core'),
            'CoreCount': params.get('cpu_options').get('core_count'),
        }
    return spec
+
+
def build_instance_tags(params, propagate_tags_to_volumes=True):
    """Build the TagSpecifications argument for RunInstances.

    :param params: module parameters; reads 'tags' and 'name' ('name' becomes
        the Name tag).
    :param propagate_tags_to_volumes: also tag volumes created at launch.
        Previously this parameter was accepted but ignored; it is now honored,
        with the default preserving the old always-propagate behavior.
    :return: list of TagSpecification dicts (volume first, then instance).
    """
    # params['tags'] may be present-but-None; normalize up front. The old code
    # only replaced None when 'name' was also set, so tags=None without a name
    # reached the tag-list conversion un-normalized.
    tags = params.get('tags') or {}
    if params.get('name') is not None:
        tags['Name'] = params.get('name')

    specs = []
    if propagate_tags_to_volumes:
        specs.append({
            'ResourceType': 'volume',
            'Tags': ansible_dict_to_boto3_tag_list(tags),
        })
    specs.append({
        'ResourceType': 'instance',
        'Tags': ansible_dict_to_boto3_tag_list(tags),
    })
    return specs
+
+
def build_run_instance_spec(params, ec2=None):
    """Assemble the complete RunInstances request body from module parameters.

    Combines the network, volume, tag and top-level option builders into a
    single request dict for exactly one instance.
    """
    if ec2 is None:
        ec2 = module.client('ec2')

    spec = {
        'ClientToken': uuid.uuid4().hex,  # per-call idempotency token
        'MaxCount': 1,
        'MinCount': 1,
    }
    # network parameters
    spec['NetworkInterfaces'] = build_network_spec(params, ec2)
    spec['BlockDeviceMappings'] = build_volume_spec(params)
    spec.update(build_top_level_options(params))
    spec['TagSpecifications'] = build_instance_tags(params)

    # IAM profile
    instance_role = params.get('instance_role')
    if instance_role:
        spec['IamInstanceProfile'] = {'Arn': determine_iam_role(instance_role)}

    spec['InstanceType'] = params['instance_type']
    return spec
+
+
def await_instances(ids, state='OK'):
    """Block until the given instance IDs reach the requested waiter state.

    Honors the module's 'wait'/'wait_timeout' parameters and is a no-op in
    check mode. An invalid state or a waiter configuration error fails the
    module; a timeout only warns.
    """
    if not module.params.get('wait', True):
        # the user asked not to wait for anything
        return

    if module.check_mode:
        # In check mode, there is no change even if you wait.
        return

    waiter_names = {
        'OK': 'instance_status_ok',
        'STOPPED': 'instance_stopped',
        'TERMINATED': 'instance_terminated',
        'EXISTS': 'instance_exists',
        'RUNNING': 'instance_running',
    }
    if state not in waiter_names:
        module.fail_json(msg="Cannot wait for state {0}, invalid state".format(state))

    delay = 15  # seconds between polls; attempts derived from wait_timeout
    waiter = module.client('ec2').get_waiter(waiter_names[state])
    try:
        waiter.wait(
            InstanceIds=ids,
            WaiterConfig={
                'Delay': delay,
                'MaxAttempts': module.params.get('wait_timeout', 600) // delay,
            },
        )
    except botocore.exceptions.WaiterConfigError as e:
        module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format(
            to_native(e), ', '.join(ids), state))
    except botocore.exceptions.WaiterError as e:
        module.warn("Instances {0} took too long to reach state {1}. {2}".format(
            ', '.join(ids), state, to_native(e)))
+
+
def diff_instance_and_params(instance, params, ec2=None, skip=None):
    """Compare a boto3 instance against the module parameters.

    :param instance: boto3 instance dict.
    :param params: module parameters.
    :param skip: instance attribute keys (boto3 CamelCase) to ignore.
    :return: a list of keyword-argument dicts, each suitable for one
        modify_instance_attribute call, for every attribute that differs.
    """
    if ec2 is None:
        ec2 = module.client('ec2')
    skip = skip or []

    changes_to_apply = []
    instance_id = instance['InstanceId']

    ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value'])

    def value_wrapper(v):
        return {'Value': v}

    param_mappings = [
        ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper),
        ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper),
        # user data is an immutable property
        # ParamMapper('user_data', 'UserData', 'userData', value_wrapper),
    ]

    for mapping in param_mappings:
        requested = params.get(mapping.param_key)
        if requested is None or mapping.instance_key in skip:
            continue
        describe_attr = AWSRetry.jittered_backoff()(ec2.describe_instance_attribute)
        current = describe_attr(Attribute=mapping.attribute_name, InstanceId=instance_id)
        if current[mapping.instance_key]['Value'] != requested:
            changes_to_apply.append({
                'InstanceId': instance_id,
                mapping.instance_key: mapping.add_value(requested),
            })

    network = params.get('network') or {}
    if network.get('source_dest_check') is not None:
        # network.source_dest_check is nested, so needs to be treated separately
        desired = bool(network.get('source_dest_check'))
        if instance['SourceDestCheck'] != desired:
            changes_to_apply.append({
                'InstanceId': instance_id,
                'SourceDestCheck': {'Value': desired},
            })

    return changes_to_apply
+
+
def change_network_attachments(instance, params, ec2):
    """Attach any requested ENIs that are not already attached.

    Only attaches missing interfaces; detachment of extras is not handled
    here. Returns True when at least one interface was attached.
    """
    interfaces = (params.get('network') or {}).get('interfaces')
    if interfaces is None:
        return False

    desired_ids = []
    for entry in interfaces:
        if isinstance(entry, dict) and 'id' in entry:
            desired_ids.append(entry['id'])
        elif isinstance(entry, string_types):
            desired_ids.append(entry)

    # network.interfaces can create the need to attach new interfaces
    attached_ids = [eni['NetworkInterfaceId'] for eni in instance['NetworkInterfaces']]
    missing = set(desired_ids) - set(attached_ids)
    for eni_id in missing:
        # the device index mirrors the interface's position in the request list
        ec2.attach_network_interface(
            DeviceIndex=desired_ids.index(eni_id),
            InstanceId=instance['InstanceId'],
            NetworkInterfaceId=eni_id,
        )
    return bool(missing)
+
+
def find_instances(ec2, ids=None, filters=None):
    """Look up instances either by explicit IDs or by describe filters.

    Filter keys other than 'tag:...' are translated from snake_case to the
    hyphenated form the EC2 API expects (mutating the passed dict, as before).
    Fails the module when neither ids nor filters is provided.
    """
    paginator = ec2.get_paginator('describe_instances')

    if ids:
        pages = paginator.paginate(InstanceIds=ids)
        return list(pages.search('Reservations[].Instances[]'))

    if filters is None:
        module.fail_json(msg="No filters provided when they were required")
        return []  # fail_json() exits; defensive return for clarity

    for key in list(filters.keys()):
        if not key.startswith("tag:"):
            filters[key.replace("_", "-")] = filters.pop(key)
    pages = paginator.paginate(Filters=ansible_dict_to_boto3_filter_list(filters))
    return list(pages.search('Reservations[].Instances[]'))
+
+
@AWSRetry.jittered_backoff()
def get_default_vpc(ec2):
    """Return the account's default VPC dict, or None when there is none."""
    response = ec2.describe_vpcs(Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'}))
    default_vpcs = response.get('Vpcs', [])
    return default_vpcs[0] if default_vpcs else None
+
+
@AWSRetry.jittered_backoff()
def get_default_subnet(ec2, vpc, availability_zone=None):
    """Pick the default-for-AZ subnet of *vpc*.

    Prefers the requested availability zone when one is given and matches;
    otherwise returns the subnet in the alphabetically-first AZ so the choice
    is deterministic. Returns None when the VPC has no default subnets.
    """
    response = ec2.describe_subnets(
        Filters=ansible_dict_to_boto3_filter_list({
            'vpc-id': vpc['VpcId'],
            'state': 'available',
            'default-for-az': 'true',
        })
    )
    candidates = response.get('Subnets', [])
    if not candidates:
        return None

    if availability_zone is not None:
        # there can only be one default-for-az subnet per AZ, so this mapping is unique
        by_zone = {subnet['AvailabilityZone']: subnet for subnet in candidates}
        if availability_zone in by_zone:
            return by_zone[availability_zone]

    # deterministic fallback: always pick the subnet in the first AZ by name
    return min(candidates, key=lambda subnet: subnet['AvailabilityZone'])
+
+
def ensure_instance_state(state, ec2=None):
    """Drive all instances matching the module filters to the desired state.

    :param state: one of running/started, restarted/rebooted, stopped,
        absent/terminated.
    :param ec2: optional boto3 EC2 client; created from the module if omitted.

    Exits the module via exit_json()/fail_json() for every handled state, so
    this function does not return to the caller in those cases.
    """
    if ec2 is None:
        # BUG FIX: the old code called module.client('ec2') without binding
        # the result, leaving ec2 as None and discarding the client. Bind it
        # and reuse it for every state change below.
        ec2 = module.client('ec2')
    filters = module.params.get('filters')

    if state in ('running', 'started'):
        changed, failed, instances, failure_reason = change_instance_state(
            filters=filters, desired_state='RUNNING', ec2=ec2)

        if failed:
            module.fail_json(
                msg="Unable to start instances: {0}".format(failure_reason),
                reboot_success=list(changed),
                reboot_failed=failed)

        module.exit_json(
            msg='Instances started',
            reboot_success=list(changed),
            changed=bool(len(changed)),
            reboot_failed=[],
            instances=[pretty_instance(i) for i in instances],
        )
    elif state in ('restarted', 'rebooted'):
        # stop first, then start again; only the final transition is reported
        changed, failed, instances, failure_reason = change_instance_state(
            filters=filters, desired_state='STOPPED', ec2=ec2)
        changed, failed, instances, failure_reason = change_instance_state(
            filters=filters, desired_state='RUNNING', ec2=ec2)

        if failed:
            module.fail_json(
                msg="Unable to restart instances: {0}".format(failure_reason),
                reboot_success=list(changed),
                reboot_failed=failed)

        module.exit_json(
            msg='Instances restarted',
            reboot_success=list(changed),
            changed=bool(len(changed)),
            reboot_failed=[],
            instances=[pretty_instance(i) for i in instances],
        )
    elif state in ('stopped',):
        changed, failed, instances, failure_reason = change_instance_state(
            filters=filters, desired_state='STOPPED', ec2=ec2)

        if failed:
            module.fail_json(
                msg="Unable to stop instances: {0}".format(failure_reason),
                stop_success=list(changed),
                stop_failed=failed)

        module.exit_json(
            msg='Instances stopped',
            stop_success=list(changed),
            changed=bool(len(changed)),
            stop_failed=[],
            instances=[pretty_instance(i) for i in instances],
        )
    elif state in ('absent', 'terminated'):
        terminated, terminate_failed, instances, failure_reason = change_instance_state(
            filters=filters, desired_state='TERMINATED', ec2=ec2)

        if terminate_failed:
            module.fail_json(
                msg="Unable to terminate instances: {0}".format(failure_reason),
                terminate_success=list(terminated),
                terminate_failed=terminate_failed)
        module.exit_json(
            msg='Instances terminated',
            terminate_success=list(terminated),
            changed=bool(len(terminated)),
            terminate_failed=[],
            instances=[pretty_instance(i) for i in instances],
        )
+
+
+@AWSRetry.jittered_backoff()
+def change_instance_state(filters, desired_state, ec2=None):
+ """Takes STOPPED/RUNNING/TERMINATED"""
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ changed = set()
+ instances = find_instances(ec2, filters=filters)
+ to_change = set(i['InstanceId'] for i in instances if i['State']['Name'].upper() != desired_state)
+ unchanged = set()
+ failure_reason = ""
+
+ for inst in instances:
+ try:
+ if desired_state == 'TERMINATED':
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+
+ # TODO use a client-token to prevent double-sends of these start/stop/terminate commands
+ # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html
+ resp = ec2.terminate_instances(InstanceIds=[inst['InstanceId']])
+ [changed.add(i['InstanceId']) for i in resp['TerminatingInstances']]
+ if desired_state == 'STOPPED':
+ if inst['State']['Name'] in ('stopping', 'stopped'):
+ unchanged.add(inst['InstanceId'])
+ continue
+
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+
+ resp = ec2.stop_instances(InstanceIds=[inst['InstanceId']])
+ [changed.add(i['InstanceId']) for i in resp['StoppingInstances']]
+ if desired_state == 'RUNNING':
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+
+ resp = ec2.start_instances(InstanceIds=[inst['InstanceId']])
+ [changed.add(i['InstanceId']) for i in resp['StartingInstances']]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ try:
+ failure_reason = to_native(e.message)
+ except AttributeError:
+ failure_reason = to_native(e)
+
+ if changed:
+ await_instances(ids=list(changed) + list(unchanged), state=desired_state)
+
+ change_failed = list(to_change - changed)
+ instances = find_instances(ec2, ids=list(i['InstanceId'] for i in instances))
+ return changed, change_failed, instances, failure_reason
+
+
+def pretty_instance(i):
+ instance = camel_dict_to_snake_dict(i, ignore_list=['Tags'])
+ instance['tags'] = boto3_tag_list_to_ansible_dict(i['Tags'])
+ return instance
+
+
+def determine_iam_role(name_or_arn):
+ if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
+ return name_or_arn
+ iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ try:
+ role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
+ return role['InstanceProfile']['Arn']
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
+ module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
+
+
+def handle_existing(existing_matches, changed, ec2, state):
+ if state in ('running', 'started') and [i for i in existing_matches if i['State']['Name'] != 'running']:
+ ins_changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING')
+ if failed:
+ module.fail_json(msg="Couldn't start instances: {0}. Failure reason: {1}".format(instances, failure_reason))
+ module.exit_json(
+ changed=bool(len(ins_changed)) or changed,
+ instances=[pretty_instance(i) for i in instances],
+ instance_ids=[i['InstanceId'] for i in instances],
+ )
+ changes = diff_instance_and_params(existing_matches[0], module.params)
+ for c in changes:
+ AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c)
+ changed |= bool(changes)
+ changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role'))
+ changed |= change_network_attachments(existing_matches[0], module.params, ec2)
+ altered = find_instances(ec2, ids=[i['InstanceId'] for i in existing_matches])
+ module.exit_json(
+ changed=bool(len(changes)) or changed,
+ instances=[pretty_instance(i) for i in altered],
+ instance_ids=[i['InstanceId'] for i in altered],
+ changes=changes,
+ )
+
+
+def ensure_present(existing_matches, changed, ec2, state):
+ if len(existing_matches):
+ try:
+ handle_existing(existing_matches, changed, ec2, state)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(
+ e, msg="Failed to handle existing instances {0}".format(', '.join([i['InstanceId'] for i in existing_matches])),
+ # instances=[pretty_instance(i) for i in existing_matches],
+ # instance_ids=[i['InstanceId'] for i in existing_matches],
+ )
+ try:
+ instance_spec = build_run_instance_spec(module.params)
+ # If check mode is enabled, report the planned spec without creating anything.
+ if module.check_mode:
+ module.exit_json(
+ changed=True,
+ spec=instance_spec,
+ )
+ instance_response = run_instances(ec2, **instance_spec)
+ instances = instance_response['Instances']
+ instance_ids = [i['InstanceId'] for i in instances]
+
+ for ins in instances:
+ changes = diff_instance_and_params(ins, module.params, skip=['UserData', 'EbsOptimized'])
+ for c in changes:
+ try:
+ AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c)))
+
+ if not module.params.get('wait'):
+ module.exit_json(
+ changed=True,
+ instance_ids=instance_ids,
+ spec=instance_spec,
+ )
+ await_instances(instance_ids)
+ instances = ec2.get_paginator('describe_instances').paginate(
+ InstanceIds=instance_ids
+ ).search('Reservations[].Instances[]')
+
+ module.exit_json(
+ changed=True,
+ instances=[pretty_instance(i) for i in instances],
+ instance_ids=instance_ids,
+ spec=instance_spec,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to create new EC2 instance")
+
+
+@AWSRetry.jittered_backoff()
+def run_instances(ec2, **instance_spec):
+ try:
+ return ec2.run_instances(**instance_spec)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidParameterValue' and "Invalid IAM Instance Profile ARN" in e.response['Error']['Message']:
+ # If the instance profile has just been created, it takes some time to become visible to EC2,
+ # so we wait 10 seconds and retry the run_instances call once
+ time.sleep(10)
+ return ec2.run_instances(**instance_spec)
+ else:
+ raise e
+
+
+def main():
+ global module
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']),
+ wait=dict(default=True, type='bool'),
+ wait_timeout=dict(default=600, type='int'),
+ # count=dict(default=1, type='int'),
+ image=dict(type='dict'),
+ image_id=dict(type='str'),
+ instance_type=dict(default='t2.micro', type='str'),
+ user_data=dict(type='str'),
+ tower_callback=dict(type='dict'),
+ ebs_optimized=dict(type='bool'),
+ vpc_subnet_id=dict(type='str', aliases=['subnet_id']),
+ availability_zone=dict(type='str'),
+ security_groups=dict(default=[], type='list'),
+ security_group=dict(type='str'),
+ instance_role=dict(type='str'),
+ name=dict(type='str'),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=False),
+ filters=dict(type='dict', default=None),
+ launch_template=dict(type='dict'),
+ key_name=dict(type='str'),
+ cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']),
+ cpu_options=dict(type='dict', options=dict(
+ core_count=dict(type='int', required=True),
+ threads_per_core=dict(type='int', choices=[1, 2], required=True)
+ )),
+ tenancy=dict(type='str', choices=['dedicated', 'default']),
+ placement_group=dict(type='str'),
+ instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']),
+ termination_protection=dict(type='bool'),
+ detailed_monitoring=dict(type='bool'),
+ instance_ids=dict(default=[], type='list'),
+ network=dict(default=None, type='dict'),
+ volumes=dict(default=None, type='list'),
+ )
+ # running/present are synonyms
+ # as are terminated/absent
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['security_groups', 'security_group'],
+ ['availability_zone', 'vpc_subnet_id'],
+ ['tower_callback', 'user_data'],
+ ['image_id', 'image'],
+ ],
+ supports_check_mode=True
+ )
+
+ if module.params.get('network'):
+ if module.params.get('network').get('interfaces'):
+ if module.params.get('security_group'):
+ module.fail_json(msg="Parameter network.interfaces can't be used with security_group")
+ if module.params.get('security_groups'):
+ module.fail_json(msg="Parameter network.interfaces can't be used with security_groups")
+
+ state = module.params.get('state')
+ ec2 = module.client('ec2')
+ if module.params.get('filters') is None:
+ filters = {
+ # all states except shutting-down and terminated
+ 'instance-state-name': ['pending', 'running', 'stopping', 'stopped']
+ }
+ if state == 'stopped':
+ # only need to change instances that aren't already stopped
+ filters['instance-state-name'] = ['stopping', 'pending', 'running']
+
+ if isinstance(module.params.get('instance_ids'), string_types):
+ filters['instance-id'] = [module.params.get('instance_ids')]
+ elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')):
+ filters['instance-id'] = module.params.get('instance_ids')
+ else:
+ if not module.params.get('vpc_subnet_id'):
+ if module.params.get('network'):
+ # grab AZ from one of the ENIs
+ ints = module.params.get('network').get('interfaces')
+ if ints:
+ filters['network-interface.network-interface-id'] = []
+ for i in ints:
+ if isinstance(i, dict):
+ i = i['id']
+ filters['network-interface.network-interface-id'].append(i)
+ else:
+ sub = get_default_subnet(ec2, get_default_vpc(ec2), availability_zone=module.params.get('availability_zone'))
+ filters['subnet-id'] = sub['SubnetId']
+ else:
+ filters['subnet-id'] = [module.params.get('vpc_subnet_id')]
+
+ if module.params.get('name'):
+ filters['tag:Name'] = [module.params.get('name')]
+
+ if module.params.get('image_id'):
+ filters['image-id'] = [module.params.get('image_id')]
+ elif (module.params.get('image') or {}).get('id'):
+ filters['image-id'] = [module.params.get('image', {}).get('id')]
+
+ module.params['filters'] = filters
+
+ if module.params.get('cpu_options') and not module.botocore_at_least('1.10.16'):
+ module.fail_json(msg="cpu_options is only supported with botocore >= 1.10.16")
+
+ existing_matches = find_instances(ec2, filters=module.params.get('filters'))
+ changed = False
+
+ if state not in ('terminated', 'absent') and existing_matches:
+ for match in existing_matches:
+ warn_if_public_ip_assignment_changed(match)
+ warn_if_cpu_options_changed(match)
+ tags = module.params.get('tags') or {}
+ name = module.params.get('name')
+ if name:
+ tags['Name'] = name
+ changed |= manage_tags(match, tags, module.params.get('purge_tags', False), ec2)
+
+ if state in ('present', 'running', 'started'):
+ ensure_present(existing_matches=existing_matches, changed=changed, ec2=ec2, state=state)
+ elif state in ('restarted', 'rebooted', 'stopped', 'absent', 'terminated'):
+ if existing_matches:
+ ensure_instance_state(state, ec2)
+ else:
+ module.exit_json(
+ msg='No matching instances found',
+ changed=False,
+ instances=[],
+ )
+ else:
+ module.fail_json(msg="We don't handle the state {0}".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_instance_info.py b/test/support/integration/plugins/modules/ec2_instance_info.py
new file mode 100644
index 0000000000..7615b958d3
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_instance_info.py
@@ -0,0 +1,571 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ec2_instance_info
+short_description: Gather information about ec2 instances in AWS
+description:
+ - Gather information about ec2 instances in AWS
+ - This module was called C(ec2_instance_facts) before Ansible 2.9. The usage did not change.
+version_added: "2.4"
+author:
+ - Michael Schuett (@michaeljs1990)
+ - Rob White (@wimnat)
+requirements: [ "boto3", "botocore" ]
+options:
+ instance_ids:
+ description:
+ - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
+ required: false
+ version_added: 2.4
+ type: list
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters. Filter
+ names and values are case sensitive.
+ required: false
+ default: {}
+ type: dict
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all instances
+- ec2_instance_info:
+
+# Gather information about all instances in AZ ap-southeast-2a
+- ec2_instance_info:
+ filters:
+ availability-zone: ap-southeast-2a
+
+# Gather information about a particular instance using ID
+- ec2_instance_info:
+ instance_ids:
+ - i-12345678
+
+# Gather information about any instance with a tag key Name and value Example
+- ec2_instance_info:
+ filters:
+ "tag:Name": Example
+
+# Gather information about any instance in states "shutting-down", "stopping", "stopped"
+- ec2_instance_info:
+ filters:
+ instance-state-name: [ "shutting-down", "stopping", "stopped" ]
+
+'''
+
+RETURN = '''
+instances:
+ description: a list of ec2 instances
+ returned: always
+ type: complex
+ contains:
+ ami_launch_index:
+ description: The AMI launch index, which can be used to find this instance in the launch group.
+ returned: always
+ type: int
+ sample: 0
+ architecture:
+ description: The architecture of the image
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ returned: always
+ type: str
+ sample: /dev/sdh
+ ebs:
+ description: Parameters used to automatically set up EBS volumes when the instance is launched.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ volume_id:
+ description: The ID of the EBS volume
+ returned: always
+ type: str
+ sample: vol-12345678
+ cpu_options:
+ description: The CPU options set for the instance.
+ returned: always if botocore version >= 1.10.16
+ type: complex
+ contains:
+ core_count:
+ description: The number of CPU cores for the instance.
+ returned: always
+ type: int
+ sample: 1
+ threads_per_core:
+ description: The number of threads per CPU core. On supported instances, a value of 1 means Intel Hyper-Threading Technology is disabled.
+ returned: always
+ type: int
+ sample: 1
+ client_token:
+ description: The idempotency token you provided when you launched the instance, if applicable.
+ returned: always
+ type: str
+ sample: mytoken
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ hypervisor:
+ description: The hypervisor type of the instance.
+ returned: always
+ type: str
+ sample: xen
+ iam_instance_profile:
+ description: The IAM instance profile associated with the instance, if applicable.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The Amazon Resource Name (ARN) of the instance profile.
+ returned: always
+ type: str
+ sample: "arn:aws:iam::000012345678:instance-profile/myprofile"
+ id:
+ description: The ID of the instance profile
+ returned: always
+ type: str
+ sample: JFJ397FDG400FG9FD1N
+ image_id:
+ description: The ID of the AMI used to launch the instance.
+ returned: always
+ type: str
+ sample: ami-0011223344
+ instance_id:
+ description: The ID of the instance.
+ returned: always
+ type: str
+ sample: i-012345678
+ instance_type:
+ description: The instance type size of the running instance.
+ returned: always
+ type: str
+ sample: t2.micro
+ key_name:
+ description: The name of the key pair, if this instance was launched with an associated key pair.
+ returned: always
+ type: str
+ sample: my-key
+ launch_time:
+ description: The time the instance was launched.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ monitoring:
+ description: The monitoring for the instance.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+ returned: always
+ type: str
+ sample: disabled
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ association:
+ description: The association information for an Elastic IPv4 associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ attachment:
+ description: The network interface attachment.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ attachment_id:
+ description: The ID of the network interface attachment.
+ returned: always
+ type: str
+ sample: eni-attach-3aff3f
+ delete_on_termination:
+ description: Indicates whether the network interface is deleted when the instance is terminated.
+ returned: always
+ type: bool
+ sample: true
+ device_index:
+ description: The index of the device on the instance for the network interface attachment.
+ returned: always
+ type: int
+ sample: 0
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ description:
+ description: The description.
+ returned: always
+ type: str
+ sample: My interface
+ groups:
+ description: One or more security groups.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-abcdef12
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: mygroup
+ ipv6_addresses:
+ description: One or more IPv6 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ipv6_address:
+ description: The IPv6 address.
+ returned: always
+ type: str
+ sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ owner_id:
+ description: The AWS account ID of the owner of the network interface.
+ returned: always
+ type: str
+ sample: 01234567890
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ private_ip_addresses:
+ description: The private IPv4 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ association:
+ description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ primary:
+ description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
+ returned: always
+ type: bool
+ sample: true
+ private_ip_address:
+ description: The private IPv4 address of the network interface.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The status of the network interface.
+ returned: always
+ type: str
+ sample: in-use
+ subnet_id:
+ description: The ID of the subnet for the network interface.
+ returned: always
+ type: str
+ sample: subnet-0123456
+ vpc_id:
+ description: The ID of the VPC for the network interface.
+ returned: always
+ type: str
+ sample: vpc-0123456
+ placement:
+ description: The location where the instance launched, if applicable.
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The Availability Zone of the instance.
+ returned: always
+ type: str
+ sample: ap-southeast-2a
+ group_name:
+ description: The name of the placement group the instance is in (for cluster compute instances).
+ returned: always
+ type: str
+ sample: ""
+ tenancy:
+ description: The tenancy of the instance (if the instance is running in a VPC).
+ returned: always
+ type: str
+ sample: default
+ private_dns_name:
+ description: The private DNS name.
+ returned: always
+ type: str
+ sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ product_codes:
+ description: One or more product codes.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ product_code_id:
+ description: The product code.
+ returned: always
+ type: str
+ sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
+ product_code_type:
+ description: The type of product code.
+ returned: always
+ type: str
+ sample: marketplace
+ public_dns_name:
+ description: The public DNS name assigned to the instance.
+ returned: always
+ type: str
+ sample:
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance
+ returned: always
+ type: str
+ sample: 52.0.0.1
+ root_device_name:
+ description: The device name of the root device
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ state:
+ description: The current state of the instance.
+ returned: always
+ type: complex
+ contains:
+ code:
+ description: The low byte represents the state.
+ returned: always
+ type: int
+ sample: 16
+ name:
+ description: The name of the state.
+ returned: always
+ type: str
+ sample: running
+ state_transition_reason:
+ description: The reason for the most recent state transition.
+ returned: always
+ type: str
+ sample:
+ subnet_id:
+ description: The ID of the subnet in which the instance is running.
+ returned: always
+ type: str
+ sample: subnet-00abcdef
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: dict
+ sample: vpc-0011223344
+'''
+
+import traceback
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
+ ec2_argument_spec, get_aws_connection_info)
+
+
+def list_ec2_instances(connection, module):
+
+ instance_ids = module.params.get("instance_ids")
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ try:
+ reservations_paginator = connection.get_paginator('describe_instances')
+ reservations = reservations_paginator.paginate(InstanceIds=instance_ids, Filters=filters).build_full_result()
+ except ClientError as e:
+ module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+ # Get instances from reservations
+ instances = []
+ for reservation in reservations['Reservations']:
+ instances = instances + reservation['Instances']
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances]
+
+ # Turn the boto3 result in to ansible friendly tag dictionary
+ for instance in snaked_instances:
+ instance['tags'] = boto3_tag_list_to_ansible_dict(instance.get('tags', []), 'key', 'value')
+
+ module.exit_json(instances=snaked_instances)
+
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ instance_ids=dict(default=[], type='list'),
+ filters=dict(default={}, type='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['instance_ids', 'filters']
+ ],
+ supports_check_mode=True
+ )
+ if module._name == 'ec2_instance_facts':
+ module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'", version='2.13')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+
+ if region:
+ connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_ec2_instances(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_key.py b/test/support/integration/plugins/modules/ec2_key.py
new file mode 100644
index 0000000000..de67af8bc0
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_key.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_key
+version_added: "1.5"
+short_description: create or delete an ec2 key pair
+description:
+ - create or delete an ec2 key pair.
+options:
+ name:
+ description:
+ - Name of the key pair.
+ required: true
+ type: str
+ key_material:
+ description:
+ - Public key material.
+ required: false
+ type: str
+ force:
+ description:
+ - Force overwrite of already existing key pair if key has changed.
+ required: false
+ default: true
+ type: bool
+ version_added: "2.3"
+ state:
+ description:
+ - create or delete keypair
+ required: false
+ choices: [ present, absent ]
+ default: 'present'
+ type: str
+ wait:
+ description:
+ - This option has no effect since version 2.5 and will be removed in 2.14.
+ version_added: "1.6"
+ type: bool
+ wait_timeout:
+ description:
+ - This option has no effect since version 2.5 and will be removed in 2.14.
+ version_added: "1.6"
+ type: int
+ required: false
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements: [ boto3 ]
+author:
+ - "Vincent Viallet (@zbal)"
+ - "Prasad Katti (@prasadkatti)"
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: create a new ec2 key pair, returns generated private key
+ ec2_key:
+ name: my_keypair
+
+- name: create key pair using provided key_material
+ ec2_key:
+ name: my_keypair
+ key_material: 'ssh-rsa AAAAxyz...== me@example.com'
+
+- name: create key pair using key_material obtained using 'file' lookup plugin
+ ec2_key:
+ name: my_keypair
+ key_material: "{{ lookup('file', '/path/to/public_key/id_rsa.pub') }}"
+
+# try creating a key pair with the name of an already existing keypair
+# but don't overwrite it even if the key is different (force=false)
+- name: try creating a key pair with name of an already existing keypair
+ ec2_key:
+ name: my_existing_keypair
+ key_material: 'ssh-rsa AAAAxyz...== me@example.com'
+ force: false
+
+- name: remove key pair by name
+ ec2_key:
+ name: my_keypair
+ state: absent
+'''
+
+RETURN = '''
+changed:
+ description: whether a keypair was created/deleted
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: short message describing the action taken
+ returned: always
+ type: str
+ sample: key pair created
+key:
+ description: details of the keypair (this is set to null when state is absent)
+ returned: always
+ type: complex
+ contains:
+ fingerprint:
+ description: fingerprint of the key
+ returned: when state is present
+ type: str
+ sample: 'b0:22:49:61:d9:44:9d:0c:7e:ac:8a:32:93:21:6c:e8:fb:59:62:43'
+ name:
+ description: name of the keypair
+ returned: when state is present
+ type: str
+ sample: my_keypair
+ private_key:
+ description: private key of a newly created keypair
+ returned: when a new keypair is created by AWS (key_material is not provided)
+ type: str
+ sample: '-----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKC...
+ -----END RSA PRIVATE KEY-----'
+'''
+
+import uuid
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils._text import to_bytes
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def extract_key_data(key):
+
+ data = {
+ 'name': key['KeyName'],
+ 'fingerprint': key['KeyFingerprint']
+ }
+ if 'KeyMaterial' in key:
+ data['private_key'] = key['KeyMaterial']
+ return data
+
+
+def get_key_fingerprint(module, ec2_client, key_material):
+ '''
+ EC2's fingerprints are non-trivial to generate, so push this key
+ to a temporary name and make ec2 calculate the fingerprint for us.
+ http://blog.jbrowne.com/?p=23
+ https://forums.aws.amazon.com/thread.jspa?messageID=352828
+ '''
+
+ # find an unused name
+ name_in_use = True
+ while name_in_use:
+ random_name = "ansible-" + str(uuid.uuid4())
+ name_in_use = find_key_pair(module, ec2_client, random_name)
+
+ temp_key = import_key_pair(module, ec2_client, random_name, key_material)
+ delete_key_pair(module, ec2_client, random_name, finish_task=False)
+ return temp_key['KeyFingerprint']
+
+
+def find_key_pair(module, ec2_client, name):
+
+ try:
+ key = ec2_client.describe_key_pairs(KeyNames=[name])['KeyPairs'][0]
+ except ClientError as err:
+ if err.response['Error']['Code'] == "InvalidKeyPair.NotFound":
+ return None
+ module.fail_json_aws(err, msg="error finding keypair")
+ except IndexError:
+ key = None
+ return key
+
+
+def create_key_pair(module, ec2_client, name, key_material, force):
+
+ key = find_key_pair(module, ec2_client, name)
+ if key:
+ if key_material and force:
+ if not module.check_mode:
+ new_fingerprint = get_key_fingerprint(module, ec2_client, key_material)
+ if key['KeyFingerprint'] != new_fingerprint:
+ delete_key_pair(module, ec2_client, name, finish_task=False)
+ key = import_key_pair(module, ec2_client, name, key_material)
+ key_data = extract_key_data(key)
+ module.exit_json(changed=True, key=key_data, msg="key pair updated")
+ else:
+ # Assume a change will be made in check mode since a comparison can't be done
+ module.exit_json(changed=True, key=extract_key_data(key), msg="key pair updated")
+ key_data = extract_key_data(key)
+ module.exit_json(changed=False, key=key_data, msg="key pair already exists")
+ else:
+ # key doesn't exist, create it now
+ key_data = None
+ if not module.check_mode:
+ if key_material:
+ key = import_key_pair(module, ec2_client, name, key_material)
+ else:
+ try:
+ key = ec2_client.create_key_pair(KeyName=name)
+ except ClientError as err:
+ module.fail_json_aws(err, msg="error creating key")
+ key_data = extract_key_data(key)
+ module.exit_json(changed=True, key=key_data, msg="key pair created")
+
+
+def import_key_pair(module, ec2_client, name, key_material):
+
+ try:
+ key = ec2_client.import_key_pair(KeyName=name, PublicKeyMaterial=to_bytes(key_material))
+ except ClientError as err:
+ module.fail_json_aws(err, msg="error importing key")
+ return key
+
+
+def delete_key_pair(module, ec2_client, name, finish_task=True):
+
+ key = find_key_pair(module, ec2_client, name)
+ if key:
+ if not module.check_mode:
+ try:
+ ec2_client.delete_key_pair(KeyName=name)
+ except ClientError as err:
+ module.fail_json_aws(err, msg="error deleting key")
+ if not finish_task:
+ return
+ module.exit_json(changed=True, key=None, msg="key deleted")
+ module.exit_json(key=None, msg="key did not exist")
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True),
+ key_material=dict(),
+ force=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', removed_in_version='2.14'),
+ wait_timeout=dict(type='int', removed_in_version='2.14')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ ec2_client = module.client('ec2')
+
+ name = module.params['name']
+ state = module.params.get('state')
+ key_material = module.params.get('key_material')
+ force = module.params.get('force')
+
+ if state == 'absent':
+ delete_key_pair(module, ec2_client, name)
+ elif state == 'present':
+ create_key_pair(module, ec2_client, name, key_material, force)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_igw.py b/test/support/integration/plugins/modules/ec2_vpc_igw.py
new file mode 100644
index 0000000000..5198527af7
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_igw.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_igw
+short_description: Manage an AWS VPC Internet gateway
+description:
+ - Manage an AWS VPC Internet gateway
+version_added: "2.0"
+author: Robert Estelle (@erydo)
+options:
+ vpc_id:
+ description:
+ - The VPC ID for the VPC in which to manage the Internet Gateway.
+ required: true
+ type: str
+ tags:
+ description:
+ - "A dict of tags to apply to the internet gateway. Any tags currently applied to the internet gateway and not present here will be removed."
+ aliases: [ 'resource_tags' ]
+ version_added: "2.4"
+ type: dict
+ state:
+ description:
+ - Create or terminate the IGW
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements:
+ - botocore
+ - boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Internet Gateway.
+# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
+ec2_vpc_igw:
+ vpc_id: vpc-abcdefgh
+ state: present
+register: igw
+
+'''
+
+RETURN = '''
+changed:
+ description: If any changes have been made to the Internet Gateway.
+ type: bool
+ returned: always
+ sample:
+ changed: false
+gateway_id:
+ description: The unique identifier for the Internet Gateway.
+ type: str
+ returned: I(state=present)
+ sample:
+ gateway_id: "igw-XXXXXXXX"
+tags:
+  description: The tags associated with the Internet Gateway.
+ type: dict
+ returned: I(state=present)
+ sample:
+ tags:
+ "Ansible": "Test"
+vpc_id:
+ description: The VPC ID associated with the Internet Gateway.
+ type: str
+ returned: I(state=present)
+ sample:
+ vpc_id: "vpc-XXXXXXXX"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.ec2 import (
+ AWSRetry,
+ camel_dict_to_snake_dict,
+ boto3_tag_list_to_ansible_dict,
+ ansible_dict_to_boto3_filter_list,
+ ansible_dict_to_boto3_tag_list,
+ compare_aws_tags
+)
+from ansible.module_utils.six import string_types
+
+
class AnsibleEc2Igw(object):
    """Idempotent manager for a VPC's Internet Gateway.

    Handles lookup by VPC attachment, creation + attachment, tag
    reconciliation, and detach + delete, accumulating changes into the
    shared ``results`` dict.
    """

    def __init__(self, module, results):
        # module: the AnsibleAWSModule driving this run.
        # results: mutable dict (at least {'changed': bool}) updated in place.
        self._module = module
        self._results = results
        self._connection = self._module.client('ec2')
        self._check_mode = self._module.check_mode

    def process(self):
        """Dispatch to ensure-present/absent based on module parameters."""
        vpc_id = self._module.params.get('vpc_id')
        state = self._module.params.get('state', 'present')
        tags = self._module.params.get('tags')

        if state == 'present':
            self.ensure_igw_present(vpc_id, tags)
        elif state == 'absent':
            self.ensure_igw_absent(vpc_id)

    def get_matching_igw(self, vpc_id):
        """Return the snake_cased IGW attached to vpc_id, or None.

        Fails the module if EC2 reports more than one attached gateway.
        """
        filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
        igws = []
        try:
            response = self._connection.describe_internet_gateways(Filters=filters)
            igws = response.get('InternetGateways', [])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e)

        igw = None
        if len(igws) > 1:
            # A VPC can only have one IGW attached; more than one means
            # something is inconsistent, so bail out rather than guess.
            self._module.fail_json(
                msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting'.format(vpc_id))
        elif igws:
            igw = camel_dict_to_snake_dict(igws[0])

        return igw

    def check_input_tags(self, tags):
        """Fail fast if any tag value is not a string (EC2 requires strings)."""
        nonstring_tags = [k for k, v in tags.items() if not isinstance(v, string_types)]
        if nonstring_tags:
            self._module.fail_json(msg='One or more tags contain non-string values: {0}'.format(nonstring_tags))

    def ensure_tags(self, igw_id, tags, add_only):
        """Reconcile the gateway's tags with the requested set.

        When add_only is False, tags not in ``tags`` are removed.
        Returns the final tag dict (simulated in check mode).
        """
        final_tags = []

        filters = ansible_dict_to_boto3_filter_list({'resource-id': igw_id, 'resource-type': 'internet-gateway'})
        cur_tags = None
        try:
            cur_tags = self._connection.describe_tags(Filters=filters)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="Couldn't describe tags")

        purge_tags = bool(not add_only)
        to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
        final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags'))

        if to_update:
            try:
                if self._check_mode:
                    # update tags
                    final_tags.update(to_update)
                else:
                    AWSRetry.exponential_backoff()(self._connection.create_tags)(
                        Resources=[igw_id],
                        Tags=ansible_dict_to_boto3_tag_list(to_update)
                    )

                self._results['changed'] = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self._module.fail_json_aws(e, msg="Couldn't create tags")

        if to_delete:
            try:
                if self._check_mode:
                    # update tags
                    for key in to_delete:
                        del final_tags[key]
                else:
                    tags_list = []
                    for key in to_delete:
                        tags_list.append({'Key': key})

                    AWSRetry.exponential_backoff()(self._connection.delete_tags)(Resources=[igw_id], Tags=tags_list)

                self._results['changed'] = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self._module.fail_json_aws(e, msg="Couldn't delete tags")

        if not self._check_mode and (to_update or to_delete):
            # Re-read after a real update so the returned dict reflects
            # what EC2 actually stored.
            try:
                response = self._connection.describe_tags(Filters=filters)
                final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags'))
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self._module.fail_json_aws(e, msg="Couldn't describe tags")

        return final_tags

    @staticmethod
    def get_igw_info(igw):
        """Project a snake_cased IGW dict onto the module's return keys."""
        return {
            'gateway_id': igw['internet_gateway_id'],
            'tags': igw['tags'],
            'vpc_id': igw['vpc_id']
        }

    def ensure_igw_absent(self, vpc_id):
        """Detach and delete the VPC's IGW if one exists (no-op otherwise)."""
        igw = self.get_matching_igw(vpc_id)
        if igw is None:
            return self._results

        if self._check_mode:
            self._results['changed'] = True
            return self._results

        try:
            self._results['changed'] = True
            # Must detach from the VPC before the gateway can be deleted.
            self._connection.detach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id)
            self._connection.delete_internet_gateway(InternetGatewayId=igw['internet_gateway_id'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway")

        return self._results

    def ensure_igw_present(self, vpc_id, tags):
        """Create/attach an IGW for vpc_id if missing and reconcile tags."""
        self.check_input_tags(tags)

        igw = self.get_matching_igw(vpc_id)

        if igw is None:
            if self._check_mode:
                self._results['changed'] = True
                self._results['gateway_id'] = None
                return self._results

            try:
                response = self._connection.create_internet_gateway()

                # Ensure the gateway exists before trying to attach it or add tags
                waiter = get_waiter(self._connection, 'internet_gateway_exists')
                waiter.wait(InternetGatewayIds=[response['InternetGateway']['InternetGatewayId']])

                igw = camel_dict_to_snake_dict(response['InternetGateway'])
                self._connection.attach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id)
                self._results['changed'] = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                self._module.fail_json_aws(e, msg='Unable to create Internet Gateway')

        # A freshly created IGW response has no vpc_id; record the attachment.
        igw['vpc_id'] = vpc_id

        igw['tags'] = self.ensure_tags(igw_id=igw['internet_gateway_id'], tags=tags, add_only=False)

        igw_info = self.get_igw_info(igw)
        self._results.update(igw_info)

        return self._results
+
+
def main():
    """Entry point: build the IGW manager, run it, and exit with results."""
    spec = dict(
        vpc_id=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        tags=dict(default=dict(), required=False, type='dict', aliases=['resource_tags'])
    )

    module = AnsibleAWSModule(
        argument_spec=spec,
        supports_check_mode=True,
    )
    results = dict(changed=False)

    AnsibleEc2Igw(module=module, results=results).process()

    module.exit_json(**results)


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_net.py b/test/support/integration/plugins/modules/ec2_vpc_net.py
new file mode 100644
index 0000000000..30e4b1e94c
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_net.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net
+short_description: Configure AWS virtual private clouds
+description:
+ - Create, modify, and terminate AWS virtual private clouds.
+version_added: "2.0"
+author:
+ - Jonathan Davila (@defionscode)
+ - Sloane Hertel (@s-hertel)
+options:
+ name:
+ description:
+ - The name to give your VPC. This is used in combination with C(cidr_block) to determine if a VPC already exists.
+ required: yes
+ type: str
+ cidr_block:
+ description:
+ - The primary CIDR of the VPC. After 2.5 a list of CIDRs can be provided. The first in the list will be used as the primary CIDR
+ and is used in conjunction with the C(name) to ensure idempotence.
+ required: yes
+ type: list
+ elements: str
+ ipv6_cidr:
+ description:
+ - Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses,
+ or the size of the CIDR block.
+ default: False
+ type: bool
+ version_added: '2.10'
+ purge_cidrs:
+ description:
+ - Remove CIDRs that are associated with the VPC and are not specified in C(cidr_block).
+ default: no
+ type: bool
+ version_added: '2.5'
+ tenancy:
+ description:
+ - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
+ default: default
+ choices: [ 'default', 'dedicated' ]
+ type: str
+ dns_support:
+ description:
+ - Whether to enable AWS DNS support.
+ default: yes
+ type: bool
+ dns_hostnames:
+ description:
+ - Whether to enable AWS hostname support.
+ default: yes
+ type: bool
+ dhcp_opts_id:
+ description:
+ - The id of the DHCP options to use for this VPC.
+ type: str
+ tags:
+ description:
+ - The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of
+ the VPC if it's different.
+ aliases: [ 'resource_tags' ]
+ type: dict
+ state:
+ description:
+ - The state of the VPC. Either absent or present.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ multi_ok:
+ description:
+ - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want
+ duplicate VPCs created.
+ type: bool
+ default: false
+requirements:
+ - boto3
+ - botocore
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: create a VPC with dedicated tenancy and a couple of tags
+ ec2_vpc_net:
+ name: Module_dev2
+ cidr_block: 10.10.0.0/16
+ region: us-east-1
+ tags:
+ module: ec2_vpc_net
+ this: works
+ tenancy: dedicated
+
+- name: create a VPC with dedicated tenancy and request an IPv6 CIDR
+ ec2_vpc_net:
+ name: Module_dev2
+ cidr_block: 10.10.0.0/16
+ ipv6_cidr: True
+ region: us-east-1
+ tenancy: dedicated
+'''
+
+RETURN = '''
+vpc:
+ description: info about the VPC that was created or deleted
+ returned: always
+ type: complex
+ contains:
+ cidr_block:
+ description: The CIDR of the VPC
+ returned: always
+ type: str
+ sample: 10.0.0.0/16
+ cidr_block_association_set:
+ description: IPv4 CIDR blocks associated with the VPC
+ returned: success
+ type: list
+ sample:
+ "cidr_block_association_set": [
+ {
+ "association_id": "vpc-cidr-assoc-97aeeefd",
+ "cidr_block": "20.0.0.0/24",
+ "cidr_block_state": {
+ "state": "associated"
+ }
+ }
+ ]
+ classic_link_enabled:
+ description: indicates whether ClassicLink is enabled
+ returned: always
+ type: bool
+ sample: false
+ dhcp_options_id:
+ description: the id of the DHCP options associated with this VPC
+ returned: always
+ type: str
+ sample: dopt-0fb8bd6b
+ id:
+ description: VPC resource id
+ returned: always
+ type: str
+ sample: vpc-c2e00da5
+ instance_tenancy:
+ description: indicates whether VPC uses default or dedicated tenancy
+ returned: always
+ type: str
+ sample: default
+ ipv6_cidr_block_association_set:
+ description: IPv6 CIDR blocks associated with the VPC
+ returned: success
+ type: list
+ sample:
+ "ipv6_cidr_block_association_set": [
+ {
+ "association_id": "vpc-cidr-assoc-97aeeefd",
+ "ipv6_cidr_block": "2001:db8::/56",
+ "ipv6_cidr_block_state": {
+ "state": "associated"
+ }
+ }
+ ]
+ is_default:
+ description: indicates whether this is the default VPC
+ returned: always
+ type: bool
+ sample: false
+ state:
+ description: state of the VPC
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: tags attached to the VPC, includes name
+ returned: always
+ type: complex
+ contains:
+ Name:
+ description: name tag for the VPC
+ returned: always
+ type: str
+ sample: pk_vpc4
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from time import sleep, time
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict, compare_aws_tags,
+ ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict)
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+from ansible.module_utils.network.common.utils import to_subnet
+
+
def vpc_exists(module, vpc, name, cidr_block, multi):
    """Return the VpcId of a VPC matching name + CIDR, or None.

    First tries to match the full CIDR list, then falls back to the
    primary (first) CIDR as documented for cidr_block. With multi=True
    no match is ever returned so a duplicate VPC may be created; more
    than one match is a fatal error.
    """
    name_filter = {'Name': 'tag:Name', 'Values': [name]}
    try:
        matches = vpc.describe_vpcs(Filters=[name_filter, {'Name': 'cidr-block', 'Values': cidr_block}])['Vpcs']
        if not matches:
            # Retry matching only on the primary CIDR.
            matches = vpc.describe_vpcs(Filters=[name_filter, {'Name': 'cidr-block', 'Values': [cidr_block[0]]}])['Vpcs']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe VPCs")

    if multi:
        return None
    if len(matches) > 1:
        module.fail_json(msg='Currently there are %d VPCs that have the same name and '
                             'CIDR block you specified. If you would like to create '
                             'the VPC anyway please pass True to the multi_ok param.' % len(matches))
    if matches:
        return matches[0]['VpcId']
    return None
+
+
@AWSRetry.backoff(delay=3, tries=8, catch_extra_error_codes=['InvalidVpcID.NotFound'])
def get_classic_link_with_backoff(connection, vpc_id):
    """Return the VPC's ClassicLinkEnabled flag, retrying transient errors.

    Regions where ClassicLink is unavailable report a specific error
    message; treat that case as False rather than failing.
    """
    try:
        vpcs = connection.describe_vpc_classic_link(VpcIds=[vpc_id])['Vpcs']
        return vpcs[0].get('ClassicLinkEnabled')
    except botocore.exceptions.ClientError as err:
        if err.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
            return False
        raise
+
+
def get_vpc(module, connection, vpc_id):
    """Wait until the VPC is available, then return its description.

    The returned dict is boto3's Vpc structure augmented with a
    ClassicLinkEnabled key.
    """
    try:
        connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id))

    try:
        described = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0]
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe VPCs")
    try:
        described['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe VPCs")

    return described
+
+
def update_vpc_tags(connection, module, vpc_id, tags, name):
    """Ensure the VPC carries the requested tags plus a Name tag.

    Only missing or changed tags are added; extra existing tags are not
    purged here. Returns True when tags were (or, in check mode, would
    be) updated, False when everything already matches. Fails the
    module on AWS errors.
    """
    # Work on a copy: the original mutated the caller's dict in place
    # when injecting the Name tag and str-coercing the values.
    desired = dict(tags) if tags else dict()
    desired['Name'] = name
    desired = dict((k, to_native(v)) for k, v in desired.items())
    try:
        current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}])['Tags'])
        tags_to_update, dummy = compare_aws_tags(current_tags, desired, False)
        if not tags_to_update:
            return False
        if not module.check_mode:
            tag_list = ansible_dict_to_boto3_tag_list(tags_to_update)
            connection.create_tags(Resources=[vpc_id], Tags=tag_list, aws_retry=True)

            # Wait for tags to be updated before reporting success.
            expected_tags = boto3_tag_list_to_ansible_dict(tag_list)
            filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()]
            connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters)

        return True
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to update tags")
+
+
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
    """Associate a different DHCP options set with the VPC when needed.

    Returns False if the VPC already uses the requested options set,
    True otherwise (honouring check mode). Fails the module on AWS
    errors.
    """
    if vpc_obj['DhcpOptionsId'] == dhcp_id:
        return False

    if not module.check_mode:
        try:
            connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id))

        try:
            # Block until EC2 reports the new options set on the VPC.
            connection.get_waiter('vpc_available').wait(
                VpcIds=[vpc_obj['VpcId']],
                Filters=[{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}],
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json(msg="Failed to wait for DhcpOptionsId to be updated")

    return True
+
+
def create_vpc(connection, module, cidr_block, tenancy):
    """Create a VPC with the given primary CIDR and tenancy.

    In check mode the module exits immediately reporting a change.
    Otherwise waits until the new VPC exists and returns its id.
    """
    if module.check_mode:
        module.exit_json(changed=True)
    try:
        creation = connection.create_vpc(CidrBlock=cidr_block, InstanceTenancy=tenancy)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, "Failed to create the VPC")

    new_vpc_id = creation['Vpc']['VpcId']
    try:
        # Creation is eventually consistent; wait before handing back the id.
        connection.get_waiter('vpc_exists').wait(VpcIds=[new_vpc_id])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be created.".format(new_vpc_id))

    return new_vpc_id
+
+
def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
    """Poll (up to ~5 minutes) until a VPC attribute equals expected_value.

    Fails the module if the attribute never converges within the deadline.
    """
    deadline = time() + 300
    # The response key is the camelCased attribute name, e.g.
    # 'enableDnsSupport' -> 'EnableDnsSupport'.
    response_key = '{0}{1}'.format(attribute[0].upper(), attribute[1:])
    while time() < deadline:
        current = connection.describe_vpc_attribute(
            Attribute=attribute,
            VpcId=vpc_id
        )[response_key]['Value']
        if current == expected_value:
            return
        sleep(3)
    module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
+
+
def get_cidr_network_bits(module, cidr_block):
    """Normalise each CIDR in the list to its network address.

    CIDRs with host bits set are rewritten to the pure network form
    (with a warning); anything not shaped like addr/prefix is passed
    through untouched so AWS can reject it.
    """
    normalised = []
    for cidr in cidr_block:
        parts = cidr.split('/')
        if len(parts) != 2:
            # Not addr/prefix shaped; let AWS handle invalid CIDRs.
            normalised.append(cidr)
            continue
        network = to_subnet(parts[0], parts[1])
        if cidr != network:
            module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
                        "check the network mask and make sure that only network bits are set: {1}.".format(cidr, network))
        normalised.append(network)
    return normalised
+
+
def main():
    """Module entry point: create/update or delete a VPC and exit.

    Fix vs. original: the IPv4 CIDR association failure message
    formatted ``ipv6_cidr`` (a bool) instead of the CIDR that failed.
    """
    argument_spec = dict(
        name=dict(required=True),
        cidr_block=dict(type='list', required=True),
        ipv6_cidr=dict(type='bool', default=False),
        tenancy=dict(choices=['default', 'dedicated'], default='default'),
        dns_support=dict(type='bool', default=True),
        dns_hostnames=dict(type='bool', default=True),
        dhcp_opts_id=dict(),
        tags=dict(type='dict', aliases=['resource_tags']),
        state=dict(choices=['present', 'absent'], default='present'),
        multi_ok=dict(type='bool', default=False),
        purge_cidrs=dict(type='bool', default=False),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    name = module.params.get('name')
    cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
    ipv6_cidr = module.params.get('ipv6_cidr')
    purge_cidrs = module.params.get('purge_cidrs')
    tenancy = module.params.get('tenancy')
    dns_support = module.params.get('dns_support')
    dns_hostnames = module.params.get('dns_hostnames')
    dhcp_id = module.params.get('dhcp_opts_id')
    tags = module.params.get('tags')
    state = module.params.get('state')
    multi = module.params.get('multi_ok')

    changed = False

    connection = module.client(
        'ec2',
        retry_decorator=AWSRetry.jittered_backoff(
            retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
        )
    )

    # DNS hostnames cannot work without DNS support; reject early.
    if dns_hostnames and not dns_support:
        module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')

    if state == 'present':

        # Check if VPC exists
        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_id is None:
            vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
            changed = True

        vpc_obj = get_vpc(module, connection, vpc_id)

        # Diff requested CIDRs against the currently associated ones.
        associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
                                if cidr['CidrBlockState']['State'] != 'disassociated')
        to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
        to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in cidr_block]
        expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add

        if len(cidr_block) > 1:
            for cidr in to_add:
                changed = True
                try:
                    connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    # BUGFIX: report the IPv4 CIDR that failed (was ipv6_cidr).
                    module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(cidr))
        if ipv6_cidr:
            if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
                module.warn("Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}".format(
                    vpc_id,
                    vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
            else:
                try:
                    connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id)
                    changed = True
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))

        if purge_cidrs:
            for association_id in to_remove:
                changed = True
                try:
                    connection.disassociate_vpc_cidr_block(AssociationId=association_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
                                         "are associated with the CIDR block before you can disassociate it.".format(association_id))

        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, "Failed to update DHCP options")

        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_id, tags, name):
                    changed = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Failed to update tags")

        current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
        current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
        if current_dns_enabled != dns_support:
            changed = True
            if not module.check_mode:
                try:
                    connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, "Failed to update enabled dns support attribute")
        if current_dns_hostnames != dns_hostnames:
            changed = True
            if not module.check_mode:
                try:
                    connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames})
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")

        # wait for associated cidrs to match
        if to_add or to_remove:
            try:
                connection.get_waiter('vpc_available').wait(
                    VpcIds=[vpc_id],
                    Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
                )
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, "Failed to wait for CIDRs to update")

        # try to wait for enableDnsSupport and enableDnsHostnames to match
        wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
        wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)

        final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
        final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', []))
        final_state['id'] = final_state.pop('vpc_id')

        module.exit_json(changed=changed, vpc=final_state)

    elif state == 'absent':

        # Check if VPC exists
        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_id is not None:
            try:
                if not module.check_mode:
                    connection.delete_vpc(VpcId=vpc_id)
                changed = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                                     "and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id))

        module.exit_json(changed=changed, vpc={})


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_route_table.py b/test/support/integration/plugins/modules/ec2_vpc_route_table.py
new file mode 100644
index 0000000000..96c9b2d04d
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_route_table.py
@@ -0,0 +1,750 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_route_table
+short_description: Manage route tables for AWS virtual private clouds
+description:
+ - Manage route tables for AWS virtual private clouds
+version_added: "2.0"
+author:
+- Robert Estelle (@erydo)
+- Rob White (@wimnat)
+- Will Thames (@willthames)
+options:
+ lookup:
+ description: Look up route table by either tags or by route table ID. Non-unique tag lookup will fail.
+ If no tags are specified then no lookup for an existing route table is performed and a new
+ route table will be created. To change tags of a route table you must look up by id.
+ default: tag
+ choices: [ 'tag', 'id' ]
+ type: str
+ propagating_vgw_ids:
+ description: Enable route propagation from virtual gateways specified by ID.
+ type: list
+ elements: str
+ purge_routes:
+ version_added: "2.3"
+ description: Purge existing routes that are not found in routes.
+ type: bool
+ default: 'yes'
+ purge_subnets:
+ version_added: "2.3"
+ description: Purge existing subnets that are not found in subnets. Ignored unless the subnets option is supplied.
+ default: 'true'
+ type: bool
+ purge_tags:
+ version_added: "2.5"
+ description: Purge existing tags that are not found in route table.
+ type: bool
+ default: 'no'
+ route_table_id:
+ description:
+ - The ID of the route table to update or delete.
+ - Required when I(lookup=id).
+ type: str
+ routes:
+ description: List of routes in the route table.
+ Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id',
+ 'instance_id', 'network_interface_id', or 'vpc_peering_connection_id'.
+ If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'.
+ Routes are required for present states.
+ type: list
+ elements: dict
+ state:
+ description: Create or destroy the VPC route table.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ subnets:
+ description: An array of subnets to add to this route table. Subnets may be specified
+ by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'.
+ type: list
+ elements: str
+ tags:
+ description: >
+ A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 }). Tags are
+ used to uniquely identify route tables within a VPC when the route_table_id is not supplied.
+ aliases: [ "resource_tags" ]
+ type: dict
+ vpc_id:
+ description:
+ - VPC ID of the VPC in which to create the route table.
+ - Required when I(state=present) or I(lookup=tag).
+ type: str
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic creation example:
+- name: Set up public subnet route table
+ ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Public
+ subnets:
+ - "{{ jumpbox_subnet.subnet.id }}"
+ - "{{ frontend_subnet.subnet.id }}"
+ - "{{ vpn_subnet.subnet_id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ register: public_route_table
+
+- name: Set up NAT-protected route table
+ ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Internal
+ subnets:
+ - "{{ application_subnet.subnet.id }}"
+ - 'Database Subnet'
+ - '10.0.0.0/8'
+ routes:
+ - dest: 0.0.0.0/0
+ instance_id: "{{ nat.instance_id }}"
+ register: nat_route_table
+
+- name: delete route table
+ ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ route_table_id: "{{ route_table.id }}"
+ lookup: id
+ state: absent
+'''
+
+RETURN = '''
+route_table:
+ description: Route Table result
+ returned: always
+ type: complex
+ contains:
+ associations:
+ description: List of subnets associated with the route table
+ returned: always
+ type: complex
+ contains:
+ main:
+ description: Whether this is the main route table
+ returned: always
+ type: bool
+ sample: false
+ route_table_association_id:
+ description: ID of association between route table and subnet
+ returned: always
+ type: str
+ sample: rtbassoc-ab47cfc3
+ route_table_id:
+ description: ID of the route table
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ subnet_id:
+ description: ID of the subnet
+ returned: always
+ type: str
+ sample: subnet-82055af9
+ id:
+ description: ID of the route table (same as route_table_id for backwards compatibility)
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ propagating_vgws:
+ description: List of Virtual Private Gateways propagating routes
+ returned: always
+ type: list
+ sample: []
+ route_table_id:
+ description: ID of the route table
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ routes:
+ description: List of routes in the route table
+ returned: always
+ type: complex
+ contains:
+ destination_cidr_block:
+ description: CIDR block of destination
+ returned: always
+ type: str
+ sample: 10.228.228.0/22
+ gateway_id:
+ description: ID of the gateway
+ returned: when gateway is local or internet gateway
+ type: str
+ sample: local
+ instance_id:
+ description: ID of a NAT instance
+ returned: when the route is via an EC2 instance
+ type: str
+ sample: i-abcd123456789
+ instance_owner_id:
+ description: AWS account owning the NAT instance
+ returned: when the route is via an EC2 instance
+ type: str
+ sample: 123456789012
+ nat_gateway_id:
+ description: ID of the NAT gateway
+ returned: when the route is via a NAT gateway
+ type: str
+ sample: local
+ origin:
+ description: mechanism through which the route is in the table
+ returned: always
+ type: str
+ sample: CreateRouteTable
+ state:
+ description: state of the route
+ returned: always
+ type: str
+ sample: active
+ tags:
+ description: Tags applied to the route table
+ returned: always
+ type: dict
+ sample:
+ Name: Public route table
+ Public: 'true'
+ vpc_id:
+ description: ID for the VPC in which the route lives
+ returned: always
+ type: str
+ sample: vpc-6e2d2407
+'''
+
+import re
+from time import sleep
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
+from ansible.module_utils.ec2 import compare_aws_tags, AWSRetry
+
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
# Patterns used to classify user-supplied subnet identifiers.
CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
# FIX: the original classes used [A-z], which spans ASCII 65..122 and therefore
# also matched '[', '\', ']', '^', '_' and '`'. Use an explicit alphanumeric
# class instead (still accepts both cases, so no valid ID is rejected).
SUBNET_RE = re.compile(r'^subnet-[A-Za-z0-9]+$')
ROUTE_TABLE_RE = re.compile(r'^rtb-[A-Za-z0-9]+$')
+
+
@AWSRetry.exponential_backoff()
def describe_subnets_with_backoff(connection, **params):
    """Call EC2 DescribeSubnets with retry/backoff; return the 'Subnets' list."""
    return connection.describe_subnets(**params)['Subnets']
+
+
def find_subnets(connection, module, vpc_id, identified_subnets):
    """
    Finds a list of subnets, each identified either by a raw ID, a unique
    'Name' tag, or a CIDR such as 10.0.0.0/8.

    Fails the module on AWS errors, on a Name tag that matches zero subnets,
    or on a Name tag that matches more than one subnet.

    Note that this function is duplicated in other ec2 modules, and should
    potentially be moved into a shared module_utils
    """
    # Classify each identifier by shape: 'subnet-...' IDs, 'x.x.x.x/nn' CIDRs,
    # and anything else is treated as the value of a Name tag.
    subnet_ids = []
    subnet_names = []
    subnet_cidrs = []
    for subnet in (identified_subnets or []):
        if re.match(SUBNET_RE, subnet):
            subnet_ids.append(subnet)
        elif re.match(CIDR_RE, subnet):
            subnet_cidrs.append(subnet)
        else:
            subnet_names.append(subnet)

    # One DescribeSubnets call per identifier class, always scoped to the VPC.
    subnets_by_id = []
    if subnet_ids:
        filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
        try:
            subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids)

    subnets_by_cidr = []
    if subnet_cidrs:
        filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr': subnet_cidrs})
        try:
            subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs)

    subnets_by_name = []
    if subnet_names:
        filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'tag:Name': subnet_names})
        try:
            subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names)

    # Name lookups must be unambiguous: exactly one subnet per requested name.
    for name in subnet_names:
        matching_count = len([1 for s in subnets_by_name for t in s.get('Tags', []) if t['Key'] == 'Name' and t['Value'] == name])
        if matching_count == 0:
            module.fail_json(msg='Subnet named "{0}" does not exist'.format(name))
        elif matching_count > 1:
            module.fail_json(msg='Multiple subnets named "{0}"'.format(name))

    return subnets_by_id + subnets_by_cidr + subnets_by_name
+
+
def find_igw(connection, module, vpc_id):
    """Return the ID of the single internet gateway attached to *vpc_id*.

    Fails the module on an AWS error, when no IGW is attached, or when
    more than one IGW is attached.
    """
    filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
    try:
        igws = connection.describe_internet_gateways(Filters=filters)['InternetGateways']
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='No IGW found for VPC {0}'.format(vpc_id))
    # Guard clauses: anything other than exactly one attached IGW is an error.
    if not igws:
        module.fail_json(msg='No IGWs found for VPC {0}'.format(vpc_id))
    if len(igws) > 1:
        module.fail_json(msg='Multiple IGWs found for VPC {0}'.format(vpc_id))
    return igws[0]['InternetGatewayId']
+
+
@AWSRetry.exponential_backoff()
def describe_tags_with_backoff(connection, resource_id):
    """Return all tags on *resource_id* as a plain Ansible dict.

    Uses the paginated DescribeTags API (with retry/backoff) so resources
    with many tags are handled correctly.
    """
    filters = ansible_dict_to_boto3_filter_list({'resource-id': resource_id})
    paginator = connection.get_paginator('describe_tags')
    tags = paginator.paginate(Filters=filters).build_full_result()['Tags']
    return boto3_tag_list_to_ansible_dict(tags)
+
+
def tags_match(match_tags, candidate_tags):
    """Return True when every key/value pair in *match_tags* is present in *candidate_tags*."""
    for key, expected in match_tags.items():
        if key not in candidate_tags or candidate_tags[key] != expected:
            return False
    return True
+
+
def ensure_tags(connection=None, module=None, resource_id=None, tags=None, purge_tags=None, check_mode=None):
    """Reconcile the tags on *resource_id* with the requested *tags* dict.

    Returns ``{'changed': bool, 'tags': dict}`` where ``tags`` is the
    (predicted, in check mode) final tag set. Fails the module on AWS errors.
    """
    try:
        cur_tags = describe_tags_with_backoff(connection, resource_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Unable to list tags for VPC')

    to_add, to_delete = compare_aws_tags(cur_tags, tags, purge_tags)

    if not to_add and not to_delete:
        return {'changed': False, 'tags': cur_tags}
    if check_mode:
        if not purge_tags:
            # BUG FIX: the original did `tags = cur_tags.update(tags)`, but
            # dict.update() returns None, so check mode reported tags=None.
            # Merge into a copy so cur_tags itself is not mutated either.
            merged = dict(cur_tags)
            merged.update(tags)
            tags = merged
        return {'changed': True, 'tags': tags}

    if to_delete:
        try:
            connection.delete_tags(Resources=[resource_id], Tags=[{'Key': k} for k in to_delete])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't delete tags")
    if to_add:
        try:
            connection.create_tags(Resources=[resource_id], Tags=ansible_dict_to_boto3_tag_list(to_add))
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't create tags")

    # Re-read so the caller receives the authoritative post-change tag set.
    try:
        latest_tags = describe_tags_with_backoff(connection, resource_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Unable to list tags for VPC')
    return {'changed': True, 'tags': latest_tags}
+
+
@AWSRetry.exponential_backoff()
def describe_route_tables_with_backoff(connection, **params):
    """Call EC2 DescribeRouteTables with retry/backoff.

    Returns the 'RouteTables' list, or None when the requested route table
    ID does not exist (InvalidRouteTableID.NotFound is swallowed rather than
    raised so callers can treat "absent" as a normal outcome).
    """
    try:
        return connection.describe_route_tables(**params)['RouteTables']
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'InvalidRouteTableID.NotFound':
            return None
        else:
            raise
+
+
def get_route_table_by_id(connection, module, route_table_id):
    """Fetch a single route table by ID; return None when it does not exist."""
    try:
        matches = describe_route_tables_with_backoff(connection, RouteTableIds=[route_table_id])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't get route table")
    # describe_route_tables_with_backoff returns None on NotFound.
    if not matches:
        return None
    return matches[0]
+
+
def get_route_table_by_tags(connection, module, vpc_id, tags):
    """Find the single route table in *vpc_id* whose tags contain *tags*.

    Returns the matching route table dict, or None when no table matches.
    Fails the module when more than one table matches (ambiguous lookup)
    or on an AWS error.
    """
    count = 0
    route_table = None
    filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
    try:
        route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't get route table")
    # Tags are fetched per table; a table matches when the requested tags
    # are a subset of its tags (see tags_match).
    for table in route_tables:
        this_tags = describe_tags_with_backoff(connection, table['RouteTableId'])
        if tags_match(tags, this_tags):
            route_table = table
            count += 1

    if count > 1:
        module.fail_json(msg="Tags provided do not identify a unique route table")
    else:
        return route_table
+
+
def route_spec_matches_route(route_spec, route):
    """Return True when every key/value in *route_spec* appears in *route*.

    Normalizes the spec IN PLACE first: a 'nat-...' GatewayId is renamed to
    NatGatewayId, and for a 'vpce-...' GatewayId a 'pl-...' destination is
    renamed to DestinationPrefixListId, matching the keys AWS returns.
    """
    gateway = route_spec.get('GatewayId') or ''
    if 'nat-' in gateway:
        route_spec['NatGatewayId'] = route_spec.pop('GatewayId')
    elif 'vpce-' in gateway and route_spec.get('DestinationCidrBlock', '').startswith('pl-'):
        route_spec['DestinationPrefixListId'] = route_spec.pop('DestinationCidrBlock')

    return set(route_spec.items()).issubset(set(route.items()))
+
+
def route_spec_matches_route_cidr(route_spec, route):
    """Return True when the spec and route target the same destination CIDR.

    The spec must carry 'DestinationCidrBlock'; the route may lack it.
    """
    return route.get('DestinationCidrBlock') == route_spec['DestinationCidrBlock']
+
+
def rename_key(d, old_key, new_key):
    """Move the value stored under *old_key* to *new_key* (mutates *d*).

    Raises KeyError when *old_key* is absent.
    """
    value = d.pop(old_key)
    d[new_key] = value
+
+
def index_of_matching_route(route_spec, routes_to_match):
    """Locate *route_spec* within *routes_to_match*.

    Returns ("exact", i) for a full attribute match, ("replace", i) when only
    the destination CIDR matches (and the spec's Origin is present and is not
    'EnableVgwRoutePropagation'), or None when nothing matches.
    """
    for idx, candidate in enumerate(routes_to_match):
        if route_spec_matches_route(route_spec, candidate):
            return "exact", idx
        # A missing Origin defaults to the sentinel, so the comparison below
        # is False exactly when Origin is absent or equals the sentinel —
        # the same condition as the original 'Origin' in/!= pair.
        if route_spec.get('Origin', 'EnableVgwRoutePropagation') != 'EnableVgwRoutePropagation':
            if route_spec_matches_route_cidr(route_spec, candidate):
                return "replace", idx
+
+
def ensure_routes(connection=None, module=None, route_table=None, route_specs=None,
                  propagating_vgw_ids=None, check_mode=None, purge_routes=None):
    """Reconcile the routes in *route_table* with the requested *route_specs*.

    Specs with no existing counterpart are created, CIDR-only matches are
    replaced, and (when purge_routes) leftover 'CreateRoute'-origin routes
    are deleted. Returns {'changed': bool}. Fails the module on AWS errors.
    Note: route_spec_matches_route (via index_of_matching_route) may rename
    keys inside the spec dicts in place.
    """
    routes_to_match = [route for route in route_table['Routes']]
    route_specs_to_create = []
    route_specs_to_recreate = []
    for route_spec in route_specs:
        match = index_of_matching_route(route_spec, routes_to_match)
        if match is None:
            if route_spec.get('DestinationCidrBlock'):
                route_specs_to_create.append(route_spec)
            else:
                module.warn("Skipping creating {0} because it has no destination cidr block. "
                            "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec))
        else:
            if match[0] == "replace":
                if route_spec.get('DestinationCidrBlock'):
                    route_specs_to_recreate.append(route_spec)
                else:
                    module.warn("Skipping recreating route {0} because it has no destination cidr block.".format(route_spec))
            # Matched routes are removed so the remainder can be purged below.
            del routes_to_match[match[1]]

    routes_to_delete = []
    if purge_routes:
        # Only purge routes the user created; propagated/local routes stay.
        for r in routes_to_match:
            if not r.get('DestinationCidrBlock'):
                module.warn("Skipping purging route {0} because it has no destination cidr block. "
                            "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(r))
                continue
            if r['Origin'] == 'CreateRoute':
                routes_to_delete.append(r)

    changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate)
    if changed and not check_mode:
        for route in routes_to_delete:
            try:
                connection.delete_route(RouteTableId=route_table['RouteTableId'], DestinationCidrBlock=route['DestinationCidrBlock'])
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Couldn't delete route")

        for route_spec in route_specs_to_recreate:
            try:
                connection.replace_route(RouteTableId=route_table['RouteTableId'],
                                         **route_spec)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Couldn't recreate route")

        for route_spec in route_specs_to_create:
            try:
                connection.create_route(RouteTableId=route_table['RouteTableId'],
                                        **route_spec)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Couldn't create route")

    return {'changed': bool(changed)}
+
+
def ensure_subnet_association(connection=None, module=None, vpc_id=None, route_table_id=None, subnet_id=None,
                              check_mode=None):
    """Ensure *subnet_id* is associated with *route_table_id*.

    An existing non-main association with a different route table is removed
    first. Returns {'changed': bool} plus 'association_id' (except for the
    check-mode early return when a conflicting association exists).
    """
    filters = ansible_dict_to_boto3_filter_list({'association.subnet-id': subnet_id, 'vpc-id': vpc_id})
    try:
        route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't get route tables")
    for route_table in route_tables:
        if route_table['RouteTableId'] is None:
            continue
        for a in route_table['Associations']:
            # Main associations are implicit; only explicit ones are managed.
            if a['Main']:
                continue
            if a['SubnetId'] == subnet_id:
                if route_table['RouteTableId'] == route_table_id:
                    return {'changed': False, 'association_id': a['RouteTableAssociationId']}
                else:
                    if check_mode:
                        return {'changed': True}
                    try:
                        connection.disassociate_route_table(AssociationId=a['RouteTableAssociationId'])
                    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                        module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")

    try:
        # NOTE(review): associate_route_table returns the full API response
        # dict, not an AssociationId string — confirm callers expect that.
        association_id = connection.associate_route_table(RouteTableId=route_table_id, SubnetId=subnet_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't associate subnet with route table")
    return {'changed': True, 'association_id': association_id}
+
+
def ensure_subnet_associations(connection=None, module=None, route_table=None, subnets=None,
                               check_mode=None, purge_subnets=None):
    """Associate every subnet in *subnets* with *route_table* and, when
    purge_subnets, drop any other explicit (non-main) association.

    Returns {'changed': bool}. In check mode this returns early on the
    first detected change without evaluating the purge step.
    """
    current_association_ids = [a['RouteTableAssociationId'] for a in route_table['Associations'] if not a['Main']]
    new_association_ids = []
    changed = False
    for subnet in subnets:
        result = ensure_subnet_association(connection=connection, module=module, vpc_id=route_table['VpcId'],
                                           route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId'], check_mode=check_mode)
        changed = changed or result['changed']
        if changed and check_mode:
            return {'changed': True}
        new_association_ids.append(result['association_id'])

    if purge_subnets:
        # Anything associated before this run and not (re)confirmed above
        # gets disassociated.
        to_delete = [a_id for a_id in current_association_ids
                     if a_id not in new_association_ids]

        for a_id in to_delete:
            changed = True
            if not check_mode:
                try:
                    connection.disassociate_route_table(AssociationId=a_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")

    return {'changed': changed}
+
+
def ensure_propagation(connection=None, module=None, route_table=None, propagating_vgw_ids=None,
                       check_mode=None):
    """Enable VGW route propagation on *route_table* for any requested
    gateway not already propagating. Returns {'changed': bool}.

    Propagation is only ever enabled here, never disabled.
    """
    already_propagating = set(vgw['GatewayId'] for vgw in route_table['PropagatingVgws'])
    missing = set(propagating_vgw_ids) - already_propagating
    if not missing:
        return {'changed': False}

    if not check_mode:
        for gateway_id in missing:
            try:
                connection.enable_vgw_route_propagation(RouteTableId=route_table['RouteTableId'],
                                                        GatewayId=gateway_id)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Couldn't enable route propagation")

    return {'changed': True}
+
+
def ensure_route_table_absent(connection, module):
    """Delete the route table identified by the module's lookup parameters.

    Looks the table up by tags or by ID, disassociates its subnets, then
    deletes it. Returns {'changed': bool}; fails the module on AWS errors.
    """
    lookup = module.params.get('lookup')
    route_table_id = module.params.get('route_table_id')
    tags = module.params.get('tags')
    vpc_id = module.params.get('vpc_id')
    purge_subnets = module.params.get('purge_subnets')

    if lookup == 'tag':
        if tags is not None:
            route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
        else:
            route_table = None
    elif lookup == 'id':
        route_table = get_route_table_by_id(connection, module, route_table_id)

    if route_table is None:
        return {'changed': False}

    # disassociate subnets before deleting route table
    # (subnets=[] plus purge drops every explicit association)
    if not module.check_mode:
        ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
                                   subnets=[], check_mode=False, purge_subnets=purge_subnets)
        try:
            connection.delete_route_table(RouteTableId=route_table['RouteTableId'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Error deleting route table")

    return {'changed': True}
+
+
def get_route_table_info(connection, module, route_table):
    """Re-fetch *route_table*, attach its tags, and return a snake_cased dict.

    Also copies route_table_id into 'id' for backwards compatibility.
    """
    info = get_route_table_by_id(connection, module, route_table['RouteTableId'])
    try:
        info['Tags'] = describe_tags_with_backoff(connection, route_table['RouteTableId'])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't get tags for route table")
    info = camel_dict_to_snake_dict(info, ignore_list=['Tags'])
    info['id'] = info['route_table_id']  # backwards compatibility
    return info
+
+
def create_route_spec(connection, module, vpc_id):
    """Turn the module's 'routes' parameter into boto3-style route specs.

    Renames 'dest' to 'destination_cidr_block', resolves the literal
    gateway_id 'igw' to the VPC's internet gateway, renames 'nat-...'
    gateway IDs to nat_gateway_id, and returns the list CamelCased.
    NOTE: mutates the dicts inside module.params['routes'] in place.
    """
    routes = module.params.get('routes')

    for route_spec in routes:
        rename_key(route_spec, 'dest', 'destination_cidr_block')

        if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw':
            igw = find_igw(connection, module, vpc_id)
            route_spec['gateway_id'] = igw
        if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'):
            rename_key(route_spec, 'gateway_id', 'nat_gateway_id')

    return snake_dict_to_camel_dict(routes, capitalize_first=True)
+
+
def ensure_route_table_present(connection, module):
    """Create or update a route table so it matches the module parameters.

    Finds (or creates) the table, then reconciles routes, VGW propagation,
    tags, and subnet associations. Always terminates the module itself via
    module.exit_json — this function does not return to its caller.
    """
    lookup = module.params.get('lookup')
    propagating_vgw_ids = module.params.get('propagating_vgw_ids')
    purge_routes = module.params.get('purge_routes')
    purge_subnets = module.params.get('purge_subnets')
    purge_tags = module.params.get('purge_tags')
    route_table_id = module.params.get('route_table_id')
    subnets = module.params.get('subnets')
    tags = module.params.get('tags')
    vpc_id = module.params.get('vpc_id')
    routes = create_route_spec(connection, module, vpc_id)

    changed = False
    # tags_valid is never set to True in this function, so the tag-ensure
    # step below always runs whenever tags are supplied.
    tags_valid = False

    if lookup == 'tag':
        if tags is not None:
            try:
                route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Error finding route table with lookup 'tag'")
        else:
            route_table = None
    elif lookup == 'id':
        try:
            route_table = get_route_table_by_id(connection, module, route_table_id)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Error finding route table with lookup 'id'")

    # If no route table returned then create new route table
    if route_table is None:
        changed = True
        if not module.check_mode:
            try:
                route_table = connection.create_route_table(VpcId=vpc_id)['RouteTable']
                # try to wait for route table to be present before moving on
                get_waiter(
                    connection, 'route_table_exists'
                ).wait(
                    RouteTableIds=[route_table['RouteTableId']],
                )
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Error creating route table")
        else:
            # Check mode: exit immediately with a placeholder table; the
            # reconciliation steps below are skipped entirely.
            route_table = {"id": "rtb-xxxxxxxx", "route_table_id": "rtb-xxxxxxxx", "vpc_id": vpc_id}
            module.exit_json(changed=changed, route_table=route_table)

    if routes is not None:
        result = ensure_routes(connection=connection, module=module, route_table=route_table,
                               route_specs=routes, propagating_vgw_ids=propagating_vgw_ids,
                               check_mode=module.check_mode, purge_routes=purge_routes)
        changed = changed or result['changed']

    if propagating_vgw_ids is not None:
        result = ensure_propagation(connection=connection, module=module, route_table=route_table,
                                    propagating_vgw_ids=propagating_vgw_ids, check_mode=module.check_mode)
        changed = changed or result['changed']

    if not tags_valid and tags is not None:
        result = ensure_tags(connection=connection, module=module, resource_id=route_table['RouteTableId'], tags=tags,
                             purge_tags=purge_tags, check_mode=module.check_mode)
        route_table['Tags'] = result['tags']
        changed = changed or result['changed']

    if subnets is not None:
        associated_subnets = find_subnets(connection, module, vpc_id, subnets)

        result = ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
                                            subnets=associated_subnets, check_mode=module.check_mode,
                                            purge_subnets=purge_subnets)
        changed = changed or result['changed']

    if changed:
        # pause to allow route table routes/subnets/associations to be updated before exiting with final state
        sleep(5)
    module.exit_json(changed=changed, route_table=get_route_table_info(connection, module, route_table))
+
+
def main():
    """Module entry point: parse arguments and dispatch on state."""
    argument_spec = dict(
        lookup=dict(default='tag', choices=['tag', 'id']),
        propagating_vgw_ids=dict(type='list'),
        purge_routes=dict(default=True, type='bool'),
        purge_subnets=dict(default=True, type='bool'),
        purge_tags=dict(default=False, type='bool'),
        route_table_id=dict(),
        routes=dict(default=[], type='list'),
        state=dict(default='present', choices=['present', 'absent']),
        subnets=dict(type='list'),
        tags=dict(type='dict', aliases=['resource_tags']),
        vpc_id=dict()
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[['lookup', 'id', ['route_table_id']],
                                           ['lookup', 'tag', ['vpc_id']],
                                           ['state', 'present', ['vpc_id']]],
                              supports_check_mode=True)

    connection = module.client('ec2')

    state = module.params.get('state')

    # NOTE: ensure_route_table_present exits the module itself, so the
    # exit_json below is effectively reached only for state=absent.
    if state == 'present':
        result = ensure_route_table_present(connection, module)
    elif state == 'absent':
        result = ensure_route_table_absent(connection, module)

    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_subnet.py b/test/support/integration/plugins/modules/ec2_vpc_subnet.py
new file mode 100644
index 0000000000..5085e99b79
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_subnet.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet
+short_description: Manage subnets in AWS virtual private clouds
+description:
+ - Manage subnets in AWS virtual private clouds.
+version_added: "2.0"
+author:
+- Robert Estelle (@erydo)
+- Brad Davidson (@brandond)
+requirements: [ boto3 ]
+options:
+ az:
+ description:
+ - "The availability zone for the subnet."
+ type: str
+ cidr:
+ description:
+ - "The CIDR block for the subnet. E.g. 192.0.2.0/24."
+ type: str
+ required: true
+ ipv6_cidr:
+ description:
+ - "The IPv6 CIDR block for the subnet. The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range."
+ - "Required if I(assign_instances_ipv6=true)"
+ version_added: "2.5"
+ type: str
+ tags:
+ description:
+ - "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
+ aliases: [ 'resource_tags' ]
+ type: dict
+ state:
+ description:
+ - "Create or remove the subnet."
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ vpc_id:
+ description:
+ - "VPC ID of the VPC in which to create or delete the subnet."
+ required: true
+ type: str
+ map_public:
+ description:
+ - "Specify C(yes) to indicate that instances launched into the subnet should be assigned public IP address by default."
+ type: bool
+ default: 'no'
+ version_added: "2.4"
+ assign_instances_ipv6:
+ description:
+ - "Specify C(yes) to indicate that instances launched into the subnet should be automatically assigned an IPv6 address."
+ type: bool
+ default: false
+ version_added: "2.5"
+ wait:
+ description:
+ - "When I(wait=true) and I(state=present), module will wait for subnet to be in available state before continuing."
+ type: bool
+ default: true
+ version_added: "2.5"
+ wait_timeout:
+ description:
+ - "Number of seconds to wait for subnet to become available I(wait=True)."
+ default: 300
+ version_added: "2.5"
+ type: int
+ purge_tags:
+ description:
+ - Whether or not to remove tags that do not appear in the I(tags) list.
+ type: bool
+ default: true
+ version_added: "2.5"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create subnet for database servers
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+ tags:
+ Name: Database Subnet
+ register: database_subnet
+
+- name: Remove subnet for database servers
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+
+- name: Create subnet with IPv6 block assigned
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.1.100.0/24
+ ipv6_cidr: 2001:db8:0:102::/64
+
+- name: Remove IPv6 block assigned to subnet
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.1.100.0/24
+ ipv6_cidr: ''
+'''
+
+RETURN = '''
+subnet:
+ description: Dictionary of subnet values
+ returned: I(state=present)
+ type: complex
+ contains:
+ id:
+ description: Subnet resource id
+ returned: I(state=present)
+ type: str
+ sample: subnet-b883b2c4
+ cidr_block:
+ description: The IPv4 CIDR of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: "10.0.0.0/16"
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block actively associated with the Subnet
+ returned: I(state=present)
+ type: str
+ sample: "2001:db8:0:102::/64"
+ availability_zone:
+ description: Availability zone of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: us-east-1a
+ state:
+ description: state of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: available
+ tags:
+ description: tags attached to the Subnet, includes name
+ returned: I(state=present)
+ type: dict
+ sample: {"Name": "My Subnet", "env": "staging"}
+ map_public_ip_on_launch:
+ description: whether public IP is auto-assigned to new instances
+ returned: I(state=present)
+ type: bool
+ sample: false
+ assign_ipv6_address_on_creation:
+ description: whether IPv6 address is auto-assigned to new instances
+ returned: I(state=present)
+ type: bool
+ sample: false
+ vpc_id:
+ description: the id of the VPC where this Subnet exists
+ returned: I(state=present)
+ type: str
+ sample: vpc-67236184
+ available_ip_address_count:
+ description: number of available IPv4 addresses
+ returned: I(state=present)
+ type: str
+ sample: 251
+ default_for_az:
+ description: indicates whether this is the default Subnet for this Availability Zone
+ returned: I(state=present)
+ type: bool
+ sample: false
+ ipv6_association_id:
+ description: The IPv6 association ID for the currently associated CIDR
+ returned: I(state=present)
+ type: str
+ sample: subnet-cidr-assoc-b85c74d2
+ ipv6_cidr_block_association_set:
+ description: An array of IPv6 cidr block association set information.
+ returned: I(state=present)
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block that is associated with the subnet.
+ returned: always
+ type: str
+ ipv6_cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+'''
+
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, ansible_dict_to_boto3_tag_list,
+ camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags, AWSRetry)
+
+
+def get_subnet_info(subnet):
+    """Normalize a boto3 DescribeSubnets/CreateSubnet response into snake_case dict(s).
+
+    Accepts either a {'Subnets': [...]} list response, a {'Subnet': {...}} single
+    response, or a bare subnet dict. Returns a list in the first case, a dict
+    otherwise. Also flattens tags, renames subnet_id -> id, and surfaces the
+    active IPv6 CIDR/association (empty strings when none).
+    """
+    if 'Subnets' in subnet:
+        # List response: normalize each entry recursively.
+        return [get_subnet_info(s) for s in subnet['Subnets']]
+    elif 'Subnet' in subnet:
+        subnet = camel_dict_to_snake_dict(subnet['Subnet'])
+    else:
+        subnet = camel_dict_to_snake_dict(subnet)
+
+    # Tags come back as a boto3 [{'Key': .., 'Value': ..}] list; flatten to a dict.
+    if 'tags' in subnet:
+        subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags'])
+    else:
+        subnet['tags'] = dict()
+
+    # Expose the AWS SubnetId under the module's conventional 'id' key.
+    if 'subnet_id' in subnet:
+        subnet['id'] = subnet['subnet_id']
+        del subnet['subnet_id']
+
+    # Default to empty strings so callers can compare without None checks.
+    subnet['ipv6_cidr_block'] = ''
+    subnet['ipv6_association_id'] = ''
+    ipv6set = subnet.get('ipv6_cidr_block_association_set')
+    if ipv6set:
+        for item in ipv6set:
+            # Treat both 'associated' and in-flight 'associating' as the active block.
+            if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'):
+                subnet['ipv6_cidr_block'] = item['ipv6_cidr_block']
+                subnet['ipv6_association_id'] = item['association_id']
+
+    return subnet
+
+
+@AWSRetry.exponential_backoff()
+def describe_subnets_with_backoff(client, **params):
+    """Call EC2 DescribeSubnets, retrying transient/throttling errors with backoff."""
+    return client.describe_subnets(**params)
+
+
+def waiter_params(module, params, start_time):
+    """Augment waiter kwargs with a WaiterConfig sized to the remaining wait_timeout.
+
+    NOTE(review): the config is only added when botocore is OLDER than 1.7.0,
+    yet main() warns that botocore >= 1.7.0 is required for custom wait times —
+    this condition looks inverted; confirm against the botocore waiter docs.
+    """
+    if not module.botocore_at_least("1.7.0"):
+        # Budget what is left of the user's wait_timeout measured from start_time.
+        remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time())
+        params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5}
+    return params
+
+
+def handle_waiter(conn, module, waiter_name, params, start_time):
+    """Run the named EC2 waiter with the given params, failing the module on any error."""
+    try:
+        get_waiter(conn, waiter_name).wait(
+            **waiter_params(module, params, start_time)
+        )
+    except botocore.exceptions.WaiterError as e:
+        # Waiter gave up (timeout / max attempts exhausted).
+        module.fail_json_aws(e, "Failed to wait for updates to complete")
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, "An exception happened while trying to wait for updates")
+
+
+def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, az=None, start_time=None):
+    """Create a subnet in vpc_id with the given IPv4 CIDR (optionally IPv6 CIDR/AZ).
+
+    Returns the new subnet as a normalized dict (see get_subnet_info).
+    Fails the module on any boto3/botocore error.
+    """
+    wait = module.params['wait']
+    # NOTE(review): wait_timeout is read but never used in this function.
+    wait_timeout = module.params['wait_timeout']
+
+    params = dict(VpcId=vpc_id,
+                  CidrBlock=cidr)
+
+    if ipv6_cidr:
+        params['Ipv6CidrBlock'] = ipv6_cidr
+
+    if az:
+        params['AvailabilityZone'] = az
+
+    try:
+        subnet = get_subnet_info(conn.create_subnet(**params))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't create subnet")
+
+    # Sometimes AWS takes its time to create a subnet and so using
+    # new subnets's id to do things like create tags results in
+    # exception.
+    if wait and subnet.get('state') != 'available':
+        # First wait until the subnet is visible at all, then until it is 'available'.
+        handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
+        try:
+            conn.get_waiter('subnet_available').wait(
+                **waiter_params(module, {'SubnetIds': [subnet['id']]}, start_time)
+            )
+            subnet['state'] = 'available'
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, "Create subnet action timed out waiting for subnet to become available")
+
+    return subnet
+
+
+def ensure_tags(conn, module, subnet, tags, purge_tags, start_time):
+    """Reconcile the subnet's tags with the desired `tags` dict.
+
+    Returns True if any tag was (or, in check mode, would be) added, changed,
+    or — when purge_tags is set — removed.
+    """
+    changed = False
+
+    filters = ansible_dict_to_boto3_filter_list({'resource-id': subnet['id'], 'resource-type': 'subnet'})
+    try:
+        cur_tags = conn.describe_tags(Filters=filters)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't describe tags")
+
+    to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
+
+    if to_update:
+        try:
+            if not module.check_mode:
+                # Retry on InvalidSubnetID.NotFound: a freshly created subnet may
+                # not yet be visible to the tagging API (eventual consistency).
+                AWSRetry.exponential_backoff(
+                    catch_extra_error_codes=['InvalidSubnetID.NotFound']
+                )(conn.create_tags)(
+                    Resources=[subnet['id']],
+                    Tags=ansible_dict_to_boto3_tag_list(to_update)
+                )
+
+            changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't create tags")
+
+    if to_delete:
+        try:
+            if not module.check_mode:
+                # DeleteTags only needs the keys; omitting 'Value' deletes regardless of value.
+                tags_list = []
+                for key in to_delete:
+                    tags_list.append({'Key': key})
+
+                AWSRetry.exponential_backoff(
+                    catch_extra_error_codes=['InvalidSubnetID.NotFound']
+                )(conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list)
+
+                changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't delete tags")
+
+    if module.params['wait'] and not module.check_mode:
+        # Wait for tags to be updated
+        filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()]
+        handle_waiter(conn, module, 'subnet_exists',
+                      {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+    return changed
+
+
+def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time):
+    """Set the subnet's MapPublicIpOnLaunch attribute (no-op in check mode)."""
+    if check_mode:
+        return
+    try:
+        conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public})
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time):
+    """Set the subnet's AssignIpv6AddressOnCreation attribute (no-op in check mode)."""
+    if check_mode:
+        return
+    try:
+        conn.modify_subnet_attribute(SubnetId=subnet['id'], AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6})
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def disassociate_ipv6_cidr(conn, module, subnet, start_time):
+    """Detach the subnet's current IPv6 CIDR association, waiting if requested.
+
+    AssignIpv6AddressOnCreation must be turned off first, since the attribute
+    cannot remain set once the subnet has no IPv6 block.
+    """
+    if subnet.get('assign_ipv6_address_on_creation'):
+        ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time)
+
+    try:
+        conn.disassociate_subnet_cidr_block(AssociationId=subnet['ipv6_association_id'])
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}"
+                             .format(subnet['ipv6_association_id'], subnet['id']))
+
+    # Wait for cidr block to be disassociated
+    if module.params['wait']:
+        filters = ansible_dict_to_boto3_filter_list(
+            {'ipv6-cidr-block-association.state': ['disassociated'],
+             'vpc-id': subnet['vpc_id']}
+        )
+        handle_waiter(conn, module, 'subnet_exists',
+                      {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+
+def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time):
+    """Make the subnet's IPv6 association match ipv6_cidr (associate/replace/remove).
+
+    Returns True when an association change was (or, in check mode, would be) made.
+    NOTE(review): in check mode with an ipv6_cidr set, `associate_resp` is never
+    assigned but is read in the try/else branch below — NameError; confirm and
+    guard that branch on check_mode.
+    """
+    wait = module.params['wait']
+    changed = False
+
+    # Desired state is "no IPv6": drop the existing association.
+    if subnet['ipv6_association_id'] and not ipv6_cidr:
+        if not check_mode:
+            disassociate_ipv6_cidr(conn, module, subnet, start_time)
+        changed = True
+
+    if ipv6_cidr:
+        # Refuse to associate a block that is already held by another subnet in the VPC.
+        filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr,
+                                                     'vpc-id': subnet['vpc_id']})
+
+        try:
+            check_subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't get subnet info")
+
+        if check_subnets and check_subnets[0]['ipv6_cidr_block']:
+            module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr))
+
+        # Replacing an existing (different) association: detach the old one first.
+        if subnet['ipv6_association_id']:
+            if not check_mode:
+                disassociate_ipv6_cidr(conn, module, subnet, start_time)
+            changed = True
+
+        try:
+            if not check_mode:
+                associate_resp = conn.associate_subnet_cidr_block(SubnetId=subnet['id'], Ipv6CidrBlock=ipv6_cidr)
+            changed = True
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id']))
+        else:
+            # try/else: runs only when the associate call did not raise.
+            if not check_mode and wait:
+                filters = ansible_dict_to_boto3_filter_list(
+                    {'ipv6-cidr-block-association.state': ['associated'],
+                     'vpc-id': subnet['vpc_id']}
+                )
+                handle_waiter(conn, module, 'subnet_exists',
+                              {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+            # Mirror the fresh association into the local subnet dict so the
+            # caller sees the new state without another describe call.
+            if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'):
+                subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId']
+                subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
+                if subnet['ipv6_cidr_block_association_set']:
+                    subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])
+                else:
+                    subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']))
+
+    return changed
+
+
+def get_matching_subnet(conn, module, vpc_id, cidr):
+    """Return the subnet in vpc_id with the given IPv4 CIDR, or None if absent.
+
+    The (vpc-id, cidr-block) pair is the module's identity key for a subnet;
+    if AWS ever returned several matches only the first would be used.
+    """
+    filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr})
+    try:
+        subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't get matching subnet")
+
+    if subnets:
+        return subnets[0]
+
+    return None
+
+
+def ensure_subnet_present(conn, module):
+    """Create the subnet if missing, then converge IPv6, attributes, and tags.
+
+    Returns {'changed': bool, 'subnet': dict}; 'subnet' is {} when check mode
+    would have created a subnet that therefore does not exist yet.
+    """
+    subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+    changed = False
+
+    # Initialize start so max time does not exceed the specified wait_timeout for multiple operations
+    start_time = time.time()
+
+    if subnet is None:
+        if not module.check_mode:
+            subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'],
+                                   ipv6_cidr=module.params['ipv6_cidr'], az=module.params['az'], start_time=start_time)
+        changed = True
+        # Subnet will be None when check_mode is true
+        if subnet is None:
+            return {
+                'changed': changed,
+                'subnet': {}
+            }
+    if module.params['wait']:
+        handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
+
+    # Both sides default to '' for "no IPv6", so direct comparison is safe.
+    if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'):
+        if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time):
+            changed = True
+
+    if module.params['map_public'] != subnet['map_public_ip_on_launch']:
+        ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time)
+        changed = True
+
+    if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'):
+        ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time)
+        changed = True
+
+    if module.params['tags'] != subnet['tags']:
+        # AWS stores tags as strings; stringify both keys and values before diffing.
+        stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items())
+        if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time):
+            changed = True
+
+    subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+    if not module.check_mode and module.params['wait']:
+        # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation
+        # so we only wait for those if necessary just before returning the subnet
+        subnet = ensure_final_subnet(conn, module, subnet, start_time)
+
+    return {
+        'changed': changed,
+        'subnet': subnet
+    }
+
+
+def ensure_final_subnet(conn, module, subnet, start_time):
+    """Re-poll until the subnet's attribute reads match the requested values.
+
+    DescribeSubnets is eventually consistent for map_public_ip_on_launch and
+    assign_ipv6_address_on_creation, so this loops (up to 30 times, 5s apart)
+    combining attribute-specific waiters with fresh describes.
+    """
+    for rewait in range(0, 30):
+        map_public_correct = False
+        assign_ipv6_correct = False
+
+        if module.params['map_public'] == subnet['map_public_ip_on_launch']:
+            map_public_correct = True
+        else:
+            # Pick the waiter matching the desired direction of the attribute.
+            if module.params['map_public']:
+                handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time)
+            else:
+                handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time)
+
+        if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'):
+            assign_ipv6_correct = True
+        else:
+            if module.params['assign_instances_ipv6']:
+                handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
+            else:
+                handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
+
+        if map_public_correct and assign_ipv6_correct:
+            break
+
+        time.sleep(5)
+        subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+
+    return subnet
+
+
+def ensure_subnet_absent(conn, module):
+    """Delete the subnet matching vpc_id/cidr, if it exists; return {'changed': bool}.
+
+    NOTE(review): in check mode with wait=True the 'subnet_deleted' waiter still
+    runs even though nothing was deleted, so it can only time out — confirm and
+    consider guarding the wait on check_mode.
+    """
+    subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+    if subnet is None:
+        return {'changed': False}
+
+    try:
+        if not module.check_mode:
+            conn.delete_subnet(SubnetId=subnet['id'])
+        if module.params['wait']:
+            handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time())
+        return {'changed': True}
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Couldn't delete subnet")
+
+
+def main():
+    """Module entry point: parse arguments and converge the subnet to `state`."""
+    argument_spec = dict(
+        az=dict(default=None, required=False),
+        cidr=dict(required=True),
+        ipv6_cidr=dict(default='', required=False),
+        state=dict(default='present', choices=['present', 'absent']),
+        tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
+        vpc_id=dict(required=True),
+        map_public=dict(default=False, required=False, type='bool'),
+        assign_instances_ipv6=dict(default=False, required=False, type='bool'),
+        wait=dict(type='bool', default=True),
+        wait_timeout=dict(type='int', default=300, required=False),
+        purge_tags=dict(default=True, type='bool')
+    )
+
+    required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])]
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+
+    # required_if only triggers when ipv6_cidr is missing entirely; this check
+    # additionally rejects an explicit empty string.
+    if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'):
+        module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string")
+
+    if not module.botocore_at_least("1.7.0"):
+        module.warn("botocore >= 1.7.0 is required to use wait_timeout for custom wait times")
+
+    connection = module.client('ec2')
+
+    state = module.params.get('state')
+
+    try:
+        if state == 'present':
+            result = ensure_subnet_present(connection, module)
+        elif state == 'absent':
+            result = ensure_subnet_absent(connection, module)
+    except botocore.exceptions.ClientError as e:
+        # NOTE(review): BotoCoreError is not caught here, unlike elsewhere in
+        # this module — an endpoint/connection error would raise uncaught.
+        module.fail_json_aws(e)
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/hcloud_server.py b/test/support/integration/plugins/modules/hcloud_server.py
new file mode 100644
index 0000000000..791c890a29
--- /dev/null
+++ b/test/support/integration/plugins/modules/hcloud_server.py
@@ -0,0 +1,555 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = """
+---
+module: hcloud_server
+
+short_description: Create and manage cloud servers on the Hetzner Cloud.
+
+version_added: "2.8"
+
+description:
+ - Create, update and manage cloud servers on the Hetzner Cloud.
+
+author:
+ - Lukas Kaemmerling (@LKaemmerling)
+
+options:
+ id:
+ description:
+ - The ID of the Hetzner Cloud server to manage.
+ - Only required if no server I(name) is given
+ type: int
+ name:
+ description:
+ - The Name of the Hetzner Cloud server to manage.
+            - Only required if no server I(id) is given or a server does not exist.
+ type: str
+ server_type:
+ description:
+ - The Server Type of the Hetzner Cloud server to manage.
+            - Required if server does not exist.
+ type: str
+ ssh_keys:
+ description:
+ - List of SSH key names
+ - The key names correspond to the SSH keys configured for your
+ Hetzner Cloud account access.
+ type: list
+ volumes:
+ description:
+ - List of Volumes IDs that should be attached to the server on server creation.
+ type: list
+ image:
+ description:
+ - Image the server should be created from.
+            - Required if server does not exist.
+ type: str
+ location:
+ description:
+ - Location of Server.
+            - Required if no I(datacenter) is given and server does not exist.
+ type: str
+ datacenter:
+ description:
+ - Datacenter of Server.
+            - Required if no I(location) is given and server does not exist.
+ type: str
+ backups:
+ description:
+ - Enable or disable Backups for the given Server.
+ type: bool
+ default: no
+ upgrade_disk:
+ description:
+ - Resize the disk size, when resizing a server.
+ - If you want to downgrade the server later, this value should be False.
+ type: bool
+ default: no
+ force_upgrade:
+ description:
+ - Force the upgrade of the server.
+ - Power off the server if it is running on upgrade.
+ type: bool
+ default: no
+ user_data:
+ description:
+ - User Data to be passed to the server on creation.
+            - Only used if server does not exist.
+ type: str
+ rescue_mode:
+ description:
+ - Add the Hetzner rescue system type you want the server to be booted into.
+ type: str
+ version_added: 2.9
+ labels:
+ description:
+ - User-defined labels (key-value pairs).
+ type: dict
+ delete_protection:
+ description:
+ - Protect the Server for deletion.
+ - Needs to be the same as I(rebuild_protection).
+ type: bool
+ version_added: "2.10"
+ rebuild_protection:
+ description:
+ - Protect the Server for rebuild.
+ - Needs to be the same as I(delete_protection).
+ type: bool
+ version_added: "2.10"
+ state:
+ description:
+ - State of the server.
+ default: present
+ choices: [ absent, present, restarted, started, stopped, rebuild ]
+ type: str
+extends_documentation_fragment: hcloud
+"""
+
+EXAMPLES = """
+- name: Create a basic server
+ hcloud_server:
+ name: my-server
+ server_type: cx11
+ image: ubuntu-18.04
+ state: present
+
+- name: Create a basic server with ssh key
+ hcloud_server:
+ name: my-server
+ server_type: cx11
+ image: ubuntu-18.04
+ location: fsn1
+ ssh_keys:
+ - me@myorganisation
+ state: present
+
+- name: Resize an existing server
+ hcloud_server:
+ name: my-server
+ server_type: cx21
+ upgrade_disk: yes
+ state: present
+
+- name: Ensure the server is absent (remove if needed)
+ hcloud_server:
+ name: my-server
+ state: absent
+
+- name: Ensure the server is started
+ hcloud_server:
+ name: my-server
+ state: started
+
+- name: Ensure the server is stopped
+ hcloud_server:
+ name: my-server
+ state: stopped
+
+- name: Ensure the server is restarted
+ hcloud_server:
+ name: my-server
+ state: restarted
+
+- name: Ensure the server will be booted into rescue mode and therefore restarted
+ hcloud_server:
+ name: my-server
+ rescue_mode: linux64
+ state: restarted
+
+- name: Ensure the server is rebuild
+ hcloud_server:
+ name: my-server
+ image: ubuntu-18.04
+ state: rebuild
+"""
+
+RETURN = """
+hcloud_server:
+ description: The server instance
+ returned: Always
+ type: complex
+ contains:
+ id:
+ description: Numeric identifier of the server
+ returned: always
+ type: int
+ sample: 1937415
+ name:
+ description: Name of the server
+ returned: always
+ type: str
+ sample: my-server
+ status:
+ description: Status of the server
+ returned: always
+ type: str
+ sample: running
+ server_type:
+ description: Name of the server type of the server
+ returned: always
+ type: str
+ sample: cx11
+ ipv4_address:
+ description: Public IPv4 address of the server
+ returned: always
+ type: str
+ sample: 116.203.104.109
+ ipv6:
+ description: IPv6 network of the server
+ returned: always
+ type: str
+ sample: 2a01:4f8:1c1c:c140::/64
+ location:
+ description: Name of the location of the server
+ returned: always
+ type: str
+ sample: fsn1
+ datacenter:
+ description: Name of the datacenter of the server
+ returned: always
+ type: str
+ sample: fsn1-dc14
+ rescue_enabled:
+ description: True if rescue mode is enabled, Server will then boot into rescue system on next reboot
+ returned: always
+ type: bool
+ sample: false
+ backup_window:
+ description: Time window (UTC) in which the backup will run, or null if the backups are not enabled
+ returned: always
+            type: str
+ sample: 22-02
+ labels:
+ description: User-defined labels (key-value pairs)
+ returned: always
+ type: dict
+ delete_protection:
+ description: True if server is protected for deletion
+ type: bool
+ returned: always
+ sample: false
+ version_added: "2.10"
+ rebuild_protection:
+ description: True if server is protected for rebuild
+ type: bool
+ returned: always
+ sample: false
+ version_added: "2.10"
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.hcloud import Hcloud
+
+try:
+ from hcloud.volumes.domain import Volume
+ from hcloud.ssh_keys.domain import SSHKey
+ from hcloud.servers.domain import Server
+ from hcloud import APIException
+except ImportError:
+ pass
+
+
+class AnsibleHcloudServer(Hcloud):
+    """Implements the hcloud_server module on top of the shared Hcloud base class."""
+
+    def __init__(self, module):
+        Hcloud.__init__(self, module, "hcloud_server")
+        # hcloud-python representation of the managed server (None until fetched/created).
+        self.hcloud_server = None
+
+    def _prepare_result(self):
+        """Map the hcloud server object onto the module's documented RETURN structure."""
+        image = None if self.hcloud_server.image is None else to_native(self.hcloud_server.image.name)
+        return {
+            "id": to_native(self.hcloud_server.id),
+            "name": to_native(self.hcloud_server.name),
+            "ipv4_address": to_native(self.hcloud_server.public_net.ipv4.ip),
+            "ipv6": to_native(self.hcloud_server.public_net.ipv6.ip),
+            "image": image,
+            "server_type": to_native(self.hcloud_server.server_type.name),
+            "datacenter": to_native(self.hcloud_server.datacenter.name),
+            "location": to_native(self.hcloud_server.datacenter.location.name),
+            "rescue_enabled": self.hcloud_server.rescue_enabled,
+            "backup_window": to_native(self.hcloud_server.backup_window),
+            "labels": self.hcloud_server.labels,
+            "delete_protection": self.hcloud_server.protection["delete"],
+            "rebuild_protection": self.hcloud_server.protection["rebuild"],
+            "status": to_native(self.hcloud_server.status),
+        }
+
+    def _get_server(self):
+        """Refresh self.hcloud_server by id (preferred) or by name; fail on API error."""
+        try:
+            if self.module.params.get("id") is not None:
+                self.hcloud_server = self.client.servers.get_by_id(
+                    self.module.params.get("id")
+                )
+            else:
+                self.hcloud_server = self.client.servers.get_by_name(
+                    self.module.params.get("name")
+                )
+        except APIException as e:
+            self.module.fail_json(msg=e.message)
+
+    def _create_server(self):
+        """Create the server from module params and wait for all actions to finish.
+
+        Records root_password in the result; optionally enables rescue mode.
+        """
+        self.module.fail_on_missing_params(
+            required_params=["name", "server_type", "image"]
+        )
+        params = {
+            "name": self.module.params.get("name"),
+            "server_type": self.client.server_types.get_by_name(
+                self.module.params.get("server_type")
+            ),
+            "user_data": self.module.params.get("user_data"),
+            "labels": self.module.params.get("labels"),
+        }
+        if self.client.images.get_by_name(self.module.params.get("image")) is not None:
+            # When image name is not available look for id instead
+            params["image"] = self.client.images.get_by_name(self.module.params.get("image"))
+        else:
+            params["image"] = self.client.images.get_by_id(self.module.params.get("image"))
+
+        if self.module.params.get("ssh_keys") is not None:
+            params["ssh_keys"] = [
+                SSHKey(name=ssh_key_name)
+                for ssh_key_name in self.module.params.get("ssh_keys")
+            ]
+
+        if self.module.params.get("volumes") is not None:
+            params["volumes"] = [
+                Volume(id=volume_id) for volume_id in self.module.params.get("volumes")
+            ]
+
+        if self.module.params.get("location") is None and self.module.params.get("datacenter") is None:
+            # When not given, the API will choose the location.
+            params["location"] = None
+            params["datacenter"] = None
+        elif self.module.params.get("location") is not None and self.module.params.get("datacenter") is None:
+            params["location"] = self.client.locations.get_by_name(
+                self.module.params.get("location")
+            )
+        elif self.module.params.get("location") is None and self.module.params.get("datacenter") is not None:
+            params["datacenter"] = self.client.datacenters.get_by_name(
+                self.module.params.get("datacenter")
+            )
+
+        if not self.module.check_mode:
+            resp = self.client.servers.create(**params)
+            self.result["root_password"] = resp.root_password
+            resp.action.wait_until_finished(max_retries=1000)
+            # Wait for follow-up actions too (e.g. attaching volumes).
+            [action.wait_until_finished() for action in resp.next_actions]
+
+            rescue_mode = self.module.params.get("rescue_mode")
+            if rescue_mode:
+                self._get_server()
+                self._set_rescue_mode(rescue_mode)
+
+        self._mark_as_changed()
+        self._get_server()
+
+    def _update_server(self):
+        """Converge an existing server: rescue mode, backups, labels, type, protection.
+
+        NOTE(review): when the server is running and force_upgrade is false,
+        a warning is issued but change_type is still attempted on the running
+        server below — confirm whether an early return was intended.
+        """
+        try:
+            rescue_mode = self.module.params.get("rescue_mode")
+            if rescue_mode and self.hcloud_server.rescue_enabled is False:
+                if not self.module.check_mode:
+                    self._set_rescue_mode(rescue_mode)
+                self._mark_as_changed()
+            elif not rescue_mode and self.hcloud_server.rescue_enabled is True:
+                if not self.module.check_mode:
+                    self.hcloud_server.disable_rescue().wait_until_finished()
+                self._mark_as_changed()
+
+            if self.module.params.get("backups") and self.hcloud_server.backup_window is None:
+                if not self.module.check_mode:
+                    self.hcloud_server.enable_backup().wait_until_finished()
+                self._mark_as_changed()
+            elif not self.module.params.get("backups") and self.hcloud_server.backup_window is not None:
+                if not self.module.check_mode:
+                    self.hcloud_server.disable_backup().wait_until_finished()
+                self._mark_as_changed()
+
+            labels = self.module.params.get("labels")
+            if labels is not None and labels != self.hcloud_server.labels:
+                if not self.module.check_mode:
+                    self.hcloud_server.update(labels=labels)
+                self._mark_as_changed()
+
+            server_type = self.module.params.get("server_type")
+            if server_type is not None and self.hcloud_server.server_type.name != server_type:
+                previous_server_status = self.hcloud_server.status
+                state = self.module.params.get("state")
+                if previous_server_status == Server.STATUS_RUNNING:
+                    if not self.module.check_mode:
+                        if self.module.params.get("force_upgrade") or state == "stopped":
+                            self.stop_server()  # Only stopped server can be upgraded
+                        else:
+                            self.module.warn(
+                                "You can not upgrade a running instance %s. You need to stop the instance or use force_upgrade=yes."
+                                % self.hcloud_server.name
+                            )
+                timeout = 100
+                if self.module.params.get("upgrade_disk"):
+                    timeout = (
+                        1000
+                    )  # When we upgrade the disk too the resize progress takes some more time.
+                if not self.module.check_mode:
+                    self.hcloud_server.change_type(
+                        server_type=self.client.server_types.get_by_name(server_type),
+                        upgrade_disk=self.module.params.get("upgrade_disk"),
+                    ).wait_until_finished(timeout)
+                # Restart if we stopped a previously running server (unless state demands otherwise).
+                if state == "present" and previous_server_status == Server.STATUS_RUNNING or state == "started":
+                    self.start_server()
+
+                self._mark_as_changed()
+
+            delete_protection = self.module.params.get("delete_protection")
+            rebuild_protection = self.module.params.get("rebuild_protection")
+            # Both protections must be supplied together (enforced by required_together).
+            if (delete_protection is not None and rebuild_protection is not None) and (
+                    delete_protection != self.hcloud_server.protection["delete"] or rebuild_protection !=
+                    self.hcloud_server.protection["rebuild"]):
+                if not self.module.check_mode:
+                    self.hcloud_server.change_protection(delete=delete_protection,
+                                                         rebuild=rebuild_protection).wait_until_finished()
+                self._mark_as_changed()
+            self._get_server()
+        except APIException as e:
+            self.module.fail_json(msg=e.message)
+
+    def _set_rescue_mode(self, rescue_mode):
+        """Enable the given rescue system, injecting configured SSH keys if any.
+
+        Stores the rescue root password in the module result.
+        """
+        if self.module.params.get("ssh_keys"):
+            resp = self.hcloud_server.enable_rescue(type=rescue_mode,
+                                                    ssh_keys=[self.client.ssh_keys.get_by_name(ssh_key_name).id
+                                                              for
+                                                              ssh_key_name in
+                                                              self.module.params.get("ssh_keys")])
+        else:
+            resp = self.hcloud_server.enable_rescue(type=rescue_mode)
+        resp.action.wait_until_finished()
+        self.result["root_password"] = resp.root_password
+
+    def start_server(self):
+        """Power the server on if it is not already running (check-mode aware)."""
+        try:
+            if self.hcloud_server.status != Server.STATUS_RUNNING:
+                if not self.module.check_mode:
+                    self.client.servers.power_on(self.hcloud_server).wait_until_finished()
+                self._mark_as_changed()
+            self._get_server()
+        except APIException as e:
+            self.module.fail_json(msg=e.message)
+
+    def stop_server(self):
+        """Power the server off if it is not already off (check-mode aware)."""
+        try:
+            if self.hcloud_server.status != Server.STATUS_OFF:
+                if not self.module.check_mode:
+                    self.client.servers.power_off(self.hcloud_server).wait_until_finished()
+                self._mark_as_changed()
+            self._get_server()
+        except APIException as e:
+            self.module.fail_json(msg=e.message)
+
+    def rebuild_server(self):
+        """Rebuild the server from the configured image (always reports changed)."""
+        self.module.fail_on_missing_params(
+            required_params=["image"]
+        )
+        try:
+            if not self.module.check_mode:
+                # NOTE(review): unlike _create_server, the image is looked up by
+                # name only here — an image given by id would resolve to None.
+                self.client.servers.rebuild(self.hcloud_server, self.client.images.get_by_name(
+                    self.module.params.get("image"))).wait_until_finished()
+            self._mark_as_changed()
+
+            self._get_server()
+        except APIException as e:
+            self.module.fail_json(msg=e.message)
+
+    def present_server(self):
+        """Ensure the server exists: create it if absent, otherwise converge it."""
+        self._get_server()
+        if self.hcloud_server is None:
+            self._create_server()
+        else:
+            self._update_server()
+
+    def delete_server(self):
+        """Delete the server if it exists (check-mode aware)."""
+        try:
+            self._get_server()
+            if self.hcloud_server is not None:
+                if not self.module.check_mode:
+                    self.client.servers.delete(self.hcloud_server).wait_until_finished()
+                self._mark_as_changed()
+            self.hcloud_server = None
+        except APIException as e:
+            self.module.fail_json(msg=e.message)
+
+    @staticmethod
+    def define_module():
+        """Build the AnsibleModule with this module's argument spec and constraints."""
+        return AnsibleModule(
+            argument_spec=dict(
+                id={"type": "int"},
+                name={"type": "str"},
+                image={"type": "str"},
+                server_type={"type": "str"},
+                location={"type": "str"},
+                datacenter={"type": "str"},
+                user_data={"type": "str"},
+                ssh_keys={"type": "list"},
+                volumes={"type": "list"},
+                labels={"type": "dict"},
+                backups={"type": "bool", "default": False},
+                upgrade_disk={"type": "bool", "default": False},
+                force_upgrade={"type": "bool", "default": False},
+                rescue_mode={"type": "str"},
+                delete_protection={"type": "bool"},
+                rebuild_protection={"type": "bool"},
+                state={
+                    "choices": ["absent", "present", "restarted", "started", "stopped", "rebuild"],
+                    "default": "present",
+                },
+                **Hcloud.base_module_arguments()
+            ),
+            required_one_of=[['id', 'name']],
+            mutually_exclusive=[["location", "datacenter"]],
+            required_together=[["delete_protection", "rebuild_protection"]],
+            supports_check_mode=True,
+        )
+
+
+def main():
+    """Module entry point: dispatch on the requested state."""
+    module = AnsibleHcloudServer.define_module()
+
+    hcloud = AnsibleHcloudServer(module)
+    state = module.params.get("state")
+    if state == "absent":
+        hcloud.delete_server()
+    elif state == "present":
+        hcloud.present_server()
+    elif state == "started":
+        # Power states first converge the server configuration, then adjust power.
+        hcloud.present_server()
+        hcloud.start_server()
+    elif state == "stopped":
+        hcloud.present_server()
+        hcloud.stop_server()
+    elif state == "restarted":
+        # "restarted" is implemented as a full stop followed by a start.
+        hcloud.present_server()
+        hcloud.stop_server()
+        hcloud.start_server()
+    elif state == "rebuild":
+        hcloud.present_server()
+        hcloud.rebuild_server()
+
+    module.exit_json(**hcloud.get_result())
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/integration/plugins/modules/iam_role.py b/test/support/integration/plugins/modules/iam_role.py
new file mode 100644
index 0000000000..71a5b0377e
--- /dev/null
+++ b/test/support/integration/plugins/modules/iam_role.py
@@ -0,0 +1,673 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: iam_role
+short_description: Manage AWS IAM roles
+description:
+ - Manage AWS IAM roles.
+version_added: "2.3"
+author: "Rob White (@wimnat)"
+options:
+ path:
+ description:
+ - The path to the role. For more information about paths, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
+ default: "/"
+ type: str
+ name:
+ description:
+ - The name of the role to create.
+ required: true
+ type: str
+ description:
+ description:
+ - Provides a description of the role.
+ version_added: "2.5"
+ type: str
+ boundary:
+ description:
+ - The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates.
+ - Boundaries cannot be set on Instance Profiles, as such if this option is specified then I(create_instance_profile) must be C(false).
+ - This is intended for roles/users that have permissions to create new IAM objects.
+ - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
+ - Requires botocore 1.10.57 or above.
+ aliases: [boundary_policy_arn]
+ version_added: "2.7"
+ type: str
+ assume_role_policy_document:
+ description:
+ - The trust relationship policy document that grants an entity permission to assume the role.
+ - This parameter is required when I(state=present).
+ type: json
+ managed_policies:
+ description:
+ - A list of managed policy ARNs or, since Ansible 2.4, a list of either managed policy ARNs or friendly names.
+ - To remove all policies set I(purge_policies=true) and I(managed_policies=[None]).
+ - To embed an inline policy, use M(iam_policy).
+ aliases: ['managed_policy']
+ type: list
+ max_session_duration:
+ description:
+ - The maximum duration (in seconds) of a session when assuming the role.
+ - Valid values are between 1 and 12 hours (3600 and 43200 seconds).
+ version_added: "2.10"
+ type: int
+ purge_policies:
+ description:
+ - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
+ - By default I(purge_policies=true). In Ansible 2.14 this will be changed to I(purge_policies=false).
+ version_added: "2.5"
+ type: bool
+ aliases: ['purge_policy', 'purge_managed_policies']
+ state:
+ description:
+ - Create or remove the IAM role.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ create_instance_profile:
+ description:
+ - Creates an IAM instance profile along with the role.
+ default: true
+ version_added: "2.5"
+ type: bool
+ delete_instance_profile:
+ description:
+ - When I(delete_instance_profile=true) and I(state=absent) deleting a role will also delete the instance
+ profile created with the same I(name) as the role.
+ - Only applies when I(state=absent).
+ default: false
+ version_added: "2.10"
+ type: bool
+ tags:
+ description:
+ - Tag dict to apply to the queue.
+ - Requires botocore 1.12.46 or above.
+ version_added: "2.10"
+ type: dict
+ purge_tags:
+ description:
+ - Remove tags not listed in I(tags) when tags is specified.
+ default: true
+ version_added: "2.10"
+ type: bool
+requirements: [ botocore, boto3 ]
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create a role with description and tags
+ iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ description: This is My New Role
+ tags:
+ env: dev
+
+- name: "Create a role and attach a managed policy called 'PowerUserAccess'"
+ iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ managed_policies:
+ - arn:aws:iam::aws:policy/PowerUserAccess
+
+- name: Keep the role created above but remove all managed policies
+ iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ managed_policies: []
+
+- name: Delete the role
+ iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file', 'policy.json') }}"
+ state: absent
+
+'''
+RETURN = '''
+iam_role:
+ description: dictionary containing the IAM Role data
+ returned: success
+ type: complex
+ contains:
+ path:
+ description: the path to the role
+ type: str
+ returned: always
+ sample: /
+ role_name:
+ description: the friendly name that identifies the role
+ type: str
+ returned: always
+ sample: myrole
+ role_id:
+ description: the stable and unique string identifying the role
+ type: str
+ returned: always
+ sample: ABCDEFF4EZ4ABCDEFV4ZC
+ arn:
+ description: the Amazon Resource Name (ARN) specifying the role
+ type: str
+ returned: always
+ sample: "arn:aws:iam::1234567890:role/mynewrole"
+ create_date:
+ description: the date and time, in ISO 8601 date-time format, when the role was created
+ type: str
+ returned: always
+ sample: "2016-08-14T04:36:28+00:00"
+ assume_role_policy_document:
+ description: the policy that grants an entity permission to assume the role
+ type: str
+ returned: always
+ sample: {
+ 'statement': [
+ {
+ 'action': 'sts:AssumeRole',
+ 'effect': 'Allow',
+ 'principal': {
+ 'service': 'ec2.amazonaws.com'
+ },
+ 'sid': ''
+ }
+ ],
+ 'version': '2012-10-17'
+ }
+ attached_policies:
+ description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
+ type: list
+ returned: always
+ sample: [
+ {
+ 'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess',
+ 'policy_name': 'PowerUserAccess'
+ }
+ ]
+ tags:
+ description: role tags
+ type: dict
+ returned: always
+ sample: '{"Env": "Prod"}'
+'''
+
+import json
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
+from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, compare_aws_tags
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc):
    """Return True when the current trust policy already matches the requested one.

    :param current_policy_doc: parsed policy structure currently on the role
    :param new_policy_doc: requested policy as a JSON string (module param)

    compare_policies() returns True when the two documents DIFFER, so a
    falsy result means "no change needed".
    """
    return not compare_policies(current_policy_doc, json.loads(new_policy_doc))
+
+
@AWSRetry.jittered_backoff()
def _list_policies(connection):
    """Return every managed policy in the account (all pages, with retries)."""
    return connection.get_paginator('list_policies').paginate().build_full_result()['Policies']
+
+
def convert_friendly_names_to_arns(connection, module, policy_names):
    """Resolve a mixed list of policy ARNs and friendly names to ARNs.

    Returns the list unchanged when every entry is already an ARN, which
    avoids the potentially slow, paginated ListPolicies call. Fails the
    module when a friendly name cannot be found.
    """
    # Original used not any([not ...]) over a materialized list; all() with a
    # generator says the same thing directly and short-circuits.
    if all(policy.startswith('arn:') for policy in policy_names):
        return policy_names

    allpolicies = {}
    for policy in _list_policies(connection):
        # Index by both friendly name and ARN so the lookup below accepts either.
        allpolicies[policy['PolicyName']] = policy['Arn']
        allpolicies[policy['Arn']] = policy['Arn']
    try:
        return [allpolicies[policy] for policy in policy_names]
    except KeyError as e:
        module.fail_json_aws(e, msg="Couldn't find policy")
+
+
def attach_policies(connection, module, policies_to_attach, params):
    """Attach each managed policy ARN to the role.

    Returns True when at least one policy was (or, in check mode, would be)
    attached.
    """
    changed = False
    for arn in policies_to_attach:
        changed = True
        if module.check_mode:
            continue
        try:
            connection.attach_role_policy(RoleName=params['RoleName'], PolicyArn=arn, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(arn, params['RoleName']))
    return changed
+
+
def remove_policies(connection, module, policies_to_remove, params):
    """Detach each managed policy ARN from the role.

    Returns True when at least one policy was (or, in check mode, would be)
    detached.
    """
    changed = False
    for arn in policies_to_remove:
        changed = True
        if module.check_mode:
            continue
        try:
            connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=arn, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(arn, params['RoleName']))
    return changed
+
+
def generate_create_params(module):
    """Translate module params into boto3 create_role keyword arguments.

    Path, RoleName and AssumeRolePolicyDocument are always present; the
    remaining keys are added only when the user supplied a value.
    """
    params = {
        'Path': module.params.get('path'),
        'RoleName': module.params.get('name'),
        'AssumeRolePolicyDocument': module.params.get('assume_role_policy_document'),
    }
    for ansible_key, boto_key in (('description', 'Description'),
                                  ('max_session_duration', 'MaxSessionDuration'),
                                  ('boundary', 'PermissionsBoundary')):
        value = module.params.get(ansible_key)
        if value is not None:
            params[boto_key] = value

    tags = module.params.get('tags')
    if tags is not None:
        params['Tags'] = ansible_dict_to_boto3_tag_list(tags)

    return params
+
+
def create_basic_role(connection, module, params):
    """Create the role (or a stand-in dict in check mode).

    Assumes the caller has already verified that the role does not exist.
    Returns the role dict with AssumeRolePolicyDocument parsed to a structure.
    """
    try:
        if module.check_mode:
            # Fabricate a placeholder; later steps detect it via MadeInCheckMode.
            role = {'MadeInCheckMode': True}
        else:
            connection.create_role(aws_retry=True, **params)
            # 'Description' is documented as a key of the create_role response
            # but is not actually returned (same via the AWS CLI), so re-fetch
            # the role to get the complete picture.
            role = get_role_with_backoff(connection, module, params['RoleName'])
        role['AssumeRolePolicyDocument'] = json.loads(params['AssumeRolePolicyDocument'])
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to create role")

    return role
+
+
def update_role_assumed_policy(connection, module, params, role):
    """Sync the role's trust (assume-role) policy document.

    Returns True when the document was (or, in check mode, would be) changed.
    """
    if compare_assume_role_policy_doc(role['AssumeRolePolicyDocument'], params['AssumeRolePolicyDocument']):
        # Documents already match; nothing to do.
        return False
    if module.check_mode:
        return True

    try:
        # Round-trip through json to normalise the user-supplied document.
        normalised = json.dumps(json.loads(params['AssumeRolePolicyDocument']))
        connection.update_assume_role_policy(
            RoleName=params['RoleName'],
            PolicyDocument=normalised,
            aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(params['RoleName']))
    return True
+
+
def update_role_description(connection, module, params, role):
    """Update the role description when one was requested and it differs.

    Returns True on (potential) change, False when nothing needs doing.
    """
    desired = params.get('Description')
    if desired is None or role.get('Description') == desired:
        return False
    if module.check_mode:
        return True

    try:
        connection.update_role_description(RoleName=params['RoleName'], Description=desired, aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to update description for role {0}".format(params['RoleName']))
    return True
+
+
def update_role_max_session_duration(connection, module, params, role):
    """Update MaxSessionDuration when requested and different from current.

    Returns True on (potential) change, False when nothing needs doing.
    """
    desired = params.get('MaxSessionDuration')
    if desired is None or role.get('MaxSessionDuration') == desired:
        return False
    if module.check_mode:
        return True

    try:
        connection.update_role(RoleName=params['RoleName'], MaxSessionDuration=desired, aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(params['RoleName']))
    return True
+
+
def update_role_permissions_boundary(connection, module, params, role):
    """Set, replace or remove the role's permissions boundary.

    An empty-string boundary in params is the sentinel (used by destroy_role)
    meaning "remove the boundary". Returns True on (potential) change.
    """
    desired = params.get('PermissionsBoundary')
    if desired is None:
        return False
    current = role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', '')
    if desired == current:
        return False
    if module.check_mode:
        return True

    if desired == '':
        try:
            connection.delete_role_permissions_boundary(RoleName=params['RoleName'], aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(params['RoleName']))
    else:
        try:
            connection.put_role_permissions_boundary(RoleName=params['RoleName'], PermissionsBoundary=desired, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(params['RoleName']))
    return True
+
+
def update_managed_policies(connection, module, params, role, managed_policies, purge_policies):
    """Reconcile the role's attached managed policies with the requested list.

    None means "leave policies alone"; [None] means "remove every policy".
    Returns True when any attach/detach happened (or would, in check mode).
    """
    if managed_policies is None:
        return False

    # A role faked in check mode has no real attachments to reconcile;
    # record what *would* be attached and report a change.
    if role.get('MadeInCheckMode', False):
        role['AttachedPolicies'] = [
            {'PolicyArn': arn, 'PolicyName': arn.split(':')[5]}
            for arn in managed_policies
        ]
        return True

    attached = get_attached_policy_list(connection, module, params['RoleName'])
    attached_arns = [policy['PolicyArn'] for policy in attached]

    # [None] is the documented sentinel for "no policies at all".
    if len(managed_policies) == 1 and managed_policies[0] is None:
        managed_policies = []

    to_remove = set(attached_arns) - set(managed_policies)
    to_attach = set(managed_policies) - set(attached_arns)

    changed = False
    if purge_policies:
        changed |= remove_policies(connection, module, to_remove, params)
    changed |= attach_policies(connection, module, to_attach, params)

    return changed
+
+
def create_or_update_role(connection, module):
    """Ensure the role exists and matches the requested configuration.

    Creates the role when absent; otherwise updates tags, trust policy,
    description, max session duration and permissions boundary in turn.
    Then reconciles the instance profile and managed policies, and exits
    the module with the resulting role data.
    """

    params = generate_create_params(module)
    role_name = params['RoleName']
    create_instance_profile = module.params.get('create_instance_profile')
    purge_policies = module.params.get('purge_policies')
    # None means the user did not set purge_policies; the current default is
    # True (main() emits a deprecation warning about the planned flip).
    if purge_policies is None:
        purge_policies = True
    managed_policies = module.params.get('managed_policies')
    if managed_policies:
        # Attempt to list the policies early so we don't leave things behind if we can't find them.
        managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)

    changed = False

    # Get role
    role = get_role(connection, module, role_name)

    # If role is None, create it
    if role is None:
        role = create_basic_role(connection, module, params)
        changed = True
    else:
        changed |= update_role_tags(connection, module, params, role)
        changed |= update_role_assumed_policy(connection, module, params, role)
        changed |= update_role_description(connection, module, params, role)
        changed |= update_role_max_session_duration(connection, module, params, role)
        changed |= update_role_permissions_boundary(connection, module, params, role)

    if create_instance_profile:
        changed |= create_instance_profiles(connection, module, params, role)

    changed |= update_managed_policies(connection, module, params, role, managed_policies, purge_policies)

    # Get the role again (a role faked in check mode cannot be re-fetched).
    if not role.get('MadeInCheckMode', False):
        role = get_role(connection, module, params['RoleName'])
        role['AttachedPolicies'] = get_attached_policy_list(connection, module, params['RoleName'])
        role['tags'] = get_role_tags(connection, module)

    # NOTE(review): the role data is returned both under iam_role and
    # flattened at the top level — presumably kept for backwards
    # compatibility with older callers; confirm before consolidating.
    module.exit_json(
        changed=changed, iam_role=camel_dict_to_snake_dict(role, ignore_list=['tags']),
        **camel_dict_to_snake_dict(role, ignore_list=['tags']))
+
+
def create_instance_profiles(connection, module, params, role):
    """Ensure an instance profile named after the role exists and contains it.

    Returns True when a profile was (or, in check mode, would be) created,
    False when one already exists or the role is a check-mode placeholder.
    """

    # A role faked in check mode was never created, so there is nothing to attach.
    if role.get('MadeInCheckMode', False):
        return False

    # Fetch existing Profiles
    try:
        instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'], aws_retry=True)['InstanceProfiles']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName']))

    # Profile already exists
    if any(p['InstanceProfileName'] == params['RoleName'] for p in instance_profiles):
        return False

    if module.check_mode:
        return True

    # Make sure an instance profile is created
    try:
        connection.create_instance_profile(InstanceProfileName=params['RoleName'], Path=params['Path'], aws_retry=True)
    except ClientError as e:
        # If the profile already exists, no problem, move on.
        # Implies someone's changing things at the same time...
        if e.response['Error']['Code'] == 'EntityAlreadyExists':
            return False
        else:
            module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName']))
    except BotoCoreError as e:
        module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName']))

    # And attach the role to the profile
    try:
        connection.add_role_to_instance_profile(InstanceProfileName=params['RoleName'], RoleName=params['RoleName'], aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(params['RoleName']))

    return True
+
+
def remove_instance_profiles(connection, module, role_params, role):
    """Detach the role from every instance profile that contains it.

    When delete_instance_profile was requested, also deletes the profile
    that shares the role's name (the one this module auto-creates).
    """
    role_name = module.params.get('name')
    delete_profiles = module.params.get("delete_instance_profile")

    try:
        instance_profiles = connection.list_instance_profiles_for_role(aws_retry=True, **role_params)['InstanceProfiles']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name))

    # Remove the role from the instance profile(s)
    for profile in instance_profiles:
        profile_name = profile['InstanceProfileName']
        try:
            if not module.check_mode:
                connection.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, **role_params)
                # Only the auto-created profile (same name as the role) is
                # ever deleted; foreign profiles merely lose the role.
                if profile_name == role_name:
                    if delete_profiles:
                        try:
                            connection.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True)
                        except (BotoCoreError, ClientError) as e:
                            module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name))
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name))
+
+
def destroy_role(connection, module):
    """Delete the role after stripping its profiles, policies and boundary.

    Exits the module: changed=False when the role does not exist,
    changed=True after (simulated) deletion.
    """

    role_name = module.params.get('name')
    role = get_role(connection, module, role_name)
    role_params = dict()
    role_params['RoleName'] = role_name
    boundary_params = dict(role_params)
    # Empty string tells update_role_permissions_boundary to remove the boundary.
    boundary_params['PermissionsBoundary'] = ''

    if role is None:
        module.exit_json(changed=False)

    # Before we try to delete the role we need to remove any
    # - attached instance profiles
    # - attached managed policies
    # - permissions boundary
    remove_instance_profiles(connection, module, role_params, role)
    update_managed_policies(connection, module, role_params, role, [], True)
    update_role_permissions_boundary(connection, module, boundary_params, role)

    try:
        if not module.check_mode:
            connection.delete_role(aws_retry=True, **role_params)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to delete role")

    module.exit_json(changed=True)
+
+
def get_role_with_backoff(connection, module, name):
    """Fetch a role, retrying while IAM's eventual consistency reports NoSuchEntity."""
    retry = AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])
    try:
        return retry(connection.get_role)(RoleName=name)['Role']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
+
+
def get_role(connection, module, name):
    """Return the role dict, or None when the role does not exist."""
    try:
        return connection.get_role(RoleName=name, aws_retry=True)['Role']
    except BotoCoreError as e:
        module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
    except ClientError as e:
        # A missing role is an expected condition, not an error.
        if e.response['Error']['Code'] == 'NoSuchEntity':
            return None
        module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
+
+
def get_attached_policy_list(connection, module, name):
    """Return the managed policies currently attached to the named role."""
    try:
        response = connection.list_attached_role_policies(RoleName=name, aws_retry=True)
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name))
    return response['AttachedPolicies']
+
+
def get_role_tags(connection, module):
    """Return the role's tags as an Ansible dict.

    Returns {} when the installed botocore predates the role-tagging APIs
    (list_role_tags is missing from the client).
    """
    role_name = module.params.get('name')
    if not hasattr(connection, 'list_role_tags'):
        return {}
    try:
        tag_list = connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags']
        return boto3_tag_list_to_ansible_dict(tag_list)
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name))
+
+
def update_role_tags(connection, module, params, role):
    """Reconcile role tags with the requested set, honouring purge_tags.

    Returns True when any tag was added or removed (or would be, in check mode).
    """
    requested = params.get('Tags')
    if requested is None:
        return False
    requested = boto3_tag_list_to_ansible_dict(requested)

    role_name = module.params.get('name')
    purge_tags = module.params.get('purge_tags')

    try:
        current = boto3_tag_list_to_ansible_dict(
            connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
    except (ClientError, KeyError):
        # Treat "can't read tags" (old botocore / missing key) as no tags.
        current = {}

    to_add, to_remove = compare_aws_tags(current, requested, purge_tags=purge_tags)

    if not module.check_mode:
        try:
            if to_remove:
                connection.untag_role(RoleName=role_name, TagKeys=to_remove, aws_retry=True)
            if to_add:
                connection.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(to_add), aws_retry=True)
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name)

    return bool(to_add) or bool(to_remove)
+
+
def main():
    """Entry point: declare parameters, validate them, then create/update or
    delete the IAM role. All paths exit via module.exit_json/fail_json."""

    argument_spec = dict(
        name=dict(type='str', required=True),
        path=dict(type='str', default="/"),
        assume_role_policy_document=dict(type='json'),
        managed_policies=dict(type='list', aliases=['managed_policy']),
        max_session_duration=dict(type='int'),
        state=dict(type='str', choices=['present', 'absent'], default='present'),
        description=dict(type='str'),
        boundary=dict(type='str', aliases=['boundary_policy_arn']),
        create_instance_profile=dict(type='bool', default=True),
        delete_instance_profile=dict(type='bool', default=False),
        purge_policies=dict(type='bool', aliases=['purge_policy', 'purge_managed_policies']),
        tags=dict(type='dict'),
        purge_tags=dict(type='bool', default=True),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[('state', 'present', ['assume_role_policy_document'])],
                              supports_check_mode=True)

    # Warn about the planned default flip of purge_policies (True -> False).
    # Fix: "explicity" -> "explicitly" in the user-facing message.
    if module.params.get('purge_policies') is None:
        module.deprecate('In Ansible 2.14 the default value of purge_policies will change from true to false.'
                         ' To maintain the existing behaviour explicitly set purge_policies=true', version='2.14')

    if module.params.get('boundary'):
        # Boundaries cannot be set on instance profiles, so the two options conflict.
        if module.params.get('create_instance_profile'):
            module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
        if not module.params.get('boundary').startswith('arn:aws:iam'):
            module.fail_json(msg="Boundary policy must be an ARN")
    # Feature gates: role tagging and permissions boundaries need newer botocore.
    if module.params.get('tags') is not None and not module.botocore_at_least('1.12.46'):
        module.fail_json(msg="When managing tags botocore must be at least v1.12.46. "
                             "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
    if module.params.get('boundary') is not None and not module.botocore_at_least('1.10.57'):
        module.fail_json(msg="When using a boundary policy, botocore must be at least v1.10.57. "
                             "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
    max_session_duration = module.params.get('max_session_duration')
    if max_session_duration and not 3600 <= max_session_duration <= 43200:
        module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)")
    path = module.params.get('path')
    if path and not (path.startswith('/') and path.endswith('/')):
        module.fail_json(msg="path must begin and end with /")

    connection = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())

    if module.params.get("state") == 'present':
        create_or_update_role(connection, module)
    else:
        destroy_role(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/k8s.py b/test/support/integration/plugins/modules/k8s.py
new file mode 100644
index 0000000000..f3938bf39c
--- /dev/null
+++ b/test/support/integration/plugins/modules/k8s.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Chris Houseknecht <@chouseknecht>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+
+module: k8s
+
+short_description: Manage Kubernetes (K8s) objects
+
+version_added: "2.6"
+
+author:
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Fabian von Feilitzsch (@fabianvf)"
+
+description:
+ - Use the OpenShift Python client to perform CRUD operations on K8s objects.
+ - Pass the object definition from a source file or inline. See examples for reading
+ files and using Jinja templates or vault-encrypted files.
+ - Access to the full range of K8s APIs.
+ - Use the M(k8s_info) module to obtain a list of items about an object of type C(kind)
+ - Authenticate using either a config file, certificates, password or token.
+ - Supports check mode.
+
+extends_documentation_fragment:
+ - k8s_state_options
+ - k8s_name_options
+ - k8s_resource_options
+ - k8s_auth_options
+
+notes:
+ - If your OpenShift Python library is not 0.9.0 or newer and you are trying to
+ remove an item from an associative array/dictionary, for example a label or
+ an annotation, you will need to explicitly set the value of the item to be
+ removed to `null`. Simply deleting the entry in the dictionary will not
+ remove it from openshift or kubernetes.
+
+options:
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge will typically be used.
+ - For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
+ want to use C(merge) if you see "strategic merge patch format is not supported"
+ - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
+ - Requires openshift >= 0.6.2
+ - If more than one merge_type is given, the merge_types will be tried in order
+ - If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+ on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
+ is simply C(strategic-merge).
+ - mutually exclusive with C(apply)
+ choices:
+ - json
+ - merge
+ - strategic-merge
+ type: list
+ version_added: "2.7"
+ wait:
+ description:
+ - Whether to wait for certain resource kinds to end up in the desired state. By default the module exits once Kubernetes has
+ received the request
+ - Implemented for C(state=present) for C(Deployment), C(DaemonSet) and C(Pod), and for C(state=absent) for all resource kinds.
+ - For resource kinds without an implementation, C(wait) returns immediately unless C(wait_condition) is set.
+ default: no
+ type: bool
+ version_added: "2.8"
+ wait_sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ default: 5
+ version_added: "2.9"
+ wait_timeout:
+ description:
+ - How long in seconds to wait for the resource to end up in the desired state. Ignored if C(wait) is not set.
+ default: 120
+ version_added: "2.8"
+ wait_condition:
+ description:
+ - Specifies a custom condition on the status to wait for. Ignored if C(wait) is not set or is set to False.
+ suboptions:
+ type:
+ description:
+ - The type of condition to wait for. For example, the C(Pod) resource will set the C(Ready) condition (among others)
+ - Required if you are specifying a C(wait_condition). If left empty, the C(wait_condition) field will be ignored.
+ - The possible types for a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
+ for a given resource to see possible choices.
+ status:
+ description:
+ - The value of the status field in your desired condition.
+ - For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the C(Unknown) status.
+ choices:
+ - True
+ - False
+ - Unknown
+ reason:
+ description:
+ - The value of the reason field in your desired condition
+ - For example, if a C(Deployment) is paused, The C(Progressing) C(type) will have the C(DeploymentPaused) reason.
+ - The possible reasons in a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
+ for a given resource to see possible choices.
+ version_added: "2.8"
+ validate:
+ description:
+ - how (if at all) to validate the resource definition against the kubernetes schema.
+ Requires the kubernetes-validate python module
+ suboptions:
+ fail_on_error:
+ description: whether to fail on validation errors.
+ required: yes
+ type: bool
+ version:
+ description: version of Kubernetes to validate against. defaults to Kubernetes server version
+ strict:
+ description: whether to fail when passing unexpected properties
+ default: no
+ type: bool
+ version_added: "2.8"
+ append_hash:
+ description:
+ - Whether to append a hash to a resource name for immutability purposes
+ - Applies only to ConfigMap and Secret resources
+ - The parameter will be silently ignored for other resource kinds
+ - The full definition of an object is needed to generate the hash - this means that deleting an object created with append_hash
+ will only work if the same object is passed with state=absent (alternatively, just use state=absent with the name including
+ the generated hash and append_hash=no)
+ type: bool
+ version_added: "2.8"
+ apply:
+ description:
+ - C(apply) compares the desired resource definition with the previously supplied resource definition,
+ ignoring properties that are automatically generated
+ - C(apply) works better with Services than 'force=yes'
+ - mutually exclusive with C(merge_type)
+ type: bool
+ version_added: "2.9"
+
+requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+- name: Create a k8s namespace
+ k8s:
+ name: testing
+ api_version: v1
+ kind: Namespace
+ state: present
+
+- name: Create a Service object from an inline definition
+ k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: web
+ namespace: testing
+ labels:
+ app: galaxy
+ service: web
+ spec:
+ selector:
+ app: galaxy
+ service: web
+ ports:
+ - protocol: TCP
+ targetPort: 8000
+ name: port-8000-tcp
+ port: 8000
+
+- name: Remove an existing Service object
+ k8s:
+ state: absent
+ api_version: v1
+ kind: Service
+ namespace: testing
+ name: web
+
+# Passing the object definition from a file
+
+- name: Create a Deployment by reading the definition from a local file
+ k8s:
+ state: present
+ src: /testing/deployment.yml
+
+- name: >-
+ Read definition file from the Ansible controller file system.
+ If the definition file has been encrypted with Ansible Vault it will automatically be decrypted.
+ k8s:
+ state: present
+ definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
+
+- name: Read definition file from the Ansible controller file system after Jinja templating
+ k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+
+- name: fail on validation errors
+ k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+ validate:
+ fail_on_error: yes
+
+- name: warn on validation errors, check for unexpected properties
+ k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+ validate:
+ fail_on_error: no
+ strict: yes
+'''
+
+RETURN = '''
+result:
+ description:
+ - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: complex
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+ items:
+ description: Returned only when multiple yaml documents are passed to src or resource_definition
+ returned: when resource_definition or src contains list of objects
+ type: list
+ duration:
+ description: elapsed time of task in seconds
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+'''
+
+from ansible.module_utils.k8s.raw import KubernetesRawModule
+
+
def main():
    """Delegate all processing to KubernetesRawModule, which exits the module itself."""
    KubernetesRawModule().execute_module()
diff --git a/test/support/integration/plugins/modules/k8s_info.py b/test/support/integration/plugins/modules/k8s_info.py
new file mode 100644
index 0000000000..99a8fd8cec
--- /dev/null
+++ b/test/support/integration/plugins/modules/k8s_info.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Will Thames <@willthames>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: k8s_info
+
+short_description: Describe Kubernetes (K8s) objects
+
+version_added: "2.7"
+
+author:
+ - "Will Thames (@willthames)"
+
+description:
+ - Use the OpenShift Python client to perform read operations on K8s objects.
+ - Access to the full range of K8s APIs.
+ - Authenticate using either a config file, certificates, password or token.
+ - Supports check mode.
+ - This module was called C(k8s_facts) before Ansible 2.9. The usage did not change.
+
+options:
+ api_version:
+ description:
+ - Use to specify the API version. in conjunction with I(kind), I(name), and I(namespace) to identify a
+ specific object.
+ default: v1
+ aliases:
+ - api
+ - version
+ kind:
+ description:
+ - Use to specify an object model. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a
+ specific object.
+ required: yes
+ name:
+ description:
+ - Use to specify an object name. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a
+ specific object.
+ namespace:
+ description:
+ - Use to specify an object namespace. Use in conjunction with I(api_version), I(kind), and I(name)
+ to identify a specific object.
+ label_selectors:
+ description: List of label selectors to use to filter results
+ field_selectors:
+ description: List of field selectors to use to filter results
+
+extends_documentation_fragment:
+ - k8s_auth_options
+
+requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+- name: Get an existing Service object
+ k8s_info:
+ api_version: v1
+ kind: Service
+ name: web
+ namespace: testing
+ register: web_service
+
+- name: Get a list of all service objects
+ k8s_info:
+ api_version: v1
+ kind: Service
+ namespace: testing
+ register: service_list
+
+- name: Get a list of all pods from any namespace
+ k8s_info:
+ kind: Pod
+ register: pod_list
+
+- name: Search for all Pods labelled app=web
+ k8s_info:
+ kind: Pod
+ label_selectors:
+ - app = web
+ - tier in (dev, test)
+
+- name: Search for all running pods
+ k8s_info:
+ kind: Pod
+ field_selectors:
+ - status.phase=Running
+'''
+
+RETURN = '''
+resources:
+ description:
+ - The object(s) that exists
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: dict
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: dict
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: dict
+'''
+
+
+from ansible.module_utils.k8s.common import KubernetesAnsibleModule, AUTH_ARG_SPEC
+import copy
+
+
class KubernetesInfoModule(KubernetesAnsibleModule):
    """Read-only K8s module: look objects up and return them as facts."""

    def __init__(self, *args, **kwargs):
        # Check mode is always safe here because the module only reads.
        super(KubernetesInfoModule, self).__init__(*args,
                                                   supports_check_mode=True,
                                                   **kwargs)
        if self._name == 'k8s_facts':
            self.deprecate("The 'k8s_facts' module has been renamed to 'k8s_info'", version='2.13')

    def execute_module(self):
        """Query the cluster with the user-supplied selectors and exit."""
        self.client = self.get_api_client()

        params = self.params
        facts = self.kubernetes_facts(params['kind'],
                                      params['api_version'],
                                      params['name'],
                                      params['namespace'],
                                      params['label_selectors'],
                                      params['field_selectors'])
        self.exit_json(changed=False, **facts)

    @property
    def argspec(self):
        """Module argument spec: auth options plus the object selectors."""
        spec = copy.deepcopy(AUTH_ARG_SPEC)
        spec['kind'] = dict(required=True)
        spec['api_version'] = dict(default='v1', aliases=['api', 'version'])
        spec['name'] = dict()
        spec['namespace'] = dict()
        spec['label_selectors'] = dict(type='list', default=[])
        spec['field_selectors'] = dict(type='list', default=[])
        return spec
+
+
def main():
    """Module entry point: delegate all work to KubernetesInfoModule."""
    KubernetesInfoModule().execute_module()


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/nios_txt_record.py b/test/support/integration/plugins/modules/nios_txt_record.py
new file mode 100644
index 0000000000..b9e63dfc6e
--- /dev/null
+++ b/test/support/integration/plugins/modules/nios_txt_record.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_txt_record
+version_added: "2.7"
+author: "Corey Wanless (@coreywan)"
+short_description: Configure Infoblox NIOS txt records
+description:
+ - Adds and/or removes instances of txt record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:txt) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+extends_documentation_fragment: nios
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+      - Sets the DNS view to associate this txt record with. The DNS
+ view must already be configured on the system
+ required: true
+ default: default
+ aliases:
+ - dns_view
+ text:
+ description:
+ - Text associated with the record. It can contain up to 255 bytes
+ per substring, up to a total of 512 bytes. To enter leading,
+ trailing, or embedded spaces in the text, add quotes around the
+ text to preserve the spaces.
+ required: true
+ ttl:
+ description:
+      - Configures the TTL to be associated with this txt record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+ - name: Ensure a text Record Exists
+ nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: present
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+
+ - name: Ensure a text Record does not exist
+ nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: absent
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.net_tools.nios.api import WapiModule
+
+
def main():
    """Main entry point for module execution.

    Builds the WAPI argument spec for C(record:txt) objects and hands
    create/update/delete processing to WapiModule.
    """
    # Fields Infoblox uses to identify/describe the TXT record; ib_req
    # marks the keys used to look the object up on the WAPI side.
    ib_spec = {
        'name': dict(required=True, ib_req=True),
        'view': dict(default='default', aliases=['dns_view'], ib_req=True),
        'text': dict(ib_req=True),
        'ttl': dict(type='int'),
        'extattrs': dict(type='dict'),
        'comment': dict(),
    }

    # Module-level arguments, merged with the record fields and the
    # shared provider (connection) spec.
    argument_spec = {
        'provider': dict(required=True),
        'state': dict(default='present', choices=['present', 'absent']),
    }
    argument_spec.update(ib_spec)
    argument_spec.update(WapiModule.provider_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    result = WapiModule(module).run('record:txt', ib_spec)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/nios_zone.py b/test/support/integration/plugins/modules/nios_zone.py
new file mode 100644
index 0000000000..0ffb2ff0a4
--- /dev/null
+++ b/test/support/integration/plugins/modules/nios_zone.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_zone
+version_added: "2.5"
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS DNS zones
+description:
+ - Adds and/or removes instances of DNS zone objects from
+ Infoblox NIOS servers. This module manages NIOS C(zone_auth) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment: nios
+options:
+ fqdn:
+ description:
+ - Specifies the qualified domain name to either add or remove from
+ the NIOS instance based on the configured C(state) value.
+ required: true
+ aliases:
+ - name
+ view:
+ description:
+ - Configures the DNS view name for the configured resource. The
+ specified DNS zone must already exist on the running NIOS instance
+ prior to configuring zones.
+ required: true
+ default: default
+ aliases:
+ - dns_view
+ grid_primary:
+ description:
+ - Configures the grid primary servers for this zone.
+ suboptions:
+ name:
+ description:
+ - The name of the grid primary server
+ grid_secondaries:
+ description:
+ - Configures the grid secondary servers for this zone.
+ suboptions:
+ name:
+ description:
+ - The name of the grid secondary server
+ ns_group:
+ version_added: "2.6"
+ description:
+ - Configures the name server group for this zone. Name server group is
+ mutually exclusive with grid primary and grid secondaries.
+ restart_if_needed:
+ version_added: "2.6"
+ description:
+ - If set to true, causes the NIOS DNS service to restart and load the
+ new zone configuration
+ type: bool
+ zone_format:
+ version_added: "2.7"
+ description:
+      - Create an authoritative Reverse-Mapping Zone which is an area of network
+ space for which one or more name servers-primary and secondary-have the
+ responsibility to respond to address-to-name queries. It supports
+ reverse-mapping zones for both IPv4 and IPv6 addresses.
+ default: FORWARD
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: configure a zone on the system using grid primary and secondaries
+ nios_zone:
+ name: ansible.com
+ grid_primary:
+ - name: gridprimary.grid.com
+ grid_secondaries:
+ - name: gridsecondary1.grid.com
+ - name: gridsecondary2.grid.com
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: configure a zone on the system using a name server group
+ nios_zone:
+ name: ansible.com
+ ns_group: examplensg
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: configure a reverse mapping zone on the system using IPV4 zone format
+ nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: configure a reverse mapping zone on the system using IPV6 zone format
+ nios_zone:
+ name: 100::1/128
+ zone_format: IPV6
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: update the comment and ext attributes for an existing zone
+ nios_zone:
+ name: ansible.com
+ comment: this is an example comment
+ extattrs:
+ Site: west-dc
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: remove the dns zone
+ nios_zone:
+ name: ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: remove the reverse mapping dns zone from the system with IPV4 zone format
+ nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.net_tools.nios.api import WapiModule
+from ansible.module_utils.net_tools.nios.api import NIOS_ZONE
+
+
def main():
    """Main entry point for module execution.

    Builds the WAPI argument spec for C(zone_auth) objects and delegates
    create/update/delete handling to WapiModule.run().
    """
    # Sub-spec shared by each grid_primary / grid_secondaries list entry.
    grid_spec = dict(
        name=dict(required=True),
    )

    # ib_req marks the fields used to look the object up on the WAPI side;
    # update=False means fqdn cannot be changed on an existing zone.
    ib_spec = dict(
        fqdn=dict(required=True, aliases=['name'], ib_req=True, update=False),
        # FIX: the original declared aliases=['zone_format'] — an alias
        # identical to the option name. The self-alias is redundant and
        # newer argument-spec validation warns about it, so it is dropped;
        # callers using 'zone_format' directly are unaffected.
        zone_format=dict(default='FORWARD', ib_req=False),
        view=dict(default='default', aliases=['dns_view'], ib_req=True),

        grid_primary=dict(type='list', elements='dict', options=grid_spec),
        grid_secondaries=dict(type='list', elements='dict', options=grid_spec),
        ns_group=dict(),
        restart_if_needed=dict(type='bool'),

        extattrs=dict(type='dict'),
        comment=dict()
    )

    argument_spec = dict(
        provider=dict(required=True),
        state=dict(default='present', choices=['present', 'absent'])
    )

    argument_spec.update(ib_spec)
    argument_spec.update(WapiModule.provider_spec)

    # A zone is served either by a name server group or by explicit grid
    # primaries/secondaries, never both.
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           mutually_exclusive=[
                               ['ns_group', 'grid_primary'],
                               ['ns_group', 'grid_secondaries']
                           ])

    wapi = WapiModule(module)
    result = wapi.run(NIOS_ZONE, ib_spec)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/python_requirements_info.py b/test/support/integration/plugins/modules/python_requirements_info.py
new file mode 100644
index 0000000000..aa9e70ec86
--- /dev/null
+++ b/test/support/integration/plugins/modules/python_requirements_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+ - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+ - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
+version_added: "2.7"
+options:
+ dependencies:
+ description: >
+ A list of version-likes or module names to check for installation.
+ Supported operators: <, >, <=, >=, or ==. The bare module name like
+ I(ansible), the module with a specific version like I(boto3==1.6.1), or a
+ partial version like I(requests>2) are all valid specifications.
+author:
+- Will Thames (@willthames)
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = '''
+- name: show python lib/site paths
+ python_requirements_info:
+- name: check for modern boto3 and botocore versions
+ python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ - botocore<2
+'''
+
+RETURN = '''
+python:
+ description: path to python version used
+ returned: always
+ type: str
+ sample: /usr/local/opt/python@2/bin/python2.7
+python_version:
+ description: version of python
+ returned: always
+ type: str
+ sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
+python_system_path:
+ description: List of paths python is looking for modules in
+ returned: always
+ type: list
+ sample:
+ - /usr/local/opt/python@2/site-packages/
+ - /usr/lib/python/site-packages/
+ - /usr/lib/python/site-packages/
+valid:
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ returned: always
+ type: dict
+ sample:
+ boto3:
+ desired: null
+ installed: 1.7.60
+ botocore:
+ desired: botocore<2
+ installed: 1.10.60
+mismatched:
+ description: A dictionary of dependencies that did not satisfy the desired version
+ returned: always
+ type: dict
+ sample:
+ botocore:
+ desired: botocore>2
+ installed: 1.10.60
+not_found:
+ description: A list of packages that could not be imported at all, and are not installed
+ returned: always
+ type: list
+ sample:
+ - boto4
+ - requests
+'''
+
+import re
+import sys
+import operator
+
+HAS_DISTUTILS = False
+try:
+ import pkg_resources
+ from distutils.version import LooseVersion
+ HAS_DISTUTILS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+
+operations = {
+ '<=': operator.le,
+ '>=': operator.ge,
+ '<': operator.lt,
+ '>': operator.gt,
+ '==': operator.eq,
+}
+
+
def main():
    """Inspect the target host's Python and verify requirement specs.

    Parses each entry of I(dependencies) (e.g. ``boto3``, ``boto3==1.6.1``,
    ``requests>2``), looks the package up via pkg_resources, and files it
    under ``valid``, ``mismatched`` or ``not_found`` in the module result.
    Always returns ``python``, ``python_version`` and ``python_system_path``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            dependencies=dict(type='list')
        ),
        supports_check_mode=True,
    )
    if module._name == 'python_requirements_facts':
        module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'", version='2.13')
    if not HAS_DISTUTILS:
        module.fail_json(
            msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
            python=sys.executable,
            python_version=sys.version,
            python_system_path=sys.path,
        )
    # name, optional operator, optional version — e.g. 'boto3', 'boto3>1.6'
    pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')

    results = dict(
        not_found=[],
        mismatched={},
        valid={},
    )

    for dep in (module.params.get('dependencies') or []):
        match = pkg_dep_re.match(dep)
        if match is None:
            module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
        pkg, op, version = match.groups()
        if op is not None and op not in operations:
            module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
        # ROBUSTNESS FIX: an operator without a version (e.g. 'boto3>') or a
        # version without an operator previously crashed further down with a
        # KeyError/TypeError; reject such specs with a clear message instead.
        if (op is None) != (version is None):
            module.fail_json(msg="Failed to parse version requirement '{0}'. Operator and version must be supplied together".format(dep))
        try:
            existing = pkg_resources.get_distribution(pkg).version
        except pkg_resources.DistributionNotFound:
            # not importable at all — report it and move on
            results['not_found'].append(pkg)
            continue
        if op is None and version is None:
            results['valid'][pkg] = {
                'installed': existing,
                'desired': None,
            }
        elif operations[op](LooseVersion(existing), LooseVersion(version)):
            results['valid'][pkg] = {
                'installed': existing,
                'desired': dep,
            }
        else:
            # BUG FIX: the original assigned the whole 'mismatched' dict here
            # (results['mismatched'] = {...}), dropping the package key and
            # clobbering earlier mismatches. Record the entry under its
            # package name as the RETURN documentation describes.
            results['mismatched'][pkg] = {
                'installed': existing,
                'desired': dep,
            }

    module.exit_json(
        python=sys.executable,
        python_version=sys.version,
        python_system_path=sys.path,
        **results
    )


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/s3_bucket.py b/test/support/integration/plugins/modules/s3_bucket.py
new file mode 100644
index 0000000000..f35cf53b5e
--- /dev/null
+++ b/test/support/integration/plugins/modules/s3_bucket.py
@@ -0,0 +1,740 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: s3_bucket
+short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
+description:
+ - Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
+version_added: "2.0"
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ force:
+ description:
+ - When trying to delete a bucket, delete all keys (including versions and delete markers)
+ in the bucket first (an s3 bucket must be empty for a successful deletion)
+ type: bool
+ default: 'no'
+ name:
+ description:
+ - Name of the s3 bucket
+ required: true
+ type: str
+ policy:
+ description:
+ - The JSON policy as a string.
+ type: json
+ s3_url:
+ description:
+ - S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and fakes3 etc.
+ - Assumes AWS if not specified.
+ - For Walrus, use FQDN of the endpoint without scheme nor path.
+ aliases: [ S3_URL ]
+ type: str
+ ceph:
+ description:
+ - Enable API compatibility with Ceph. It takes into account the S3 API subset working
+ with Ceph in order to provide the same module behaviour where possible.
+ type: bool
+ version_added: "2.2"
+ requester_pays:
+ description:
+ - With Requester Pays buckets, the requester instead of the bucket owner pays the cost
+ of the request and the data download from the bucket.
+ type: bool
+ default: False
+ state:
+ description:
+ - Create or remove the s3 bucket
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ tags:
+ description:
+ - tags dict to apply to bucket
+ type: dict
+ purge_tags:
+ description:
+ - whether to remove tags that aren't present in the C(tags) parameter
+ type: bool
+ default: True
+ version_added: "2.9"
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+ type: bool
+ encryption:
+ description:
+ - Describes the default server-side encryption to apply to new objects in the bucket.
+ In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly.
+ choices: [ 'none', 'AES256', 'aws:kms' ]
+ version_added: "2.9"
+ type: str
+ encryption_key_id:
+ description: KMS master key ID to use for the default encryption. This parameter is allowed if encryption is aws:kms. If
+ not specified then it will default to the AWS provided KMS key.
+ version_added: "2.9"
+ type: str
+extends_documentation_fragment:
+ - aws
+ - ec2
+notes:
+ - If C(requestPayment), C(policy), C(tagging) or C(versioning)
+ operations/API aren't implemented by the endpoint, module doesn't fail
+ if each parameter satisfies the following condition.
+ I(requester_pays) is C(False), I(policy), I(tags), and I(versioning) are C(None).
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a simple s3 bucket
+- s3_bucket:
+ name: mys3bucket
+ state: present
+
+# Create a simple s3 bucket on Ceph Rados Gateway
+- s3_bucket:
+ name: mys3bucket
+ s3_url: http://your-ceph-rados-gateway-server.xxx
+ ceph: true
+
+# Remove an s3 bucket and any keys it contains
+- s3_bucket:
+ name: mys3bucket
+ state: absent
+ force: yes
+
+# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
+- s3_bucket:
+ name: mys3bucket
+ policy: "{{ lookup('file','policy.json') }}"
+ requester_pays: yes
+ versioning: yes
+ tags:
+ example: tag1
+ another: tag2
+
+# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
+- s3_bucket:
+ name: mydobucket
+ s3_url: 'https://nyc3.digitaloceanspaces.com'
+
+# Create a bucket with AES256 encryption
+- s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "AES256"
+
+# Create a bucket with aws:kms encryption, KMS key
+- s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "aws:kms"
+ encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example"
+
+# Create a bucket with aws:kms encryption, default key
+- s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "aws:kms"
+'''
+
+import json
+import os
+import time
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.six import string_types
+from ansible.module_utils.basic import to_text
+from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
+from ansible.module_utils.ec2 import compare_policies, ec2_argument_spec, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
+from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
def create_or_update_bucket(s3_client, module, location):
    """Ensure the bucket exists and converge its settings to the module params.

    Handles, in order: creation, versioning, requester-pays, policy, tags and
    default encryption. Each read tolerates endpoints that do not implement
    the corresponding API (returning 'NotImplemented'/'XNotImplemented') as
    long as the user did not explicitly request that feature. Terminates via
    module.exit_json() on success; does not return a value.
    """

    policy = module.params.get("policy")
    name = module.params.get("name")
    requester_pays = module.params.get("requester_pays")
    tags = module.params.get("tags")
    purge_tags = module.params.get("purge_tags")
    versioning = module.params.get("versioning")
    encryption = module.params.get("encryption")
    encryption_key_id = module.params.get("encryption_key_id")
    changed = False
    result = {}

    try:
        bucket_is_present = bucket_exists(s3_client, name)
    except EndpointConnectionError as e:
        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed to check bucket presence")

    if not bucket_is_present:
        try:
            bucket_changed = create_bucket(s3_client, name, location)
            # Block until S3 reports the new bucket before touching its config.
            s3_client.get_waiter('bucket_exists').wait(Bucket=name)
            changed = changed or bucket_changed
        except WaiterError as e:
            module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed while creating bucket")

    # Versioning
    try:
        versioning_status = get_bucket_versioning(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket versioning")
    except ClientError as exp:
        # An endpoint without the versioning API is only fatal when the user
        # explicitly asked for a versioning change.
        if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None:
            module.fail_json_aws(exp, msg="Failed to get bucket versioning")
    else:
        if versioning is not None:
            required_versioning = None
            if versioning and versioning_status.get('Status') != "Enabled":
                required_versioning = 'Enabled'
            elif not versioning and versioning_status.get('Status') == "Enabled":
                # Once enabled, S3 versioning can only be suspended, not removed.
                required_versioning = 'Suspended'

            if required_versioning:
                try:
                    put_bucket_versioning(s3_client, name, required_versioning)
                    changed = True
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Failed to update bucket versioning")

                versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)

        # This output format is there to ensure compatibility with previous versions of the module
        result['versioning'] = {
            'Versioning': versioning_status.get('Status', 'Disabled'),
            'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
        }

    # Requester pays
    try:
        requester_pays_status = get_bucket_request_payment(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket request payment")
    except ClientError as exp:
        if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or requester_pays:
            module.fail_json_aws(exp, msg="Failed to get bucket request payment")
    else:
        if requester_pays:
            payer = 'Requester' if requester_pays else 'BucketOwner'
            if requester_pays_status != payer:
                put_bucket_request_payment(s3_client, name, payer)
                requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
                if requester_pays_status is None:
                    # We have seen that it happens quite a lot of times that the put request was not taken into
                    # account, so we retry one more time
                    put_bucket_request_payment(s3_client, name, payer)
                    requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
                changed = True

        result['requester_pays'] = requester_pays

    # Policy
    try:
        current_policy = get_bucket_policy(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket policy")
    except ClientError as exp:
        if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None:
            module.fail_json_aws(exp, msg="Failed to get bucket policy")
    else:
        if policy is not None:
            if isinstance(policy, string_types):
                policy = json.loads(policy)

            # An explicit empty policy means "delete any existing policy".
            if not policy and current_policy:
                try:
                    delete_bucket_policy(s3_client, name)
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Failed to delete bucket policy")
                current_policy = wait_policy_is_applied(module, s3_client, name, policy)
                changed = True
            elif compare_policies(current_policy, policy):
                try:
                    put_bucket_policy(s3_client, name, policy)
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Failed to update bucket policy")
                current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
                if current_policy is None:
                    # As for request payement, it happens quite a lot of times that the put request was not taken into
                    # account, so we retry one more time
                    put_bucket_policy(s3_client, name, policy)
                    current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
                changed = True

        result['policy'] = current_policy

    # Tags
    try:
        current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
    except BotoCoreError as exp:
        module.fail_json_aws(exp, msg="Failed to get bucket tags")
    except ClientError as exp:
        if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or tags is not None:
            module.fail_json_aws(exp, msg="Failed to get bucket tags")
    else:
        if tags is not None:
            # Tags are always returned as text
            tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
            if not purge_tags:
                # Ensure existing tags that aren't updated by desired tags remain
                current_copy = current_tags_dict.copy()
                current_copy.update(tags)
                tags = current_copy
            if current_tags_dict != tags:
                if tags:
                    try:
                        put_bucket_tagging(s3_client, name, tags)
                    except (BotoCoreError, ClientError) as e:
                        module.fail_json_aws(e, msg="Failed to update bucket tags")
                else:
                    if purge_tags:
                        try:
                            delete_bucket_tagging(s3_client, name)
                        except (BotoCoreError, ClientError) as e:
                            module.fail_json_aws(e, msg="Failed to delete bucket tags")
                current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
                changed = True

        result['tags'] = current_tags_dict

    # Encryption
    # get_bucket_encryption only exists on sufficiently new botocore clients.
    if hasattr(s3_client, "get_bucket_encryption"):
        try:
            current_encryption = get_bucket_encryption(s3_client, name)
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to get bucket encryption")
    elif encryption is not None:
        module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")

    if encryption is not None:
        current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
        current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
        if encryption == 'none' and current_encryption_algorithm is not None:
            try:
                delete_bucket_encryption(s3_client, name)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Failed to delete bucket encryption")
            current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
            changed = True
        # NOTE(review): `and` binds tighter than `or` here, so this parses as
        # (not-none AND algorithm differs) OR (aws:kms AND key differs). The
        # second disjunct implies encryption != 'none', so the net effect
        # matches the intended "algorithm or KMS key differs" check.
        elif encryption != 'none' and (encryption != current_encryption_algorithm) or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id):
            expected_encryption = {'SSEAlgorithm': encryption}
            if encryption == 'aws:kms' and encryption_key_id is not None:
                expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
            try:
                put_bucket_encryption(s3_client, name, expected_encryption)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Failed to set bucket encryption")
            current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption)
            changed = True

        result['encryption'] = current_encryption

    module.exit_json(changed=changed, name=name, **result)
+
+
def bucket_exists(s3_client, bucket_name):
    """Return True when a bucket named *bucket_name* is owned by the caller.

    head_bucket appeared to be really inconsistent, so we use list_buckets
    instead and loop over all the buckets, even if we know it's less
    performant :(
    """
    # Bug fix: ListBuckets takes no per-bucket parameter; passing
    # Bucket=bucket_name is rejected by botocore parameter validation.
    all_buckets = s3_client.list_buckets()['Buckets']
    return any(bucket['Name'] == bucket_name for bucket in all_buckets)
+
+
@AWSRetry.exponential_backoff(max_delay=120)
def create_bucket(s3_client, bucket_name, location):
    """Create *bucket_name*, honouring a non-default *location* constraint.

    Returns True when the bucket was created, False when AWS reports that
    we already own it.
    """
    try:
        create_args = {'Bucket': bucket_name}
        if location not in ('us-east-1', None):
            create_args['CreateBucketConfiguration'] = {'LocationConstraint': location}
        s3_client.create_bucket(**create_args)
        return True
    except ClientError as e:
        if e.response['Error']['Code'] != 'BucketAlreadyOwnedByYou':
            raise e
        # We should never get here since bucket presence is checked before
        # create_or_update_bucket runs, but the AWS API sometimes fails to
        # report an existing bucket, so treat this as "nothing to do".
        return False
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_tagging(s3_client, bucket_name, tags):
    """Apply *tags* (an ansible dict) to the bucket, retrying on NoSuchBucket."""
    tagging = {'TagSet': ansible_dict_to_boto3_tag_list(tags)}
    s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tagging)
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_policy(s3_client, bucket_name, policy):
    """Attach *policy* (a dict) to the bucket as its JSON bucket policy."""
    serialized_policy = json.dumps(policy)
    s3_client.put_bucket_policy(Bucket=bucket_name, Policy=serialized_policy)
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def delete_bucket_policy(s3_client, bucket_name):
    """Remove the bucket policy, retrying while the bucket settles."""
    delete_args = {'Bucket': bucket_name}
    s3_client.delete_bucket_policy(**delete_args)
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_policy(s3_client, bucket_name):
    """Return the bucket policy as a dict, or None when no policy is set."""
    try:
        raw_policy = s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy')
    except ClientError as e:
        # A missing policy is an expected state, not an error.
        if e.response['Error']['Code'] != 'NoSuchBucketPolicy':
            raise e
        return None
    return json.loads(raw_policy)
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_request_payment(s3_client, bucket_name, payer):
    """Set who pays for requests against the bucket ('BucketOwner' or 'Requester')."""
    payment_config = {'Payer': payer}
    s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration=payment_config)
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_request_payment(s3_client, bucket_name):
    """Return the bucket's current request-payment 'Payer' value."""
    response = s3_client.get_bucket_request_payment(Bucket=bucket_name)
    return response.get('Payer')
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_versioning(s3_client, bucket_name):
    """Return the raw GetBucketVersioning response for the bucket."""
    versioning_response = s3_client.get_bucket_versioning(Bucket=bucket_name)
    return versioning_response
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_versioning(s3_client, bucket_name, required_versioning):
    """Set the bucket's versioning Status ('Enabled' or 'Suspended')."""
    versioning_config = {'Status': required_versioning}
    s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration=versioning_config)
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def get_bucket_encryption(s3_client, bucket_name):
    """Return the bucket's default server-side-encryption rule, or None when unset."""
    try:
        response = s3_client.get_bucket_encryption(Bucket=bucket_name)
        rules = response.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])
        return rules[0].get('ApplyServerSideEncryptionByDefault')
    except ClientError as e:
        if e.response['Error']['Code'] != 'ServerSideEncryptionConfigurationNotFoundError':
            raise e
        return None
    except (IndexError, KeyError):
        # Response present but the rule list is empty or malformed.
        return None
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def put_bucket_encryption(s3_client, bucket_name, encryption):
    """Install *encryption* as the bucket's single default SSE rule."""
    sse_config = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
    s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=sse_config)
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def delete_bucket_tagging(s3_client, bucket_name):
    """Remove every tag from the bucket, retrying while the bucket settles."""
    delete_args = {'Bucket': bucket_name}
    s3_client.delete_bucket_tagging(**delete_args)
+
+
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
def delete_bucket_encryption(s3_client, bucket_name):
    """Remove the bucket's default encryption configuration."""
    delete_args = {'Bucket': bucket_name}
    s3_client.delete_bucket_encryption(**delete_args)
+
+
@AWSRetry.exponential_backoff(max_delay=120)
def delete_bucket(s3_client, bucket_name):
    """Delete the bucket, silently ignoring NoSuchBucket."""
    try:
        s3_client.delete_bucket(Bucket=bucket_name)
    except ClientError as e:
        if e.response['Error']['Code'] != 'NoSuchBucket':
            raise e
        # The bucket was already in a deleting state when we checked for its
        # existence, so there is nothing left to do.
+
+
def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
    """Poll until the bucket policy matches *expected_policy*.

    Returns the applied policy, or None when it never converged and
    *should_fail* is False; otherwise fails the module.
    """
    for _attempt in range(0, 12):
        try:
            current_policy = get_bucket_policy(s3_client, bucket_name)
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Failed to get bucket policy")
        # compare_policies returns True when the two policies differ.
        if not compare_policies(current_policy, expected_policy):
            return current_policy
        time.sleep(5)
    if not should_fail:
        return None
    module.fail_json(msg="Bucket policy failed to apply in the expected time")
+
+
def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
    """Poll until the bucket's request-payment payer equals *expected_payer*.

    Returns the payer, or None on timeout when *should_fail* is False;
    otherwise fails the module.
    """
    for _attempt in range(0, 12):
        try:
            payer = get_bucket_request_payment(s3_client, bucket_name)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed to get bucket request payment")
        if payer == expected_payer:
            return payer
        time.sleep(5)
    if not should_fail:
        return None
    module.fail_json(msg="Bucket request payment failed to apply in the expected time")
+
+
def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption):
    """Poll until the bucket's default encryption equals *expected_encryption*;
    fails the module on timeout."""
    for _attempt in range(0, 12):
        try:
            current_encryption = get_bucket_encryption(s3_client, bucket_name)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
        if current_encryption == expected_encryption:
            return current_encryption
        time.sleep(5)
    module.fail_json(msg="Bucket encryption failed to apply in the expected time")
+
+
def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
    """Poll until the bucket's versioning Status equals *required_versioning*;
    fails the module on timeout. Versioning converges slowly, hence the
    longer 24 x 8s window."""
    for _attempt in range(0, 24):
        try:
            versioning_status = get_bucket_versioning(s3_client, bucket_name)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
        if versioning_status.get('Status') == required_versioning:
            return versioning_status
        time.sleep(8)
    module.fail_json(msg="Bucket versioning failed to apply in the expected time")
+
+
def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
    """Poll until the bucket's tags equal *expected_tags_dict*; fails the
    module on timeout.

    Tag writes are eventually consistent, so we re-read up to 12 times with
    a 5 second pause between attempts.
    """
    for dummy in range(0, 12):
        try:
            current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
        except (ClientError, BotoCoreError) as e:
            # Bug fix: this error message previously said "Failed to get
            # bucket policy", copy-pasted from wait_policy_is_applied.
            module.fail_json_aws(e, msg="Failed to get bucket tags")
        if current_tags_dict != expected_tags_dict:
            time.sleep(5)
        else:
            return current_tags_dict
    module.fail_json(msg="Bucket tags failed to apply in the expected time")
+
+
def get_current_bucket_tags_dict(s3_client, bucket_name):
    """Return the bucket's tags as an ansible dict ({} when untagged)."""
    try:
        tag_set = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
    except ClientError as e:
        # An untagged bucket reports NoSuchTagSet rather than an empty set.
        if e.response['Error']['Code'] != 'NoSuchTagSet':
            raise e
        return {}
    return boto3_tag_list_to_ansible_dict(tag_set)
+
+
def paginated_list(s3_client, **pagination_params):
    """Yield one list of object keys per ListObjectsV2 page."""
    paginator = s3_client.get_paginator('list_objects_v2')
    for page in paginator.paginate(**pagination_params):
        yield [content['Key'] for content in page.get('Contents', [])]
+
+
def paginated_versions_list(s3_client, **pagination_params):
    """Yield one list of (key, version_id) pairs per ListObjectVersions page.

    Versions and DeleteMarkers are merged, as delete markers can still
    prevent a bucket deletion. A bucket that vanished mid-listing yields a
    single empty page.
    """
    try:
        paginator = s3_client.get_paginator('list_object_versions')
        for page in paginator.paginate(**pagination_params):
            entries = page.get('Versions', []) + page.get('DeleteMarkers', [])
            yield [(entry['Key'], entry['VersionId']) for entry in entries]
    except is_boto3_error_code('NoSuchBucket'):
        yield []
+
+
def destroy_bucket(s3_client, module):
    """Delete the bucket named in the module params and exit the module.

    With ``force`` set, the bucket is emptied first (all object versions and
    delete markers). Exits with changed=False when the bucket is already
    absent, changed=True after a successful delete; fails the module on any
    AWS error.
    """

    force = module.params.get("force")
    name = module.params.get("name")
    try:
        bucket_is_present = bucket_exists(s3_client, name)
    except EndpointConnectionError as e:
        module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed to check bucket presence")

    if not bucket_is_present:
        module.exit_json(changed=False)

    if force:
        # if there are contents then we need to delete them (including versions) before we can delete the bucket
        try:
            for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
                formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
                for fk in formatted_keys:
                    # remove VersionId from cases where they are `None` so that
                    # unversioned objects are deleted using `DeleteObject`
                    # rather than `DeleteObjectVersion`, improving backwards
                    # compatibility with older IAM policies.
                    if not fk.get('VersionId'):
                        fk.pop('VersionId')

                if formatted_keys:
                    # Batch-delete the page's keys; any per-key failure aborts the module.
                    resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
                    if resp.get('Errors'):
                        module.fail_json(
                            msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
                                ', '.join([k['Key'] for k in resp['Errors']])
                            ),
                            errors=resp['Errors'], response=resp
                        )
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Failed while deleting bucket")

    try:
        delete_bucket(s3_client, name)
        # Deletion is eventually consistent: block (up to 60 x 5s) until AWS
        # confirms the bucket is gone.
        s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
    except WaiterError as e:
        module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Failed to delete bucket")

    module.exit_json(changed=True)
+
+
def is_fakes3(s3_url):
    """ Return True if s3_url has scheme fakes3:// """
    if s3_url is None:
        return False
    return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+
+
def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
    """Build a boto3 S3 client, with special endpoint handling for Ceph RGW
    and fakes3 test servers."""
    common = dict(module=module, conn_type='client', resource='s3', region=location, **aws_connect_kwargs)
    if s3_url and ceph:  # TODO - test this
        ceph = urlparse(s3_url)
        params = dict(use_ssl=ceph.scheme == 'https', endpoint=s3_url, **common)
    elif is_fakes3(s3_url):
        fakes3 = urlparse(s3_url)
        if fakes3.scheme == 'fakes3s':
            protocol = "https"
            port = fakes3.port if fakes3.port is not None else 443
        else:
            protocol = "http"
            port = fakes3.port if fakes3.port is not None else 80
        params = dict(endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
                      use_ssl=fakes3.scheme == 'fakes3s', **common)
    else:
        params = dict(endpoint=s3_url, **common)
    return boto3_conn(**params)
+
+
def main():
    """Module entry point: build the argument spec, resolve the AWS
    connection, and dispatch to create_or_update_bucket / destroy_bucket."""

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            force=dict(default=False, type='bool'),
            policy=dict(type='json'),
            name=dict(required=True),
            requester_pays=dict(default=False, type='bool'),
            s3_url=dict(aliases=['S3_URL']),
            state=dict(default='present', choices=['present', 'absent']),
            tags=dict(type='dict'),
            purge_tags=dict(type='bool', default=True),
            versioning=dict(type='bool'),
            ceph=dict(default=False, type='bool'),
            encryption=dict(choices=['none', 'AES256', 'aws:kms']),
            encryption_key_id=dict()
        )
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
    )

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    s3_url = module.params.get('s3_url')
    ceph = module.params.get('ceph')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    if ceph and not s3_url:
        module.fail_json(msg='ceph flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to Ceph RGW, Walrus or fakes3
    if s3_url:
        # These kwargs are not understood by the custom-endpoint connection path.
        for key in ['validate_certs', 'security_token', 'profile_name']:
            aws_connect_kwargs.pop(key, None)
    s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)

    if s3_client is None:  # this should never happen
        module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')

    state = module.params.get("state")
    encryption = module.params.get("encryption")
    encryption_key_id = module.params.get("encryption_key_id")

    # Parameter validation
    if encryption_key_id is not None and encryption is None:
        module.fail_json(msg="You must specify encryption parameter along with encryption_key_id.")
    elif encryption_key_id is not None and encryption != 'aws:kms':
        module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")

    if state == 'present':
        create_or_update_bucket(s3_client, module, location)
    elif state == 'absent':
        destroy_bucket(s3_client, module)


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/sts_assume_role.py b/test/support/integration/plugins/modules/sts_assume_role.py
new file mode 100644
index 0000000000..cd82a549cb
--- /dev/null
+++ b/test/support/integration/plugins/modules/sts_assume_role.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: sts_assume_role
+short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
+description:
+ - Assume a role using AWS Security Token Service and obtain temporary credentials.
+version_added: "2.0"
+author:
+ - Boris Ekelchik (@bekelchik)
+ - Marek Piatek (@piontas)
+options:
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the role that the caller is
+ assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs).
+ required: true
+ type: str
+ role_session_name:
+ description:
+ - Name of the role's session - will be used by CloudTrail.
+ required: true
+ type: str
+ policy:
+ description:
+ - Supplemental policy to use in addition to assumed role's policies.
+ type: str
+ duration_seconds:
+ description:
+ - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours).
+ - The max depends on the IAM role's sessions duration setting.
+ - By default, the value is set to 3600 seconds.
+ type: int
+ external_id:
+ description:
+ - A unique identifier that is used by third parties to assume a role in their customers' accounts.
+ type: str
+ mfa_serial_number:
+ description:
+ - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
+ type: str
+ mfa_token:
+ description:
+ - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
+ type: str
+notes:
+ - In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token.
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements:
+ - boto3
+ - botocore
+ - python >= 2.6
+'''
+
+RETURN = '''
+sts_creds:
+ description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token
+ returned: always
+ type: dict
+ sample:
+ access_key: XXXXXXXXXXXXXXXXXXXX
+ expiration: 2017-11-11T11:11:11+00:00
+ secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+sts_user:
+ description: The Amazon Resource Name (ARN) and the assumed role ID
+ returned: always
+ type: dict
+ sample:
+ assumed_role_id: ARO123EXAMPLE123:Bob
+ arn: arn:aws:sts::123456789012:assumed-role/demo/Bob
+changed:
+ description: True if obtaining the credentials succeeds
+ type: bool
+ returned: always
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
+- sts_assume_role:
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+ register: assumed_role
+
+# Use the assumed role above to tag an instance in account 123456789012
+- ec2_tag:
+ aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ assumed_role.sts_creds.session_token }}"
+ resource: i-xyzxyz01
+ state: present
+ tags:
+ MyNewTag: value
+
+'''
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, ParamValidationError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
def _parse_response(response):
    """Split an AssumeRole response into (credentials dict, snake-cased user dict)."""
    credentials = response.get('Credentials', {})
    assumed_user = response.get('AssumedRoleUser', {})

    sts_cred = {
        'access_key': credentials.get('AccessKeyId'),
        'secret_key': credentials.get('SecretAccessKey'),
        'session_token': credentials.get('SessionToken'),
        'expiration': credentials.get('Expiration'),
    }
    return sts_cred, camel_dict_to_snake_dict(assumed_user)
+
+
def assume_role_policy(connection, module):
    """Call sts:AssumeRole with the module's parameters and exit the module
    with the temporary credentials and assumed-role user."""
    raw_params = {
        'RoleArn': module.params.get('role_arn'),
        'RoleSessionName': module.params.get('role_session_name'),
        'Policy': module.params.get('policy'),
        'DurationSeconds': module.params.get('duration_seconds'),
        'ExternalId': module.params.get('external_id'),
        'SerialNumber': module.params.get('mfa_serial_number'),
        'TokenCode': module.params.get('mfa_token')
    }
    # Drop optional parameters the user did not supply.
    kwargs = {key: value for key, value in raw_params.items() if value is not None}

    changed = False
    try:
        response = connection.assume_role(**kwargs)
        changed = True
    except (ClientError, ParamValidationError) as e:
        module.fail_json_aws(e)

    sts_cred, sts_user = _parse_response(response)
    module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user)
+
+
def main():
    """Module entry point: declare the argument spec, create the STS client
    and delegate to assume_role_policy."""
    argument_spec = dict(
        role_arn=dict(required=True),
        role_session_name=dict(required=True),
        duration_seconds=dict(required=False, default=None, type='int'),
        external_id=dict(required=False, default=None),
        policy=dict(required=False, default=None),
        mfa_serial_number=dict(required=False, default=None),
        mfa_token=dict(required=False, default=None)
    )

    module = AnsibleAWSModule(argument_spec=argument_spec)

    # AnsibleAWSModule.client resolves region and credentials from the
    # standard aws/ec2 module arguments.
    connection = module.client('sts')

    assume_role_policy(connection, module)


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/tower_credential_type.py b/test/support/integration/plugins/modules/tower_credential_type.py
new file mode 100644
index 0000000000..831a35ad3f
--- /dev/null
+++ b/test/support/integration/plugins/modules/tower_credential_type.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+#
+# (c) 2018, Adrien Fleury <fleu42@gmail.com>
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'metadata_version': '1.1'}
+
+
+DOCUMENTATION = '''
+---
+module: tower_credential_type
+author: "Adrien Fleury (@fleu42)"
+version_added: "2.7"
+short_description: Create, update, or destroy custom Ansible Tower credential type.
+description:
+ - Create, update, or destroy Ansible Tower credential type. See
+ U(https://www.ansible.com/tower) for an overview.
+options:
+ name:
+ description:
+ - The name of the credential type.
+ required: True
+ description:
+ description:
+ - The description of the credential type to give more detail about it.
+ required: False
+ kind:
+ description:
+ - >-
+ The type of credential type being added. Note that only cloud and
+ net can be used for creating credential types. Refer to the Ansible
+ Tower documentation for more information.
+ choices: [ 'ssh', 'vault', 'net', 'scm', 'cloud', 'insights' ]
+ required: False
+ inputs:
+ description:
+ - >-
+ Enter inputs using either JSON or YAML syntax. Refer to the Ansible
+ Tower documentation for example syntax.
+ required: False
+ injectors:
+ description:
+ - >-
+ Enter injectors using either JSON or YAML syntax. Refer to the
+ Ansible Tower documentation for example syntax.
+ required: False
+ state:
+ description:
+ - Desired state of the resource.
+ required: False
+ default: "present"
+ choices: ["present", "absent"]
+ validate_certs:
+ description:
+ - Tower option to avoid certificates check.
+ required: False
+ type: bool
+ aliases: [ tower_verify_ssl ]
+extends_documentation_fragment: tower
+'''
+
+
+EXAMPLES = '''
+- tower_credential_type:
+ name: Nexus
+ description: Credentials type for Nexus
+ kind: cloud
+ inputs: "{{ lookup('file', 'tower_credential_inputs_nexus.json') }}"
+ injectors: {'extra_vars': {'nexus_credential': 'test' }}
+ state: present
+ validate_certs: false
+
+- tower_credential_type:
+ name: Nexus
+ state: absent
+'''
+
+
+RETURN = ''' # '''
+
+
+from ansible.module_utils.ansible_tower import (
+ TowerModule,
+ tower_auth_config,
+ tower_check_mode
+)
+
+try:
+ import tower_cli
+ import tower_cli.exceptions as exc
+ from tower_cli.conf import settings
+except ImportError:
+ pass
+
+
# Valid values for the module's `kind` option, mapped to the human-readable
# labels Tower uses for each credential-type kind.
KIND_CHOICES = {
    'ssh': 'Machine',
    'vault': 'Ansible Vault',
    'net': 'Network',
    'scm': 'Source Control',
    'cloud': 'Lots of others',
    'insights': 'Insights'
}
+
+
def main():
    """Module entry point: ensure the named Tower credential type is present
    (created or updated) or absent."""
    argument_spec = dict(
        name=dict(required=True),
        description=dict(required=False),
        # Fix: materialize the keys as a list — a py3 dict_keys view is not
        # a list, and argument-spec choices are expected to be a list.
        kind=dict(required=False, choices=list(KIND_CHOICES.keys())),
        inputs=dict(type='dict', required=False),
        injectors=dict(type='dict', required=False),
        state=dict(choices=['present', 'absent'], default='present'),
    )

    module = TowerModule(
        argument_spec=argument_spec,
        supports_check_mode=False
    )

    name = module.params.get('name')
    kind = module.params.get('kind')
    state = module.params.get('state')

    json_output = {'credential_type': name, 'state': state}

    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        credential_type_res = tower_cli.get_resource('credential_type')

        params = {}
        params['name'] = name
        params['kind'] = kind
        params['managed_by_tower'] = False

        # Only forward the optional fields the user actually supplied.
        if module.params.get('description'):
            params['description'] = module.params.get('description')

        if module.params.get('inputs'):
            params['inputs'] = module.params.get('inputs')

        if module.params.get('injectors'):
            params['injectors'] = module.params.get('injectors')

        try:
            if state == 'present':
                # modify() with create_on_missing behaves as create-or-update.
                params['create_on_missing'] = True
                result = credential_type_res.modify(**params)
                json_output['id'] = result['id']
            elif state == 'absent':
                params['fail_on_missing'] = False
                result = credential_type_res.delete(**params)

        except (exc.ConnectionError, exc.BadRequest, exc.AuthError) as excinfo:
            module.fail_json(
                msg='Failed to update credential type: {0}'.format(excinfo),
                changed=False
            )

        json_output['changed'] = result['changed']
        module.exit_json(**json_output)


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/tower_receive.py b/test/support/integration/plugins/modules/tower_receive.py
new file mode 100644
index 0000000000..57fdd16df4
--- /dev/null
+++ b/test/support/integration/plugins/modules/tower_receive.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2017, John Westcott IV <john.westcott.iv@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: tower_receive
+author: "John Westcott IV (@john-westcott-iv)"
+version_added: "2.8"
+short_description: Receive assets from Ansible Tower.
+description:
+ - Receive assets from Ansible Tower. See
+ U(https://www.ansible.com/tower) for an overview.
+options:
+ all:
+ description:
+ - Export all assets
+ type: bool
+ default: 'False'
+ organization:
+ description:
+ - List of organization names to export
+ default: []
+ user:
+ description:
+ - List of user names to export
+ default: []
+ team:
+ description:
+ - List of team names to export
+ default: []
+ credential_type:
+ description:
+ - List of credential type names to export
+ default: []
+ credential:
+ description:
+ - List of credential names to export
+ default: []
+ notification_template:
+ description:
+ - List of notification template names to export
+ default: []
+ inventory_script:
+ description:
+ - List of inventory script names to export
+ default: []
+ inventory:
+ description:
+ - List of inventory names to export
+ default: []
+ project:
+ description:
+ - List of project names to export
+ default: []
+ job_template:
+ description:
+ - List of job template names to export
+ default: []
+ workflow:
+ description:
+ - List of workflow names to export
+ default: []
+
+requirements:
+ - "ansible-tower-cli >= 3.3.0"
+
+notes:
+ - Specifying a name of "all" for any asset type will export all items of that asset type.
+
+extends_documentation_fragment: tower
+'''
+
+EXAMPLES = '''
+- name: Export all tower assets
+ tower_receive:
+ all: True
+ tower_config_file: "~/tower_cli.cfg"
+
+- name: Export all inventories
+ tower_receive:
+ inventory:
+ - all
+
+- name: Export a job template named "My Template" and all Credentials
+ tower_receive:
+ job_template:
+ - "My Template"
+ credential:
+ - all
+'''
+
+RETURN = '''
+assets:
+ description: The exported assets
+ returned: success
+ type: dict
+ sample: [ {}, {} ]
+'''
+
+from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, HAS_TOWER_CLI
+
+try:
+ from tower_cli.cli.transfer.receive import Receiver
+ from tower_cli.cli.transfer.common import SEND_ORDER
+ from tower_cli.utils.exceptions import TowerCLIError
+
+ from tower_cli.conf import settings
+ TOWER_CLI_HAS_EXPORT = True
+except ImportError:
+ TOWER_CLI_HAS_EXPORT = False
+
+
def main():
    """Receive (export) the requested assets from Ansible Tower and exit the
    module with them in the `assets` result key."""
    argument_spec = dict(
        all=dict(type='bool', default=False),
        credential=dict(type='list', default=[]),
        credential_type=dict(type='list', default=[]),
        inventory=dict(type='list', default=[]),
        inventory_script=dict(type='list', default=[]),
        job_template=dict(type='list', default=[]),
        notification_template=dict(type='list', default=[]),
        organization=dict(type='list', default=[]),
        project=dict(type='list', default=[]),
        team=dict(type='list', default=[]),
        user=dict(type='list', default=[]),
        workflow=dict(type='list', default=[]),
    )

    module = TowerModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')

    if not TOWER_CLI_HAS_EXPORT:
        module.fail_json(msg='ansible-tower-cli version does not support export')

    export_all = module.params.get('all')
    # SEND_ORDER enumerates every transferable asset type known to tower-cli.
    assets_to_export = {asset_type: module.params.get(asset_type) for asset_type in SEND_ORDER}

    result = dict(
        assets=None,
        changed=False,
        message='',
    )

    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        try:
            receiver = Receiver()
            result['assets'] = receiver.export_assets(all=export_all, asset_input=assets_to_export)
            module.exit_json(**result)
        except TowerCLIError as e:
            result['message'] = e.message
            module.fail_json(msg='Receive Failed', **result)


if __name__ == '__main__':
    main()
diff --git a/test/support/integration/plugins/modules/vmware_guest.py b/test/support/integration/plugins/modules/vmware_guest.py
new file mode 100644
index 0000000000..df9f695be5
--- /dev/null
+++ b/test/support/integration/plugins/modules/vmware_guest.py
@@ -0,0 +1,2914 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This module is also sponsored by E.T.A.I. (www.etai.fr)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest
+short_description: Manages virtual machines in vCenter
+description: >
+ This module can be used to create new virtual machines from templates or other virtual machines,
+ manage power state of virtual machine such as power on, power off, suspend, shutdown, reboot, restart etc.,
+ modify various virtual machine components like network, disk, customization etc.,
+ rename a virtual machine and remove a virtual machine with associated components.
+version_added: '2.2'
+author:
+- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+- Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
+- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
+requirements:
+- python >= 2.6
+- PyVmomi
+notes:
+ - Please make sure that the user used for M(vmware_guest) has the correct level of privileges.
+ - For example, following is the list of minimum privileges required by users to create virtual machines.
+ - " DataStore > Allocate Space"
+ - " Virtual Machine > Configuration > Add New Disk"
+ - " Virtual Machine > Configuration > Add or Remove Device"
+ - " Virtual Machine > Inventory > Create New"
+ - " Network > Assign Network"
+ - " Resource > Assign Virtual Machine to Resource Pool"
+ - "Module may require additional privileges as well, which may be required for gathering facts - e.g. ESXi configurations."
+ - Tested on vSphere 5.5, 6.0, 6.5 and 6.7.
+ - Use SCSI disks instead of IDE when you want to expand online disks by specifying a SCSI controller.
+ - Uses SysPrep for Windows VM (depends on 'guest_id' parameter matching 'win') with PyVmomi.
+ - In order to change the VM's parameters (e.g. number of CPUs), the VM must be powered off unless the hot-add
+ support is enabled and the C(state=present) must be used to apply the changes.
+ - "For additional information please visit Ansible VMware community wiki - U(https://github.com/ansible/community/wiki/VMware)."
+options:
+ state:
+ description:
+ - Specify the state the virtual machine should be in.
+ - 'If C(state) is set to C(present) and virtual machine exists, ensure the virtual machine
+ configurations conforms to task arguments.'
+ - 'If C(state) is set to C(absent) and virtual machine exists, then the specified virtual machine
+ is removed with its associated components.'
+ - 'If C(state) is set to one of the following C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
+ and virtual machine does not exist, then virtual machine is deployed with given parameters.'
+ - 'If C(state) is set to C(poweredon) and virtual machine exists with powerstate other than powered on,
+ then the specified virtual machine is powered on.'
+ - 'If C(state) is set to C(poweredoff) and virtual machine exists with powerstate other than powered off,
+ then the specified virtual machine is powered off.'
+ - 'If C(state) is set to C(restarted) and virtual machine exists, then the virtual machine is restarted.'
+ - 'If C(state) is set to C(suspended) and virtual machine exists, then the virtual machine is set to suspended mode.'
+ - 'If C(state) is set to C(shutdownguest) and virtual machine exists, then the virtual machine is shutdown.'
+ - 'If C(state) is set to C(rebootguest) and virtual machine exists, then the virtual machine is rebooted.'
+ default: present
+ choices: [ present, absent, poweredon, poweredoff, restarted, suspended, shutdownguest, rebootguest ]
+ name:
+ description:
+ - Name of the virtual machine to work with.
+ - Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
+ - 'If multiple virtual machines with the same name exist, then C(folder) is a required parameter to
+ identify uniqueness of the virtual machine.'
+ - This parameter is required, if C(state) is set to C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
+ and the virtual machine does not exist.
+ - This parameter is case sensitive.
+ required: yes
+ name_match:
+ description:
+ - If multiple virtual machines match the name, use the first or last one found.
+ default: 'first'
+ choices: [ first, last ]
+ uuid:
+ description:
+ - UUID of the virtual machine to manage if known, this is VMware's unique identifier.
+ - This is required if C(name) is not supplied.
+ - If the virtual machine does not exist, then this parameter is ignored.
+ - Please note that a supplied UUID will be ignored on virtual machine creation, as VMware creates the UUID internally.
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: no
+ type: bool
+ version_added: '2.8'
+ template:
+ description:
+ - Template or existing virtual machine used to create new virtual machine.
+ - If this value is not set, virtual machine is created without using a template.
+ - If the virtual machine already exists, this parameter will be ignored.
+ - This parameter is case sensitive.
+ - You can also specify template or VM UUID for identifying source. version_added 2.8. Use C(hw_product_uuid) from M(vmware_guest_facts) as UUID value.
+ - From version 2.8 onwards, absolute path to virtual machine or template can be used.
+ aliases: [ 'template_src' ]
+ is_template:
+ description:
+ - Flag the instance as a template.
+ - This will mark the given virtual machine as template.
+ default: 'no'
+ type: bool
+ version_added: '2.3'
+ folder:
+ description:
+ - Destination folder, absolute path to find an existing guest or create the new guest.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter.
+ - This parameter is case sensitive.
+ - This parameter is required, while deploying new virtual machine. version_added 2.5.
+ - 'If multiple machines are found with same name, this parameter is used to identify
+ uniqueness of the virtual machine. version_added 2.5'
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ hardware:
+ description:
+ - Manage virtual machine's hardware attributes.
+ - All parameters case sensitive.
+ - 'Valid attributes are:'
+ - ' - C(hotadd_cpu) (boolean): Allow virtual CPUs to be added while the virtual machine is running.'
+ - ' - C(hotremove_cpu) (boolean): Allow virtual CPUs to be removed while the virtual machine is running.
+ version_added: 2.5'
+ - ' - C(hotadd_memory) (boolean): Allow memory to be added while the virtual machine is running.'
+ - ' - C(memory_mb) (integer): Amount of memory in MB.'
+ - ' - C(nested_virt) (bool): Enable nested virtualization. version_added: 2.5'
+ - ' - C(num_cpus) (integer): Number of CPUs.'
+ - ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket.'
+ - " C(num_cpus) must be a multiple of C(num_cpu_cores_per_socket).
+ For example to create a VM with 2 sockets of 4 cores, specify C(num_cpus): 8 and C(num_cpu_cores_per_socket): 4"
+ - ' - C(scsi) (string): Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual) (default).'
+ - " - C(memory_reservation_lock) (boolean): If set true, memory resource reservation for the virtual machine
+ will always be equal to the virtual machine's memory size. version_added: 2.5"
+ - ' - C(max_connections) (integer): Maximum number of active remote display connections for the virtual machines.
+ version_added: 2.5.'
+ - ' - C(mem_limit) (integer): The memory utilization of a virtual machine will not exceed this limit. Unit is MB.
+ version_added: 2.5'
+ - ' - C(mem_reservation) (integer): The amount of memory resource that is guaranteed available to the virtual
+ machine. Unit is MB. C(memory_reservation) is alias to this. version_added: 2.5'
+ - ' - C(cpu_limit) (integer): The CPU utilization of a virtual machine will not exceed this limit. Unit is MHz.
+ version_added: 2.5'
+ - ' - C(cpu_reservation) (integer): The amount of CPU resource that is guaranteed available to the virtual machine.
+ Unit is MHz. version_added: 2.5'
+ - ' - C(version) (integer): The Virtual machine hardware versions. Default is 10 (ESXi 5.5 and onwards).
+ If value specified as C(latest), version is set to the most current virtual hardware supported on the host.
+ C(latest) is added in version 2.10.
+ Please check VMware documentation for correct virtual machine hardware version.
+ Incorrect hardware version may lead to failure in deployment. If hardware version is already equal to the given
+ version then no action is taken. version_added: 2.6'
+ - ' - C(boot_firmware) (string): Choose which firmware should be used to boot the virtual machine.
+ Allowed values are "bios" and "efi". version_added: 2.7'
+ - ' - C(virt_based_security) (bool): Enable Virtualization Based Security feature for Windows 10.
+ (Support from Virtual machine hardware version 14, Guest OS Windows 10 64 bit, Windows Server 2016)'
+
+ guest_id:
+ description:
+ - Set the guest ID.
+ - This parameter is case sensitive.
+ - 'Examples:'
+ - " virtual machine with RHEL7 64 bit, will be 'rhel7_64Guest'"
+ - " virtual machine with CentOS 64 bit, will be 'centos64Guest'"
+ - " virtual machine with Ubuntu 64 bit, will be 'ubuntu64Guest'"
+ - This field is required when creating a virtual machine, not required when creating from the template.
+ - >
+ Valid values are referenced here:
+ U(https://code.vmware.com/apis/358/doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html)
+ version_added: '2.3'
+ disk:
+ description:
+ - A list of disks to add.
+ - This parameter is case sensitive.
+ - Shrinking disks is not supported.
+ - Removing existing disks of the virtual machine is not supported.
+ - 'Valid attributes are:'
+ - ' - C(size_[tb,gb,mb,kb]) (integer): Disk storage size in specified unit.'
+ - ' - C(type) (string): Valid values are:'
+ - ' - C(thin) thin disk'
+ - ' - C(eagerzeroedthick) eagerzeroedthick disk, added in version 2.5'
+ - ' Default: C(None) thick disk, no eagerzero.'
+ - ' - C(datastore) (string): The name of datastore which will be used for the disk. If C(autoselect_datastore) is set to True,
+ then will select the less used datastore whose name contains this "disk.datastore" string.'
+ - ' - C(filename) (string): Existing disk image to be used. Filename must already exist on the datastore.'
+ - ' Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in version 2.8.'
+ - ' - C(autoselect_datastore) (bool): select the less used datastore. "disk.datastore" and "disk.autoselect_datastore"
+ will not be used if C(datastore) is specified outside this C(disk) configuration.'
+ - ' - C(disk_mode) (string): Type of disk mode. Added in version 2.6'
+ - ' - Available options are :'
+ - ' - C(persistent): Changes are immediately and permanently written to the virtual disk. This is default.'
+ - ' - C(independent_persistent): Same as persistent, but not affected by snapshots.'
+ - ' - C(independent_nonpersistent): Changes to virtual disk are made to a redo log and discarded at power off, but not affected by snapshots.'
+ cdrom:
+ description:
+ - A CD-ROM configuration for the virtual machine.
+ - Or a list of CD-ROMs configuration for the virtual machine. Added in version 2.9.
+ - 'Parameters C(controller_type), C(controller_number), C(unit_number), C(state) are added for a list of CD-ROMs
+ configuration support.'
+ - 'Valid attributes are:'
+ - ' - C(type) (string): The type of CD-ROM, valid options are C(none), C(client) or C(iso). With C(none) the CD-ROM
+ will be disconnected but present.'
+ - ' - C(iso_path) (string): The datastore path to the ISO file to use, in the form of C([datastore1] path/to/file.iso).
+ Required if type is set C(iso).'
+ - ' - C(controller_type) (string): Default value is C(ide). Only C(ide) controller type for CD-ROM is supported for
+ now, will add SATA controller type in the future.'
+ - ' - C(controller_number) (int): For C(ide) controller, valid value is 0 or 1.'
+ - ' - C(unit_number) (int): For CD-ROM device attach to C(ide) controller, valid value is 0 or 1.
+ C(controller_number) and C(unit_number) are mandatory attributes.'
+ - ' - C(state) (string): Valid value is C(present) or C(absent). Default is C(present). If set to C(absent), then
+ the specified CD-ROM will be removed. For C(ide) controller, hot-add or hot-remove CD-ROM is not supported.'
+ version_added: '2.5'
+ resource_pool:
+ description:
+ - Use the given resource pool for virtual machine operation.
+ - This parameter is case sensitive.
+ - Resource pool should be child of the selected host parent.
+ version_added: '2.3'
+ wait_for_ip_address:
+ description:
+ - Wait until vCenter detects an IP address for the virtual machine.
+ - This requires vmware-tools (vmtoolsd) to properly work after creation.
+ - "vmware-tools needs to be installed on the given virtual machine in order to work with this parameter."
+ default: 'no'
+ type: bool
+ wait_for_ip_address_timeout:
+ description:
+ - Define a timeout (in seconds) for the wait_for_ip_address parameter.
+ default: '300'
+ type: int
+ version_added: '2.10'
+ wait_for_customization_timeout:
+ description:
+ - Define a timeout (in seconds) for the wait_for_customization parameter.
+ - Be careful when setting this value since the time guest customization took may differ among guest OSes.
+ default: '3600'
+ type: int
+ version_added: '2.10'
+ wait_for_customization:
+ description:
+ - Wait until vCenter detects all guest customizations as successfully completed.
+ - When enabled, the VM will automatically be powered on.
+ - "If vCenter does not detect guest customization start or succeed, failed events after time
+ C(wait_for_customization_timeout) parameter specified, warning message will be printed and task result is fail."
+ default: 'no'
+ type: bool
+ version_added: '2.8'
+ state_change_timeout:
+ description:
+ - If the C(state) is set to C(shutdownguest), by default the module will return immediately after sending the shutdown signal.
+ - If this argument is set to a positive integer, the module will instead wait for the virtual machine to reach the poweredoff state.
+ - The value sets a timeout in seconds for the module to wait for the state change.
+ default: 0
+ version_added: '2.6'
+ snapshot_src:
+ description:
+ - Name of the existing snapshot to use to create a clone of a virtual machine.
+ - This parameter is case sensitive.
+ - While creating linked clone using C(linked_clone) parameter, this parameter is required.
+ version_added: '2.4'
+ linked_clone:
+ description:
+ - Whether to create a linked clone from the snapshot specified.
+ - If specified, then C(snapshot_src) is required parameter.
+ default: 'no'
+ type: bool
+ version_added: '2.4'
+ force:
+ description:
+ - Ignore warnings and complete the actions.
+ - This parameter is useful while removing a virtual machine which is in powered on state.
+ - 'This module reflects the VMware vCenter API and UI workflow, as such, in some cases the `force` flag will
+ be mandatory to perform the action to ensure you are certain the action has to be taken, no matter what the consequence.
+ This is specifically the case for removing a powered on virtual machine when C(state) is set to C(absent).'
+ default: 'no'
+ type: bool
+ delete_from_inventory:
+ description:
+ - Whether to delete Virtual machine from inventory or delete from disk.
+ default: False
+ type: bool
+ version_added: '2.10'
+ datacenter:
+ description:
+ - Destination datacenter for the deploy operation.
+ - This parameter is case sensitive.
+ default: ha-datacenter
+ cluster:
+ description:
+ - The cluster name where the virtual machine will run.
+ - This is a required parameter, if C(esxi_hostname) is not set.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ - This parameter is case sensitive.
+ version_added: '2.3'
+ esxi_hostname:
+ description:
+ - The ESXi hostname where the virtual machine will run.
+ - This is a required parameter, if C(cluster) is not set.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ - This parameter is case sensitive.
+ annotation:
+ description:
+ - A note or annotation to include in the virtual machine.
+ version_added: '2.3'
+ customvalues:
+ description:
+ - Define a list of custom values to set on virtual machine.
+ - A custom value object takes two fields C(key) and C(value).
+ - Incorrect key and values will be ignored.
+ version_added: '2.3'
+ networks:
+ description:
+ - A list of networks (in the order of the NICs).
+ - Removing NICs is not allowed, while reconfiguring the virtual machine.
+ - All parameters and VMware object names are case sensitive.
+ - 'One of the below parameters is required per entry:'
+ - ' - C(name) (string): Name of the portgroup or distributed virtual portgroup for this interface.
+ When specifying distributed virtual portgroup make sure given C(esxi_hostname) or C(cluster) is associated with it.'
+ - ' - C(vlan) (integer): VLAN number for this interface.'
+ - 'Optional parameters per entry (used for virtual hardware):'
+ - ' - C(device_type) (string): Virtual network device (one of C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov)).'
+ - ' - C(mac) (string): Customize MAC address.'
+ - ' - C(dvswitch_name) (string): Name of the distributed vSwitch.
+ This value is required if multiple distributed portgroups exists with the same name. version_added 2.7'
+ - ' - C(start_connected) (bool): Indicates that virtual network adapter starts with associated virtual machine powers on. version_added: 2.5'
+ - 'Optional parameters per entry (used for OS customization):'
+ - ' - C(type) (string): Type of IP assignment (either C(dhcp) or C(static)). C(dhcp) is default.'
+ - ' - C(ip) (string): Static IP address (implies C(type: static)).'
+ - ' - C(netmask) (string): Static netmask required for C(ip).'
+ - ' - C(gateway) (string): Static gateway.'
+ - ' - C(dns_servers) (string): DNS servers for this network interface (Windows).'
+ - ' - C(domain) (string): Domain name for this network interface (Windows).'
+ - ' - C(wake_on_lan) (bool): Indicates if wake-on-LAN is enabled on this virtual network adapter. version_added: 2.5'
+ - ' - C(allow_guest_control) (bool): Enables guest control over whether the connectable device is connected. version_added: 2.5'
+ version_added: '2.3'
+ customization:
+ description:
+ - Parameters for OS customization when cloning from the template or the virtual machine, or apply to the existing virtual machine directly.
+ - Not all operating systems are supported for customization with respective vCenter version,
+ please check VMware documentation for respective OS customization.
+ - For supported customization operating system matrix, (see U(http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf))
+ - All parameters and VMware object names are case sensitive.
+ - Linux based OSes requires Perl package to be installed for OS customizations.
+ - 'Common parameters (Linux/Windows):'
+ - ' - C(existing_vm) (bool): If set to C(True), do OS customization on the specified virtual machine directly.
+ If set to C(False) or not specified, do OS customization when cloning from the template or the virtual machine. version_added: 2.8'
+ - ' - C(dns_servers) (list): List of DNS servers to configure.'
+ - ' - C(dns_suffix) (list): List of domain suffixes, also known as DNS search path (default: C(domain) parameter).'
+ - ' - C(domain) (string): DNS domain name to use.'
+ - ' - C(hostname) (string): Computer hostname (default: shorted C(name) parameter). Allowed characters are alphanumeric (uppercase and lowercase)
+ and minus, rest of the characters are dropped as per RFC 952.'
+ - 'Parameters related to Linux customization:'
+ - ' - C(timezone) (string): Timezone (See List of supported time zones for different vSphere versions in Linux/Unix
+ systems (2145518) U(https://kb.vmware.com/s/article/2145518)). version_added: 2.9'
+ - ' - C(hwclockUTC) (bool): Specifies whether the hardware clock is in UTC or local time.
+ True when the hardware clock is in UTC, False when the hardware clock is in local time. version_added: 2.9'
+ - 'Parameters related to Windows customization:'
+ - ' - C(autologon) (bool): Auto logon after virtual machine customization (default: False).'
+ - ' - C(autologoncount) (int): Number of autologon after reboot (default: 1).'
+ - ' - C(domainadmin) (string): User used to join in AD domain (mandatory with C(joindomain)).'
+ - ' - C(domainadminpassword) (string): Password used to join in AD domain (mandatory with C(joindomain)).'
+ - ' - C(fullname) (string): Server owner name (default: Administrator).'
+ - ' - C(joindomain) (string): AD domain to join (Not compatible with C(joinworkgroup)).'
+ - ' - C(joinworkgroup) (string): Workgroup to join (Not compatible with C(joindomain), default: WORKGROUP).'
+ - ' - C(orgname) (string): Organisation name (default: ACME).'
+ - ' - C(password) (string): Local administrator password.'
+ - ' - C(productid) (string): Product ID.'
+ - ' - C(runonce) (list): List of commands to run at first user logon.'
+ - ' - C(timezone) (int): Timezone (See U(https://msdn.microsoft.com/en-us/library/ms912391.aspx)).'
+ version_added: '2.3'
+ vapp_properties:
+ description:
+ - A list of vApp properties.
+ - 'For full list of attributes and types refer to:'
+ - 'U(https://vdc-download.vmware.com/vmwb-repository/dcr-public/6b586ed2-655c-49d9-9029-bc416323cb22/
+ fa0b429a-a695-4c11-b7d2-2cbc284049dc/doc/vim.vApp.PropertyInfo.html)'
+ - 'Basic attributes are:'
+ - ' - C(id) (string): Property id - required.'
+ - ' - C(value) (string): Property value.'
+ - ' - C(type) (string): Value type, string type by default.'
+ - ' - C(operation): C(remove): This attribute is required only when removing properties.'
+ version_added: '2.6'
+ customization_spec:
+ description:
+ - Unique name identifying the requested customization specification.
+ - This parameter is case sensitive.
+ - If set, then overrides C(customization) parameter values.
+ version_added: '2.6'
+ datastore:
+ description:
+ - Specify datastore or datastore cluster to provision virtual machine.
+ - 'This parameter takes precedence over "disk.datastore" parameter.'
+ - 'This parameter can be used to override datastore or datastore cluster setting of the virtual machine when deployed
+ from the template.'
+ - Please see example for more usage.
+ version_added: '2.7'
+ convert:
+ description:
+ - Specify convert disk type while cloning template or virtual machine.
+ choices: [ thin, thick, eagerzeroedthick ]
+ version_added: '2.8'
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Create a virtual machine on given ESXi hostname
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ folder: /DC1/vm/
+ name: test_vm_0001
+ state: poweredon
+ guest_id: centos64Guest
+ # This is hostname of particular ESXi server on which user wants VM to be deployed
+ esxi_hostname: "{{ esxi_hostname }}"
+ disk:
+ - size_gb: 10
+ type: thin
+ datastore: datastore1
+ hardware:
+ memory_mb: 512
+ num_cpus: 4
+ scsi: paravirtual
+ networks:
+ - name: VM Network
+ mac: aa:bb:dd:aa:00:14
+ ip: 10.10.10.100
+ netmask: 255.255.255.0
+ device_type: vmxnet3
+ wait_for_ip_address: yes
+ wait_for_ip_address_timeout: 600
+ delegate_to: localhost
+ register: deploy_vm
+
+- name: Create a virtual machine from a template
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ folder: /testvms
+ name: testvm_2
+ state: poweredon
+ template: template_el7
+ disk:
+ - size_gb: 10
+ type: thin
+ datastore: g73_datastore
+ # Add another disk from an existing VMDK
+ - filename: "[datastore1] testvms/testvm_2_1/testvm_2_1.vmdk"
+ hardware:
+ memory_mb: 512
+ num_cpus: 6
+ num_cpu_cores_per_socket: 3
+ scsi: paravirtual
+ memory_reservation_lock: True
+ mem_limit: 8096
+ mem_reservation: 4096
+ cpu_limit: 8096
+ cpu_reservation: 4096
+ max_connections: 5
+ hotadd_cpu: True
+ hotremove_cpu: True
+ hotadd_memory: False
+ version: 12 # Hardware version of virtual machine
+ boot_firmware: "efi"
+ cdrom:
+ type: iso
+ iso_path: "[datastore1] livecd.iso"
+ networks:
+ - name: VM Network
+ mac: aa:bb:dd:aa:00:14
+ wait_for_ip_address: yes
+ delegate_to: localhost
+ register: deploy
+
+- name: Clone a virtual machine from Windows template and customize
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ datacenter: datacenter1
+ cluster: cluster
+ name: testvm-2
+ template: template_windows
+ networks:
+ - name: VM Network
+ ip: 192.168.1.100
+ netmask: 255.255.255.0
+ gateway: 192.168.1.1
+ mac: aa:bb:dd:aa:00:14
+ domain: my_domain
+ dns_servers:
+ - 192.168.1.1
+ - 192.168.1.2
+ - vlan: 1234
+ type: dhcp
+ customization:
+ autologon: yes
+ dns_servers:
+ - 192.168.1.1
+ - 192.168.1.2
+ domain: my_domain
+ password: new_vm_password
+ runonce:
+ - powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
+ delegate_to: localhost
+
+- name: Clone a virtual machine from Linux template and customize
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ datacenter: "{{ datacenter }}"
+ state: present
+ folder: /DC1/vm
+ template: "{{ template }}"
+ name: "{{ vm_name }}"
+ cluster: DC1_C1
+ networks:
+ - name: VM Network
+ ip: 192.168.10.11
+ netmask: 255.255.255.0
+ wait_for_ip_address: True
+ customization:
+ domain: "{{ guest_domain }}"
+ dns_servers:
+ - 8.9.9.9
+ - 7.8.8.9
+ dns_suffix:
+ - example.com
+ - example2.com
+ delegate_to: localhost
+
+- name: Rename a virtual machine (requires the virtual machine's uuid)
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ uuid: "{{ vm_uuid }}"
+ name: new_name
+ state: present
+ delegate_to: localhost
+
+- name: Remove a virtual machine by uuid
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ uuid: "{{ vm_uuid }}"
+ state: absent
+ delegate_to: localhost
+
+- name: Remove a virtual machine from inventory
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ name: vm_name
+ delete_from_inventory: True
+ state: absent
+ delegate_to: localhost
+
+- name: Manipulate vApp properties
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ name: vm_name
+ state: present
+ vapp_properties:
+ - id: remoteIP
+ category: Backup
+ label: Backup server IP
+ type: string
+ value: 10.10.10.1
+ - id: old_property
+ operation: remove
+ delegate_to: localhost
+
+- name: Set powerstate of a virtual machine to poweroff by using UUID
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ uuid: "{{ vm_uuid }}"
+ state: poweredoff
+ delegate_to: localhost
+
+- name: Deploy a virtual machine in a datastore different from the datastore of the template
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "{{ vm_name }}"
+ state: present
+ template: "{{ template_name }}"
+ # Here datastore can be different which holds template
+ datastore: "{{ virtual_machine_datastore }}"
+ hardware:
+ memory_mb: 512
+ num_cpus: 2
+ scsi: paravirtual
+ delegate_to: localhost
+
+- name: Create a diskless VM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ dc1 }}"
+ state: poweredoff
+ cluster: "{{ ccr1 }}"
+ name: diskless_vm
+ folder: /Asia-Datacenter1/vm
+ guest_id: centos64Guest
+ datastore: "{{ ds1 }}"
+ hardware:
+ memory_mb: 1024
+ num_cpus: 2
+ num_cpu_cores_per_socket: 1
+'''
+
+RETURN = r'''
+instance:
+ description: metadata about the new virtual machine
+ returned: always
+ type: dict
+ sample: None
+'''
+
+import re
+import time
+import string
+
+HAS_PYVMOMI = False
+try:
+ from pyVmomi import vim, vmodl, VmomiSupport
+ HAS_PYVMOMI = True
+except ImportError:
+ pass
+
+from random import randint
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.vmware import (find_obj, gather_vm_facts, get_all_objs,
+ compile_folder_path_for_object, serialize_spec,
+ vmware_argument_spec, set_vm_power_state, PyVmomi,
+ find_dvs_by_name, find_dvspg_by_name, wait_for_vm_ip,
+ wait_for_task, TaskError, quote_obj_name)
+
+
+def list_or_dict(value):
+ if isinstance(value, list) or isinstance(value, dict):
+ return value
+ else:
+ raise ValueError("'%s' is not valid, valid type is 'list' or 'dict'." % value)
+
+
+class PyVmomiDeviceHelper(object):
+ """ This class is a helper to create easily VMware Objects for PyVmomiHelper """
+
+ def __init__(self, module):
+ self.module = module
+ self.next_disk_unit_number = 0
+ self.scsi_device_type = {
+ 'lsilogic': vim.vm.device.VirtualLsiLogicController,
+ 'paravirtual': vim.vm.device.ParaVirtualSCSIController,
+ 'buslogic': vim.vm.device.VirtualBusLogicController,
+ 'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController,
+ }
+
+ def create_scsi_controller(self, scsi_type):
+ scsi_ctl = vim.vm.device.VirtualDeviceSpec()
+ scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ scsi_device = self.scsi_device_type.get(scsi_type, vim.vm.device.ParaVirtualSCSIController)
+ scsi_ctl.device = scsi_device()
+ scsi_ctl.device.busNumber = 0
+ # While creating a new SCSI controller, temporary key value
+ # should be unique negative integers
+ scsi_ctl.device.key = -randint(1000, 9999)
+ scsi_ctl.device.hotAddRemove = True
+ scsi_ctl.device.sharedBus = 'noSharing'
+ scsi_ctl.device.scsiCtlrUnitNumber = 7
+
+ return scsi_ctl
+
+ def is_scsi_controller(self, device):
+ return isinstance(device, tuple(self.scsi_device_type.values()))
+
+ @staticmethod
+ def create_ide_controller(bus_number=0):
+ ide_ctl = vim.vm.device.VirtualDeviceSpec()
+ ide_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ ide_ctl.device = vim.vm.device.VirtualIDEController()
+ ide_ctl.device.deviceInfo = vim.Description()
+ # While creating a new IDE controller, temporary key value
+ # should be unique negative integers
+ ide_ctl.device.key = -randint(200, 299)
+ ide_ctl.device.busNumber = bus_number
+
+ return ide_ctl
+
+ @staticmethod
+ def create_cdrom(ide_device, cdrom_type, iso_path=None, unit_number=0):
+ cdrom_spec = vim.vm.device.VirtualDeviceSpec()
+ cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ cdrom_spec.device = vim.vm.device.VirtualCdrom()
+ cdrom_spec.device.controllerKey = ide_device.key
+ cdrom_spec.device.key = -randint(3000, 3999)
+ cdrom_spec.device.unitNumber = unit_number
+ cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+ cdrom_spec.device.connectable.allowGuestControl = True
+ cdrom_spec.device.connectable.startConnected = (cdrom_type != "none")
+ if cdrom_type in ["none", "client"]:
+ cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
+ elif cdrom_type == "iso":
+ cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
+
+ return cdrom_spec
+
+ @staticmethod
+ def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path):
+ if cdrom_type == "none":
+ return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
+ cdrom_device.connectable.allowGuestControl and
+ not cdrom_device.connectable.startConnected and
+ (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or not cdrom_device.connectable.connected))
+ elif cdrom_type == "client":
+ return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
+ cdrom_device.connectable.allowGuestControl and
+ cdrom_device.connectable.startConnected and
+ (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
+ elif cdrom_type == "iso":
+ return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo) and
+ cdrom_device.backing.fileName == iso_path and
+ cdrom_device.connectable.allowGuestControl and
+ cdrom_device.connectable.startConnected and
+ (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
+
+ @staticmethod
+ def update_cdrom_config(vm_obj, cdrom_spec, cdrom_device, iso_path=None):
+ # Updating an existing CD-ROM
+ if cdrom_spec["type"] in ["client", "none"]:
+ cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
+ elif cdrom_spec["type"] == "iso" and iso_path is not None:
+ cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
+ cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+ cdrom_device.connectable.allowGuestControl = True
+ cdrom_device.connectable.startConnected = (cdrom_spec["type"] != "none")
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+ cdrom_device.connectable.connected = (cdrom_spec["type"] != "none")
+
+ def remove_cdrom(self, cdrom_device):
+ cdrom_spec = vim.vm.device.VirtualDeviceSpec()
+ cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
+ cdrom_spec.device = cdrom_device
+
+ return cdrom_spec
+
+ def create_scsi_disk(self, scsi_ctl, disk_index=None):
+ diskspec = vim.vm.device.VirtualDeviceSpec()
+ diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ diskspec.device = vim.vm.device.VirtualDisk()
+ diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+ diskspec.device.controllerKey = scsi_ctl.device.key
+
+ if self.next_disk_unit_number == 7:
+ raise AssertionError()
+ if disk_index == 7:
+ raise AssertionError()
+ """
+ Configure disk unit number.
+ """
+ if disk_index is not None:
+ diskspec.device.unitNumber = disk_index
+ self.next_disk_unit_number = disk_index + 1
+ else:
+ diskspec.device.unitNumber = self.next_disk_unit_number
+ self.next_disk_unit_number += 1
+
+ # unit number 7 is reserved to SCSI controller, increase next index
+ if self.next_disk_unit_number == 7:
+ self.next_disk_unit_number += 1
+
+ return diskspec
+
+ def get_device(self, device_type, name):
+ nic_dict = dict(pcnet32=vim.vm.device.VirtualPCNet32(),
+ vmxnet2=vim.vm.device.VirtualVmxnet2(),
+ vmxnet3=vim.vm.device.VirtualVmxnet3(),
+ e1000=vim.vm.device.VirtualE1000(),
+ e1000e=vim.vm.device.VirtualE1000e(),
+ sriov=vim.vm.device.VirtualSriovEthernetCard(),
+ )
+ if device_type in nic_dict:
+ return nic_dict[device_type]
+ else:
+ self.module.fail_json(msg='Invalid device_type "%s"'
+ ' for network "%s"' % (device_type, name))
+
+ def create_nic(self, device_type, device_label, device_infos):
+ nic = vim.vm.device.VirtualDeviceSpec()
+ nic.device = self.get_device(device_type, device_infos['name'])
+ nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True))
+ nic.device.deviceInfo = vim.Description()
+ nic.device.deviceInfo.label = device_label
+ nic.device.deviceInfo.summary = device_infos['name']
+ nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+ nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True))
+ nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True))
+ nic.device.connectable.connected = True
+ if 'mac' in device_infos and is_mac(device_infos['mac']):
+ nic.device.addressType = 'manual'
+ nic.device.macAddress = device_infos['mac']
+ else:
+ nic.device.addressType = 'generated'
+
+ return nic
+
+ def integer_value(self, input_value, name):
+ """
+ Function to return int value for given input, else return error
+ Args:
+ input_value: Input value to retrieve int value from
+ name: Name of the Input value (used to build error message)
+ Returns: (int) if integer value can be obtained, otherwise will send a error message.
+ """
+ if isinstance(input_value, int):
+ return input_value
+ elif isinstance(input_value, str) and input_value.isdigit():
+ return int(input_value)
+ else:
+ self.module.fail_json(msg='"%s" attribute should be an'
+ ' integer value.' % name)
+
+
class PyVmomiCache(object):
    """ This class caches references to objects which are requested multiples times but not modified """

    def __init__(self, content, dc_name=None):
        self.content = content
        self.dc_name = dc_name
        self.networks = {}
        self.clusters = {}
        self.esx_hosts = {}
        self.parent_datacenters = {}

    def find_obj(self, content, types, name, confine_to_datacenter=True):
        """ Wrapper around find_obj to set datacenter context """
        result = find_obj(content, types, name)
        if result and confine_to_datacenter:
            if to_text(self.get_parent_datacenter(result).name) != to_text(self.dc_name):
                # The global search hit an object in another datacenter;
                # retry the lookup confined to this cache's datacenter.
                result = None
                objects = self.get_all_objs(content, types, confine_to_datacenter=True)
                for obj in objects:
                    if name is None or to_text(obj.name) == to_text(name):
                        return obj
        return result

    def get_all_objs(self, content, types, confine_to_datacenter=True):
        """ Wrapper around get_all_objs to set datacenter context """
        objects = get_all_objs(content, types)
        if confine_to_datacenter:
            if hasattr(objects, 'items'):
                # resource pools come back as a dictionary
                # iterate over a snapshot of the keys so entries can be
                # deleted while filtering (values were never used here)
                for k in tuple(objects):
                    parent_dc = self.get_parent_datacenter(k)
                    if parent_dc.name != self.dc_name:
                        del objects[k]
            else:
                # everything else should be a list
                objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]

        return objects

    def get_network(self, network):
        # Network names may contain characters that need URL-style quoting.
        network = quote_obj_name(network)

        if network not in self.networks:
            self.networks[network] = self.find_obj(self.content, [vim.Network], network)

        return self.networks[network]

    def get_cluster(self, cluster):
        if cluster not in self.clusters:
            self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)

        return self.clusters[cluster]

    def get_esx_host(self, host):
        if host not in self.esx_hosts:
            self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)

        return self.esx_hosts[host]

    def get_parent_datacenter(self, obj):
        """ Walk the parent tree to find the objects datacenter """
        if isinstance(obj, vim.Datacenter):
            return obj
        if obj in self.parent_datacenters:
            return self.parent_datacenters[obj]
        # BUGFIX: remember the object we were asked about.  The loop below
        # rebinds ``obj`` while walking up the tree; the original code cached
        # the result under the *last walked* node instead of the queried one,
        # so the cache lookup above never matched on subsequent calls.
        queried_obj = obj
        datacenter = None
        while True:
            if not hasattr(obj, 'parent'):
                break
            obj = obj.parent
            if isinstance(obj, vim.Datacenter):
                datacenter = obj
                break
        self.parent_datacenters[queried_obj] = datacenter
        return datacenter
+
+
+class PyVmomiHelper(PyVmomi):
    def __init__(self, module):
        """Initialize helpers and per-run state for VM (re)configuration."""
        super(PyVmomiHelper, self).__init__(module)
        # Builds device specs (SCSI/IDE controllers, disks, NICs, CD-ROMs).
        self.device_helper = PyVmomiDeviceHelper(self.module)
        self.configspec = None
        self.relospec = None
        self.change_detected = False  # a change was detected and needs to be applied through reconfiguration
        self.change_applied = False  # a change was applied meaning at least one task succeeded
        self.customspec = None
        # Object lookups are scoped to the datacenter named in the params.
        self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])
+
+ def gather_facts(self, vm):
+ return gather_vm_facts(self.content, vm)
+
+ def remove_vm(self, vm, delete_from_inventory=False):
+ # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
+ if vm.summary.runtime.powerState.lower() == 'poweredon':
+ self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, "
+ "please use 'force' parameter to remove or poweroff VM "
+ "and try removing VM again." % vm.name)
+ # Delete VM from Inventory
+ if delete_from_inventory:
+ try:
+ vm.UnregisterVM()
+ except (vim.fault.TaskInProgress,
+ vmodl.RuntimeFault) as e:
+ return {'changed': self.change_applied, 'failed': True, 'msg': e.msg, 'op': 'UnregisterVM'}
+ self.change_applied = True
+ return {'changed': self.change_applied, 'failed': False}
+ # Delete VM from Disk
+ task = vm.Destroy()
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'destroy'}
+ else:
+ return {'changed': self.change_applied, 'failed': False}
+
+ def configure_guestid(self, vm_obj, vm_creation=False):
+ # guest_id is not required when using templates
+ if self.params['template']:
+ return
+
+ # guest_id is only mandatory on VM creation
+ if vm_creation and self.params['guest_id'] is None:
+ self.module.fail_json(msg="guest_id attribute is mandatory for VM creation")
+
+ if self.params['guest_id'] and \
+ (vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()):
+ self.change_detected = True
+ self.configspec.guestId = self.params['guest_id']
+
+ def configure_resource_alloc_info(self, vm_obj):
+ """
+ Function to configure resource allocation information about virtual machine
+ :param vm_obj: VM object in case of reconfigure, None in case of deploy
+ :return: None
+ """
+ rai_change_detected = False
+ memory_allocation = vim.ResourceAllocationInfo()
+ cpu_allocation = vim.ResourceAllocationInfo()
+
+ if 'hardware' in self.params:
+ if 'mem_limit' in self.params['hardware']:
+ mem_limit = None
+ try:
+ mem_limit = int(self.params['hardware'].get('mem_limit'))
+ except ValueError:
+ self.module.fail_json(msg="hardware.mem_limit attribute should be an integer value.")
+ memory_allocation.limit = mem_limit
+ if vm_obj is None or memory_allocation.limit != vm_obj.config.memoryAllocation.limit:
+ rai_change_detected = True
+
+ if 'mem_reservation' in self.params['hardware'] or 'memory_reservation' in self.params['hardware']:
+ mem_reservation = self.params['hardware'].get('mem_reservation')
+ if mem_reservation is None:
+ mem_reservation = self.params['hardware'].get('memory_reservation')
+ try:
+ mem_reservation = int(mem_reservation)
+ except ValueError:
+ self.module.fail_json(msg="hardware.mem_reservation or hardware.memory_reservation should be an integer value.")
+
+ memory_allocation.reservation = mem_reservation
+ if vm_obj is None or \
+ memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation:
+ rai_change_detected = True
+
+ if 'cpu_limit' in self.params['hardware']:
+ cpu_limit = None
+ try:
+ cpu_limit = int(self.params['hardware'].get('cpu_limit'))
+ except ValueError:
+ self.module.fail_json(msg="hardware.cpu_limit attribute should be an integer value.")
+ cpu_allocation.limit = cpu_limit
+ if vm_obj is None or cpu_allocation.limit != vm_obj.config.cpuAllocation.limit:
+ rai_change_detected = True
+
+ if 'cpu_reservation' in self.params['hardware']:
+ cpu_reservation = None
+ try:
+ cpu_reservation = int(self.params['hardware'].get('cpu_reservation'))
+ except ValueError:
+ self.module.fail_json(msg="hardware.cpu_reservation should be an integer value.")
+ cpu_allocation.reservation = cpu_reservation
+ if vm_obj is None or \
+ cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation:
+ rai_change_detected = True
+
+ if rai_change_detected:
+ self.configspec.memoryAllocation = memory_allocation
+ self.configspec.cpuAllocation = cpu_allocation
+ self.change_detected = True
+
+ def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
+ # set cpu/memory/etc
+ if 'hardware' in self.params:
+ if 'num_cpus' in self.params['hardware']:
+ try:
+ num_cpus = int(self.params['hardware']['num_cpus'])
+ except ValueError:
+ self.module.fail_json(msg="hardware.num_cpus attribute should be an integer value.")
+ # check VM power state and cpu hot-add/hot-remove state before re-config VM
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+ if not vm_obj.config.cpuHotRemoveEnabled and num_cpus < vm_obj.config.hardware.numCPU:
+ self.module.fail_json(msg="Configured cpu number is less than the cpu number of the VM, "
+ "cpuHotRemove is not enabled")
+ if not vm_obj.config.cpuHotAddEnabled and num_cpus > vm_obj.config.hardware.numCPU:
+ self.module.fail_json(msg="Configured cpu number is more than the cpu number of the VM, "
+ "cpuHotAdd is not enabled")
+
+ if 'num_cpu_cores_per_socket' in self.params['hardware']:
+ try:
+ num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket'])
+ except ValueError:
+ self.module.fail_json(msg="hardware.num_cpu_cores_per_socket attribute "
+ "should be an integer value.")
+ if num_cpus % num_cpu_cores_per_socket != 0:
+ self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple "
+ "of hardware.num_cpu_cores_per_socket")
+ self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
+ if vm_obj is None or self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket:
+ self.change_detected = True
+ self.configspec.numCPUs = num_cpus
+ if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU:
+ self.change_detected = True
+ # num_cpu is mandatory for VM creation
+ elif vm_creation and not self.params['template']:
+ self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation")
+
+ if 'memory_mb' in self.params['hardware']:
+ try:
+ memory_mb = int(self.params['hardware']['memory_mb'])
+ except ValueError:
+ self.module.fail_json(msg="Failed to parse hardware.memory_mb value."
+ " Please refer the documentation and provide"
+ " correct value.")
+ # check VM power state and memory hotadd state before re-config VM
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+ if vm_obj.config.memoryHotAddEnabled and memory_mb < vm_obj.config.hardware.memoryMB:
+ self.module.fail_json(msg="Configured memory is less than memory size of the VM, "
+ "operation is not supported")
+ elif not vm_obj.config.memoryHotAddEnabled and memory_mb != vm_obj.config.hardware.memoryMB:
+ self.module.fail_json(msg="memoryHotAdd is not enabled")
+ self.configspec.memoryMB = memory_mb
+ if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB:
+ self.change_detected = True
+ # memory_mb is mandatory for VM creation
+ elif vm_creation and not self.params['template']:
+ self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation")
+
+ if 'hotadd_memory' in self.params['hardware']:
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
+ vm_obj.config.memoryHotAddEnabled != bool(self.params['hardware']['hotadd_memory']):
+ self.module.fail_json(msg="Configure hotadd memory operation is not supported when VM is power on")
+ self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory'])
+ if vm_obj is None or self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled:
+ self.change_detected = True
+
+ if 'hotadd_cpu' in self.params['hardware']:
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
+ vm_obj.config.cpuHotAddEnabled != bool(self.params['hardware']['hotadd_cpu']):
+ self.module.fail_json(msg="Configure hotadd cpu operation is not supported when VM is power on")
+ self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu'])
+ if vm_obj is None or self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled:
+ self.change_detected = True
+
+ if 'hotremove_cpu' in self.params['hardware']:
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
+ vm_obj.config.cpuHotRemoveEnabled != bool(self.params['hardware']['hotremove_cpu']):
+ self.module.fail_json(msg="Configure hotremove cpu operation is not supported when VM is power on")
+ self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu'])
+ if vm_obj is None or self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled:
+ self.change_detected = True
+
+ if 'memory_reservation_lock' in self.params['hardware']:
+ self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock'])
+ if vm_obj is None or self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax:
+ self.change_detected = True
+
+ if 'boot_firmware' in self.params['hardware']:
+ # boot firmware re-config can cause boot issue
+ if vm_obj is not None:
+ return
+ boot_firmware = self.params['hardware']['boot_firmware'].lower()
+ if boot_firmware not in ('bios', 'efi'):
+ self.module.fail_json(msg="hardware.boot_firmware value is invalid [%s]."
+ " Need one of ['bios', 'efi']." % boot_firmware)
+ self.configspec.firmware = boot_firmware
+ self.change_detected = True
+
    def sanitize_cdrom_params(self):
        """Validate and group the user-supplied ``cdrom`` list.

        Returns a dict of the shape
        ``{'ide': [{'num': 0, 'cdrom': [...]}, ...], 'sata': [...]}``
        grouping validated cdrom specs by controller type and controller
        number.  Fails the module on any invalid value.
        """
        # cdroms {'ide': [{num: 0, cdrom: []}, {}], 'sata': [{num: 0, cdrom: []}, {}, ...]}
        cdroms = {'ide': [], 'sata': []}
        expected_cdrom_spec = self.params.get('cdrom')
        if expected_cdrom_spec:
            for cdrom_spec in expected_cdrom_spec:
                # Normalize controller type (defaults to 'ide').
                cdrom_spec['controller_type'] = cdrom_spec.get('controller_type', 'ide').lower()
                if cdrom_spec['controller_type'] not in ['ide', 'sata']:
                    self.module.fail_json(msg="Invalid cdrom.controller_type: %s, valid value is 'ide' or 'sata'."
                                              % cdrom_spec['controller_type'])

                # Normalize state (defaults to 'present').
                cdrom_spec['state'] = cdrom_spec.get('state', 'present').lower()
                if cdrom_spec['state'] not in ['present', 'absent']:
                    self.module.fail_json(msg="Invalid cdrom.state: %s, valid value is 'present', 'absent'."
                                              % cdrom_spec['state'])

                if cdrom_spec['state'] == 'present':
                    if 'type' in cdrom_spec and cdrom_spec.get('type') not in ['none', 'client', 'iso']:
                        self.module.fail_json(msg="Invalid cdrom.type: %s, valid value is 'none', 'client' or 'iso'."
                                                  % cdrom_spec.get('type'))
                    if cdrom_spec.get('type') == 'iso' and not cdrom_spec.get('iso_path'):
                        self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")

                # IDE: 2 buses with 2 units each; SATA: 4 controllers with 30 units each.
                if cdrom_spec['controller_type'] == 'ide' and \
                        (cdrom_spec.get('controller_number') not in [0, 1] or cdrom_spec.get('unit_number') not in [0, 1]):
                    self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s, valid"
                                              " values are 0 or 1 for IDE controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number')))

                if cdrom_spec['controller_type'] == 'sata' and \
                        (cdrom_spec.get('controller_number') not in range(0, 4) or cdrom_spec.get('unit_number') not in range(0, 30)):
                    self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s,"
                                              " valid controller_number value is 0-3, valid unit_number is 0-29"
                                              " for SATA controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number')))

                # Group this spec with others sharing the same controller number.
                # NOTE(review): this indexes cdrom_spec['controller_number'] directly;
                # presumably the validation above guarantees the key exists — confirm
                # that specs cannot reach here without it.
                ctl_exist = False
                for exist_spec in cdroms.get(cdrom_spec['controller_type']):
                    if exist_spec['num'] == cdrom_spec['controller_number']:
                        ctl_exist = True
                        exist_spec['cdrom'].append(cdrom_spec)
                        break
                if not ctl_exist:
                    cdroms.get(cdrom_spec['controller_type']).append({'num': cdrom_spec['controller_number'], 'cdrom': [cdrom_spec]})

        return cdroms
+
+ def configure_cdrom(self, vm_obj):
+ # Configure the VM CD-ROM
+ if self.params.get('cdrom'):
+ if vm_obj and vm_obj.config.template:
+ # Changing CD-ROM settings on a template is not supported
+ return
+
+ if isinstance(self.params.get('cdrom'), dict):
+ self.configure_cdrom_dict(vm_obj)
+ elif isinstance(self.params.get('cdrom'), list):
+ self.configure_cdrom_list(vm_obj)
+
    def configure_cdrom_dict(self, vm_obj):
        """Handle the legacy dict form of the ``cdrom`` parameter.

        Creates an IDE controller and CD-ROM when none exist, or edits the
        first existing CD-ROM in place when its configuration differs from
        the requested one.
        """
        if self.params["cdrom"].get('type') not in ['none', 'client', 'iso']:
            self.module.fail_json(msg="cdrom.type is mandatory. Options are 'none', 'client', and 'iso'.")
        if self.params["cdrom"]['type'] == 'iso' and not self.params["cdrom"].get('iso_path'):
            self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")

        cdrom_spec = None
        cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
        iso_path = self.params["cdrom"].get("iso_path")
        if len(cdrom_devices) == 0:
            # Creating new CD-ROM
            ide_devices = self.get_vm_ide_devices(vm=vm_obj)
            if len(ide_devices) == 0:
                # Creating new IDE device
                ide_ctl = self.device_helper.create_ide_controller()
                ide_device = ide_ctl.device
                self.change_detected = True
                self.configspec.deviceChange.append(ide_ctl)
            else:
                ide_device = ide_devices[0]
                # An IDE controller holds at most 4 devices (> 3 means full).
                if len(ide_device.device) > 3:
                    self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4"
                                              " IDE devices of which none are a cdrom")

            cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=self.params["cdrom"]["type"],
                                                         iso_path=iso_path)
            # Only a powered-on VM can have the drive connected immediately.
            if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none")

        elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_devices[0],
                                                   cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path):
            # Existing first CD-ROM differs from the requested config: edit in place.
            self.device_helper.update_cdrom_config(vm_obj, self.params["cdrom"], cdrom_devices[0], iso_path=iso_path)
            cdrom_spec = vim.vm.device.VirtualDeviceSpec()
            cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
            cdrom_spec.device = cdrom_devices[0]

        # cdrom_spec stays None when the existing device already matches.
        if cdrom_spec:
            self.change_detected = True
            self.configspec.deviceChange.append(cdrom_spec)
+
    def configure_cdrom_list(self, vm_obj):
        """Handle the list form of the ``cdrom`` parameter.

        Creates, edits or removes IDE CD-ROMs so the VM matches the
        sanitized spec.  SATA entries pass validation but are not yet
        implemented here (see the final branch).
        """
        configured_cdroms = self.sanitize_cdrom_params()
        cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
        # configure IDE CD-ROMs
        if configured_cdroms['ide']:
            ide_devices = self.get_vm_ide_devices(vm=vm_obj)
            for expected_cdrom_spec in configured_cdroms['ide']:
                # Find the existing IDE controller with the requested bus number.
                ide_device = None
                for device in ide_devices:
                    if device.busNumber == expected_cdrom_spec['num']:
                        ide_device = device
                        break
                # if not find the matched ide controller or no existing ide controller
                if not ide_device:
                    ide_ctl = self.device_helper.create_ide_controller(bus_number=expected_cdrom_spec['num'])
                    ide_device = ide_ctl.device
                    self.change_detected = True
                    self.configspec.deviceChange.append(ide_ctl)

                for cdrom in expected_cdrom_spec['cdrom']:
                    # Match an existing CD-ROM by controller key + unit number.
                    cdrom_device = None
                    iso_path = cdrom.get('iso_path')
                    unit_number = cdrom.get('unit_number')
                    for target_cdrom in cdrom_devices:
                        if target_cdrom.controllerKey == ide_device.key and target_cdrom.unitNumber == unit_number:
                            cdrom_device = target_cdrom
                            break
                    # create new CD-ROM
                    if not cdrom_device and cdrom.get('state') != 'absent':
                        # IDE devices cannot be hot-added; the VM must be off.
                        if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                            self.module.fail_json(msg='CD-ROM attach to IDE controller not support hot-add.')
                        if len(ide_device.device) == 2:
                            self.module.fail_json(msg='Maximum number of CD-ROMs attached to IDE controller is 2.')
                        cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=cdrom['type'],
                                                                     iso_path=iso_path, unit_number=unit_number)
                        self.change_detected = True
                        self.configspec.deviceChange.append(cdrom_spec)
                    # re-configure CD-ROM
                    elif cdrom_device and cdrom.get('state') != 'absent' and \
                            not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device,
                                                                  cdrom_type=cdrom['type'], iso_path=iso_path):
                        self.device_helper.update_cdrom_config(vm_obj, cdrom, cdrom_device, iso_path=iso_path)
                        cdrom_spec = vim.vm.device.VirtualDeviceSpec()
                        cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
                        cdrom_spec.device = cdrom_device
                        self.change_detected = True
                        self.configspec.deviceChange.append(cdrom_spec)
                    # delete CD-ROM
                    elif cdrom_device and cdrom.get('state') == 'absent':
                        # IDE devices cannot be hot-removed either.
                        if vm_obj and vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
                            self.module.fail_json(msg='CD-ROM attach to IDE controller not support hot-remove.')
                        cdrom_spec = self.device_helper.remove_cdrom(cdrom_device)
                        self.change_detected = True
                        self.configspec.deviceChange.append(cdrom_spec)
        # configure SATA CD-ROMs is not supported yet
        if configured_cdroms['sata']:
            pass
+
+ def configure_hardware_params(self, vm_obj):
+ """
+ Function to configure hardware related configuration of virtual machine
+ Args:
+ vm_obj: virtual machine object
+ """
+ if 'hardware' in self.params:
+ if 'max_connections' in self.params['hardware']:
+ # maxMksConnections == max_connections
+ self.configspec.maxMksConnections = int(self.params['hardware']['max_connections'])
+ if vm_obj is None or self.configspec.maxMksConnections != vm_obj.config.maxMksConnections:
+ self.change_detected = True
+
+ if 'nested_virt' in self.params['hardware']:
+ self.configspec.nestedHVEnabled = bool(self.params['hardware']['nested_virt'])
+ if vm_obj is None or self.configspec.nestedHVEnabled != bool(vm_obj.config.nestedHVEnabled):
+ self.change_detected = True
+
+ if 'version' in self.params['hardware']:
+ hw_version_check_failed = False
+ temp_version = self.params['hardware'].get('version', 10)
+ if isinstance(temp_version, str) and temp_version.lower() == 'latest':
+ # Check is to make sure vm_obj is not of type template
+ if vm_obj and not vm_obj.config.template:
+ try:
+ task = vm_obj.UpgradeVM_Task()
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
+ except vim.fault.AlreadyUpgraded:
+ # Don't fail if VM is already upgraded.
+ pass
+ else:
+ try:
+ temp_version = int(temp_version)
+ except ValueError:
+ hw_version_check_failed = True
+
+ if temp_version not in range(3, 16):
+ hw_version_check_failed = True
+
+ if hw_version_check_failed:
+ self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid"
+ " values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." % temp_version)
+ # Hardware version is denoted as "vmx-10"
+ version = "vmx-%02d" % temp_version
+ self.configspec.version = version
+ if vm_obj is None or self.configspec.version != vm_obj.config.version:
+ self.change_detected = True
+ # Check is to make sure vm_obj is not of type template
+ if vm_obj and not vm_obj.config.template:
+ # VM exists and we need to update the hardware version
+ current_version = vm_obj.config.version
+ # current_version = "vmx-10"
+ version_digit = int(current_version.split("-", 1)[-1])
+ if temp_version < version_digit:
+ self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified"
+ " version '%d'. Downgrading hardware version is"
+ " not supported. Please specify version greater"
+ " than the current version." % (version_digit,
+ temp_version))
+ new_version = "vmx-%02d" % temp_version
+ try:
+ task = vm_obj.UpgradeVM_Task(new_version)
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
+ except vim.fault.AlreadyUpgraded:
+ # Don't fail if VM is already upgraded.
+ pass
+
+ if 'virt_based_security' in self.params['hardware']:
+ host_version = self.select_host().summary.config.product.version
+ if int(host_version.split('.')[0]) < 6 or (int(host_version.split('.')[0]) == 6 and int(host_version.split('.')[1]) < 7):
+ self.module.fail_json(msg="ESXi version %s not support VBS." % host_version)
+ guest_ids = ['windows9_64Guest', 'windows9Server64Guest']
+ if vm_obj is None:
+ guestid = self.configspec.guestId
+ else:
+ guestid = vm_obj.summary.config.guestId
+ if guestid not in guest_ids:
+ self.module.fail_json(msg="Guest '%s' not support VBS." % guestid)
+ if (vm_obj is None and int(self.configspec.version.split('-')[1]) >= 14) or \
+ (vm_obj and int(vm_obj.config.version.split('-')[1]) >= 14 and (vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff)):
+ self.configspec.flags = vim.vm.FlagInfo()
+ self.configspec.flags.vbsEnabled = bool(self.params['hardware']['virt_based_security'])
+ if bool(self.params['hardware']['virt_based_security']):
+ self.configspec.flags.vvtdEnabled = True
+ self.configspec.nestedHVEnabled = True
+ if (vm_obj is None and self.configspec.firmware == 'efi') or \
+ (vm_obj and vm_obj.config.firmware == 'efi'):
+ self.configspec.bootOptions = vim.vm.BootOptions()
+ self.configspec.bootOptions.efiSecureBootEnabled = True
+ else:
+ self.module.fail_json(msg="Not support VBS when firmware is BIOS.")
+ if vm_obj is None or self.configspec.flags.vbsEnabled != vm_obj.config.flags.vbsEnabled:
+ self.change_detected = True
+
+ def get_device_by_type(self, vm=None, type=None):
+ device_list = []
+ if vm is None or type is None:
+ return device_list
+ for device in vm.config.hardware.device:
+ if isinstance(device, type):
+ device_list.append(device)
+
+ return device_list
+
+ def get_vm_cdrom_devices(self, vm=None):
+ return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualCdrom)
+
+ def get_vm_ide_devices(self, vm=None):
+ return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualIDEController)
+
+ def get_vm_network_interfaces(self, vm=None):
+ device_list = []
+ if vm is None:
+ return device_list
+
+ nw_device_types = (vim.vm.device.VirtualPCNet32, vim.vm.device.VirtualVmxnet2,
+ vim.vm.device.VirtualVmxnet3, vim.vm.device.VirtualE1000,
+ vim.vm.device.VirtualE1000e, vim.vm.device.VirtualSriovEthernetCard)
+ for device in vm.config.hardware.device:
+ if isinstance(device, nw_device_types):
+ device_list.append(device)
+
+ return device_list
+
    def sanitize_network_params(self):
        """
        Sanitize user provided network provided params

        Returns: A sanitized list of network params, else fails

        """
        network_devices = list()
        # Clean up user data here
        for network in self.params['networks']:
            if 'name' not in network and 'vlan' not in network:
                self.module.fail_json(msg="Please specify at least a network name or"
                                          " a VLAN name under VM network list.")

            if 'name' in network and self.cache.get_network(network['name']) is None:
                self.module.fail_json(msg="Network '%(name)s' does not exist." % network)
            elif 'vlan' in network:
                dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
                for dvp in dvps:
                    # Match a distributed portgroup by numeric VLAN id ...
                    if hasattr(dvp.config.defaultPortConfig, 'vlan') and \
                            isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \
                            str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']):
                        network['name'] = dvp.config.name
                        break
                    # ... or by portgroup name qualified with its dvswitch ...
                    if 'dvswitch_name' in network and \
                            dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \
                            dvp.config.name == network['vlan']:
                        network['name'] = dvp.config.name
                        break

                    # ... or by bare portgroup name.
                    if dvp.config.name == network['vlan']:
                        network['name'] = dvp.config.name
                        break
                else:
                    # for/else: executed only when no portgroup matched at all.
                    self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network)

            if 'type' in network:
                if network['type'] not in ['dhcp', 'static']:
                    self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter."
                                              " Valid parameters are ['dhcp', 'static']." % network)
                if network['type'] != 'static' and ('ip' in network or 'netmask' in network):
                    self.module.fail_json(msg='Static IP information provided for network "%(name)s",'
                                              ' but "type" is set to "%(type)s".' % network)
            else:
                # Type is optional parameter, if user provided IP or Subnet assume
                # network type as 'static'
                if 'ip' in network or 'netmask' in network:
                    network['type'] = 'static'
                else:
                    # User wants network type as 'dhcp'
                    network['type'] = 'dhcp'

            # Static networks need both halves of the address pair.
            if network.get('type') == 'static':
                if 'ip' in network and 'netmask' not in network:
                    self.module.fail_json(msg="'netmask' is required if 'ip' is"
                                              " specified under VM network list.")
                if 'ip' not in network and 'netmask' in network:
                    self.module.fail_json(msg="'ip' is required if 'netmask' is"
                                              " specified under VM network list.")

            validate_device_types = ['pcnet32', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e', 'sriov']
            if 'device_type' in network and network['device_type'] not in validate_device_types:
                self.module.fail_json(msg="Device type specified '%s' is not valid."
                                          " Please specify correct device"
                                          " type from ['%s']." % (network['device_type'],
                                                                  "', '".join(validate_device_types)))

            if 'mac' in network and not is_mac(network['mac']):
                self.module.fail_json(msg="Device MAC address '%s' is invalid."
                                          " Please provide correct MAC address." % network['mac'])

            network_devices.append(network)

        return network_devices
+
def configure_network(self, vm_obj):
    """Build NIC add/edit device specs from self.params['networks'].

    Edits existing NICs in order (clone/reconfigure) and creates new ones
    beyond the current count. Specs are appended to self.configspec —
    or self.relospec for opaque (NSX-T) networks — and self.change_detected
    is set whenever a change is queued.
    """
    # Ignore empty networks, this permits to keep networks when deploying a template/cloning a VM
    if len(self.params['networks']) == 0:
        return

    network_devices = self.sanitize_network_params()

    # List current device for Clone or Idempotency
    current_net_devices = self.get_vm_network_interfaces(vm=vm_obj)
    if len(network_devices) < len(current_net_devices):
        self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). "
                                  "Removing interfaces is not allowed"
                              % (len(network_devices), len(current_net_devices)))

    for key in range(0, len(network_devices)):
        nic_change_detected = False
        network_name = network_devices[key]['name']
        if key < len(current_net_devices) and (vm_obj or self.params['template']):
            # We are editing existing network devices, this is either when
            # are cloning from VM or Template
            nic = vim.vm.device.VirtualDeviceSpec()
            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit

            nic.device = current_net_devices[key]
            if ('wake_on_lan' in network_devices[key] and
                    nic.device.wakeOnLanEnabled != network_devices[key].get('wake_on_lan')):
                nic.device.wakeOnLanEnabled = network_devices[key].get('wake_on_lan')
                nic_change_detected = True
            if ('start_connected' in network_devices[key] and
                    nic.device.connectable.startConnected != network_devices[key].get('start_connected')):
                nic.device.connectable.startConnected = network_devices[key].get('start_connected')
                nic_change_detected = True
            if ('allow_guest_control' in network_devices[key] and
                    nic.device.connectable.allowGuestControl != network_devices[key].get('allow_guest_control')):
                nic.device.connectable.allowGuestControl = network_devices[key].get('allow_guest_control')
                nic_change_detected = True

            if nic.device.deviceInfo.summary != network_name:
                nic.device.deviceInfo.summary = network_name
                nic_change_detected = True
            if 'device_type' in network_devices[key]:
                # Device type cannot be switched on an existing adapter.
                device = self.device_helper.get_device(network_devices[key]['device_type'], network_name)
                device_class = type(device)
                if not isinstance(nic.device, device_class):
                    self.module.fail_json(msg="Changing the device type is not possible when interface is already present. "
                                              "The failing device type is %s" % network_devices[key]['device_type'])
            # Changing mac address has no effect when editing interface
            if 'mac' in network_devices[key] and nic.device.macAddress != current_net_devices[key].macAddress:
                self.module.fail_json(msg="Changing MAC address has not effect when interface is already present. "
                                          "The failing new MAC address is %s" % nic.device.macAddress)

        else:
            # Default device type is vmxnet3, VMware best practice
            device_type = network_devices[key].get('device_type', 'vmxnet3')
            nic = self.device_helper.create_nic(device_type,
                                                'Network Adapter %s' % (key + 1),
                                                network_devices[key])
            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
            nic_change_detected = True

        if hasattr(self.cache.get_network(network_name), 'portKeys'):
            # VDS switch

            pg_obj = None
            if 'dvswitch_name' in network_devices[key]:
                dvs_name = network_devices[key]['dvswitch_name']
                dvs_obj = find_dvs_by_name(self.content, dvs_name)
                if dvs_obj is None:
                    self.module.fail_json(msg="Unable to find distributed virtual switch %s" % dvs_name)
                pg_obj = find_dvspg_by_name(dvs_obj, network_name)
                if pg_obj is None:
                    self.module.fail_json(msg="Unable to find distributed port group %s" % network_name)
            else:
                pg_obj = self.cache.find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name)

            # TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup
            # For now, check if we are able to find distributed virtual switch
            if not pg_obj.config.distributedVirtualSwitch:
                self.module.fail_json(msg="Failed to find distributed virtual switch which is associated with"
                                          " distributed virtual portgroup '%s'. Make sure hostsystem is associated with"
                                          " the given distributed virtual portgroup. Also, check if user has correct"
                                          " permission to access distributed virtual switch in the given portgroup." % pg_obj.name)
            if (nic.device.backing and
                    (not hasattr(nic.device.backing, 'port') or
                     (nic.device.backing.port.portgroupKey != pg_obj.key or
                      nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid))):
                nic_change_detected = True

            dvs_port_connection = vim.dvs.PortConnection()
            dvs_port_connection.portgroupKey = pg_obj.key
            # If user specifies distributed port group without associating to the hostsystem on which
            # virtual machine is going to be deployed then we get error. We can infer that there is no
            # association between given distributed port group and host system.
            host_system = self.params.get('esxi_hostname')
            if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]:
                self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed"
                                          " virtual portgroup '%s'. Please make sure host system is associated"
                                          " with given distributed virtual portgroup" % (host_system, pg_obj.name))
            dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
            nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
            nic.device.backing.port = dvs_port_connection

        elif isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
            # NSX-T Logical Switch
            nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
            network_id = self.cache.get_network(network_name).summary.opaqueNetworkId
            nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
            nic.device.backing.opaqueNetworkId = network_id
            nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
            nic_change_detected = True
        else:
            # vSwitch
            if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
                nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
                nic_change_detected = True

            net_obj = self.cache.get_network(network_name)
            if nic.device.backing.network != net_obj:
                nic.device.backing.network = net_obj
                nic_change_detected = True

            if nic.device.backing.deviceName != network_name:
                nic.device.backing.deviceName = network_name
                nic_change_detected = True

        if nic_change_detected:
            # Change to fix the issue found while configuring opaque network
            # VMs cloned from a template with opaque network will get disconnected
            # Replacing deprecated config parameter with relocation Spec
            if isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
                self.relospec.deviceChange.append(nic)
            else:
                self.configspec.deviceChange.append(nic)
            self.change_detected = True
+
def configure_vapp_properties(self, vm_obj):
    """Translate self.params['vapp_properties'] into a vim.vApp.VmConfigSpec.

    For an existing VM (vm_obj set), current properties are diffed and
    add/edit/remove PropertySpec entries are generated; for a new VM every
    entry becomes an 'add'. The spec is stored on self.configspec.vAppConfig
    and self.change_detected is set when any property changed.
    """
    if len(self.params['vapp_properties']) == 0:
        return

    # 'id' is the identity key of a vApp property — mandatory for each entry.
    for x in self.params['vapp_properties']:
        if not x.get('id'):
            self.module.fail_json(msg="id is required to set vApp property")

    new_vmconfig_spec = vim.vApp.VmConfigSpec()

    if vm_obj:
        # VM exists
        # This is primarily for vcsim/integration tests, unset vAppConfig was not seen on my deployments
        orig_spec = vm_obj.config.vAppConfig if vm_obj.config.vAppConfig else new_vmconfig_spec

        vapp_properties_current = dict((x.id, x) for x in orig_spec.property)
        vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])

        # each property must have a unique key
        # init key counter with max value + 1
        all_keys = [x.key for x in orig_spec.property]
        new_property_index = max(all_keys) + 1 if all_keys else 0

        for property_id, property_spec in vapp_properties_to_change.items():
            is_property_changed = False
            new_vapp_property_spec = vim.vApp.PropertySpec()

            if property_id in vapp_properties_current:
                if property_spec.get('operation') == 'remove':
                    new_vapp_property_spec.operation = 'remove'
                    new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key
                    is_property_changed = True
                else:
                    # this is 'edit' branch
                    new_vapp_property_spec.operation = 'edit'
                    new_vapp_property_spec.info = vapp_properties_current[property_id]
                    try:
                        for property_name, property_value in property_spec.items():

                            if property_name == 'operation':
                                # operation is not an info object property
                                # if set to anything other than 'remove' we don't fail
                                continue

                            # Updating attributes only if needed
                            if getattr(new_vapp_property_spec.info, property_name) != property_value:
                                setattr(new_vapp_property_spec.info, property_name, property_value)
                                is_property_changed = True

                    except Exception as e:
                        msg = "Failed to set vApp property field='%s' and value='%s'. Error: %s" % (property_name, property_value, to_text(e))
                        self.module.fail_json(msg=msg)
            else:
                if property_spec.get('operation') == 'remove':
                    # attempt to delete non-existent property
                    continue

                # this is add new property branch
                new_vapp_property_spec.operation = 'add'

                property_info = vim.vApp.PropertyInfo()
                property_info.classId = property_spec.get('classId')
                property_info.instanceId = property_spec.get('instanceId')
                property_info.id = property_spec.get('id')
                property_info.category = property_spec.get('category')
                property_info.label = property_spec.get('label')
                property_info.type = property_spec.get('type', 'string')
                property_info.userConfigurable = property_spec.get('userConfigurable', True)
                property_info.defaultValue = property_spec.get('defaultValue')
                property_info.value = property_spec.get('value', '')
                property_info.description = property_spec.get('description')

                new_vapp_property_spec.info = property_info
                new_vapp_property_spec.info.key = new_property_index
                new_property_index += 1
                is_property_changed = True

            if is_property_changed:
                new_vmconfig_spec.property.append(new_vapp_property_spec)
    else:
        # New VM
        all_keys = [x.key for x in new_vmconfig_spec.property]
        new_property_index = max(all_keys) + 1 if all_keys else 0
        vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
        is_property_changed = False

        for property_id, property_spec in vapp_properties_to_change.items():
            new_vapp_property_spec = vim.vApp.PropertySpec()
            # this is add new property branch
            new_vapp_property_spec.operation = 'add'

            property_info = vim.vApp.PropertyInfo()
            property_info.classId = property_spec.get('classId')
            property_info.instanceId = property_spec.get('instanceId')
            property_info.id = property_spec.get('id')
            property_info.category = property_spec.get('category')
            property_info.label = property_spec.get('label')
            property_info.type = property_spec.get('type', 'string')
            property_info.userConfigurable = property_spec.get('userConfigurable', True)
            property_info.defaultValue = property_spec.get('defaultValue')
            property_info.value = property_spec.get('value', '')
            property_info.description = property_spec.get('description')

            new_vapp_property_spec.info = property_info
            new_vapp_property_spec.info.key = new_property_index
            new_property_index += 1
            is_property_changed = True

            if is_property_changed:
                new_vmconfig_spec.property.append(new_vapp_property_spec)

    if new_vmconfig_spec.property:
        self.configspec.vAppConfig = new_vmconfig_spec
        self.change_detected = True
+
def customize_customvalues(self, vm_obj, config_spec):
    """Append changed custom (extraConfig) key/value pairs to config_spec.

    Compares each entry in self.params['customvalues'] against the values
    gathered from the live VM and queues only the differing pairs; sets
    self.change_detected when anything was queued.

    Args:
        vm_obj: existing VM managed object (passed to gather_facts).
        config_spec: vim.vm.ConfigSpec whose extraConfig list is (re)built.
    """
    if len(self.params['customvalues']) == 0:
        return

    vm_custom_spec = config_spec
    vm_custom_spec.extraConfig = []

    changed = False
    facts = self.gather_facts(vm_obj)
    for kv in self.params['customvalues']:
        if 'key' not in kv or 'value' not in kv:
            # BUG FIX: was self.module.exit_json(), which reported SUCCESS on
            # malformed input; an invalid item must fail the task.
            self.module.fail_json(msg="customvalues items required both 'key' and 'value' fields.")

        # If kv is not kv fetched from facts, change it
        if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']:
            option = vim.option.OptionValue()
            option.key = kv['key']
            option.value = kv['value']

            vm_custom_spec.extraConfig.append(option)
            changed = True

    if changed:
        self.change_detected = True
+
def customize_vm(self, vm_obj):
    """Build the guest OS customization spec and store it on self.customspec.

    A named 'customization_spec' from vCenter short-circuits everything else.
    Otherwise per-NIC adapter mappings, global DNS settings and an identity
    (Sysprep for guest ids containing 'win', LinuxPrep otherwise) are built
    from self.params['networks'] / self.params['customization'].
    """

    # User specified customization specification
    custom_spec_name = self.params.get('customization_spec')
    if custom_spec_name:
        cc_mgr = self.content.customizationSpecManager
        if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name):
            temp_spec = cc_mgr.GetCustomizationSpec(name=custom_spec_name)
            self.customspec = temp_spec.spec
            return
        else:
            self.module.fail_json(msg="Unable to find customization specification"
                                      " '%s' in given configuration." % custom_spec_name)

    # Network settings
    adaptermaps = []
    for network in self.params['networks']:

        guest_map = vim.vm.customization.AdapterMapping()
        guest_map.adapter = vim.vm.customization.IPSettings()

        if 'ip' in network and 'netmask' in network:
            guest_map.adapter.ip = vim.vm.customization.FixedIp()
            guest_map.adapter.ip.ipAddress = str(network['ip'])
            guest_map.adapter.subnetMask = str(network['netmask'])
        elif 'type' in network and network['type'] == 'dhcp':
            guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()

        if 'gateway' in network:
            guest_map.adapter.gateway = network['gateway']

        # On Windows, DNS domain and DNS servers can be set by network interface
        # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html
        if 'domain' in network:
            guest_map.adapter.dnsDomain = network['domain']
        elif 'domain' in self.params['customization']:
            guest_map.adapter.dnsDomain = self.params['customization']['domain']

        if 'dns_servers' in network:
            guest_map.adapter.dnsServerList = network['dns_servers']
        elif 'dns_servers' in self.params['customization']:
            guest_map.adapter.dnsServerList = self.params['customization']['dns_servers']

        adaptermaps.append(guest_map)

    # Global DNS settings
    globalip = vim.vm.customization.GlobalIPSettings()
    if 'dns_servers' in self.params['customization']:
        globalip.dnsServerList = self.params['customization']['dns_servers']

    # TODO: Maybe list the different domains from the interfaces here by default ?
    if 'dns_suffix' in self.params['customization']:
        dns_suffix = self.params['customization']['dns_suffix']
        if isinstance(dns_suffix, list):
            globalip.dnsSuffixList = " ".join(dns_suffix)
        else:
            globalip.dnsSuffixList = dns_suffix
    elif 'domain' in self.params['customization']:
        globalip.dnsSuffixList = self.params['customization']['domain']

    if self.params['guest_id']:
        guest_id = self.params['guest_id']
    else:
        guest_id = vm_obj.summary.config.guestId

    # For windows guest OS, use SysPrep
    # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail
    if 'win' in guest_id:
        ident = vim.vm.customization.Sysprep()

        ident.userData = vim.vm.customization.UserData()

        # Setting hostName, orgName and fullName is mandatory, so we set some default when missing
        ident.userData.computerName = vim.vm.customization.FixedName()
        # computer name will be truncated to 15 characters if using VM name
        default_name = self.params['name'].replace(' ', '')
        punctuation = string.punctuation.replace('-', '')
        default_name = ''.join([c for c in default_name if c not in punctuation])
        ident.userData.computerName.name = str(self.params['customization'].get('hostname', default_name[0:15]))
        ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator'))
        ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME'))

        if 'productid' in self.params['customization']:
            ident.userData.productId = str(self.params['customization']['productid'])

        ident.guiUnattended = vim.vm.customization.GuiUnattended()

        if 'autologon' in self.params['customization']:
            ident.guiUnattended.autoLogon = self.params['customization']['autologon']
            ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1)

        if 'timezone' in self.params['customization']:
            # Check if timezone value is a int before proceeding.
            ident.guiUnattended.timeZone = self.device_helper.integer_value(
                self.params['customization']['timezone'],
                'customization.timezone')

        ident.identification = vim.vm.customization.Identification()

        if self.params['customization'].get('password', '') != '':
            ident.guiUnattended.password = vim.vm.customization.Password()
            ident.guiUnattended.password.value = str(self.params['customization']['password'])
            ident.guiUnattended.password.plainText = True

        if 'joindomain' in self.params['customization']:
            if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
                self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
                                          "joindomain feature")

            ident.identification.domainAdmin = str(self.params['customization']['domainadmin'])
            ident.identification.joinDomain = str(self.params['customization']['joindomain'])
            ident.identification.domainAdminPassword = vim.vm.customization.Password()
            ident.identification.domainAdminPassword.value = str(self.params['customization']['domainadminpassword'])
            ident.identification.domainAdminPassword.plainText = True

        elif 'joinworkgroup' in self.params['customization']:
            ident.identification.joinWorkgroup = str(self.params['customization']['joinworkgroup'])

        if 'runonce' in self.params['customization']:
            ident.guiRunOnce = vim.vm.customization.GuiRunOnce()
            ident.guiRunOnce.commandList = self.params['customization']['runonce']

    else:
        # FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail!

        # For Linux guest OS, use LinuxPrep
        # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html
        ident = vim.vm.customization.LinuxPrep()

        # TODO: Maybe add domain from interface if missing ?
        if 'domain' in self.params['customization']:
            ident.domain = str(self.params['customization']['domain'])

        ident.hostName = vim.vm.customization.FixedName()
        hostname = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0]))
        # Remove all characters except alphanumeric and minus which is allowed by RFC 952
        valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname)
        ident.hostName.name = valid_hostname

        # List of supported time zones for different vSphere versions in Linux/Unix systems
        # https://kb.vmware.com/s/article/2145518
        if 'timezone' in self.params['customization']:
            ident.timeZone = str(self.params['customization']['timezone'])
        if 'hwclockUTC' in self.params['customization']:
            ident.hwClockUTC = self.params['customization']['hwclockUTC']

    self.customspec = vim.vm.customization.Specification()
    self.customspec.nicSettingMap = adaptermaps
    self.customspec.globalIPSettings = globalip
    self.customspec.identity = ident
+
def get_vm_scsi_controller(self, vm_obj):
    """Return a VirtualDeviceSpec wrapping the VM's first SCSI controller.

    Returns None when vm_obj is None (no VM yet) or when the VM has no
    SCSI controller among its hardware devices.
    """
    # If vm_obj doesn't exist there is no SCSI controller to find
    if vm_obj is None:
        return None

    hardware_devices = vm_obj.config.hardware.device
    controller = next(
        (dev for dev in hardware_devices if self.device_helper.is_scsi_controller(dev)),
        None,
    )
    if controller is None:
        return None

    spec = vim.vm.device.VirtualDeviceSpec()
    spec.device = controller
    return spec
+
def get_configured_disk_size(self, expected_disk_spec):
    """Return the disk size from a disk spec dict, converted to kilobytes.

    Accepts either a combined 'size' string such as '40gb' / '0.5TB'
    (case-insensitive unit, int or float value), or one numeric
    'size_kb' / 'size_mb' / 'size_gb' / 'size_tb' entry.

    Fails the module when no size key is present or the value/unit
    cannot be parsed.
    """
    # what size is it?
    if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']:
        # size, size_tb, size_gb, size_mb, size_kb
        if 'size' in expected_disk_spec:
            size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
            disk_size_m = size_regex.match(expected_disk_spec['size'])
            try:
                if disk_size_m:
                    expected = disk_size_m.group(1)
                    unit = disk_size_m.group(2)
                else:
                    raise ValueError

                if re.match(r'\d+\.\d+', expected):
                    # We found float value in string, let's typecast it
                    expected = float(expected)
                else:
                    # We found int value in string, let's typecast it
                    expected = int(expected)

                if not expected or not unit:
                    raise ValueError

            except (TypeError, ValueError, NameError):
                # Common failure
                self.module.fail_json(msg="Failed to parse disk size please review value"
                                          " provided using documentation.")
        else:
            param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0]
            unit = param.split('_')[-1].lower()
            expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0]
            expected = int(expected)

        disk_units = dict(tb=3, gb=2, mb=1, kb=0)
        # BUG FIX: normalize BEFORE the membership test. The 'size' regex
        # accepts upper-case units ('10GB'), but the old code only lowered
        # the unit after it had already passed the lookup, so valid
        # upper-case units were rejected as unsupported.
        unit = unit.lower()
        if unit in disk_units:
            return expected * (1024 ** disk_units[unit])
        else:
            self.module.fail_json(msg="%s is not a supported unit for disk size."
                                      " Supported units are ['%s']." % (unit,
                                                                        "', '".join(disk_units.keys())))

    # No size found but disk, fail
    self.module.fail_json(
        msg="No size, size_kb, size_mb, size_gb or size_tb attribute found into disk configuration")
+
def add_existing_vmdk(self, vm_obj, expected_disk_spec, diskspec, scsi_ctl):
    """
    Attach the vmdk file named by expected_disk_spec['filename'] to diskspec
    and queue the spec on self.configspec.deviceChange.

    Nothing is queued when the VM already backs this disk with the same file.
    """
    filename = expected_disk_spec['filename']
    creating_new_vm = vm_obj is None
    backing_differs = bool(vm_obj) and diskspec.device.backing.fileName != filename

    if not (creating_new_vm or backing_differs):
        return

    diskspec.device.backing.fileName = filename
    # key -1 asks vSphere to assign a fresh device key.
    diskspec.device.key = -1
    self.change_detected = True
    self.configspec.deviceChange.append(diskspec)
+
def configure_disks(self, vm_obj):
    """Build disk device specs from self.params['disk'].

    Existing disks (by index) are edited; extra entries create new disks on
    the (possibly newly created) SCSI controller. Specs land on
    self.configspec.deviceChange; shrinking a disk is refused.
    """
    # Ignore empty disk list, this permits to keep disks when deploying a template/cloning a VM
    if len(self.params['disk']) == 0:
        return

    scsi_ctl = self.get_vm_scsi_controller(vm_obj)

    # Create scsi controller only if we are deploying a new VM, not a template or reconfiguring
    if vm_obj is None or scsi_ctl is None:
        scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type())
        self.change_detected = True
        self.configspec.deviceChange.append(scsi_ctl)

    disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \
        if vm_obj is not None else None

    if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks):
        self.module.fail_json(msg="Provided disks configuration has less disks than "
                                  "the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks)))

    disk_index = 0
    for expected_disk_spec in self.params.get('disk'):
        disk_modified = False
        # If we are manipulating and existing objects which has disks and disk_index is in disks
        if vm_obj is not None and disks is not None and disk_index < len(disks):
            diskspec = vim.vm.device.VirtualDeviceSpec()
            # set the operation to edit so that it knows to keep other settings
            diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
            diskspec.device = disks[disk_index]
        else:
            diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index)
            disk_modified = True

        # increment index for next disk search
        disk_index += 1
        # index 7 is reserved to SCSI controller
        if disk_index == 7:
            disk_index += 1

        if 'disk_mode' in expected_disk_spec:
            disk_mode = expected_disk_spec.get('disk_mode', 'persistent').lower()
            valid_disk_mode = ['persistent', 'independent_persistent', 'independent_nonpersistent']
            if disk_mode not in valid_disk_mode:
                self.module.fail_json(msg="disk_mode specified is not valid."
                                          " Should be one of ['%s']" % "', '".join(valid_disk_mode))

            if (vm_obj and diskspec.device.backing.diskMode != disk_mode) or (vm_obj is None):
                diskspec.device.backing.diskMode = disk_mode
                disk_modified = True
        else:
            diskspec.device.backing.diskMode = "persistent"

        # is it thin?
        if 'type' in expected_disk_spec:
            disk_type = expected_disk_spec.get('type', '').lower()
            if disk_type == 'thin':
                diskspec.device.backing.thinProvisioned = True
            elif disk_type == 'eagerzeroedthick':
                diskspec.device.backing.eagerlyScrub = True

        if 'filename' in expected_disk_spec and expected_disk_spec['filename'] is not None:
            # Attach an existing vmdk instead of creating/resizing one.
            self.add_existing_vmdk(vm_obj, expected_disk_spec, diskspec, scsi_ctl)
            continue
        elif vm_obj is None or self.params['template']:
            # We are creating new VM or from Template
            # Only create virtual device if not backed by vmdk in original template
            if diskspec.device.backing.fileName == '':
                diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create

        # which datastore?
        if expected_disk_spec.get('datastore'):
            # TODO: This is already handled by the relocation spec,
            # but it needs to eventually be handled for all the
            # other disks defined
            pass

        kb = self.get_configured_disk_size(expected_disk_spec)
        # VMware doesn't allow to reduce disk sizes
        if kb < diskspec.device.capacityInKB:
            self.module.fail_json(
                msg="Given disk size is smaller than found (%d < %d). Reducing disks is not allowed." %
                    (kb, diskspec.device.capacityInKB))

        if kb != diskspec.device.capacityInKB or disk_modified:
            diskspec.device.capacityInKB = kb
            self.configspec.deviceChange.append(diskspec)

            self.change_detected = True
+
def select_host(self):
    """Look up the ESXi host named by 'esxi_hostname' and verify it is usable.

    Fails the module when the host is unknown, not connected, or in
    maintenance mode; otherwise returns the host managed object.
    """
    host = self.cache.get_esx_host(self.params['esxi_hostname'])
    if not host:
        self.module.fail_json(msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params)

    runtime = host.runtime
    host_usable = runtime.connectionState == 'connected' and not runtime.inMaintenanceMode
    if not host_usable:
        self.module.fail_json(msg='ESXi "%(esxi_hostname)s" is in invalid state or in maintenance mode.' % self.params)
    return host
+
def autoselect_datastore(self):
    """Pick the valid datastore with the most free space.

    Fails the module when no datastores are visible at all; returns None
    when datastores exist but none pass is_datastore_valid().
    """
    candidates = self.cache.get_all_objs(self.content, [vim.Datastore])

    if candidates is None or len(candidates) == 0:
        self.module.fail_json(msg="Unable to find a datastore list when autoselecting")

    best = None
    best_free = 0
    for candidate in candidates:
        # Skip datastores the helper deems unusable.
        if not self.is_datastore_valid(datastore_obj=candidate):
            continue

        free = candidate.summary.freeSpace
        if free > best_free:
            best = candidate
            best_free = free

    return best
+
def get_recommended_datastore(self, datastore_cluster_obj=None):
    """
    Function to return Storage DRS recommended datastore from datastore cluster
    Args:
        datastore_cluster_obj: datastore cluster managed object

    Returns: Name of recommended datastore from the given datastore cluster,
        or None when no cluster is given / no member datastore qualifies.

    """
    if datastore_cluster_obj is None:
        return None
    # Check if Datastore Cluster provided by user is SDRS ready
    sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
    if sdrs_status:
        # We can get storage recommendation only if SDRS is enabled on given datastorage cluster
        pod_sel_spec = vim.storageDrs.PodSelectionSpec()
        pod_sel_spec.storagePod = datastore_cluster_obj
        storage_spec = vim.storageDrs.StoragePlacementSpec()
        storage_spec.podSelectionSpec = pod_sel_spec
        storage_spec.type = 'create'

        try:
            rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
            rec_action = rec.recommendations[0].action[0]
            return rec_action.destination.name
        except Exception:
            # There is some error so we fall back to general workflow
            pass
    # Fallback: pick the member datastore with the most free space.
    datastore = None
    datastore_freespace = 0
    for ds in datastore_cluster_obj.childEntity:
        if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
            # If datastore field is provided, filter destination datastores
            if not self.is_datastore_valid(datastore_obj=ds):
                continue

            datastore = ds
            datastore_freespace = ds.summary.freeSpace
    if datastore:
        return datastore.name
    return None
+
def select_datastore(self, vm_obj=None):
    """Choose the destination datastore for the VM.

    Selection order: autoselect by free space across cluster/host/datacenter
    datastores; or a user-named datastore (resolving a datastore cluster to
    its SDRS recommendation); falling back to the template's own datastore.
    Fails the module when nothing usable is found.

    Returns: (datastore managed object, datastore name) tuple.
    """
    datastore = None
    datastore_name = None

    if len(self.params['disk']) != 0:
        # TODO: really use the datastore for newly created disks
        if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']:
            datastores = []

            if self.params['cluster']:
                cluster = self.find_cluster_by_name(self.params['cluster'], self.content)

                for host in cluster.host:
                    for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo:
                        if mi.volume.type == "VMFS":
                            datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name))
            elif self.params['esxi_hostname']:
                host = self.find_hostsystem_by_name(self.params['esxi_hostname'])

                for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo:
                    if mi.volume.type == "VMFS":
                        datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name))
            else:
                datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
                datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']]

            datastore_freespace = 0
            for ds in datastores:
                if not self.is_datastore_valid(datastore_obj=ds):
                    continue

                if (ds.summary.freeSpace > datastore_freespace) or (ds.summary.freeSpace == datastore_freespace and not datastore):
                    # If datastore field is provided, filter destination datastores
                    if 'datastore' in self.params['disk'][0] and \
                            isinstance(self.params['disk'][0]['datastore'], str) and \
                            ds.name.find(self.params['disk'][0]['datastore']) < 0:
                        continue

                    datastore = ds
                    datastore_name = datastore.name
                    datastore_freespace = ds.summary.freeSpace

        elif 'datastore' in self.params['disk'][0]:
            datastore_name = self.params['disk'][0]['datastore']
            # Check if user has provided datastore cluster first
            datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
            if datastore_cluster:
                # If user specified datastore cluster so get recommended datastore
                datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
            # Check if get_recommended_datastore or user specified datastore exists or not
            datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
        else:
            self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore")

    if not datastore and self.params['template']:
        # use the template's existing DS
        disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
        if disks:
            datastore = disks[0].backing.datastore
            datastore_name = datastore.name
        # validation
        if datastore:
            dc = self.cache.get_parent_datacenter(datastore)
            if dc.name != self.params['datacenter']:
                # Template's datastore lives in another datacenter; re-pick.
                datastore = self.autoselect_datastore()
                datastore_name = datastore.name

    if not datastore:
        if len(self.params['disk']) != 0 or self.params['template'] is None:
            self.module.fail_json(msg="Unable to find the datastore with given parameters."
                                      " This could mean, %s is a non-existent virtual machine and module tried to"
                                      " deploy it as new virtual machine with no disk. Please specify disks parameter"
                                      " or specify template to clone from." % self.params['name'])
        self.module.fail_json(msg="Failed to find a matching datastore")

    return datastore, datastore_name
+
def obj_has_parent(self, obj, parent):
    """Return True if *parent* appears in *obj*'s parent chain, compared
    by name; return False once the inventory root folder is reached or
    the chain runs out.

    Raises AssertionError when either argument is None, since the walk
    needs both a starting object and a target.
    """
    # The original guard used 'and', which only rejected the case where
    # BOTH arguments were None; a single None argument then crashed later
    # with an AttributeError on '.name'. Reject either one up front.
    if obj is None or parent is None:
        raise AssertionError()
    current_parent = obj

    while True:
        if current_parent.name == parent.name:
            return True

        # Check if we have reached till root folder
        # ('group-d1' / 'ha-folder-root' are the root-folder moIds for
        # vCenter and standalone ESXi respectively).
        moid = current_parent._moId
        if moid in ['group-d1', 'ha-folder-root']:
            return False

        current_parent = current_parent.parent
        if current_parent is None:
            return False
def get_scsi_type(self):
    """Return the SCSI controller type requested via hardware.scsi,
    defaulting to 'paravirtual'; fail the module on an invalid value."""
    disk_controller_type = "paravirtual"
    if 'hardware' in self.params:
        if 'scsi' in self.params['hardware']:
            if self.params['hardware']['scsi'] in ['buslogic', 'paravirtual', 'lsilogic', 'lsilogicsas']:
                disk_controller_type = self.params['hardware']['scsi']
            else:
                # The previous message listed only two of the four accepted
                # values, misleading users who supplied 'buslogic' or
                # 'lsilogicsas'; enumerate every valid choice.
                self.module.fail_json(msg="hardware.scsi attribute should be one of 'buslogic',"
                                          " 'paravirtual', 'lsilogic' or 'lsilogicsas'")
    return disk_controller_type
+
def find_folder(self, searchpath):
    """Walk the inventory one component of *searchpath* at a time and
    return the matching object, or None as soon as a component cannot
    be resolved."""
    # Split the path; strip any stray separators left inside a component.
    components = [part.replace('/', '') for part in searchpath.split('/')]
    last_index = len(components) - 1
    index = 0

    # Start at the inventory root and descend while components remain.
    node = self.content.rootFolder
    while node and index <= last_index:
        advanced = False
        if hasattr(node, 'childEntity'):
            for entity in node.childEntity:
                if entity.name == components[index]:
                    node = entity
                    index += 1
                    advanced = True
                    break
        elif isinstance(node, vim.Datacenter):
            # A datacenter exposes its VM folder instead of childEntity.
            if hasattr(node, 'vmFolder'):
                if node.vmFolder.name == components[index]:
                    node = node.vmFolder
                    index += 1
                    advanced = True
        else:
            node = None

        # No progress this round means the path does not exist.
        if not advanced:
            node = None

    return node
+
def get_resource_pool(self, cluster=None, host=None, resource_pool=None):
    """ Get a resource pool, filter on cluster, esxi_hostname or resource_pool if given """

    # Explicit arguments take precedence over the corresponding module params.
    cluster_name = cluster or self.params.get('cluster', None)
    host_name = host or self.params.get('esxi_hostname', None)
    resource_pool_name = resource_pool or self.params.get('resource_pool', None)

    # get the datacenter object
    datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
    if not datacenter:
        self.module.fail_json(msg='Unable to find datacenter "%s"' % self.params['datacenter'])

    # if cluster is given, get the cluster object
    if cluster_name:
        cluster = find_obj(self.content, [vim.ComputeResource], cluster_name, folder=datacenter)
        if not cluster:
            self.module.fail_json(msg='Unable to find cluster "%s"' % cluster_name)
    # if host is given, get the cluster object using the host
    elif host_name:
        host = find_obj(self.content, [vim.HostSystem], host_name, folder=datacenter)
        if not host:
            self.module.fail_json(msg='Unable to find host "%s"' % host_name)
        # The host's parent compute resource narrows the pool search below.
        cluster = host.parent
    else:
        cluster = None

    # get resource pools limiting search to cluster or datacenter
    resource_pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name, folder=cluster or datacenter)
    if not resource_pool:
        if resource_pool_name:
            self.module.fail_json(msg='Unable to find resource_pool "%s"' % resource_pool_name)
        else:
            self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster')
    return resource_pool
+
def deploy_vm(self):
    """Create a new VM, either by cloning the given template or from scratch.

    Resolves the destination folder, resource pool and datastore, builds
    config/relocate/clone specs from the module parameters, runs the
    vCenter task, then applies post-creation steps (annotation, custom
    values, power-on, IP / customization waits).

    Returns an Ansible-style result dict; on task failure the dict also
    carries the serialized specs for debugging.
    """
    # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
    # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
    # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
    # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html

    # FIXME:
    #   - static IPs

    self.folder = self.params.get('folder', None)
    if self.folder is None:
        self.module.fail_json(msg="Folder is required parameter while deploying new virtual machine")

    # Prepend / if it was missing from the folder path, also strip trailing slashes
    if not self.folder.startswith('/'):
        self.folder = '/%(folder)s' % self.params
    self.folder = self.folder.rstrip('/')

    datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
    if datacenter is None:
        self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)

    dcpath = compile_folder_path_for_object(datacenter)

    # Nested folder does not have trailing /
    if not dcpath.endswith('/'):
        dcpath += '/'

    # Check for full path first in case it was already supplied
    if (self.folder.startswith(dcpath + self.params['datacenter'] + '/vm') or
            self.folder.startswith(dcpath + '/' + self.params['datacenter'] + '/vm')):
        fullpath = self.folder
    elif self.folder.startswith('/vm/') or self.folder == '/vm':
        fullpath = "%s%s%s" % (dcpath, self.params['datacenter'], self.folder)
    elif self.folder.startswith('/'):
        fullpath = "%s%s/vm%s" % (dcpath, self.params['datacenter'], self.folder)
    else:
        fullpath = "%s%s/vm/%s" % (dcpath, self.params['datacenter'], self.folder)

    f_obj = self.content.searchIndex.FindByInventoryPath(fullpath)

    # abort if no strategy was successful
    if f_obj is None:
        # Add some debugging values in failure.
        details = {
            'datacenter': datacenter.name,
            'datacenter_path': dcpath,
            'folder': self.folder,
            'full_search_path': fullpath,
        }
        self.module.fail_json(msg='No folder %s matched in the search path : %s' % (self.folder, fullpath),
                              details=details)

    destfolder = f_obj

    if self.params['template']:
        vm_obj = self.get_vm_or_template(template_name=self.params['template'])
        if vm_obj is None:
            self.module.fail_json(msg="Could not find a template named %(template)s" % self.params)
    else:
        vm_obj = None

    # always get a resource_pool
    resource_pool = self.get_resource_pool()

    # set the destination datastore for VM & disks
    if self.params['datastore']:
        # Give precedence to datastore value provided by user
        # User may want to deploy VM to specific datastore.
        datastore_name = self.params['datastore']
        # Check if user has provided datastore cluster first
        datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
        if datastore_cluster:
            # If user specified datastore cluster so get recommended datastore
            datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
        # Check if get_recommended_datastore or user specified datastore exists or not
        datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
    else:
        (datastore, datastore_name) = self.select_datastore(vm_obj)

    self.configspec = vim.vm.ConfigSpec()
    self.configspec.deviceChange = []
    # create the relocation spec
    self.relospec = vim.vm.RelocateSpec()
    self.relospec.deviceChange = []
    # Each helper below mutates self.configspec / self.relospec in place.
    self.configure_guestid(vm_obj=vm_obj, vm_creation=True)
    self.configure_cpu_and_memory(vm_obj=vm_obj, vm_creation=True)
    self.configure_hardware_params(vm_obj=vm_obj)
    self.configure_resource_alloc_info(vm_obj=vm_obj)
    self.configure_vapp_properties(vm_obj=vm_obj)
    self.configure_disks(vm_obj=vm_obj)
    self.configure_network(vm_obj=vm_obj)
    self.configure_cdrom(vm_obj=vm_obj)

    # Find if we need network customizations (find keys in dictionary that requires customizations)
    network_changes = False
    for nw in self.params['networks']:
        for key in nw:
            # We don't need customizations for these keys
            if key == 'type' and nw['type'] == 'dhcp':
                network_changes = True
                break
            if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected', 'dvswitch_name'):
                network_changes = True
                break

    if len(self.params['customization']) > 0 or network_changes or self.params.get('customization_spec') is not None:
        self.customize_vm(vm_obj=vm_obj)

    clonespec = None
    clone_method = None
    try:
        if self.params['template']:
            # Only select specific host when ESXi hostname is provided
            if self.params['esxi_hostname']:
                self.relospec.host = self.select_host()
            self.relospec.datastore = datastore

            # Convert disk present in template if is set
            if self.params['convert']:
                for device in vm_obj.config.hardware.device:
                    if isinstance(device, vim.vm.device.VirtualDisk):
                        disk_locator = vim.vm.RelocateSpec.DiskLocator()
                        disk_locator.diskBackingInfo = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
                        if self.params['convert'] in ['thin']:
                            disk_locator.diskBackingInfo.thinProvisioned = True
                        if self.params['convert'] in ['eagerzeroedthick']:
                            disk_locator.diskBackingInfo.eagerlyScrub = True
                        if self.params['convert'] in ['thick']:
                            disk_locator.diskBackingInfo.diskMode = "persistent"
                        disk_locator.diskId = device.key
                        disk_locator.datastore = datastore
                        self.relospec.disk.append(disk_locator)

            # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
            # > pool: For a clone operation from a template to a virtual machine, this argument is required.
            self.relospec.pool = resource_pool
            linked_clone = self.params.get('linked_clone')
            snapshot_src = self.params.get('snapshot_src', None)
            if linked_clone:
                if snapshot_src is not None:
                    self.relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking
                else:
                    self.module.fail_json(msg="Parameter 'linked_src' and 'snapshot_src' are"
                                              " required together for linked clone operation.")

            clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=self.relospec)
            if self.customspec:
                clonespec.customization = self.customspec

            if snapshot_src is not None:
                if vm_obj.snapshot is None:
                    self.module.fail_json(msg="No snapshots present for virtual machine or template [%(template)s]" % self.params)
                snapshot = self.get_snapshots_by_name_recursively(snapshots=vm_obj.snapshot.rootSnapshotList,
                                                                  snapname=snapshot_src)
                # Exactly one match is required to clone from a snapshot.
                if len(snapshot) != 1:
                    self.module.fail_json(msg='virtual machine "%(template)s" does not contain'
                                              ' snapshot named "%(snapshot_src)s"' % self.params)

                clonespec.snapshot = snapshot[0].snapshot

            clonespec.config = self.configspec
            clone_method = 'Clone'
            try:
                task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
            except vim.fault.NoPermission as e:
                self.module.fail_json(msg="Failed to clone virtual machine %s to folder %s "
                                          "due to permission issue: %s" % (self.params['name'],
                                                                           destfolder,
                                                                           to_native(e.msg)))
            self.change_detected = True
        else:
            # ConfigSpec require name for VM creation
            self.configspec.name = self.params['name']
            self.configspec.files = vim.vm.FileInfo(logDirectory=None,
                                                    snapshotDirectory=None,
                                                    suspendDirectory=None,
                                                    vmPathName="[" + datastore_name + "]")

            clone_method = 'CreateVM_Task'
            try:
                task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool)
            except vmodl.fault.InvalidRequest as e:
                self.module.fail_json(msg="Failed to create virtual machine due to invalid configuration "
                                          "parameter %s" % to_native(e.msg))
            except vim.fault.RestrictedVersion as e:
                self.module.fail_json(msg="Failed to create virtual machine due to "
                                          "product versioning restrictions: %s" % to_native(e.msg))
            self.change_detected = True
        self.wait_for_task(task)
    except TypeError as e:
        self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs. %s" % to_text(e))

    if task.info.state == 'error':
        # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
        # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173

        # provide these to the user for debugging
        clonespec_json = serialize_spec(clonespec)
        configspec_json = serialize_spec(self.configspec)
        kwargs = {
            'changed': self.change_applied,
            'failed': True,
            'msg': task.info.error.msg,
            'clonespec': clonespec_json,
            'configspec': configspec_json,
            'clone_method': clone_method
        }

        return kwargs
    else:
        # set annotation
        vm = task.info.result
        if self.params['annotation']:
            annotation_spec = vim.vm.ConfigSpec()
            annotation_spec.annotation = str(self.params['annotation'])
            task = vm.ReconfigVM_Task(annotation_spec)
            self.wait_for_task(task)
            if task.info.state == 'error':
                return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'annotation'}

        if self.params['customvalues']:
            vm_custom_spec = vim.vm.ConfigSpec()
            self.customize_customvalues(vm_obj=vm, config_spec=vm_custom_spec)
            task = vm.ReconfigVM_Task(vm_custom_spec)
            self.wait_for_task(task)
            if task.info.state == 'error':
                return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customvalues'}

        # Power on when requested explicitly, or implicitly because a wait
        # (IP address / customization) needs a running guest.
        if self.params['wait_for_ip_address'] or self.params['wait_for_customization'] or self.params['state'] in ['poweredon', 'restarted']:
            set_vm_power_state(self.content, vm, 'poweredon', force=False)

            if self.params['wait_for_ip_address']:
                wait_for_vm_ip(self.content, vm, self.params['wait_for_ip_address_timeout'])

            if self.params['wait_for_customization']:
                is_customization_ok = self.wait_for_customization(vm=vm, timeout=self.params['wait_for_customization_timeout'])
                if not is_customization_ok:
                    vm_facts = self.gather_facts(vm)
                    return {'changed': self.change_applied, 'failed': True, 'instance': vm_facts, 'op': 'customization'}

        vm_facts = self.gather_facts(vm)
        return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
+
def get_snapshots_by_name_recursively(self, snapshots, snapname):
    """Collect every snapshot in the tree whose name equals *snapname*.

    Children of a matching snapshot are not searched further; children of
    non-matching snapshots are searched depth-first.
    """
    matches = []
    for node in snapshots:
        if node.name == snapname:
            matches.append(node)
        else:
            matches.extend(
                self.get_snapshots_by_name_recursively(node.childSnapshotList, snapname))
    return matches
+
def reconfigure_vm(self):
    """Reconfigure an existing VM to match the module parameters.

    Rebuilds config/relocate specs via the configure_* helpers, relocates
    to a new resource pool if needed, applies the reconfigure task only
    when a change was detected, then handles rename, template conversion
    in both directions, and optional guest OS customization.

    Returns an Ansible-style result dict; intermediate task failures
    return early with an 'op' marker identifying the failing step.
    """
    self.configspec = vim.vm.ConfigSpec()
    self.configspec.deviceChange = []
    # create the relocation spec
    self.relospec = vim.vm.RelocateSpec()
    self.relospec.deviceChange = []
    # Each helper compares current VM state to params and flips
    # self.change_detected when a reconfigure is needed.
    self.configure_guestid(vm_obj=self.current_vm_obj)
    self.configure_cpu_and_memory(vm_obj=self.current_vm_obj)
    self.configure_hardware_params(vm_obj=self.current_vm_obj)
    self.configure_disks(vm_obj=self.current_vm_obj)
    self.configure_network(vm_obj=self.current_vm_obj)
    self.configure_cdrom(vm_obj=self.current_vm_obj)
    self.customize_customvalues(vm_obj=self.current_vm_obj, config_spec=self.configspec)
    self.configure_resource_alloc_info(vm_obj=self.current_vm_obj)
    self.configure_vapp_properties(vm_obj=self.current_vm_obj)

    if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']:
        self.configspec.annotation = str(self.params['annotation'])
        self.change_detected = True

    if self.params['resource_pool']:
        self.relospec.pool = self.get_resource_pool()

        # Relocate only when the requested pool differs from the current one.
        if self.relospec.pool != self.current_vm_obj.resourcePool:
            task = self.current_vm_obj.RelocateVM_Task(spec=self.relospec)
            self.wait_for_task(task)
            if task.info.state == 'error':
                return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'relocate'}

    # Only send VMware task if we see a modification
    if self.change_detected:
        task = None
        try:
            task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec)
        except vim.fault.RestrictedVersion as e:
            self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
                                      " product versioning restrictions: %s" % to_native(e.msg))
        self.wait_for_task(task)
        if task.info.state == 'error':
            return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'reconfig'}

    # Rename VM
    # Only renames when the VM was addressed by uuid and a differing name
    # was supplied, so a name-addressed VM is never accidentally renamed.
    if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name:
        task = self.current_vm_obj.Rename_Task(self.params['name'])
        self.wait_for_task(task)
        if task.info.state == 'error':
            return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'rename'}

    # Mark VM as Template
    if self.params['is_template'] and not self.current_vm_obj.config.template:
        try:
            self.current_vm_obj.MarkAsTemplate()
            self.change_applied = True
        except vmodl.fault.NotSupported as e:
            self.module.fail_json(msg="Failed to mark virtual machine [%s] "
                                      "as template: %s" % (self.params['name'], e.msg))

    # Mark Template as VM
    elif not self.params['is_template'] and self.current_vm_obj.config.template:
        resource_pool = self.get_resource_pool()
        kwargs = dict(pool=resource_pool)

        if self.params.get('esxi_hostname', None):
            host_system_obj = self.select_host()
            kwargs.update(host=host_system_obj)

        try:
            self.current_vm_obj.MarkAsVirtualMachine(**kwargs)
            self.change_applied = True
        except vim.fault.InvalidState as invalid_state:
            self.module.fail_json(msg="Virtual machine is not marked"
                                      " as template : %s" % to_native(invalid_state.msg))
        except vim.fault.InvalidDatastore as invalid_ds:
            self.module.fail_json(msg="Converting template to virtual machine"
                                      " operation cannot be performed on the"
                                      " target datastores: %s" % to_native(invalid_ds.msg))
        except vim.fault.CannotAccessVmComponent as cannot_access:
            self.module.fail_json(msg="Failed to convert template to virtual machine"
                                      " as operation unable access virtual machine"
                                      " component: %s" % to_native(cannot_access.msg))
        except vmodl.fault.InvalidArgument as invalid_argument:
            self.module.fail_json(msg="Failed to convert template to virtual machine"
                                      " due to : %s" % to_native(invalid_argument.msg))
        except Exception as generic_exc:
            self.module.fail_json(msg="Failed to convert template to virtual machine"
                                      " due to generic error : %s" % to_native(generic_exc))

        # Automatically update VMware UUID when converting template to VM.
        # This avoids an interactive prompt during VM startup.
        uuid_action = [x for x in self.current_vm_obj.config.extraConfig if x.key == "uuid.action"]
        if not uuid_action:
            uuid_action_opt = vim.option.OptionValue()
            uuid_action_opt.key = "uuid.action"
            uuid_action_opt.value = "create"
            self.configspec.extraConfig.append(uuid_action_opt)

            self.change_detected = True

    # add customize existing VM after VM re-configure
    if 'existing_vm' in self.params['customization'] and self.params['customization']['existing_vm']:
        if self.current_vm_obj.config.template:
            self.module.fail_json(msg="VM is template, not support guest OS customization.")
        if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
            self.module.fail_json(msg="VM is not in poweroff state, can not do guest OS customization.")
        cus_result = self.customize_exist_vm()
        if cus_result['failed']:
            return cus_result

    vm_facts = self.gather_facts(self.current_vm_obj)
    return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
+
def customize_exist_vm(self):
    """Run guest OS customization against the existing (powered-off) VM.

    Builds the customization spec when network or customization params
    require one, launches CustomizeVM_Task, and optionally powers on and
    waits for the customization events to complete.

    Returns an Ansible-style result dict with an 'op' marker on failure.
    """
    task = None
    # Find if we need network customizations (find keys in dictionary that requires customizations)
    network_changes = False
    for nw in self.params['networks']:
        for key in nw:
            # We don't need customizations for these keys
            if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected', 'dvswitch_name'):
                network_changes = True
                break
    # NOTE(review): threshold is > 1 here (deploy_vm uses > 0) — presumably
    # because the 'existing_vm' flag itself counts as one entry; confirm.
    if len(self.params['customization']) > 1 or network_changes or self.params.get('customization_spec'):
        self.customize_vm(vm_obj=self.current_vm_obj)
    try:
        task = self.current_vm_obj.CustomizeVM_Task(self.customspec)
    except vim.fault.CustomizationFault as e:
        self.module.fail_json(msg="Failed to customization virtual machine due to CustomizationFault: %s" % to_native(e.msg))
    except vim.fault.RuntimeFault as e:
        self.module.fail_json(msg="failed to customization virtual machine due to RuntimeFault: %s" % to_native(e.msg))
    except Exception as e:
        self.module.fail_json(msg="failed to customization virtual machine due to fault: %s" % to_native(e.msg))
    self.wait_for_task(task)
    if task.info.state == 'error':
        return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customize_exist'}

    if self.params['wait_for_customization']:
        # Customization only proceeds inside a running guest.
        set_vm_power_state(self.content, self.current_vm_obj, 'poweredon', force=False)
        is_customization_ok = self.wait_for_customization(vm=self.current_vm_obj, timeout=self.params['wait_for_customization_timeout'])
        if not is_customization_ok:
            return {'changed': self.change_applied, 'failed': True,
                    'msg': 'Wait for customization failed due to timeout', 'op': 'wait_for_customize_exist'}

    return {'changed': self.change_applied, 'failed': False}
+
def wait_for_task(self, task, poll_interval=1):
    """
    Block until the VMware task reaches a terminal state ('error' or
    'success'), polling every *poll_interval* seconds.

    Inputs:
    - task: the task to wait for
    - poll_interval: polling interval to check the task, in seconds

    Modifies:
    - self.change_applied (set sticky-True once any task succeeds)
    """
    # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
    # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
    terminal_states = ('error', 'success')
    while True:
        if task.info.state in terminal_states:
            break
        time.sleep(poll_interval)
    if task.info.state == 'success':
        self.change_applied = True
+
def get_vm_events(self, vm, eventTypeIdList):
    """Query vCenter's event manager for events of the given type ids,
    scoped to *vm* itself (recursion="self", no children)."""
    byEntity = vim.event.EventFilterSpec.ByEntity(entity=vm, recursion="self")
    filterSpec = vim.event.EventFilterSpec(entity=byEntity, eventTypeId=eventTypeIdList)
    eventManager = self.content.eventManager
    return eventManager.QueryEvent(filterSpec)
+
def wait_for_customization(self, vm, timeout=3600, sleep=10):
    """Poll vCenter events until guest customization finishes.

    Waits first for a CustomizationStartedEvent, then for a terminal
    CustomizationSucceeded/CustomizationFailed event. Returns True only
    on success; warns via the module and returns False on failure or
    timeout. The timeout budget is applied separately to each phase.

    :param vm: virtual machine whose events are queried
    :param timeout: wait budget per phase, in seconds
    :param sleep: seconds between event queries
    """
    poll = int(timeout // sleep)
    thispoll = 0
    while thispoll <= poll:
        eventStarted = self.get_vm_events(vm, ['CustomizationStartedEvent'])
        if len(eventStarted):
            # Customization has begun: reset the counter and wait for a
            # terminal event.
            thispoll = 0
            while thispoll <= poll:
                eventsFinishedResult = self.get_vm_events(vm, ['CustomizationSucceeded', 'CustomizationFailed'])
                if len(eventsFinishedResult):
                    if not isinstance(eventsFinishedResult[0], vim.event.CustomizationSucceeded):
                        self.module.warn("Customization failed with error {%s}:{%s}"
                                         % (eventsFinishedResult[0]._wsdlName, eventsFinishedResult[0].fullFormattedMessage))
                        return False
                    else:
                        return True
                else:
                    time.sleep(sleep)
                    thispoll += 1
            # Inner loop exhausted without seeing a terminal event.
            if len(eventsFinishedResult) == 0:
                self.module.warn('Waiting for customization result event timed out.')
            return False
        else:
            time.sleep(sleep)
            thispoll += 1
    # Outer loop exhausted without ever seeing the start event.
    if len(eventStarted):
        self.module.warn('Waiting for customization result event timed out.')
    else:
        self.module.warn('Waiting for customization start event timed out.')
    return False
+
+
def main():
    """Module entry point: build the argument spec, locate the VM by name
    or UUID, then dispatch to remove / reconfigure / power-state / deploy
    handling according to the requested state (honoring check mode)."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        state=dict(type='str', default='present',
                   choices=['absent', 'poweredoff', 'poweredon', 'present', 'rebootguest', 'restarted', 'shutdownguest', 'suspended']),
        template=dict(type='str', aliases=['template_src']),
        is_template=dict(type='bool', default=False),
        annotation=dict(type='str', aliases=['notes']),
        customvalues=dict(type='list', default=[]),
        name=dict(type='str'),
        name_match=dict(type='str', choices=['first', 'last'], default='first'),
        uuid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        folder=dict(type='str'),
        guest_id=dict(type='str'),
        disk=dict(type='list', default=[]),
        # NOTE(review): 'list_or_dict' is a custom type callable defined
        # elsewhere in this file; cdrom accepts either a list or a dict.
        cdrom=dict(type=list_or_dict, default=[]),
        hardware=dict(type='dict', default={}),
        force=dict(type='bool', default=False),
        datacenter=dict(type='str', default='ha-datacenter'),
        esxi_hostname=dict(type='str'),
        cluster=dict(type='str'),
        wait_for_ip_address=dict(type='bool', default=False),
        wait_for_ip_address_timeout=dict(type='int', default=300),
        state_change_timeout=dict(type='int', default=0),
        snapshot_src=dict(type='str'),
        linked_clone=dict(type='bool', default=False),
        networks=dict(type='list', default=[]),
        resource_pool=dict(type='str'),
        customization=dict(type='dict', default={}, no_log=True),
        customization_spec=dict(type='str', default=None),
        wait_for_customization=dict(type='bool', default=False),
        wait_for_customization_timeout=dict(type='int', default=3600),
        vapp_properties=dict(type='list', default=[]),
        datastore=dict(type='str'),
        convert=dict(type='str', choices=['thin', 'thick', 'eagerzeroedthick']),
        delete_from_inventory=dict(type='bool', default=False),
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           mutually_exclusive=[
                               ['cluster', 'esxi_hostname'],
                           ],
                           required_one_of=[
                               ['name', 'uuid'],
                           ],
                           )

    result = {'failed': False, 'changed': False}

    pyv = PyVmomiHelper(module)

    # Check if the VM exists before continuing
    vm = pyv.get_vm()

    # VM already exists
    if vm:
        if module.params['state'] == 'absent':
            # destroy it
            if module.check_mode:
                result.update(
                    vm_name=vm.name,
                    changed=True,
                    current_powerstate=vm.summary.runtime.powerState.lower(),
                    desired_operation='remove_vm',
                )
                module.exit_json(**result)
            if module.params['force']:
                # has to be poweredoff first
                set_vm_power_state(pyv.content, vm, 'poweredoff', module.params['force'])
            result = pyv.remove_vm(vm, module.params['delete_from_inventory'])
        elif module.params['state'] == 'present':
            if module.check_mode:
                result.update(
                    vm_name=vm.name,
                    changed=True,
                    desired_operation='reconfigure_vm',
                )
                module.exit_json(**result)
            result = pyv.reconfigure_vm()
        elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']:
            if module.check_mode:
                result.update(
                    vm_name=vm.name,
                    changed=True,
                    current_powerstate=vm.summary.runtime.powerState.lower(),
                    desired_operation='set_vm_power_state',
                )
                module.exit_json(**result)
            # set powerstate
            tmp_result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'])
            if tmp_result['changed']:
                result["changed"] = True
                if module.params['state'] in ['poweredon', 'restarted', 'rebootguest'] and module.params['wait_for_ip_address']:
                    wait_result = wait_for_vm_ip(pyv.content, vm, module.params['wait_for_ip_address_timeout'])
                    if not wait_result:
                        module.fail_json(msg='Waiting for IP address timed out')
                    tmp_result['instance'] = wait_result
            # Merge the power-state outcome into the module result.
            if not tmp_result["failed"]:
                result["failed"] = False
                result['instance'] = tmp_result['instance']
            if tmp_result["failed"]:
                result["failed"] = True
                result["msg"] = tmp_result["msg"]
        else:
            # This should not happen
            raise AssertionError()
    # VM doesn't exist
    else:
        if module.params['state'] in ['poweredon', 'poweredoff', 'present', 'restarted', 'suspended']:
            if module.check_mode:
                result.update(
                    changed=True,
                    desired_operation='deploy_vm',
                )
                module.exit_json(**result)
            result = pyv.deploy_vm()
            if result['failed']:
                module.fail_json(msg='Failed to create a virtual machine : %s' % result['msg'])

    if result['failed']:
        module.fail_json(**result)
    else:
        module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/vmware_guest_custom_attributes.py b/test/support/integration/plugins/modules/vmware_guest_custom_attributes.py
new file mode 100644
index 0000000000..e55a3ad754
--- /dev/null
+++ b/test/support/integration/plugins/modules/vmware_guest_custom_attributes.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright, (c) 2018, Ansible Project
+# Copyright, (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+
+DOCUMENTATION = '''
+---
+module: vmware_guest_custom_attributes
+short_description: Manage custom attributes from VMware for the given virtual machine
+description:
+ - This module can be used to add, remove and update custom attributes for the given virtual machine.
+version_added: 2.7
+author:
+ - Jimmy Conner (@cigamit)
+ - Abhijeet Kasurde (@Akasurde)
+notes:
+ - Tested on vSphere 6.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ name:
+ description:
+ - Name of the virtual machine to work with.
+ - This is required parameter, if C(uuid) or C(moid) is not supplied.
+ type: str
+ state:
+ description:
+ - The action to take.
+ - If set to C(present), then custom attribute is added or updated.
+ - If set to C(absent), then custom attribute is removed.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+ uuid:
+ description:
+ - UUID of the virtual machine to manage if known. This is VMware's unique identifier.
+ - This is required parameter, if C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known, this is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ version_added: '2.9'
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: no
+ type: bool
+ version_added: '2.8'
+ folder:
+ description:
+ - Absolute path to find an existing guest.
+ - This is required parameter, if C(name) is supplied and multiple virtual machines with same name are found.
+ type: str
+ datacenter:
+ description:
+ - Datacenter name where the virtual machine is located in.
+ required: True
+ type: str
+ attributes:
+ description:
+ - A list of name and value of custom attributes that needs to be manage.
+ - Value of custom attribute is not required and will be ignored, if C(state) is set to C(absent).
+ default: []
+ type: list
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+- name: Add virtual machine custom attributes
+ vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: present
+ attributes:
+ - name: MyAttribute
+ value: MyValue
+ delegate_to: localhost
+ register: attributes
+
+- name: Add multiple virtual machine custom attributes
+ vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: present
+ attributes:
+ - name: MyAttribute
+ value: MyValue
+ - name: MyAttribute2
+ value: MyValue2
+ delegate_to: localhost
+ register: attributes
+
+- name: Remove virtual machine Attribute
+ vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: absent
+ attributes:
+ - name: MyAttribute
+ delegate_to: localhost
+ register: attributes
+
+- name: Remove virtual machine Attribute using Virtual Machine MoID
+ vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ moid: vm-42
+ state: absent
+ attributes:
+ - name: MyAttribute
+ delegate_to: localhost
+ register: attributes
+'''
+
+RETURN = """
+custom_attributes:
+ description: metadata about the virtual machine attributes
+ returned: always
+ type: dict
+ sample: {
+ "mycustom": "my_custom_value",
+ "mycustom_2": "my_custom_value_2",
+ "sample_1": "sample_1_value",
+ "sample_2": "sample_2_value",
+ "sample_3": "sample_3_value"
+ }
+"""
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
class VmAttributeManager(PyVmomi):
    """Helper around PyVmomi for reading and writing a virtual machine's
    custom attribute (custom field) values."""

    def __init__(self, module):
        super(VmAttributeManager, self).__init__(module)

    def set_custom_field(self, vm, user_fields):
        """Apply the requested custom attributes to *vm*.

        :param vm: virtual machine object to update
        :param user_fields: list of dicts with 'name' and optional 'value';
            a missing 'value' defaults to '' (which writes an empty value)
        :return: dict with 'changed', 'failed' and the 'custom_attributes'
            that were actually (or would be, in check mode) written
        """
        result_fields = dict()
        change_list = list()
        changed = False

        for field in user_fields:
            # Existing field definition with this name, or False if none.
            field_key = self.check_exists(field['name'])
            found = False
            field_value = field.get('value', '')

            # Pair each field definition's name with this VM's current value
            # for the matching key, then look for the requested field.
            for k, v in [(x.name, v.value) for x in self.custom_field_mgr for v in vm.customValue if x.key == v.key]:
                if k == field['name']:
                    found = True
                    # Only issue a SetField when the stored value differs
                    # (keeps the module idempotent).
                    if v != field_value:
                        if not self.module.check_mode:
                            self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
                        result_fields[k] = field_value
                        change_list.append(True)
            # Field has no value on the VM yet: create the definition if it
            # does not exist, then set the value (writes skipped in check mode).
            if not found and field_value != "":
                if not field_key and not self.module.check_mode:
                    field_key = self.content.customFieldsManager.AddFieldDefinition(name=field['name'], moType=vim.VirtualMachine)
                change_list.append(True)
                if not self.module.check_mode:
                    self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
                result_fields[field['name']] = field_value

        if any(change_list):
            changed = True

        return {'changed': changed, 'failed': False, 'custom_attributes': result_fields}

    def check_exists(self, field):
        """Return the custom field definition named *field*, or False."""
        for x in self.custom_field_mgr:
            if x.name == field:
                return x
        return False
+
+
def main():
    """Module entry point: locate the virtual machine and add/update or
    clear its custom attributes."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter=dict(type='str'),
        name=dict(type='str'),
        folder=dict(type='str'),
        uuid=dict(type='str'),
        moid=dict(type='str'),
        use_instance_uuid=dict(type='bool', default=False),
        state=dict(type='str', default='present',
                   choices=['absent', 'present']),
        attributes=dict(
            type='list',
            default=[],
            # Declare the element type so AnsibleModule validates each list
            # entry against the sub-options instead of leaving the entries
            # untyped.
            elements='dict',
            options=dict(
                name=dict(type='str', required=True),
                value=dict(type='str'),
            )
        ),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[
            ['name', 'uuid', 'moid']
        ],
    )

    if module.params.get('folder'):
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = module.params['folder'].rstrip('/')

    pyv = VmAttributeManager(module)
    results = {'changed': False, 'failed': False, 'instance': dict()}

    # Check if the virtual machine exists before continuing
    vm = pyv.get_vm()

    if vm:
        # Both states funnel into set_custom_field(); for 'absent' the
        # attribute values default to '' inside set_custom_field, which
        # clears the stored value (the field definition itself remains).
        if module.params['state'] == "present":
            results = pyv.set_custom_field(vm, module.params['attributes'])
        elif module.params['state'] == "absent":
            results = pyv.set_custom_field(vm, module.params['attributes'])
        module.exit_json(**results)
    else:
        # virtual machine does not exist
        vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
        module.fail_json(msg="Unable to manage custom attributes for non-existing"
                             " virtual machine %s" % vm_id)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/vmware_host_hyperthreading.py b/test/support/integration/plugins/modules/vmware_host_hyperthreading.py
new file mode 100644
index 0000000000..ad579e1e5e
--- /dev/null
+++ b/test/support/integration/plugins/modules/vmware_host_hyperthreading.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+# Standard Ansible module metadata: community-supported, preview status.
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'status': ['preview'],
+    'supported_by': 'community'
+}
+
+# DOCUMENTATION / EXAMPLES / RETURN below are YAML consumed by Ansible's doc
+# tooling; their content is runtime data for that tooling and is left untouched.
+DOCUMENTATION = r'''
+---
+module: vmware_host_hyperthreading
+short_description: Enables/Disables Hyperthreading optimization for an ESXi host system
+description:
+- This module can be used to enable or disable Hyperthreading optimization for ESXi host systems in given vCenter infrastructure.
+- It also checks if Hyperthreading is activated/deactivated and if the host needs to be restarted.
+- The module informs the user if Hyperthreading is enabled but inactive because the processor is vulnerable to L1 Terminal Fault (L1TF).
+version_added: 2.8
+author:
+- Christian Kotte (@ckotte)
+notes:
+- Tested on vSphere 6.5
+requirements:
+- python >= 2.6
+- PyVmomi
+options:
+  state:
+    description:
+    - Enable or disable Hyperthreading.
+    - You need to reboot the ESXi host if you change the configuration.
+    - Make sure that Hyperthreading is enabled in the BIOS. Otherwise, it will be enabled, but never activated.
+    type: str
+    choices: [ enabled, disabled ]
+    default: 'enabled'
+  esxi_hostname:
+    description:
+    - Name of the host system to work with.
+    - This parameter is required if C(cluster_name) is not specified.
+    type: str
+  cluster_name:
+    description:
+    - Name of the cluster from which all host systems will be used.
+    - This parameter is required if C(esxi_hostname) is not specified.
+    type: str
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Enable Hyperthreading for an host system
+  vmware_host_hyperthreading:
+    hostname: '{{ vcenter_hostname }}'
+    username: '{{ vcenter_username }}'
+    password: '{{ vcenter_password }}'
+    esxi_hostname: '{{ esxi_hostname }}'
+    state: enabled
+    validate_certs: no
+  delegate_to: localhost
+
+- name: Disable Hyperthreading for an host system
+  vmware_host_hyperthreading:
+    hostname: '{{ vcenter_hostname }}'
+    username: '{{ vcenter_username }}'
+    password: '{{ vcenter_password }}'
+    esxi_hostname: '{{ esxi_hostname }}'
+    state: disabled
+    validate_certs: no
+  delegate_to: localhost
+
+- name: Disable Hyperthreading for all host systems from cluster
+  vmware_host_hyperthreading:
+    hostname: '{{ vcenter_hostname }}'
+    username: '{{ vcenter_username }}'
+    password: '{{ vcenter_password }}'
+    cluster_name: '{{ cluster_name }}'
+    state: disabled
+    validate_certs: no
+  delegate_to: localhost
+'''
+
+RETURN = r'''
+results:
+    description: metadata about host system's Hyperthreading configuration
+    returned: always
+    type: dict
+    sample: {
+        "esxi01": {
+            "msg": "Hyperthreading is already enabled and active for host 'esxi01'",
+            "state_current": "active",
+            "state": "enabled",
+        },
+    }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class VmwareHostHyperthreading(PyVmomi):
+    """Manage Hyperthreading for an ESXi host system.
+
+    Resolves the target host list from either ``cluster_name`` or
+    ``esxi_hostname`` at construction time and fails the module if no host
+    matches.
+    """
+    def __init__(self, module):
+        super(VmwareHostHyperthreading, self).__init__(module)
+        cluster_name = self.params.get('cluster_name')
+        esxi_host_name = self.params.get('esxi_hostname')
+        # Either parameter may be None; get_all_host_objs handles the mix.
+        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+        if not self.hosts:
+            self.module.fail_json(msg="Failed to find host system.")
+
+    def ensure(self):
+        """Drive each host toward the desired Hyperthreading state.
+
+        Builds a per-host result dict under ``results['result'][host.name]``
+        (keys: state, state_current, changed, msg, optionally state_previous),
+        sets the module-level ``changed`` flag if any host changed, and exits
+        the module via exit_json/fail_json — this method does not return.
+        Honors check mode: no pyVmomi mutation calls are made when
+        ``check_mode`` is set.
+        """
+        results = dict(changed=False, result=dict())
+        desired_state = self.params.get('state')
+        host_change_list = []
+        for host in self.hosts:
+            changed = False
+            results['result'][host.name] = dict(msg='')
+
+            # hyperThread exposes .config (enabled in config), .active
+            # (actually in effect, i.e. after reboot) and .available.
+            hyperthreading_info = host.config.hyperThread
+
+            results['result'][host.name]['state'] = desired_state
+            if desired_state == 'enabled':
+                # Don't do anything if Hyperthreading is already enabled
+                if hyperthreading_info.config:
+                    if hyperthreading_info.active:
+                        results['result'][host.name]['changed'] = False
+                        results['result'][host.name]['state_current'] = "active"
+                        results['result'][host.name]['msg'] = "Hyperthreading is enabled and active"
+                    if not hyperthreading_info.active:
+                        # L1 Terminal Fault (L1TF)/Foreshadow mitigation workaround (https://kb.vmware.com/s/article/55806)
+                        # Enabled-but-inactive can mean either "reboot pending"
+                        # or "suppressed by the L1TF mitigation boot option".
+                        option_manager = host.configManager.advancedOption
+                        try:
+                            mitigation = option_manager.QueryOptions('VMkernel.Boot.hyperthreadingMitigation')
+                        except vim.fault.InvalidName:
+                            # Option absent on this ESXi build; treat as "no mitigation".
+                            mitigation = None
+                        if mitigation and mitigation[0].value:
+                            results['result'][host.name]['changed'] = False
+                            results['result'][host.name]['state_current'] = "enabled"
+                            results['result'][host.name]['msg'] = ("Hyperthreading is enabled, but not active because the"
+                                                                   " processor is vulnerable to L1 Terminal Fault (L1TF).")
+                        else:
+                            changed = results['result'][host.name]['changed'] = True
+                            results['result'][host.name]['state_current'] = "enabled"
+                            results['result'][host.name]['msg'] = ("Hyperthreading is enabled, but not active."
+                                                                   " A reboot is required!")
+                # Enable Hyperthreading
+                else:
+                    # Check if Hyperthreading is available
+                    if hyperthreading_info.available:
+                        if not self.module.check_mode:
+                            try:
+                                host.configManager.cpuScheduler.EnableHyperThreading()
+                                changed = results['result'][host.name]['changed'] = True
+                                results['result'][host.name]['state_previous'] = "disabled"
+                                results['result'][host.name]['state_current'] = "enabled"
+                                results['result'][host.name]['msg'] = (
+                                    "Hyperthreading enabled for host. Reboot the host to activate it."
+                                )
+                            except vmodl.fault.NotSupported as not_supported:
+                                # This should never happen since Hyperthreading is available
+                                self.module.fail_json(
+                                    msg="Failed to enable Hyperthreading for host '%s' : %s" %
+                                    (host.name, to_native(not_supported.msg))
+                                )
+                            except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
+                                self.module.fail_json(
+                                    msg="Failed to enable Hyperthreading for host '%s' due to : %s" %
+                                    (host.name, to_native(runtime_fault.msg))
+                                )
+                        else:
+                            # check mode: report the change without performing it
+                            changed = results['result'][host.name]['changed'] = True
+                            results['result'][host.name]['state_previous'] = "disabled"
+                            results['result'][host.name]['state_current'] = "enabled"
+                            results['result'][host.name]['msg'] = "Hyperthreading will be enabled"
+                    else:
+                        self.module.fail_json(msg="Hyperthreading optimization is not available for host '%s'" % host.name)
+            elif desired_state == 'disabled':
+                # Don't do anything if Hyperthreading is already disabled
+                if not hyperthreading_info.config:
+                    if not hyperthreading_info.active:
+                        results['result'][host.name]['changed'] = False
+                        results['result'][host.name]['state_current'] = "inactive"
+                        results['result'][host.name]['msg'] = "Hyperthreading is disabled and inactive"
+                    if hyperthreading_info.active:
+                        # Disabled in config but still active until the host reboots.
+                        changed = results['result'][host.name]['changed'] = True
+                        results['result'][host.name]['state_current'] = "disabled"
+                        results['result'][host.name]['msg'] = ("Hyperthreading is already disabled"
+                                                               " but still active. A reboot is required!")
+                # Disable Hyperthreading
+                else:
+                    # Check if Hyperthreading is available
+                    if hyperthreading_info.available:
+                        if not self.module.check_mode:
+                            try:
+                                host.configManager.cpuScheduler.DisableHyperThreading()
+                                changed = results['result'][host.name]['changed'] = True
+                                results['result'][host.name]['state_previous'] = "enabled"
+                                results['result'][host.name]['state_current'] = "disabled"
+                                results['result'][host.name]['msg'] = (
+                                    "Hyperthreading disabled. Reboot the host to deactivate it."
+                                )
+                            except vmodl.fault.NotSupported as not_supported:
+                                # This should never happen since Hyperthreading is available
+                                self.module.fail_json(
+                                    msg="Failed to disable Hyperthreading for host '%s' : %s" %
+                                    (host.name, to_native(not_supported.msg))
+                                )
+                            except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
+                                self.module.fail_json(
+                                    msg="Failed to disable Hyperthreading for host '%s' due to : %s" %
+                                    (host.name, to_native(runtime_fault.msg))
+                                )
+                        else:
+                            # check mode: report the change without performing it
+                            changed = results['result'][host.name]['changed'] = True
+                            results['result'][host.name]['state_previous'] = "enabled"
+                            results['result'][host.name]['state_current'] = "disabled"
+                            results['result'][host.name]['msg'] = "Hyperthreading will be disabled"
+                    else:
+                        self.module.fail_json(msg="Hyperthreading optimization is not available for host '%s'" % host.name)
+
+            host_change_list.append(changed)
+
+        if any(host_change_list):
+            results['changed'] = True
+        self.module.exit_json(**results)
+
+
+def main():
+    """Module entry point: parse arguments and run the Hyperthreading manager.
+
+    Requires exactly one host selector (cluster_name or esxi_hostname) and
+    supports check mode; ensure() exits the module itself.
+    """
+    argument_spec = vmware_argument_spec()
+    argument_spec.update(
+        state=dict(default='enabled', choices=['enabled', 'disabled']),
+        esxi_hostname=dict(type='str', required=False),
+        cluster_name=dict(type='str', required=False),
+    )
+
+    module = AnsibleModule(argument_spec=argument_spec,
+                           required_one_of=[
+                               ['cluster_name', 'esxi_hostname'],
+                           ],
+                           supports_check_mode=True
+                           )
+
+    hyperthreading = VmwareHostHyperthreading(module)
+    hyperthreading.ensure()
+
+
+# Standard Ansible module entry guard.
+if __name__ == '__main__':
+    main()