author     Matt Clay <mclay@redhat.com>  2020-02-25 23:18:50 -0800
committer  GitHub <noreply@github.com>   2020-02-25 23:18:50 -0800
commit     e3591223a042caa537fcc88cf7553227b8657f70 (patch)
tree       137991b3e16fb757859fcbbeafd93d7d46ef2a7b
parent     a19ae28326ce861c0eeae803e939050b5bcd64bd (diff)
download   ansible-e3591223a042caa537fcc88cf7553227b8657f70.tar.gz
Second batch of incidental integration tests. (#67765)
* Update incidental test aliases.
* Rewrite target references for renamed targets.
* Add incidental tests to CI.
* Update sanity tests for incidental cloud tests.
* Initial copy of incidental tests.
* Copy contrib files into test.
* Update paths in test.
* Add support plugins.
* Update plugin to work around missing deps.
* Update sanity ignores.
* Fix matrix entries.
* Remove debug echo.
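The targets added here can also be exercised locally through ansible-test once the change is checked out. The command below is illustrative only: the target name is taken from this commit, the default test container is assumed, and cloud targets additionally expect the usual ansible-test cloud credential configuration (not shown here).

$ ansible-test integration incidental_aws_codebuild --docker default -v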
Diffstat (file mode / path / lines changed):
-rw-r--r--  shippable.yml  16
-rw-r--r--  test/integration/targets/incidental_aws_codebuild/aliases  2
-rw-r--r--  test/integration/targets/incidental_aws_codebuild/defaults/main.yml  10
-rw-r--r--  test/integration/targets/incidental_aws_codebuild/files/codebuild_iam_trust_policy.json  12
-rw-r--r--  test/integration/targets/incidental_aws_codebuild/tasks/main.yml  119
-rw-r--r--  test/integration/targets/incidental_aws_codebuild/vars/main.yml  0
-rw-r--r--  test/integration/targets/incidental_aws_step_functions_state_machine/aliases  2
-rw-r--r--  test/integration/targets/incidental_aws_step_functions_state_machine/defaults/main.yml  4
-rw-r--r--  test/integration/targets/incidental_aws_step_functions_state_machine/files/alternative_state_machine.json  15
-rw-r--r--  test/integration/targets/incidental_aws_step_functions_state_machine/files/state_machine.json  10
-rw-r--r--  test/integration/targets/incidental_aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json  12
-rw-r--r--  test/integration/targets/incidental_aws_step_functions_state_machine/tasks/main.yml  300
-rw-r--r--  test/integration/targets/incidental_azure_rm_functionapp/aliases  3
-rw-r--r--  test/integration/targets/incidental_azure_rm_functionapp/tasks/main.yml  131
-rw-r--r--  test/integration/targets/incidental_azure_rm_mariadbserver/aliases  3
-rw-r--r--  test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml  640
-rw-r--r--  test/integration/targets/incidental_azure_rm_resource/aliases  3
-rw-r--r--  test/integration/targets/incidental_azure_rm_resource/tasks/main.yml  158
-rw-r--r--  test/integration/targets/incidental_azure_rm_webapp/aliases  3
-rw-r--r--  test/integration/targets/incidental_azure_rm_webapp/tasks/main.yml  434
-rw-r--r--  test/integration/targets/incidental_cloudformation/aliases  2
-rw-r--r--  test/integration/targets/incidental_cloudformation/defaults/main.yml  8
-rw-r--r--  test/integration/targets/incidental_cloudformation/files/cf_template.json  37
-rw-r--r--  test/integration/targets/incidental_cloudformation/tasks/main.yml  463
-rw-r--r--  test/integration/targets/incidental_cs_common/aliases  1
-rw-r--r--  test/integration/targets/incidental_cs_common/defaults/main.yml  6
-rw-r--r--  test/integration/targets/incidental_cs_role_permission/aliases  2
-rw-r--r--  test/integration/targets/incidental_cs_role_permission/meta/main.yml  3
-rw-r--r--  test/integration/targets/incidental_cs_role_permission/tasks/main.yml  303
-rw-r--r--  test/integration/targets/incidental_cs_service_offering/aliases  2
-rw-r--r--  test/integration/targets/incidental_cs_service_offering/meta/main.yml  3
-rw-r--r--  test/integration/targets/incidental_cs_service_offering/tasks/guest_vm_service_offering.yml  223
-rw-r--r--  test/integration/targets/incidental_cs_service_offering/tasks/main.yml  3
-rw-r--r--  test/integration/targets/incidental_cs_service_offering/tasks/system_vm_service_offering.yml  151
-rw-r--r--  test/integration/targets/incidental_ec2_instance/aliases  2
-rw-r--r--  test/integration/targets/incidental_ec2_instance/inventory  17
-rw-r--r--  test/integration/targets/incidental_ec2_instance/main.yml  43
-rw-r--r--  test/integration/targets/incidental_ec2_instance/meta/main.yml  3
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/defaults/main.yml  14
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/files/assume-role-policy.json  13
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/meta/main.yml  2
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/block_devices.yml  82
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml  172
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/cpu_options.yml  86
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml  57
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml  41
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml  93
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/env_setup.yml  79
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml  129
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/find_ami.yml  15
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml  127
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml  68
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/main.yml  48
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml  158
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/termination_protection.yml  101
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/version_fail.yml  29
-rw-r--r--  test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml  30
-rwxr-xr-x  test/integration/targets/incidental_ec2_instance/runme.sh  12
-rw-r--r--  test/integration/targets/incidental_hcloud_server/aliases  2
-rw-r--r--  test/integration/targets/incidental_hcloud_server/defaults/main.yml  5
-rw-r--r--  test/integration/targets/incidental_hcloud_server/tasks/main.yml  565
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/aliases  2
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml  11
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml  9
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml  64
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml  62
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml  39
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml  9
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml  18
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml  91
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml  79
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml  74
-rwxr-xr-x  test/integration/targets/incidental_inventory_aws_ec2/runme.sh  35
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml  12
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml  12
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml  20
-rw-r--r--  test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml  0
-rw-r--r--  test/integration/targets/incidental_inventory_foreman/aliases  3
-rw-r--r--  test/integration/targets/incidental_inventory_foreman/ansible.cfg  5
-rw-r--r--  test/integration/targets/incidental_inventory_foreman/inspect_cache.yml  31
-rwxr-xr-x  test/integration/targets/incidental_inventory_foreman/runme.sh  50
-rw-r--r--  test/integration/targets/incidental_inventory_foreman/test_foreman_inventory.yml  59
-rw-r--r--  test/integration/targets/incidental_inventory_vmware_vm_inventory/aliases  3
-rw-r--r--  test/integration/targets/incidental_inventory_vmware_vm_inventory/ansible.cfg  8
-rwxr-xr-x  test/integration/targets/incidental_inventory_vmware_vm_inventory/runme.sh  83
-rw-r--r--  test/integration/targets/incidental_inventory_vmware_vm_inventory/test_vmware_vm_inventory.yml  24
-rw-r--r--  test/integration/targets/incidental_k8s/README.md  23
-rw-r--r--  test/integration/targets/incidental_k8s/aliases  2
-rw-r--r--  test/integration/targets/incidental_k8s/defaults/main.yml  32
-rw-r--r--  test/integration/targets/incidental_k8s/files/crd-resource.yml  20
-rw-r--r--  test/integration/targets/incidental_k8s/files/kuard-extra-property.yml  21
-rw-r--r--  test/integration/targets/incidental_k8s/files/kuard-invalid-type.yml  20
-rw-r--r--  test/integration/targets/incidental_k8s/files/setup-crd.yml  14
-rw-r--r--  test/integration/targets/incidental_k8s/meta/main.yml  2
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/append_hash.yml  68
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/apply.yml  277
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/crd.yml  71
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/delete.yml  101
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/full_test.yml  375
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/lists.yml  140
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/main.yml  92
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/older_openshift_fail.yml  69
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/openshift.yml  61
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/validate_installed.yml  125
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/validate_not_installed.yml  23
-rw-r--r--  test/integration/targets/incidental_k8s/tasks/waiter.yml  355
-rw-r--r--  test/integration/targets/incidental_nios_prepare_tests/aliases  1
-rw-r--r--  test/integration/targets/incidental_nios_prepare_tests/tasks/main.yml  0
-rw-r--r--  test/integration/targets/incidental_nios_txt_record/aliases  3
-rw-r--r--  test/integration/targets/incidental_nios_txt_record/defaults/main.yaml  3
-rw-r--r--  test/integration/targets/incidental_nios_txt_record/meta/main.yaml  2
-rw-r--r--  test/integration/targets/incidental_nios_txt_record/tasks/main.yml  1
-rw-r--r--  test/integration/targets/incidental_nios_txt_record/tasks/nios_txt_record_idempotence.yml  80
-rw-r--r--  test/integration/targets/incidental_script_inventory_vmware_inventory/aliases  3
-rwxr-xr-x  test/integration/targets/incidental_script_inventory_vmware_inventory/runme.sh  58
-rw-r--r--  test/integration/targets/incidental_script_inventory_vmware_inventory/test_vmware_inventory.yml  18
-rw-r--r--  test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.ini  127
-rwxr-xr-x  test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.py  793
-rwxr-xr-x  test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.sh  3
-rw-r--r--  test/integration/targets/incidental_setup_ec2/aliases  1
-rw-r--r--  test/integration/targets/incidental_setup_ec2/defaults/main.yml  2
-rw-r--r--  test/integration/targets/incidental_setup_ec2/tasks/common.yml  119
-rw-r--r--  test/integration/targets/incidental_setup_ec2/vars/main.yml  3
-rw-r--r--  test/integration/targets/incidental_sts_assume_role/aliases  2
-rw-r--r--  test/integration/targets/incidental_sts_assume_role/meta/main.yml  2
-rw-r--r--  test/integration/targets/incidental_sts_assume_role/tasks/main.yml  384
-rw-r--r--  test/integration/targets/incidental_sts_assume_role/templates/policy.json.j2  12
-rw-r--r--  test/integration/targets/incidental_tower_credential_type/aliases  2
-rw-r--r--  test/integration/targets/incidental_tower_credential_type/tasks/main.yml  23
-rw-r--r--  test/integration/targets/incidental_tower_receive/aliases  2
-rw-r--r--  test/integration/targets/incidental_tower_receive/tasks/main.yml  17
-rw-r--r--  test/integration/targets/incidental_vmware_guest/aliases  3
-rw-r--r--  test/integration/targets/incidental_vmware_guest/defaults/main.yml  33
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/boot_firmware_d1_c1_f0.yml  117
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/cdrom_d1_c1_f0.yml  269
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/check_mode.yml  60
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/clone_customize_guest_test.yml  47
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/clone_d1_c1_f0.yml  101
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/clone_resize_disks.yml  77
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/clone_with_convert.yml  66
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/create_d1_c1_f0.yml  164
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/create_guest_invalid_d1_c1_f0.yml  32
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/create_nw_d1_c1_f0.yml  38
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/create_rp_d1_c1_f0.yml  205
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/delete_vm.yml  22
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/disk_mode_d1_c1_f0.yml  89
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/disk_size_d1_c1_f0.yml  31
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/disk_type_d1_c1_f0.yml  33
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/linked_clone_d1_c1_f0.yml  100
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/mac_address_d1_c1_f0.yml  37
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/main.yml  19
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/max_connections.yml  45
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/mem_reservation.yml  125
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/network_negative_test.yml  339
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/network_with_device.yml  60
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/network_with_dvpg.yml  152
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/network_with_portgroup.yml  47
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/non_existent_vm_ops.yml  23
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/poweroff_d1_c1_f0.yml  27
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/poweroff_d1_c1_f1.yml  22
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/reconfig_vm_to_latest_version.yml  73
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/remove_vm_from_inventory.yml  61
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/run_test_playbook.yml  17
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/template_d1_c1_f0.yml  105
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/vapp_d1_c1_f0.yml  100
-rw-r--r--  test/integration/targets/incidental_vmware_guest/tasks/windows_vbs_d1_c1_f0.yml  93
-rw-r--r--  test/integration/targets/incidental_vmware_guest_custom_attributes/aliases  3
-rw-r--r--  test/integration/targets/incidental_vmware_guest_custom_attributes/tasks/main.yml  110
-rw-r--r--  test/integration/targets/incidental_vmware_host_hyperthreading/aliases  3
-rw-r--r--  test/integration/targets/incidental_vmware_host_hyperthreading/tasks/main.yml  92
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/aliases  1
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/meta/main.yml  2
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/init_real_lab.yml  29
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/init_vcsim.yml  44
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/main.yml  25
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_attach_hosts.yml  30
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_category.yml  5
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_cluster.yml  10
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_content_library.yml  7
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_datacenter.yml  11
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_datastore.yml  42
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_dvs_portgroup.yml  18
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_dvswitch.yml  20
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_resource_pool.yml  15
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_switch.yml  7
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_tag.yml  15
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_virtualmachines.yml  46
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/teardown.yml  24
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/tasks/teardown_with_esxi.yml  96
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/vars/common.yml  12
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_1esxi.yml  33
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_2esxi.yml  34
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_only.yml  6
-rw-r--r--  test/integration/targets/incidental_vmware_prepare_tests/vars/vcsim.yml  19
-rw-r--r--  test/lib/ansible_test/_internal/sanity/integration_aliases.py  11
-rw-r--r--  test/sanity/ignore.txt  21
-rw-r--r--  test/support/integration/plugins/cache/jsonfile.py  63
-rw-r--r--  test/support/integration/plugins/filter/json_query.py  53
-rw-r--r--  test/support/integration/plugins/inventory/aws_ec2.py  760
-rw-r--r--  test/support/integration/plugins/inventory/foreman.py  295
-rw-r--r--  test/support/integration/plugins/inventory/vmware_vm_inventory.py  477
-rw-r--r--  test/support/integration/plugins/module_utils/ansible_tower.py  113
-rw-r--r--  test/support/integration/plugins/module_utils/aws/core.py  335
-rw-r--r--  test/support/integration/plugins/module_utils/aws/iam.py  49
-rw-r--r--  test/support/integration/plugins/module_utils/aws/s3.py  50
-rw-r--r--  test/support/integration/plugins/module_utils/aws/waiters.py  405
-rw-r--r--  test/support/integration/plugins/module_utils/azure_rm_common.py  1473
-rw-r--r--  test/support/integration/plugins/module_utils/azure_rm_common_rest.py  97
-rw-r--r--  test/support/integration/plugins/module_utils/cloud.py  217
-rw-r--r--  test/support/integration/plugins/module_utils/cloudstack.py  664
-rw-r--r--  test/support/integration/plugins/module_utils/common/network.py  158
-rw-r--r--  test/support/integration/plugins/module_utils/compat/ipaddress.py  2476
-rw-r--r--  test/support/integration/plugins/module_utils/ec2.py  758
-rw-r--r--  test/support/integration/plugins/module_utils/hcloud.py  63
-rw-r--r--  test/support/integration/plugins/module_utils/k8s/common.py  290
-rw-r--r--  test/support/integration/plugins/module_utils/k8s/raw.py  519
-rw-r--r--  test/support/integration/plugins/module_utils/net_tools/nios/api.py  601
-rw-r--r--  test/support/integration/plugins/module_utils/network/common/utils.py  643
-rw-r--r--  test/support/integration/plugins/module_utils/vmware.py  1630
l---------  test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py  1
l---------  test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py  1
l---------  test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py  1
l---------  test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py  1
l---------  test/support/integration/plugins/modules/_azure_rm_resource_facts.py  1
l---------  test/support/integration/plugins/modules/_azure_rm_webapp_facts.py  1
-rw-r--r--  test/support/integration/plugins/modules/aws_az_info.py  110
-rw-r--r--  test/support/integration/plugins/modules/aws_codebuild.py  408
-rw-r--r--  test/support/integration/plugins/modules/aws_s3.py  925
-rw-r--r--  test/support/integration/plugins/modules/aws_step_functions_state_machine.py  232
-rw-r--r--  test/support/integration/plugins/modules/aws_step_functions_state_machine_execution.py  197
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_appserviceplan.py  379
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_functionapp.py  421
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_functionapp_info.py  206
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py  241
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py  216
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py  304
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py  211
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py  277
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py  207
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_mariadbserver.py  388
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py  264
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_resource.py  427
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_resource_info.py  431
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_storageaccount.py  684
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_webapp.py  1070
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_webapp_info.py  488
-rw-r--r--  test/support/integration/plugins/modules/azure_rm_webappslot.py  1058
-rw-r--r--  test/support/integration/plugins/modules/cloudformation.py  837
-rw-r--r--  test/support/integration/plugins/modules/cloudformation_info.py  354
-rw-r--r--  test/support/integration/plugins/modules/cs_role.py  211
-rw-r--r--  test/support/integration/plugins/modules/cs_role_permission.py  351
-rw-r--r--  test/support/integration/plugins/modules/cs_service_offering.py  583
-rw-r--r--  test/support/integration/plugins/modules/ec2.py  1766
-rw-r--r--  test/support/integration/plugins/modules/ec2_ami_info.py  281
-rw-r--r--  test/support/integration/plugins/modules/ec2_eni.py  633
-rw-r--r--  test/support/integration/plugins/modules/ec2_eni_info.py  275
-rw-r--r--  test/support/integration/plugins/modules/ec2_group.py  1345
-rw-r--r--  test/support/integration/plugins/modules/ec2_instance.py  1805
-rw-r--r--  test/support/integration/plugins/modules/ec2_instance_info.py  571
-rw-r--r--  test/support/integration/plugins/modules/ec2_key.py  271
-rw-r--r--  test/support/integration/plugins/modules/ec2_vpc_igw.py  283
-rw-r--r--  test/support/integration/plugins/modules/ec2_vpc_net.py  524
-rw-r--r--  test/support/integration/plugins/modules/ec2_vpc_route_table.py  750
-rw-r--r--  test/support/integration/plugins/modules/ec2_vpc_subnet.py  604
-rw-r--r--  test/support/integration/plugins/modules/hcloud_server.py  555
-rw-r--r--  test/support/integration/plugins/modules/iam_role.py  673
-rw-r--r--  test/support/integration/plugins/modules/k8s.py  274
-rw-r--r--  test/support/integration/plugins/modules/k8s_info.py  179
-rw-r--r--  test/support/integration/plugins/modules/nios_txt_record.py  134
-rw-r--r--  test/support/integration/plugins/modules/nios_zone.py  228
-rw-r--r--  test/support/integration/plugins/modules/python_requirements_info.py  175
-rw-r--r--  test/support/integration/plugins/modules/s3_bucket.py  740
-rw-r--r--  test/support/integration/plugins/modules/sts_assume_role.py  180
-rw-r--r--  test/support/integration/plugins/modules/tower_credential_type.py  174
-rw-r--r--  test/support/integration/plugins/modules/tower_receive.py  172
-rw-r--r--  test/support/integration/plugins/modules/vmware_guest.py  2914
-rw-r--r--  test/support/integration/plugins/modules/vmware_guest_custom_attributes.py  259
-rw-r--r--  test/support/integration/plugins/modules/vmware_host_hyperthreading.py  261
l---------  test/utils/shippable/incidental/aws.sh  1
l---------  test/utils/shippable/incidental/azure.sh  1
-rwxr-xr-x  test/utils/shippable/incidental/cloud.sh  41
l---------  test/utils/shippable/incidental/cs.sh  1
l---------  test/utils/shippable/incidental/hcloud.sh  1
l---------  test/utils/shippable/incidental/tower.sh  1
l---------  test/utils/shippable/incidental/vcenter.sh  1
285 files changed, 52801 insertions, 2 deletions
diff --git a/shippable.yml b/shippable.yml
index f33278083c..1ed7999ed9 100644
--- a/shippable.yml
+++ b/shippable.yml
@@ -165,6 +165,9 @@ matrix:
- env: T=aws/2.7/4
- env: T=aws/3.6/4
+ - env: T=i/aws/2.7/1
+ - env: T=i/aws/3.6/1
+
- env: T=azure/2.7/1
- env: T=azure/3.6/1
@@ -198,29 +201,42 @@ matrix:
- env: T=azure/2.7/11
- env: T=azure/3.6/11
+ - env: T=i/azure/2.7/1
+ - env: T=i/azure/3.6/1
+
- env: T=vcenter/2.7/1
- env: T=vcenter/3.6/1
- env: T=vcenter/2.7/2
- env: T=vcenter/3.6/2
+ - env: T=i/vcenter//1
+
- env: T=cs/2.7/1
- env: T=cs/3.6/1
- env: T=cs/2.7/2
- env: T=cs/3.6/2
+ - env: T=i/cs//1
+
- env: T=tower/2.7/1
- env: T=tower/3.6/1
+ - env: T=i/tower//1
+
- env: T=cloud/2.7/1
- env: T=cloud/3.6/1
+ - env: T=i/cloud//1
+
- env: T=hcloud/2.7/1
- env: T=hcloud/3.6/1
- env: T=hcloud/2.7/2
- env: T=hcloud/3.6/2
+
+ - env: T=i/hcloud//1
branches:
except:
- "*-patch-*"
diff --git a/test/integration/targets/incidental_aws_codebuild/aliases b/test/integration/targets/incidental_aws_codebuild/aliases
new file mode 100644
index 0000000000..29f60feb44
--- /dev/null
+++ b/test/integration/targets/incidental_aws_codebuild/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/incidental
diff --git a/test/integration/targets/incidental_aws_codebuild/defaults/main.yml b/test/integration/targets/incidental_aws_codebuild/defaults/main.yml
new file mode 100644
index 0000000000..a36eb3de72
--- /dev/null
+++ b/test/integration/targets/incidental_aws_codebuild/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# defaults file for aws_codebuild
+
+# IAM role names have to be less than 64 characters
+# The 8 digit identifier at the end of resource_prefix helps determine during
+# which test something was created and allows tests to be run in parallel
+# Shippable resource_prefixes are in the format shippable-123456-123, so in those cases
+# we need both sets of digits to keep the resource name unique
+unique_id: "{{ resource_prefix | regex_search('(\\d+-?)(\\d+)$') }}"
+iam_role_name: "ansible-test-sts-{{ unique_id }}-codebuild-service-role"
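The unique_id default above keeps only the trailing digit groups of resource_prefix so that the derived IAM role name stays under the 64-character limit described in the comments. As a quick illustration of what that regex_search pattern extracts from a Shippable-style prefix (standalone Python shown purely for demonstration, not part of the test):

$ python3 -c "import re; print(re.search(r'(\d+-?)(\d+)$', 'shippable-123456-123').group(0))"
123456-123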
diff --git a/test/integration/targets/incidental_aws_codebuild/files/codebuild_iam_trust_policy.json b/test/integration/targets/incidental_aws_codebuild/files/codebuild_iam_trust_policy.json
new file mode 100644
index 0000000000..3af7c64120
--- /dev/null
+++ b/test/integration/targets/incidental_aws_codebuild/files/codebuild_iam_trust_policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "codebuild.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/test/integration/targets/incidental_aws_codebuild/tasks/main.yml b/test/integration/targets/incidental_aws_codebuild/tasks/main.yml
new file mode 100644
index 0000000000..953aaeaad8
--- /dev/null
+++ b/test/integration/targets/incidental_aws_codebuild/tasks/main.yml
@@ -0,0 +1,119 @@
+---
+# tasks file for aws_codebuild
+
+- name: Run aws_codebuild integration tests.
+
+ block:
+
+ # ==================== preparations ========================================
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: create IAM role needed for CodeBuild
+ iam_role:
+ name: "{{ iam_role_name }}"
+ description: Role with permissions for CodeBuild actions.
+ assume_role_policy_document: "{{ lookup('file', 'codebuild_iam_trust_policy.json') }}"
+ state: present
+ <<: *aws_connection_info
+ register: codebuild_iam_role
+
+ - name: Set variable with aws account id
+ set_fact:
+ aws_account_id: "{{ codebuild_iam_role.iam_role.arn.split(':')[4] }}"
+
+ # ================== integration test ==========================================
+
+ - name: create CodeBuild project
+ aws_codebuild:
+ name: "{{ resource_prefix }}-test-ansible-codebuild"
+ description: Build project for testing the Ansible aws_codebuild module
+ service_role: "{{ codebuild_iam_role.iam_role.arn }}"
+ timeout_in_minutes: 30
+ source:
+ type: CODEPIPELINE
+ buildspec: ''
+ artifacts:
+ namespace_type: NONE
+ packaging: NONE
+ type: CODEPIPELINE
+ name: test
+ environment:
+ compute_type: BUILD_GENERAL1_SMALL
+ privileged_mode: true
+ image: 'aws/codebuild/docker:17.09.0'
+ type: LINUX_CONTAINER
+ environment_variables:
+ - { name: 'FOO_ENV', value: 'other' }
+ tags:
+ - { key: 'purpose', value: 'ansible-test' }
+ state: present
+ <<: *aws_connection_info
+ register: output
+ retries: 10
+ delay: 5
+ until: output is success
+
+ - assert:
+ that:
+ - "output.project.description == 'Build project for testing the Ansible aws_codebuild module'"
+
+ - name: idempotence check rerunning same Codebuild task
+ aws_codebuild:
+ name: "{{ resource_prefix }}-test-ansible-codebuild"
+ description: Build project for testing the Ansible aws_codebuild module
+ service_role: "{{ codebuild_iam_role.iam_role.arn }}"
+ timeout_in_minutes: 30
+ source:
+ type: CODEPIPELINE
+ buildspec: ''
+ artifacts:
+ namespace_type: NONE
+ packaging: NONE
+ type: CODEPIPELINE
+ name: test
+ encryption_key: 'arn:aws:kms:{{ aws_region }}:{{ aws_account_id }}:alias/aws/s3'
+ environment:
+ compute_type: BUILD_GENERAL1_SMALL
+ privileged_mode: true
+ image: 'aws/codebuild/docker:17.09.0'
+ type: LINUX_CONTAINER
+ environment_variables:
+ - { name: 'FOO_ENV', value: 'other' }
+ tags:
+ - { key: 'purpose', value: 'ansible-test' }
+ state: present
+ <<: *aws_connection_info
+ register: rerun_test_output
+
+ - assert:
+ that:
+ - "rerun_test_output.project.created == output.project.created"
+
+ - name: delete CodeBuild project
+ aws_codebuild:
+ name: "{{ output.project.name }}"
+ source:
+ type: CODEPIPELINE
+ buildspec: ''
+ artifacts: {}
+ state: absent
+ <<: *aws_connection_info
+ async: 300
+
+ # ============================== cleanup ======================================
+
+ always:
+
+ - name: cleanup IAM role created for CodeBuild test
+ iam_role:
+ name: "{{ iam_role_name }}"
+ state: absent
+ <<: *aws_connection_info
diff --git a/test/integration/targets/incidental_aws_codebuild/vars/main.yml b/test/integration/targets/incidental_aws_codebuild/vars/main.yml
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/integration/targets/incidental_aws_codebuild/vars/main.yml
diff --git a/test/integration/targets/incidental_aws_step_functions_state_machine/aliases b/test/integration/targets/incidental_aws_step_functions_state_machine/aliases
new file mode 100644
index 0000000000..29f60feb44
--- /dev/null
+++ b/test/integration/targets/incidental_aws_step_functions_state_machine/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/incidental
diff --git a/test/integration/targets/incidental_aws_step_functions_state_machine/defaults/main.yml b/test/integration/targets/incidental_aws_step_functions_state_machine/defaults/main.yml
new file mode 100644
index 0000000000..273a0c783b
--- /dev/null
+++ b/test/integration/targets/incidental_aws_step_functions_state_machine/defaults/main.yml
@@ -0,0 +1,4 @@
+# the random_num is generated in a set_fact task at the start of the testsuite
+state_machine_name: "{{ resource_prefix }}_step_functions_state_machine_ansible_test_{{ random_num }}"
+step_functions_role_name: "ansible-test-sts-{{ resource_prefix }}-step_functions-role"
+execution_name: "{{ resource_prefix }}_sfn_execution"
diff --git a/test/integration/targets/incidental_aws_step_functions_state_machine/files/alternative_state_machine.json b/test/integration/targets/incidental_aws_step_functions_state_machine/files/alternative_state_machine.json
new file mode 100644
index 0000000000..7b51bebb1a
--- /dev/null
+++ b/test/integration/targets/incidental_aws_step_functions_state_machine/files/alternative_state_machine.json
@@ -0,0 +1,15 @@
+{
+ "StartAt": "HelloWorld",
+ "States": {
+ "HelloWorld": {
+ "Type": "Pass",
+ "Result": "Some other result",
+ "Next": "Wait"
+ },
+ "Wait": {
+ "Type": "Wait",
+ "Seconds": 30,
+ "End": true
+ }
+ }
+}
\ No newline at end of file
diff --git a/test/integration/targets/incidental_aws_step_functions_state_machine/files/state_machine.json b/test/integration/targets/incidental_aws_step_functions_state_machine/files/state_machine.json
new file mode 100644
index 0000000000..c07d5cebad
--- /dev/null
+++ b/test/integration/targets/incidental_aws_step_functions_state_machine/files/state_machine.json
@@ -0,0 +1,10 @@
+{
+ "StartAt": "HelloWorld",
+ "States": {
+ "HelloWorld": {
+ "Type": "Pass",
+ "Result": "Hello World!",
+ "End": true
+ }
+ }
+}
\ No newline at end of file
diff --git a/test/integration/targets/incidental_aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json b/test/integration/targets/incidental_aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json
new file mode 100644
index 0000000000..48d627220f
--- /dev/null
+++ b/test/integration/targets/incidental_aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "states.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/test/integration/targets/incidental_aws_step_functions_state_machine/tasks/main.yml b/test/integration/targets/incidental_aws_step_functions_state_machine/tasks/main.yml
new file mode 100644
index 0000000000..0a28ca3624
--- /dev/null
+++ b/test/integration/targets/incidental_aws_step_functions_state_machine/tasks/main.yml
@@ -0,0 +1,300 @@
+---
+
+- name: Integration test for AWS Step Function state machine module
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+
+ # ==== Setup ==================================================
+
+ - name: Create IAM service role needed for Step Functions
+ iam_role:
+ name: "{{ step_functions_role_name }}"
+ description: Role with permissions for AWS Step Functions actions.
+ assume_role_policy_document: "{{ lookup('file', 'state_machines_iam_trust_policy.json') }}"
+ state: present
+ register: step_functions_role
+
+ - name: Pause a few seconds to ensure IAM role is available to next task
+ pause:
+ seconds: 10
+
+ # ==== Tests ===================================================
+
+ - name: Create a random component for state machine name
+ set_fact:
+ random_num: "{{ 999999999 | random }}"
+
+ - name: Create a new state machine -- check_mode
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ project: helloWorld
+ state: present
+ register: creation_check
+ check_mode: yes
+
+ - assert:
+ that:
+ - creation_check.changed == True
+ - creation_check.output == 'State machine would be created.'
+
+ - name: Create a new state machine
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ project: helloWorld
+ state: present
+ register: creation_output
+
+ - assert:
+ that:
+ - creation_output.changed == True
+
+ - name: Pause a few seconds to ensure state machine role is available
+ pause:
+ seconds: 5
+
+ - name: Idempotent rerun of same state function -- check_mode
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ project: helloWorld
+ state: present
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result.changed == False
+ - result.output == 'State is up-to-date.'
+
+ - name: Idempotent rerun of same state function
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ project: helloWorld
+ state: present
+ register: result
+
+ - assert:
+ that:
+ - result.changed == False
+
+ - name: Update an existing state machine -- check_mode
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','alternative_state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ differentTag: different_tag
+ state: present
+ register: update_check
+ check_mode: yes
+
+ - assert:
+ that:
+ - update_check.changed == True
+ - "update_check.output == 'State machine would be updated: {{ creation_output.state_machine_arn }}'"
+
+ - name: Update an existing state machine
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ definition: "{{ lookup('file','alternative_state_machine.json') }}"
+ role_arn: "{{ step_functions_role.iam_role.arn }}"
+ tags:
+ differentTag: different_tag
+ state: present
+ register: update_output
+
+ - assert:
+ that:
+ - update_output.changed == True
+ - update_output.state_machine_arn == creation_output.state_machine_arn
+
+ - name: Start execution of state machine -- check_mode
+ aws_step_functions_state_machine_execution:
+ name: "{{ execution_name }}"
+ execution_input: "{}"
+ state_machine_arn: "{{ creation_output.state_machine_arn }}"
+ register: start_execution_output
+ check_mode: yes
+
+ - assert:
+ that:
+ - start_execution_output.changed == True
+ - "start_execution_output.output == 'State machine execution would be started.'"
+
+ - name: Start execution of state machine
+ aws_step_functions_state_machine_execution:
+ name: "{{ execution_name }}"
+ execution_input: "{}"
+ state_machine_arn: "{{ creation_output.state_machine_arn }}"
+ register: start_execution_output
+
+ - assert:
+ that:
+ - start_execution_output.changed
+ - "'execution_arn' in start_execution_output"
+ - "'start_date' in start_execution_output"
+
+ - name: Start execution of state machine (check for idempotency) (check mode)
+ aws_step_functions_state_machine_execution:
+ name: "{{ execution_name }}"
+ execution_input: "{}"
+ state_machine_arn: "{{ creation_output.state_machine_arn }}"
+ register: start_execution_output_idem_check
+ check_mode: yes
+
+ - assert:
+ that:
+ - not start_execution_output_idem_check.changed
+ - "start_execution_output_idem_check.output == 'State machine execution already exists.'"
+
+ - name: Start execution of state machine (check for idempotency)
+ aws_step_functions_state_machine_execution:
+ name: "{{ execution_name }}"
+ execution_input: "{}"
+ state_machine_arn: "{{ creation_output.state_machine_arn }}"
+ register: start_execution_output_idem
+
+ - assert:
+ that:
+ - not start_execution_output_idem.changed
+
+ - name: Stop execution of state machine -- check_mode
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "{{ start_execution_output.execution_arn }}"
+ cause: "cause of the failure"
+ error: "error code of the failure"
+ register: stop_execution_output
+ check_mode: yes
+
+ - assert:
+ that:
+ - stop_execution_output.changed
+ - "stop_execution_output.output == 'State machine execution would be stopped.'"
+
+ - name: Stop execution of state machine
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "{{ start_execution_output.execution_arn }}"
+ cause: "cause of the failure"
+ error: "error code of the failure"
+ register: stop_execution_output
+
+ - assert:
+ that:
+ - stop_execution_output.changed
+ - "'stop_date' in stop_execution_output"
+
+ - name: Stop execution of state machine (check for idempotency)
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "{{ start_execution_output.execution_arn }}"
+ cause: "cause of the failure"
+ error: "error code of the failure"
+ register: stop_execution_output
+
+ - assert:
+ that:
+ - not stop_execution_output.changed
+
+ - name: Try stopping a non-running execution -- check_mode
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "{{ start_execution_output.execution_arn }}"
+ cause: "cause of the failure"
+ error: "error code of the failure"
+ register: stop_execution_output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not stop_execution_output.changed
+ - "stop_execution_output.output == 'State machine execution is not running.'"
+
+ - name: Try stopping a non-running execution
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "{{ start_execution_output.execution_arn }}"
+ cause: "cause of the failure"
+ error: "error code of the failure"
+ register: stop_execution_output
+ check_mode: yes
+
+ - assert:
+ that:
+ - not stop_execution_output.changed
+
+ - name: Start execution of state machine with the same execution name
+ aws_step_functions_state_machine_execution:
+ name: "{{ execution_name }}"
+ state_machine_arn: "{{ creation_output.state_machine_arn }}"
+ register: start_execution_output_again
+
+ - assert:
+ that:
+ - not start_execution_output_again.changed
+
+ - name: Remove state machine -- check_mode
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ state: absent
+ register: deletion_check
+ check_mode: yes
+
+ - assert:
+ that:
+ - deletion_check.changed == True
+ - "deletion_check.output == 'State machine would be deleted: {{ creation_output.state_machine_arn }}'"
+
+ - name: Remove state machine
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ state: absent
+ register: deletion_output
+
+ - assert:
+ that:
+ - deletion_output.changed == True
+ - deletion_output.state_machine_arn == creation_output.state_machine_arn
+
+ - name: Non-existent state machine is absent
+ aws_step_functions_state_machine:
+ name: "non_existing_state_machine"
+ state: absent
+ register: result
+
+ - assert:
+ that:
+ - result.changed == False
+
+ # ==== Cleanup ====================================================
+
+ always:
+
+ - name: Cleanup - delete state machine
+ aws_step_functions_state_machine:
+ name: "{{ state_machine_name }}"
+ state: absent
+ ignore_errors: true
+
+ - name: Cleanup - delete IAM role needed for Step Functions test
+ iam_role:
+ name: "{{ step_functions_role_name }}"
+ state: absent
+ ignore_errors: true
diff --git a/test/integration/targets/incidental_azure_rm_functionapp/aliases b/test/integration/targets/incidental_azure_rm_functionapp/aliases
new file mode 100644
index 0000000000..537e96e319
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_functionapp/aliases
@@ -0,0 +1,3 @@
+cloud/azure
+shippable/azure/incidental
+destructive
diff --git a/test/integration/targets/incidental_azure_rm_functionapp/tasks/main.yml b/test/integration/targets/incidental_azure_rm_functionapp/tasks/main.yml
new file mode 100644
index 0000000000..cf200fb9c4
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_functionapp/tasks/main.yml
@@ -0,0 +1,131 @@
+- name: Fix resource prefix
+ set_fact:
+ fixed_resource_prefix: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+
+- name: Fix resource prefix
+ set_fact:
+ funcapp_name_basic: "fa{{ fixed_resource_prefix }}basic"
+ funcapp_name_container: "fa{{ fixed_resource_prefix }}container"
+ funcapp_name_params: "fa{{ fixed_resource_prefix }}params"
+ storage_account_name: "sa{{ fixed_resource_prefix }}"
+ plan_name: "ap{{ fixed_resource_prefix }}"
+
+- name: create storage account for function apps
+ azure_rm_storageaccount:
+ resource_group: '{{ resource_group }}'
+ name: "{{ storage_account_name }}"
+ account_type: Standard_LRS
+
+- name: create basic function app
+ azure_rm_functionapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ funcapp_name_basic }}"
+ storage_account: "{{ storage_account_name }}"
+ register: output
+
+- name: assert the function was created
+ assert:
+ that: output.changed
+
+- name: list facts for function
+ azure_rm_functionapp_info:
+ resource_group: '{{ resource_group }}'
+ name: "{{ funcapp_name_basic }}"
+ register: results
+
+- name: assert the facts were retrieved
+ assert:
+ that:
+ - results.ansible_info.azure_functionapps|length == 1
+ - results.ansible_info.azure_functionapps[0].name == "{{ funcapp_name_basic }}"
+
+- name: delete basic function app
+ azure_rm_functionapp:
+ resource_group: '{{ resource_group }}'
+ name: "{{ funcapp_name_basic }}"
+ state: absent
+ register: output
+
+- name: assert the function was deleted
+ assert:
+ that: output.changed
+
+- name: create a function with app settings
+ azure_rm_functionapp:
+ resource_group: '{{ resource_group }}'
+ name: "{{ funcapp_name_params }}"
+ storage_account: "{{ storage_account_name }}"
+ app_settings:
+ hello: world
+ things: more stuff
+ FUNCTIONS_EXTENSION_VERSION: "~2"
+ register: output
+
+- name: assert the function with app settings was created
+ assert:
+ that: output.changed
+
+- name: change app settings
+ azure_rm_functionapp:
+ resource_group: '{{ resource_group }}'
+ name: "{{ funcapp_name_params }}"
+ storage_account: "{{ storage_account_name }}"
+ app_settings:
+ hello: world
+ things: more stuff
+ FUNCTIONS_EXTENSION_VERSION: "~2"
+ another: one
+ register: output
+
+- name: assert the function was changed
+ assert:
+ that: output.changed
+
+- name: delete the function app
+ azure_rm_functionapp:
+ resource_group: '{{ resource_group }}'
+ name: "{{ funcapp_name_params }}"
+ state: absent
+ register: output
+
+- name: assert the function was deleted
+ assert:
+ that: output.changed
+
+- name: Create a linux app service plan
+ azure_rm_appserviceplan:
+ resource_group: "{{ resource_group }}"
+ name: "{{ plan_name }}"
+ sku: S1
+ is_linux: true
+ number_of_workers: 1
+
+- name: "Create azure function app {{ function_app }}"
+ azure_rm_functionapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ funcapp_name_container }}"
+ storage_account: "{{ storage_account_name }}"
+ plan:
+ resource_group: "{{ resource_group }}"
+ name: "{{ plan_name }}"
+ container_settings:
+ name: httpd
+ app_settings:
+ FUNCTIONS_EXTENSION_VERSION: "~2"
+ register: output
+
+- name: assert the function was changed
+ assert:
+ that: output.changed
+
+- name: delete the function app
+ azure_rm_functionapp:
+ resource_group: '{{ resource_group }}'
+ name: "{{ funcapp_name_container }}"
+ state: absent
+
+- name: delete storage account
+ azure_rm_storageaccount:
+ resource_group: '{{ resource_group }}'
+ name: "{{ storage_account_name }}"
+ state: absent
diff --git a/test/integration/targets/incidental_azure_rm_mariadbserver/aliases b/test/integration/targets/incidental_azure_rm_mariadbserver/aliases
new file mode 100644
index 0000000000..9901373af2
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_mariadbserver/aliases
@@ -0,0 +1,3 @@
+cloud/azure
+destructive
+shippable/azure/incidental
diff --git a/test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml b/test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml
new file mode 100644
index 0000000000..5b33ffb951
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_mariadbserver/tasks/main.yml
@@ -0,0 +1,640 @@
+- name: Prepare random number
+ set_fact:
+ rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+ run_once: yes
+
+- name: Create instance of MariaDB Server -- check mode
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 51200
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ check_mode: yes
+ register: output
+- name: Assert the resource instance is well created
+ assert:
+ that:
+ - output.changed
+
+- name: Create instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 51200
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ register: output
+- name: Assert the resource instance is well created
+ assert:
+ that:
+ - output.changed
+ - output.state == 'Ready'
+
+- name: Create again instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 51200
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed == false
+ - output.state == 'Ready'
+
+- name: Update instance of MariaDB Server, change storage size
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 128000
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed
+ - output.state == 'Ready'
+- debug:
+ var: output
+
+- name: Gather facts MariaDB Server
+ azure_rm_mariadbserver_facts:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ register: output
+- name: Assert that storage size is correct
+ assert:
+ that:
+ - output.servers[0]['storage_mb'] == 128000
+
+- name: Create second instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}second
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 51200
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ tags:
+ aaa: bbb
+
+- name: Create second instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}second
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: westus2
+ storage_mb: 51200
+ version: 10.2
+ enforce_ssl: True
+ admin_username: zimxyz
+ admin_password: Testpasswordxyz12!
+ tags:
+ ccc: ddd
+
+- name: Gather facts MariaDB Server
+ azure_rm_mariadbserver_facts:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}second
+ register: output
+
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.servers[0]['id'] != None
+ - output.servers[0]['name'] != None
+ - output.servers[0]['location'] != None
+ - output.servers[0]['sku']['name'] != None
+ - output.servers[0]['sku']['tier'] != None
+ - output.servers[0]['sku']['capacity'] != None
+ - output.servers[0]['version'] != None
+ - output.servers[0]['user_visible_state'] != None
+ - output.servers[0]['fully_qualified_domain_name'] != None
+ - output.servers[0]['tags']['aaa'] == 'bbb'
+ - output.servers[0]['tags']['ccc'] == 'ddd'
+
+- name: Gather facts MariaDB Server
+ azure_rm_mariadbserver_facts:
+ resource_group: "{{ resource_group }}"
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.servers[0]['id'] != None
+ - output.servers[0]['name'] != None
+ - output.servers[0]['location'] != None
+ - output.servers[0]['sku']['name'] != None
+ - output.servers[0]['sku']['tier'] != None
+ - output.servers[0]['sku']['capacity'] != None
+ - output.servers[0]['version'] != None
+ - output.servers[0]['user_visible_state'] != None
+ - output.servers[0]['fully_qualified_domain_name'] != None
+ - output.servers[1]['id'] != None
+ - output.servers[1]['name'] != None
+ - output.servers[1]['location'] != None
+ - output.servers[1]['sku']['name'] != None
+ - output.servers[1]['sku']['tier'] != None
+ - output.servers[1]['sku']['capacity'] != None
+ - output.servers[1]['version'] != None
+ - output.servers[1]['user_visible_state'] != None
+ - output.servers[1]['fully_qualified_domain_name'] != None
+
+#
+# azure_rm_mariadbdatabase tests below
+#
+- name: Create instance of MariaDB Database -- check mode
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ check_mode: yes
+ register: output
+- name: Assert the resource instance is well created
+ assert:
+ that:
+ - output.changed
+
+- name: Create instance of MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ collation: latin1_swedish_ci
+ charset: latin1
+ register: output
+- name: Assert the resource instance is well created
+ assert:
+ that:
+ - output.changed
+ - output.name == 'testdatabase'
+
+- name: Create again instance of MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ collation: latin1_swedish_ci
+ charset: latin1
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed == false
+ - output.name == 'testdatabase'
+
+- name: Try to update database without force_update
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ collation: latin1_czech_ci
+ charset: latin1
+ ignore_errors: yes
+ register: output
+- name: Assert that nothing has changed
+ assert:
+ that:
+ - output.changed == False
+
+- name: Update instance of database using force_update
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ collation: latin1_czech_ci
+ charset: latin1
+ force_update: yes
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+ - output.name == 'testdatabase'
+
+- name: Create second instance of MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase2
+
+- name: Gather facts MariaDB Database
+ azure_rm_mariadbdatabase_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ register: output
+
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.databases[0]['server_name'] != None
+ - output.databases[0]['name'] != None
+ - output.databases[0]['charset'] != None
+ - output.databases[0]['collation'] != None
+
+- name: Gather facts MariaDB Database
+ azure_rm_mariadbdatabase_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.databases[0]['server_name'] != None
+ - output.databases[0]['name'] != None
+ - output.databases[0]['charset'] != None
+ - output.databases[0]['collation'] != None
+ - output.databases[1]['server_name'] != None
+ - output.databases[1]['name'] != None
+ - output.databases[1]['charset'] != None
+ - output.databases[1]['collation'] != None
+
+- name: Delete instance of MariaDB Database -- check mode
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ state: absent
+ check_mode: yes
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Delete instance of MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ state: absent
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Delete unexisting instance of MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: testdatabase
+ state: absent
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed == false
+
+#
+# azure_rm_firewallrule tests below
+#
+- name: Create instance of Firewall Rule -- check mode
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ start_ip_address: 172.28.10.136
+ end_ip_address: 172.28.10.138
+ check_mode: yes
+ register: output
+- name: Assert the resource instance is well created
+ assert:
+ that:
+ - output.changed
+
+- name: Create instance of Firewall Rule
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ start_ip_address: 172.28.10.136
+ end_ip_address: 172.28.10.138
+ register: output
+- name: Assert the resource instance was created
+ assert:
+ that:
+ - output.changed
+
+- name: Create instance of Firewall Rule again
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ start_ip_address: 172.28.10.136
+ end_ip_address: 172.28.10.138
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed == false
+
+- name: Delete instance of Firewall Rule -- check mode
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ state: absent
+ check_mode: yes
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Create instance of Firewall Rule -- second
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}second
+ start_ip_address: 172.28.10.136
+ end_ip_address: 172.28.10.138
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Gather facts MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.rules[0].id != None
+ - output.rules[0].server_name != None
+ - output.rules[0].name != None
+ - output.rules[0].start_ip_address != None
+ - output.rules[0].end_ip_address != None
+ - "output.rules | length == 1"
+
+- name: Gather facts MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.rules[0].id != None
+ - output.rules[0].server_name != None
+ - output.rules[0].name != None
+ - output.rules[0].start_ip_address != None
+ - output.rules[0].end_ip_address != None
+ - output.rules[1].id != None
+ - output.rules[1].name != None
+ - output.rules[1].start_ip_address != None
+ - output.rules[1].end_ip_address != None
+ - "output.rules | length == 2"
+
+- name: Delete instance of Firewall Rule
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ state: absent
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Delete nonexistent instance of Firewall Rule
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ state: absent
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed == false
+
+- name: Delete instance of Firewall Rule - second
+ azure_rm_mariadbfirewallrule:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}second
+ state: absent
+
+- name: Gather facts MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: firewallrule{{ rpfx }}
+ register: output
+- name: Assert that empty list was returned
+ assert:
+ that:
+ - output.changed == False
+ - "output.rules | length == 0"
+
+#
+# configuration
+#
+- name: Create instance of Configuration -- check mode
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ value: "ON"
+ check_mode: yes
+ register: output
+- name: Assert that change was registered
+ assert:
+ that:
+ - output.changed
+
+- name: Get facts of default configuration
+ azure_rm_mariadbconfiguration_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ register: output
+- name: Get facts of event_scheduler
+ debug:
+ var: output
+
+- name: Try to delete default configuration
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ state: absent
+ register: output
+- name: Assert that no change was registered
+ assert:
+ that:
+ - not output.changed
+
+- name: Try to change default configuration
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ value: "ON"
+ register: output
+- name: Assert that change was registered
+ assert:
+ that:
+ - output.changed
+
+- name: Try to change default configuration -- idempotent
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ value: "ON"
+ register: output
+- name: Assert that no change was registered
+ assert:
+ that:
+ - not output.changed
+
+- name: Try to reset configuration
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ state: absent
+ register: output
+- name: Assert that change was registered
+ assert:
+ that:
+ - output.changed
+
+- name: Try to reset configuration -- idempotent
+ azure_rm_mariadbconfiguration:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ state: absent
+ register: output
+- name: Assert that no change was registered
+ assert:
+ that:
+ - not output.changed
+
+- name: Gather facts MariaDB Configuration
+ azure_rm_mariadbconfiguration_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ name: event_scheduler
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.settings[0].id != None
+ - output.settings[0].name != None
+ - output.settings[0].value != None
+ - output.settings[0].description != None
+ - output.settings[0].source != None
+ - output.settings | length == 1
+
+- name: Gather facts MariaDB Configuration
+ azure_rm_mariadbconfiguration_facts:
+ resource_group: "{{ resource_group }}"
+ server_name: mariadbsrv{{ rpfx }}
+ register: output
+- name: Assert that facts are returned
+ assert:
+ that:
+ - output.changed == False
+ - output.settings[0].id != None
+ - output.settings[0].name != None
+ - output.settings[0].value != None
+ - output.settings[0].description != None
+ - output.settings[0].source != None
+ - output.settings | length > 1
+
+#
+# clean up azure_rm_mariadbserver test
+#
+
+- name: Delete instance of MariaDB Server -- check mode
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ state: absent
+ check_mode: yes
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Delete instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ state: absent
+ register: output
+- name: Assert the state has changed
+ assert:
+ that:
+ - output.changed
+
+- name: Delete nonexistent instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}
+ state: absent
+ register: output
+- name: Assert the state has not changed
+ assert:
+ that:
+ - output.changed == false
+
+- name: Delete second instance of MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: "{{ resource_group }}"
+ name: mariadbsrv{{ rpfx }}second
+ state: absent
+ async: 400
+ poll: 0
diff --git a/test/integration/targets/incidental_azure_rm_resource/aliases b/test/integration/targets/incidental_azure_rm_resource/aliases
new file mode 100644
index 0000000000..9901373af2
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_resource/aliases
@@ -0,0 +1,3 @@
+cloud/azure
+destructive
+shippable/azure/incidental
diff --git a/test/integration/targets/incidental_azure_rm_resource/tasks/main.yml b/test/integration/targets/incidental_azure_rm_resource/tasks/main.yml
new file mode 100644
index 0000000000..7c3024a5ef
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_resource/tasks/main.yml
@@ -0,0 +1,158 @@
+- name: Prepare random number
+ set_fact:
+ nsgname: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+ storageaccountname: "stacc{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+ dbname: "mdb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
+ run_once: yes
+
+- name: Call REST API
+ azure_rm_resource:
+ api_version: '2018-02-01'
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ resource_name: "{{ nsgname }}"
+ body:
+ location: eastus
+ idempotency: yes
+ register: output
+
+- name: Assert that something has changed
+ assert:
+ that: output.changed
+
+- name: Call REST API
+ azure_rm_resource:
+ api_version: '2018-02-01'
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ resource_name: "{{ nsgname }}"
+ body:
+ location: eastus
+ idempotency: yes
+ register: output
+
+- name: Assert that nothing has changed
+ assert:
+ that: not output.changed
+
+- name: Call REST API
+ azure_rm_resource:
+ api_version: '2018-02-01'
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ resource_name: "{{ nsgname }}"
+ body:
+ location: eastus
+ tags:
+ a: "abc"
+ b: "cde"
+ idempotency: yes
+ register: output
+
+- name: Assert that something has changed
+ assert:
+ that: output.changed
+
+- name: Try to get information about the network security group
+ azure_rm_resource_facts:
+ api_version: '2018-02-01'
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ resource_name: "{{ nsgname }}"
+ register: output
+
+- name: Assert value was returned
+ assert:
+ that:
+ - not output.changed
+ - output.response[0]['name'] != None
+ - output.response | length == 1
+
+- name: Try to query a list
+ azure_rm_resource_facts:
+ api_version: '2018-02-01'
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ register: output
+- name: Assert value was returned
+ assert:
+ that:
+ - not output.changed
+ - output.response[0]['name'] != None
+ - output.response | length >= 1
+
+- name: Try to query a list - same without API version
+ azure_rm_resource_facts:
+ resource_group: "{{ resource_group }}"
+ provider: network
+ resource_type: networksecuritygroups
+ register: output
+- name: Assert value was returned
+ assert:
+ that:
+ - not output.changed
+ - output.response[0]['name'] != None
+ - output.response | length >= 1
+
+- name: Query all the resources in the resource group
+ azure_rm_resource_facts:
+ resource_group: "{{ resource_group }}"
+ resource_type: resources
+ register: output
+- name: Assert value was returned
+ assert:
+ that:
+ - not output.changed
+ - output.response | length >= 1
+
+- name: Create storage account that requires LRO polling
+ azure_rm_resource:
+ polling_timeout: 600
+ polling_interval: 60
+ api_version: '2018-07-01'
+ resource_group: "{{ resource_group }}"
+ provider: Storage
+ resource_type: storageAccounts
+ resource_name: "{{ storageaccountname }}"
+ body:
+ sku:
+ name: Standard_GRS
+ kind: Storage
+ location: eastus
+ register: output
+
+- name: Assert that storage was successfully created
+ assert:
+ that: "output['response']['name'] == '{{ storageaccountname }}'"
+
+
+- name: Try to get storage keys -- special case when subresource part has no name
+ azure_rm_resource:
+ resource_group: "{{ resource_group }}"
+ provider: storage
+ resource_type: storageAccounts
+ resource_name: "{{ storageaccountname }}"
+ subresource:
+ - type: listkeys
+ api_version: '2018-03-01-preview'
+ method: POST
+ register: keys
+
+- name: Assert that key was returned
+ assert:
+ that: keys['response']['keys'][0]['value'] | length > 0
+
+- name: Delete storage - without API version
+ azure_rm_resource:
+ polling_timeout: 600
+ polling_interval: 60
+ method: DELETE
+ resource_group: "{{ resource_group }}"
+ provider: Storage
+ resource_type: storageAccounts
+ resource_name: "{{ storageaccountname }}"
diff --git a/test/integration/targets/incidental_azure_rm_webapp/aliases b/test/integration/targets/incidental_azure_rm_webapp/aliases
new file mode 100644
index 0000000000..537e96e319
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_webapp/aliases
@@ -0,0 +1,3 @@
+cloud/azure
+shippable/azure/incidental
+destructive
diff --git a/test/integration/targets/incidental_azure_rm_webapp/tasks/main.yml b/test/integration/targets/incidental_azure_rm_webapp/tasks/main.yml
new file mode 100644
index 0000000000..8efc6629de
--- /dev/null
+++ b/test/integration/targets/incidental_azure_rm_webapp/tasks/main.yml
@@ -0,0 +1,434 @@
+- name: Fix resource prefix
+ set_fact:
+ linux_app_plan_resource_group: "{{ resource_group_secondary }}"
+ win_app_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}{{ 1000 | random}}winapp"
+ win_plan_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}winplan"
+ linux_plan_name: "{{ (resource_group_secondary | replace('-','x'))[-8:] }}linplan"
+ slot1_name: "stage1"
+
+- name: Create a windows web app with a non-existent app service plan
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}1"
+ plan:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_plan_name }}"
+ is_linux: false
+ sku: S1
+
+- name: Create a windows web app with existing app service plan
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}2"
+ plan: "{{ win_plan_name }}"
+ register: output
+
+- name: stop the web app
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}2"
+ plan: "{{ win_plan_name }}"
+ app_state: stopped
+ register: output
+
+- name: assert output changed
+ assert:
+ that:
+ output.changed
+
+# enable after webapp_facts merged
+# - name: get the web app
+# azure_rm_webapp_facts:
+# resource_group: "{{ resource_group }}"
+# name: "{{ win_app_name }}2"
+# register: stopped
+
+# - name: assert web app is stopped
+# assert:
+# that:
+# - stopped.properties.state == "Stopped"
+
+- name: Create a windows web app with existing app service plan, try to update some root level params
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}3"
+ plan: "{{ win_plan_name }}"
+ dns_registration: true
+ https_only: true
+ tags:
+ testwebapptag: test
+ register: output
+
+- name: get web app with resource group and tag
+ azure_rm_webapp_facts:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}3"
+ tags:
+ - testwebapptag
+ register: output
+
+- assert:
+ that:
+ - output.webapps | length == 1
+
+- name: Create a win web app with a specific java run time
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}4"
+ plan: "{{ win_plan_name }}"
+ frameworks:
+ - name: "java"
+ version: "1.8"
+ settings:
+ java_container: "Tomcat"
+ java_container_version: "8.0"
+ app_settings:
+ testkey: "testvalue"
+ register: output
+
+- name: assert the web app was created
+ assert:
+ that: output.changed
+
+- name: get web app with name
+ azure_rm_webapp_facts:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}4"
+ register: output
+
+- assert:
+ that:
+ - output.webapps | length == 1
+ - output.webapps[0].app_settings | length == 1
+ - output.webapps[0].frameworks | length > 1 # there's default frameworks eg net_framework
+
+- name: Update app settings and framework
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}4"
+ plan: "{{ win_plan_name }}"
+ frameworks:
+ - name: "java"
+ version: "1.7"
+ settings:
+ java_container: "Tomcat"
+ java_container_version: "8.5"
+ app_settings:
+ testkey2: "testvalue2"
+ register: output
+
+- name: Assert the web app was updated
+ assert:
+ that:
+ - output.changed
+
+- name: get web app with name
+ azure_rm_webapp_facts:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}4"
+ register: output
+
+- name: Assert updating
+ assert:
+ that:
+ - output.webapps[0].app_settings | length == 2
+ - output.webapps[0].app_settings['testkey'] == 'testvalue'
+ - output.webapps[0].app_settings['testkey2'] == 'testvalue2'
+
+- name: get web app with return publishing profile
+ azure_rm_webapp_facts:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}4"
+ return_publish_profile: true
+ register: output
+
+- assert:
+ that:
+ - output.webapps | length == 1
+ - output.webapps[0].publishing_username != ""
+ - output.webapps[0].publishing_password != ""
+
+- name: Purge all existing app settings
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}4"
+ plan: "{{ win_plan_name }}"
+ purge_app_settings: true
+ register: output
+
+- name: Assert the web app was updated
+ assert:
+ that: output.changed
+
+- name: Create a win web app with python, node and php run times
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}5"
+ plan: "{{ win_plan_name }}"
+ frameworks:
+ - name: "python"
+ version: "2.7"
+ - name: node
+ version: "6.6"
+ - name: "php"
+ version: "7.0"
+ register: output
+
+- name: Assert the web app was created
+ assert:
+ that: output.changed
+
+- name: Create a docker web app with some app settings
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}6"
+ plan:
+ resource_group: "{{ linux_app_plan_resource_group }}"
+ name: "{{ linux_plan_name }}"
+ is_linux: true
+ sku: S1
+ number_of_workers: 1
+ container_settings:
+ name: "ansible/ansible:ubuntu1404"
+ register: output
+
+- name: Assert the web app was created
+ assert:
+ that: output.changed
+
+- name: Create a docker web app with private acr registry
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}7"
+ plan:
+ resource_group: "{{ linux_app_plan_resource_group }}"
+ name: "{{ linux_plan_name }}"
+ container_settings:
+ name: "ansible/ansible:ubuntu1404"
+ registry_server_url: test.io
+ registry_server_user: user
+ registry_server_password: password
+ register: output
+
+- name: Assert the web app was created
+ assert:
+ that: output.changed
+
+- name: Create a linux web app with nodejs framework
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}8"
+ plan:
+ resource_group: "{{ linux_app_plan_resource_group }}"
+ name: "{{ linux_plan_name }}"
+ frameworks:
+ - name: node
+ version: "6.6"
+ register: output
+
+- name: Create the linux web app again (should be idempotent)
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}8"
+ plan:
+ resource_group: "{{ linux_app_plan_resource_group }}"
+ name: "{{ linux_plan_name }}"
+ frameworks:
+ - name: node
+ version: "6.6"
+ register: output
+
+- assert:
+ that: not output.changed
+
+- name: Update nodejs framework
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}8"
+ plan:
+ resource_group: "{{ linux_app_plan_resource_group }}"
+ name: "{{ linux_plan_name }}"
+ frameworks:
+ - name: node
+ version: "6.9"
+ register: output
+
+- name: Assert the web app was updated
+ assert:
+ that: output.changed
+
+- name: Create a linux web app with deployment source github
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}10"
+ plan:
+ resource_group: "{{ linux_app_plan_resource_group }}"
+ name: "{{ linux_plan_name }}"
+ deployment_source:
+ url: "https://github.com/test/test"
+ branch: master
+ scm_type: GitHub
+ register: output
+
+- name: Assert the web app was created
+ assert:
+ that: output.changed
+
+- name: Delete web app
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}8"
+ state: absent
+ register: output
+
+- name: Assert the web app was deleted
+ assert:
+ that: output.changed
+
+- name: assert error that java is mutually exclusive with frameworks
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}11"
+ plan: "{{ win_plan_name }}"
+ frameworks:
+ - name: "python"
+ version: "2.7"
+ - name: "java"
+ version: "1.8"
+ register: fail_win_java_version_mutual_exclusive
+ failed_when: 'fail_win_java_version_mutual_exclusive.msg != "Java is mutually exclusive with other frameworks."'
+
+- name: assert error that a linux web app can specify only one framework
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ linux_plan_name }}12"
+ plan:
+ resource_group: "{{ linux_app_plan_resource_group }}"
+ name: "{{ linux_plan_name }}"
+ frameworks:
+ - name: "python"
+ version: "2.7"
+ - name: "node"
+ version: "6.6"
+ register: fail_linux_one_framework_only
+ failed_when: fail_linux_one_framework_only.msg != "Can specify one framework only for Linux web app."
+
+- name: Create a linux web app with java tomcat container
+ azure_rm_webapp:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}13"
+ plan:
+ resource_group: "{{ linux_app_plan_resource_group }}"
+ name: "{{ linux_plan_name }}"
+ frameworks:
+ - name: java
+ version: "8"
+ settings:
+ java_container: "tomcat"
+ java_container_version: "8.5"
+ register: output
+
+- name: Assert the web app was created
+ assert:
+ that: output.changed
+
+- name: Get facts with publish profile
+ azure_rm_webapp_facts:
+ resource_group: "{{ resource_group }}"
+ name: "{{ win_app_name }}13"
+ no_log: true
+ register: facts
+
+- name: Assert publish profile returned
+ assert:
+ that:
+ - facts.webapps[0].ftp_publish_url != ''
+
+- name: Create a webapp slot (Check mode)
+ azure_rm_webappslot:
+ resource_group: "{{ resource_group }}"
+ webapp_name: "{{ win_app_name }}13"
+ name: "{{ slot1_name }}"
+ configuration_source: "{{ win_app_name }}13"
+ app_settings:
+ testkey: testvalue
+ check_mode: yes
+ register: output
+
+- name: Assert slot check mode creation
+ assert:
+ that:
+ - output.changed
+
+- name: Create a webapp slot
+ azure_rm_webappslot:
+ resource_group: "{{ resource_group }}"
+ webapp_name: "{{ win_app_name }}13"
+ name: "{{ slot1_name }}"
+ configuration_source: "{{ win_app_name }}13"
+ app_settings:
+ testkey: testvalueslot
+ register: output
+
+- name: Assert slot creation
+ assert:
+ that:
+ - output.changed
+
+- name: Update webapp slot (idempotence)
+ azure_rm_webappslot:
+ resource_group: "{{ resource_group }}"
+ webapp_name: "{{ win_app_name }}13"
+ name: "{{ slot1_name }}"
+ app_settings:
+ testkey: testvalueslot
+ register: output
+
+- name: Assert idempotence
+ assert:
+ that:
+ - not output.changed
+
+- name: Update webapp slot
+ azure_rm_webappslot:
+ resource_group: "{{ resource_group }}"
+ webapp_name: "{{ win_app_name }}13"
+ name: "{{ slot1_name }}"
+ frameworks:
+ - name: "node"
+ version: "10.1"
+ app_settings:
+ testkey: testvalue2
+ register: output
+
+- name: Assert updating
+ assert:
+ that:
+ - output.changed
+
+- name: Swap webapp slot
+ azure_rm_webappslot:
+ resource_group: "{{ resource_group }}"
+ webapp_name: "{{ win_app_name }}13"
+ name: "{{ slot1_name }}"
+ swap:
+ action: swap
+ register: output
+
+- name: Assert swap
+ assert:
+ that:
+ - output.changed
+
+- name: Stop webapp slot
+ azure_rm_webappslot:
+ resource_group: "{{ resource_group }}"
+ webapp_name: "{{ win_app_name }}13"
+ name: "{{ slot1_name }}"
+ app_state: stopped
+ register: output
+
+- name: Assert stopped
+ assert:
+ that:
+ - output.changed \ No newline at end of file
diff --git a/test/integration/targets/incidental_cloudformation/aliases b/test/integration/targets/incidental_cloudformation/aliases
new file mode 100644
index 0000000000..29f60feb44
--- /dev/null
+++ b/test/integration/targets/incidental_cloudformation/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/incidental
diff --git a/test/integration/targets/incidental_cloudformation/defaults/main.yml b/test/integration/targets/incidental_cloudformation/defaults/main.yml
new file mode 100644
index 0000000000..aaf0ca7e61
--- /dev/null
+++ b/test/integration/targets/incidental_cloudformation/defaults/main.yml
@@ -0,0 +1,8 @@
+stack_name: "{{ resource_prefix }}"
+
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
+vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
+subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
+
+ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'
diff --git a/test/integration/targets/incidental_cloudformation/files/cf_template.json b/test/integration/targets/incidental_cloudformation/files/cf_template.json
new file mode 100644
index 0000000000..ff4c5693b0
--- /dev/null
+++ b/test/integration/targets/incidental_cloudformation/files/cf_template.json
@@ -0,0 +1,37 @@
+{
+ "AWSTemplateFormatVersion" : "2010-09-09",
+
+ "Description" : "Create an Amazon EC2 instance.",
+
+ "Parameters" : {
+ "InstanceType" : {
+ "Description" : "EC2 instance type",
+ "Type" : "String",
+ "Default" : "t3.nano",
+ "AllowedValues" : [ "t3.micro", "t3.nano"]
+ },
+ "ImageId" : {
+ "Type" : "String"
+ },
+ "SubnetId" : {
+ "Type" : "String"
+ }
+ },
+
+ "Resources" : {
+ "EC2Instance" : {
+ "Type" : "AWS::EC2::Instance",
+ "Properties" : {
+ "InstanceType" : { "Ref" : "InstanceType" },
+ "ImageId" : { "Ref" : "ImageId" },
+ "SubnetId": { "Ref" : "SubnetId" }
+ }
+ }
+ },
+
+ "Outputs" : {
+ "InstanceId" : {
+ "Value" : { "Ref" : "EC2Instance" }
+ }
+ }
+}
diff --git a/test/integration/targets/incidental_cloudformation/tasks/main.yml b/test/integration/targets/incidental_cloudformation/tasks/main.yml
new file mode 100644
index 0000000000..9b89722b20
--- /dev/null
+++ b/test/integration/targets/incidental_cloudformation/tasks/main.yml
@@ -0,0 +1,463 @@
+---
+
+- module_defaults:
+ group/aws:
+ aws_access_key: '{{ aws_access_key | default(omit) }}'
+ aws_secret_key: '{{ aws_secret_key | default(omit) }}'
+ security_token: '{{ security_token | default(omit) }}'
+ region: '{{ aws_region | default(omit) }}'
+
+ block:
+
+ # ==== Env setup ==========================================================
+ - name: list available AZs
+ aws_az_info:
+ register: region_azs
+
+ - name: pick an AZ for testing
+ set_fact:
+ availability_zone: "{{ region_azs.availability_zones[0].zone_name }}"
+
+ - name: Create a test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: Cloudformation testing
+ register: testing_vpc
+
+ - name: Create a test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ az: "{{ availability_zone }}"
+ register: testing_subnet
+
+ - name: Find AMI to use
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ register: ec2_amis
+
+ - name: Set fact with latest AMI
+ vars:
+ latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
+ set_fact:
+ ec2_ami_image: '{{ latest_ami.image_id }}'
+
+ # ==== Cloudformation tests ===============================================
+
+ # 1. Basic stack creation (check mode, actual run and idempotency)
+ # 2. Tags
+ # 3. cloudformation_info tests (basic + all_facts)
+ # 4. termination_protection
+ # 5. create_changeset + changeset_name
+
+ # There is still scope to add tests for -
+ # 1. capabilities
+ # 2. stack_policy
+ # 3. on_create_failure (covered in unit tests)
+ # 4. Passing in a role
+ # 5. nested stacks?
+
+
+ - name: create a cloudformation stack (check mode)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+ check_mode: yes
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg"
+
+ - name: create a cloudformation stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'events' in cf_stack"
+ - "'output' in cf_stack and 'Stack CREATE complete' in cf_stack.output"
+ - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
+ - "'stack_resources' in cf_stack"
+
+ - name: create a cloudformation stack (check mode) (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+ check_mode: yes
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+
+ - name: create a cloudformation stack (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'output' in cf_stack and 'Stack is already up-to-date.' in cf_stack.output"
+ - "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
+ - "'stack_resources' in cf_stack"
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'cloudformation' in stack_info"
+ - "stack_info.cloudformation | length == 1"
+ - "stack_name in stack_info.cloudformation"
+ - "'stack_description' in stack_info.cloudformation[stack_name]"
+ - "'stack_outputs' in stack_info.cloudformation[stack_name]"
+ - "'stack_parameters' in stack_info.cloudformation[stack_name]"
+ - "'stack_tags' in stack_info.cloudformation[stack_name]"
+ - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'cloudformation' in stack_info"
+ - "stack_info.cloudformation | length == 1"
+ - "stack_name in stack_info.cloudformation"
+ - "'stack_description' in stack_info.cloudformation[stack_name]"
+ - "'stack_outputs' in stack_info.cloudformation[stack_name]"
+ - "'stack_parameters' in stack_info.cloudformation[stack_name]"
+ - "'stack_tags' in stack_info.cloudformation[stack_name]"
+ - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
+
+ - name: get stack details (all_facts)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ all_facts: yes
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'stack_events' in stack_info.cloudformation[stack_name]"
+ - "'stack_policy' in stack_info.cloudformation[stack_name]"
+ - "'stack_resource_list' in stack_info.cloudformation[stack_name]"
+ - "'stack_resources' in stack_info.cloudformation[stack_name]"
+ - "'stack_template' in stack_info.cloudformation[stack_name]"
+
+ - name: get stack details (all_facts) (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ all_facts: yes
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "'stack_events' in stack_info.cloudformation[stack_name]"
+ - "'stack_policy' in stack_info.cloudformation[stack_name]"
+ - "'stack_resource_list' in stack_info.cloudformation[stack_name]"
+ - "'stack_resources' in stack_info.cloudformation[stack_name]"
+ - "'stack_template' in stack_info.cloudformation[stack_name]"
+
+ # ==== Cloudformation tests (create changeset) ============================
+
+ # try to create a changeset by changing instance type
+ - name: create a changeset
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ create_changeset: yes
+ changeset_name: "test-changeset"
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.micro"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: create_changeset_result
+
+ - name: assert changeset created
+ assert:
+ that:
+ - "create_changeset_result.changed"
+ - "'change_set_id' in create_changeset_result"
+ - "'Stack CREATE_CHANGESET complete' in create_changeset_result.output"
+
+ - name: get stack details with changesets
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ stack_change_sets: True
+ register: stack_info
+
+ - name: assert changesets in info
+ assert:
+ that:
+ - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
+
+ - name: get stack details with changesets (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ stack_change_sets: True
+ register: stack_info
+ check_mode: yes
+
+ - name: assert changesets in info
+ assert:
+ that:
+ - "'stack_change_sets' in stack_info.cloudformation[stack_name]"
+
+ # try to create an empty changeset by passing in unchanged template
+ - name: create a changeset
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ create_changeset: yes
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: create_changeset_result
+
+ - name: assert changeset created
+ assert:
+ that:
+ - "not create_changeset_result.changed"
+ - "'The created Change Set did not contain any changes to this stack and was deleted.' in create_changeset_result.output"
+
+ # ==== Cloudformation tests (termination_protection) ======================
+
+ - name: set termination protection to true
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ termination_protection: yes
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+# This fails - #65592
+# - name: check task return attributes
+# assert:
+# that:
+# - cf_stack.changed
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: set termination protection to false
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ termination_protection: no
+ template_body: "{{ lookup('file','cf_template.json') }}"
+ template_parameters:
+ InstanceType: "t3.nano"
+ ImageId: "{{ ec2_ami_image }}"
+ SubnetId: "{{ testing_subnet.subnet.id }}"
+ tags:
+ Stack: "{{ stack_name }}"
+ test: "{{ resource_prefix }}"
+ register: cf_stack
+
+# This fails - #65592
+# - name: check task return attributes
+# assert:
+# that:
+# - cf_stack.changed
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
+
+ # ==== Cloudformation tests (delete stack tests) ==========================
+
+ - name: delete cloudformation stack (check mode)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ check_mode: yes
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'msg' in cf_stack and 'Stack would be deleted' in cf_stack.msg"
+
+ - name: delete cloudformation stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - cf_stack.changed
+ - "'output' in cf_stack and 'Stack Deleted' in cf_stack.output"
+
+ - name: delete cloudformation stack (check mode) (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ check_mode: yes
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'msg' in cf_stack"
+ - >-
+ "Stack doesn't exist" in cf_stack.msg
+
+ - name: delete cloudformation stack (idempotent)
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ register: cf_stack
+
+ - name: check task return attributes
+ assert:
+ that:
+ - not cf_stack.changed
+ - "'output' in cf_stack and 'Stack not found.' in cf_stack.output"
+
+ - name: get stack details
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation"
+
+ - name: get stack details (checkmode)
+ cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: stack_info
+ check_mode: yes
+
+ - name: assert stack info
+ assert:
+ that:
+ - "not stack_info.cloudformation"
+
+ # ==== Cleanup ============================================================
+
+ always:
+
+ - name: delete stack
+ cloudformation:
+ stack_name: "{{ stack_name }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: Delete test subnet
+ ec2_vpc_subnet:
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_cidr }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: Delete test VPC
+ ec2_vpc_net:
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ state: absent
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_cs_common/aliases b/test/integration/targets/incidental_cs_common/aliases
new file mode 100644
index 0000000000..136c05e0d0
--- /dev/null
+++ b/test/integration/targets/incidental_cs_common/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_cs_common/defaults/main.yml b/test/integration/targets/incidental_cs_common/defaults/main.yml
new file mode 100644
index 0000000000..942316bdd4
--- /dev/null
+++ b/test/integration/targets/incidental_cs_common/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+cs_resource_prefix: "cs-{{ (ansible_date_time.iso8601_micro | to_uuid).split('-')[0] }}"
+cs_common_template: CentOS 5.6 (64-bit) no GUI (Simulator)
+cs_common_service_offering: Small Instance
+cs_common_zone_adv: Sandbox-simulator-advanced
+cs_common_zone_basic: Sandbox-simulator-basic
diff --git a/test/integration/targets/incidental_cs_role_permission/aliases b/test/integration/targets/incidental_cs_role_permission/aliases
new file mode 100644
index 0000000000..e50e650e98
--- /dev/null
+++ b/test/integration/targets/incidental_cs_role_permission/aliases
@@ -0,0 +1,2 @@
+cloud/cs
+shippable/cs/incidental
diff --git a/test/integration/targets/incidental_cs_role_permission/meta/main.yml b/test/integration/targets/incidental_cs_role_permission/meta/main.yml
new file mode 100644
index 0000000000..d46613c55f
--- /dev/null
+++ b/test/integration/targets/incidental_cs_role_permission/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - incidental_cs_common
diff --git a/test/integration/targets/incidental_cs_role_permission/tasks/main.yml b/test/integration/targets/incidental_cs_role_permission/tasks/main.yml
new file mode 100644
index 0000000000..95e2df84d9
--- /dev/null
+++ b/test/integration/targets/incidental_cs_role_permission/tasks/main.yml
@@ -0,0 +1,303 @@
+- name: pre-setup
+ cs_role:
+ name: "testRole"
+ register: testRole
+- name: verify pre-setup
+ assert:
+ that:
+ - testRole is successful
+
+- name: setup
+ cs_role_permission:
+ name: "fakeRolePerm"
+ role: "{{ testRole.id }}"
+ state: absent
+ register: roleperm
+- name: verify setup
+ assert:
+ that:
+ - roleperm is successful
+
+- name: setup2
+ cs_role_permission:
+ name: "fakeRolePerm2"
+ role: "{{ testRole.id }}"
+ state: absent
+ register: roleperm2
+- name: verify setup2
+ assert:
+ that:
+ - roleperm2 is successful
+
+- name: test fail if missing name
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ register: roleperm
+ ignore_errors: true
+- name: verify results of fail if missing name
+ assert:
+ that:
+ - roleperm is failed
+ - 'roleperm.msg == "missing required arguments: name"'
+
+- name: test fail if missing role
+ cs_role_permission:
+ name: "fakeRolePerm"
+ register: roleperm
+ ignore_errors: true
+- name: verify results of fail if missing role
+ assert:
+ that:
+ - roleperm is failed
+ - 'roleperm.msg == "missing required arguments: role"'
+
+- name: test fail if role does not exist
+ cs_role_permission:
+ name: "fakeRolePerm"
+ role: "testtest"
+ register: roleperm
+ ignore_errors: true
+- name: verify results of fail if role does not exist
+ assert:
+ that:
+ - roleperm is failed
+ - roleperm.msg == "Role 'testtest' not found"
+
+- name: test fail if state is incorrect
+ cs_role_permission:
+ state: badstate
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ permission: allow
+ register: roleperm
+ ignore_errors: true
+- name: verify results of fail if state is incorrect
+ assert:
+ that:
+ - roleperm is failed
+ - 'roleperm.msg == "value of state must be one of: present, absent, got: badstate"'
+
+- name: test create role permission in check mode
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ permission: allow
+ description: "fakeRolePerm description"
+ register: roleperm
+ check_mode: yes
+- name: verify results of role permission in check mode
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is changed
+
+- name: test create role permission
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ permission: allow
+ description: "fakeRolePerm description"
+ register: roleperm
+- name: verify results of role permission
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is changed
+ - roleperm.name == "fakeRolePerm"
+ - roleperm.permission == "allow"
+ - roleperm.description == "fakeRolePerm description"
+
+- name: test create role permission idempotency
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ permission: allow
+ description: "fakeRolePerm description"
+ register: roleperm
+- name: verify results of role permission idempotency
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is not changed
+ - roleperm.name == "fakeRolePerm"
+ - roleperm.permission == "allow"
+ - roleperm.description == "fakeRolePerm description"
+
+- name: test update role permission in check_mode
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ permission: deny
+ description: "fakeRolePerm description"
+ register: roleperm
+ check_mode: yes
+- name: verify results of update role permission in check mode
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is changed
+ - roleperm.name == "fakeRolePerm"
+ - roleperm.permission == "allow"
+ - roleperm.description == "fakeRolePerm description"
+
+- name: test update role permission
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ permission: deny
+ description: "fakeRolePerm description"
+ register: roleperm
+- name: verify results of update role permission
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is changed
+ - roleperm.name == "fakeRolePerm"
+ - roleperm.permission == "deny"
+ - roleperm.description == "fakeRolePerm description"
+
+- name: test update role permission idempotency
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ permission: deny
+ description: "fakeRolePerm description"
+ register: roleperm
+- name: verify results of update role permission idempotency
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is not changed
+ - roleperm.name == "fakeRolePerm"
+ - roleperm.permission == "deny"
+ - roleperm.description == "fakeRolePerm description"
+
+- name: test create a second role permission
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm2"
+ permission: allow
+ register: roleperm2
+- name: verify results of create a second role permission
+ assert:
+ that:
+ - roleperm2 is successful
+ - roleperm2 is changed
+ - roleperm2.name == "fakeRolePerm2"
+
+- name: test update rules order in check_mode
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ parent: "{{ roleperm2.id }}"
+ register: roleperm
+ check_mode: yes
+- name: verify results of update rule order check mode
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is changed
+ - roleperm.name == "fakeRolePerm"
+
+- name: test update rules order
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ parent: "{{ roleperm2.id }}"
+ register: roleperm
+- name: verify results of update rule order
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is changed
+ - roleperm.name == "fakeRolePerm"
+
+- name: test update rules order to the top of the list
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ parent: 0
+ register: roleperm
+- name: verify results of update rule order to the top of the list
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is changed
+ - roleperm.name == "fakeRolePerm"
+
+- name: test update rules order with parent NAME
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ parent: "{{ roleperm2.name }}"
+ register: roleperm
+- name: verify results of update rule order with parent NAME
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is changed
+ - roleperm.name == "fakeRolePerm"
+
+- name: test fail if permission AND parent args are present
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ permission: allow
+ parent: 0
+ register: roleperm
+ ignore_errors: true
+- name: verify results of fail if permission AND parent args are present
+ assert:
+ that:
+ - roleperm is failed
+ - 'roleperm.msg == "parameters are mutually exclusive: permission|parent"'
+
+- name: test fail if parent does not exist
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ parent: "badParent"
+ register: roleperm
+ ignore_errors: true
+- name: verify results of fail if parent does not exist
+ assert:
+ that:
+ - roleperm is failed
+ - roleperm.msg == "Parent rule 'badParent' not found"
+
+- name: test remove role permission in check_mode
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ state: absent
+ register: roleperm
+ check_mode: yes
+- name: verify results of remove role permission in check_mode
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is changed
+
+- name: test remove role permission
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm"
+ state: absent
+ register: roleperm
+- name: verify results of remove role permission
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is changed
+
+- name: remove second role permission
+ cs_role_permission:
+ role: "{{ testRole.id }}"
+ name: "fakeRolePerm2"
+ state: absent
+ register: roleperm
+- name: verify results of remove second role permission
+ assert:
+ that:
+ - roleperm is successful
+ - roleperm is changed
diff --git a/test/integration/targets/incidental_cs_service_offering/aliases b/test/integration/targets/incidental_cs_service_offering/aliases
new file mode 100644
index 0000000000..e50e650e98
--- /dev/null
+++ b/test/integration/targets/incidental_cs_service_offering/aliases
@@ -0,0 +1,2 @@
+cloud/cs
+shippable/cs/incidental
diff --git a/test/integration/targets/incidental_cs_service_offering/meta/main.yml b/test/integration/targets/incidental_cs_service_offering/meta/main.yml
new file mode 100644
index 0000000000..d46613c55f
--- /dev/null
+++ b/test/integration/targets/incidental_cs_service_offering/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - incidental_cs_common
diff --git a/test/integration/targets/incidental_cs_service_offering/tasks/guest_vm_service_offering.yml b/test/integration/targets/incidental_cs_service_offering/tasks/guest_vm_service_offering.yml
new file mode 100644
index 0000000000..f7aee3c8a2
--- /dev/null
+++ b/test/integration/targets/incidental_cs_service_offering/tasks/guest_vm_service_offering.yml
@@ -0,0 +1,223 @@
+---
+- name: setup service offering
+ cs_service_offering:
+ name: Micro
+ state: absent
+ register: so
+- name: verify setup service offering
+ assert:
+ that:
+ - so is successful
+
+- name: create service offering in check mode
+ cs_service_offering:
+ name: Micro
+ display_text: Micro 512mb 1cpu
+ cpu_number: 1
+ cpu_speed: 2198
+ memory: 512
+ host_tags: eco
+ storage_tags:
+ - eco
+ - backup
+ storage_type: local
+ register: so
+ check_mode: true
+- name: verify create service offering in check mode
+ assert:
+ that:
+ - so is changed
+
+- name: create service offering
+ cs_service_offering:
+ name: Micro
+ display_text: Micro 512mb 1cpu
+ cpu_number: 1
+ cpu_speed: 2198
+ memory: 512
+ host_tags: eco
+ storage_tags:
+ - eco
+ - backup
+ storage_type: local
+ register: so
+- name: verify create service offering
+ assert:
+ that:
+ - so is changed
+ - so.name == "Micro"
+ - so.display_text == "Micro 512mb 1cpu"
+ - so.cpu_number == 1
+ - so.cpu_speed == 2198
+ - so.memory == 512
+ - so.host_tags == ['eco']
+ - so.storage_tags == ['eco', 'backup']
+ - so.storage_type == "local"
+
+- name: create service offering idempotence
+ cs_service_offering:
+ name: Micro
+ display_text: Micro 512mb 1cpu
+ cpu_number: 1
+ cpu_speed: 2198
+ memory: 512
+ host_tags: eco
+ storage_tags:
+ - eco
+ - backup
+ storage_type: local
+ register: so
+- name: verify create service offering idempotence
+ assert:
+ that:
+ - so is not changed
+ - so.name == "Micro"
+ - so.display_text == "Micro 512mb 1cpu"
+ - so.cpu_number == 1
+ - so.cpu_speed == 2198
+ - so.memory == 512
+ - so.host_tags == ['eco']
+ - so.storage_tags == ['eco', 'backup']
+ - so.storage_type == "local"
+
+- name: update service offering in check mode
+ cs_service_offering:
+ name: Micro
+ display_text: Micro RAM 512MB 1vCPU
+ register: so
+ check_mode: true
+- name: verify update service offering in check mode
+ assert:
+ that:
+ - so is changed
+ - so.name == "Micro"
+ - so.display_text == "Micro 512mb 1cpu"
+ - so.cpu_number == 1
+ - so.cpu_speed == 2198
+ - so.memory == 512
+ - so.host_tags == ['eco']
+ - so.storage_tags == ['eco', 'backup']
+ - so.storage_type == "local"
+
+- name: update service offering
+ cs_service_offering:
+ name: Micro
+ display_text: Micro RAM 512MB 1vCPU
+ register: so
+- name: verify update service offering
+ assert:
+ that:
+ - so is changed
+ - so.name == "Micro"
+ - so.display_text == "Micro RAM 512MB 1vCPU"
+ - so.cpu_number == 1
+ - so.cpu_speed == 2198
+ - so.memory == 512
+ - so.host_tags == ['eco']
+ - so.storage_tags == ['eco', 'backup']
+ - so.storage_type == "local"
+
+- name: update service offering idempotence
+ cs_service_offering:
+ name: Micro
+ display_text: Micro RAM 512MB 1vCPU
+ register: so
+- name: verify update service offering idempotence
+ assert:
+ that:
+ - so is not changed
+ - so.name == "Micro"
+ - so.display_text == "Micro RAM 512MB 1vCPU"
+ - so.cpu_number == 1
+ - so.cpu_speed == 2198
+ - so.memory == 512
+ - so.host_tags == ['eco']
+ - so.storage_tags == ['eco', 'backup']
+ - so.storage_type == "local"
+
+- name: remove service offering in check mode
+ cs_service_offering:
+ name: Micro
+ state: absent
+ check_mode: true
+ register: so
+- name: verify remove service offering in check mode
+ assert:
+ that:
+ - so is changed
+ - so.name == "Micro"
+ - so.display_text == "Micro RAM 512MB 1vCPU"
+ - so.cpu_number == 1
+ - so.cpu_speed == 2198
+ - so.memory == 512
+ - so.host_tags == ['eco']
+ - so.storage_tags == ['eco', 'backup']
+ - so.storage_type == "local"
+
+- name: remove service offering
+ cs_service_offering:
+ name: Micro
+ state: absent
+ register: so
+- name: verify remove service offering
+ assert:
+ that:
+ - so is changed
+ - so.name == "Micro"
+ - so.display_text == "Micro RAM 512MB 1vCPU"
+ - so.cpu_number == 1
+ - so.cpu_speed == 2198
+ - so.memory == 512
+ - so.host_tags == ['eco']
+ - so.storage_tags == ['eco', 'backup']
+ - so.storage_type == "local"
+
+- name: remove service offering idempotence
+ cs_service_offering:
+ name: Micro
+ state: absent
+ register: so
+- name: verify remove service offering idempotence
+ assert:
+ that:
+ - so is not changed
+
+- name: create custom service offering
+ cs_service_offering:
+ name: custom
+ display_text: custom offer
+ is_customized: yes
+ host_tags: eco
+ storage_tags:
+ - eco
+ - backup
+ storage_type: local
+ register: so
+- name: verify create custom service offering
+ assert:
+ that:
+ - so is changed
+ - so.name == "custom"
+ - so.display_text == "custom offer"
+ - so.is_customized == True
+ - so.cpu_number is not defined
+ - so.cpu_speed is not defined
+ - so.memory is not defined
+ - so.host_tags == ['eco']
+ - so.storage_tags == ['eco', 'backup']
+ - so.storage_type == "local"
+
+- name: remove custom service offering
+ cs_service_offering:
+ name: custom
+ state: absent
+ register: so
+- name: verify remove service offering
+ assert:
+ that:
+ - so is changed
+ - so.name == "custom"
+ - so.display_text == "custom offer"
+ - so.host_tags == ['eco']
+ - so.storage_tags == ['eco', 'backup']
+ - so.storage_type == "local"
diff --git a/test/integration/targets/incidental_cs_service_offering/tasks/main.yml b/test/integration/targets/incidental_cs_service_offering/tasks/main.yml
new file mode 100644
index 0000000000..581f7d74de
--- /dev/null
+++ b/test/integration/targets/incidental_cs_service_offering/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- import_tasks: guest_vm_service_offering.yml
+- import_tasks: system_vm_service_offering.yml \ No newline at end of file
diff --git a/test/integration/targets/incidental_cs_service_offering/tasks/system_vm_service_offering.yml b/test/integration/targets/incidental_cs_service_offering/tasks/system_vm_service_offering.yml
new file mode 100644
index 0000000000..4c63a4b9c8
--- /dev/null
+++ b/test/integration/targets/incidental_cs_service_offering/tasks/system_vm_service_offering.yml
@@ -0,0 +1,151 @@
+---
+- name: setup system offering
+ cs_service_offering:
+ name: System Offering for Ansible
+ is_system: true
+ state: absent
+ register: so
+- name: verify setup system offering
+ assert:
+ that:
+ - so is successful
+
+- name: fail missing storage type and is_system
+ cs_service_offering:
+ name: System Offering for Ansible
+ cpu_number: 1
+ cpu_speed: 500
+ memory: 512
+ host_tag: perf
+ storage_tag: perf
+ storage_type: shared
+ offer_ha: true
+ limit_cpu_usage: false
+ is_system: true
+ register: so
+ ignore_errors: true
+- name: verify fail missing storage type and is_system
+ assert:
+ that:
+ - so is failed
+ - so.msg.startswith('missing required arguments:')
+
+- name: create system service offering in check mode
+ cs_service_offering:
+ name: System Offering for Ansible
+ cpu_number: 1
+ cpu_speed: 500
+ memory: 512
+ host_tag: perf
+ storage_tag: perf
+ storage_type: shared
+ offer_ha: true
+ limit_cpu_usage: false
+ system_vm_type: domainrouter
+ is_system: true
+ register: so
+ check_mode: true
+- name: verify create system service offering in check mode
+ assert:
+ that:
+ - so is changed
+
+- name: create system service offering
+ cs_service_offering:
+ name: System Offering for Ansible
+ cpu_number: 1
+ cpu_speed: 500
+ memory: 512
+ host_tag: perf
+ storage_tag: perf
+ storage_type: shared
+ offer_ha: true
+ limit_cpu_usage: false
+ system_vm_type: domainrouter
+ is_system: true
+ register: so
+- name: verify create system service offering
+ assert:
+ that:
+ - so is changed
+ - so.name == "System Offering for Ansible"
+ - so.display_text == "System Offering for Ansible"
+ - so.cpu_number == 1
+ - so.cpu_speed == 500
+ - so.memory == 512
+ - so.host_tags == ['perf']
+ - so.storage_tags == ['perf']
+ - so.storage_type == "shared"
+ - so.offer_ha == true
+ - so.limit_cpu_usage == false
+ - so.system_vm_type == "domainrouter"
+ - so.is_system == true
+
+- name: create system service offering idempotence
+ cs_service_offering:
+ name: System Offering for Ansible
+ cpu_number: 1
+ cpu_speed: 500
+ memory: 512
+ host_tag: perf
+ storage_tag: perf
+ storage_type: shared
+ offer_ha: true
+ limit_cpu_usage: false
+ system_vm_type: domainrouter
+ is_system: true
+ register: so
+- name: verify create system service offering idempotence
+ assert:
+ that:
+ - so is not changed
+ - so.name == "System Offering for Ansible"
+ - so.display_text == "System Offering for Ansible"
+ - so.cpu_number == 1
+ - so.cpu_speed == 500
+ - so.memory == 512
+ - so.host_tags == ['perf']
+ - so.storage_tags == ['perf']
+ - so.storage_type == "shared"
+ - so.offer_ha == true
+ - so.limit_cpu_usage == false
+ - so.system_vm_type == "domainrouter"
+ - so.is_system == true
+
+- name: remove system service offering in check mode
+ cs_service_offering:
+ name: System Offering for Ansible
+ is_system: true
+ state: absent
+ check_mode: true
+ register: so
+- name: verify remove system service offering in check mode
+ assert:
+ that:
+ - so is changed
+ - so.name == "System Offering for Ansible"
+ - so.is_system == true
+
+- name: remove system service offering
+ cs_service_offering:
+ name: System Offering for Ansible
+ is_system: true
+ state: absent
+ register: so
+- name: verify remove system service offering
+ assert:
+ that:
+ - so is changed
+ - so.name == "System Offering for Ansible"
+ - so.is_system == true
+
+- name: remove system service offering idempotence
+ cs_service_offering:
+ name: System Offering for Ansible
+ is_system: true
+ state: absent
+ register: so
+- name: verify remove system service offering idempotence
+ assert:
+ that:
+ - so is not changed
diff --git a/test/integration/targets/incidental_ec2_instance/aliases b/test/integration/targets/incidental_ec2_instance/aliases
new file mode 100644
index 0000000000..29f60feb44
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/incidental
diff --git a/test/integration/targets/incidental_ec2_instance/inventory b/test/integration/targets/incidental_ec2_instance/inventory
new file mode 100644
index 0000000000..44b46ec88f
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/inventory
@@ -0,0 +1,17 @@
+[tests]
+# Sorted fastest to slowest
+version_fail_wrapper
+ebs_optimized
+block_devices
+cpu_options
+default_vpc_tests
+external_resource_attach
+instance_no_wait
+iam_instance_role
+termination_protection
+tags_and_vpc_settings
+checkmode_tests
+
+[all:vars]
+ansible_connection=local
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/incidental_ec2_instance/main.yml b/test/integration/targets/incidental_ec2_instance/main.yml
new file mode 100644
index 0000000000..7695f7bcb9
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/main.yml
@@ -0,0 +1,43 @@
+---
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
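+#
+# A hypothetical example: to add a test called 'my_new_test', you would append
+# 'my_new_test' to the [tests] group in the inventory file and create
+# roles/ec2_instance/tasks/my_new_test.yml; each host then picks up its
+# matching file via include_tasks in roles/ec2_instance/tasks/main.yml.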
+
+
+# Prepare the VPC and figure out which AMI to use
+- hosts: all
+ gather_facts: no
+ tasks:
+ - module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ vars:
+ # We can't just use "run_once" because the facts don't propagate when
+ # running an 'include' that was run_once
+ setup_run_once: yes
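+      # Instead, setup_run_once is read inside find_ami.yml and env_setup.yml,
+      # which gate their own task blocks with run_once based on this variable.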
+ block:
+ - include_role:
+ name: 'ec2_instance'
+ tasks_from: find_ami.yml
+ - include_role:
+ name: 'ec2_instance'
+ tasks_from: env_setup.yml
+ rescue:
+ - include_role:
+ name: 'ec2_instance'
+ tasks_from: env_cleanup.yml
+ run_once: yes
+ - fail:
+ msg: 'Environment preparation failed'
+ run_once: yes
+
+# VPC should get cleaned up once all hosts have run
+- hosts: all
+ gather_facts: no
+ strategy: free
+ #serial: 10
+ roles:
+ - ec2_instance
diff --git a/test/integration/targets/incidental_ec2_instance/meta/main.yml b/test/integration/targets/incidental_ec2_instance/meta/main.yml
new file mode 100644
index 0000000000..aadd21abb9
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - incidental_setup_ec2
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/defaults/main.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/defaults/main.yml
new file mode 100644
index 0000000000..8e70ab6933
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+# defaults file for ec2_instance
+ec2_instance_owner: 'integration-run-{{ resource_prefix }}'
+ec2_instance_type: 't3.micro'
+ec2_instance_tag_TestId: '{{ resource_prefix }}-{{ inventory_hostname }}'
+ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'
+
+vpc_name: '{{ resource_prefix }}-vpc'
+vpc_seed: '{{ resource_prefix }}'
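+# random() with a fixed seed is deterministic, so every host (and every rerun
+# with the same resource_prefix) derives the same 10.x.0.0/16 network and the
+# matching subnet CIDRs below.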
+vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
+subnet_a_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
+subnet_a_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.32.'
+subnet_b_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.33.0/24'
+subnet_b_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.33.'
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/files/assume-role-policy.json b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/files/assume-role-policy.json
new file mode 100644
index 0000000000..72413abdd3
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/files/assume-role-policy.json
@@ -0,0 +1,13 @@
+{
+ "Version": "2008-10-17",
+ "Statement": [
+ {
+ "Sid": "",
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/meta/main.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/meta/main.yml
new file mode 100644
index 0000000000..aa8ab19226
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_setup_ec2
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/block_devices.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/block_devices.yml
new file mode 100644
index 0000000000..0a8ab63f08
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/block_devices.yml
@@ -0,0 +1,82 @@
+- block:
+ - name: "New instance with an extra block device"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-ebs-vols"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ volumes:
+ - device_name: /dev/sdb
+ ebs:
+ volume_size: 20
+ delete_on_termination: true
+ volume_type: standard
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: true
+ register: block_device_instances
+
+ - name: "Gather instance info"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-vols"
+ register: block_device_instances_info
+
+ - assert:
+ that:
+ - block_device_instances is not failed
+ - block_device_instances is changed
+ - block_device_instances_info.instances[0].block_device_mappings[0]
+ - block_device_instances_info.instances[0].block_device_mappings[1]
+ - block_device_instances_info.instances[0].block_device_mappings[1].device_name == '/dev/sdb'
+
+ - name: "New instance with an extra block device (check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-ebs-vols-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ volumes:
+ - device_name: /dev/sdb
+ ebs:
+ volume_size: 20
+ delete_on_termination: true
+ volume_type: standard
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-vols"
+ "instance-state-name": "running"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-vols-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Terminate instances"
+ ec2_instance:
+ state: absent
+ instance_ids: "{{ block_device_instances.instance_ids }}"
+
+ always:
+ - name: "Terminate block_devices instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml
new file mode 100644
index 0000000000..b161eca636
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml
@@ -0,0 +1,172 @@
+- block:
+ - name: "Make basic instance"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ image_id: "{{ ec2_ami_image }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ wait: false
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: basic_instance
+
+ - name: "Make basic instance (check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-checkmode-comparison-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Stop instance (check mode)"
+ ec2_instance:
+ state: stopped
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ check_mode: yes
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_checkmode_stopinstance_fact
+
+ - name: "Verify that it was not stopped."
+ assert:
+ that:
+ - '"{{ confirm_checkmode_stopinstance_fact.instances[0].state.name }}" != "stopped"'
+
+ - name: "Stop instance."
+ ec2_instance:
+ state: stopped
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: instance_stop
+ until: not instance_stop.failed
+ retries: 10
+
+ - name: "fact stopped ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_stopinstance_fact
+
+ - name: "Verify that it was stopped."
+ assert:
+ that:
+ - '"{{ confirm_stopinstance_fact.instances[0].state.name }}" in ["stopped", "stopping"]'
+
+ - name: "Running instance in check mode."
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ check_mode: yes
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_checkmode_runninginstance_fact
+
+ - name: "Verify that it was not running."
+ assert:
+ that:
+ - '"{{ confirm_checkmode_runninginstance_fact.instances[0].state.name }}" != "running"'
+
+ - name: "Running instance."
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+
+ - name: "fact ec2 instance."
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_runninginstance_fact
+
+ - name: "Verify that it was running."
+ assert:
+ that:
+ - '"{{ confirm_runninginstance_fact.instances[0].state.name }}" == "running"'
+
+ - name: "Terminate instance in check mode."
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ check_mode: yes
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_checkmode_terminatedinstance_fact
+
+ - name: "Verify that it was not terminated,"
+ assert:
+ that:
+ - '"{{ confirm_checkmode_terminatedinstance_fact.instances[0].state.name }}" != "terminated"'
+
+ - name: "Terminate instance."
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-checkmode-comparison"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+
+ - name: "fact ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
+ register: confirm_terminatedinstance_fact
+
+ - name: "Verify that it was terminated,"
+ assert:
+ that:
+ - '"{{ confirm_terminatedinstance_fact.instances[0].state.name }}" == "terminated"'
+
+ always:
+ - name: "Terminate checkmode instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/cpu_options.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/cpu_options.yml
new file mode 100644
index 0000000000..947011f75e
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/cpu_options.yml
@@ -0,0 +1,86 @@
+- block:
+ - name: "create t3.nano instance with cpu_options"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+ wait: false
+ register: instance_creation
+
+ - name: "instance with cpu_options created with the right options"
+ assert:
+ that:
+ - instance_creation is success
+ - instance_creation is changed
+
+ - name: "modify cpu_options on existing instance (warning displayed)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ cpu_options:
+ core_count: 1
+ threads_per_core: 2
+ wait: false
+ register: cpu_options_update
+ ignore_errors: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
+ register: presented_instance_fact
+
+ - name: "modify cpu_options has no effect on existing instance"
+ assert:
+ that:
+ - cpu_options_update is success
+ - cpu_options_update is not changed
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "'{{ presented_instance_fact.instances.0.state.name }}' in ['running','pending']"
+ - "{{ presented_instance_fact.instances.0.cpu_options.core_count }} == 1"
+ - "{{ presented_instance_fact.instances.0.cpu_options.threads_per_core }} == 1"
+
+ - name: "create t3.nano instance with cpu_options(check mode)"
+ ec2_instance:
+ name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ instance_type: t3.nano
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+ check_mode: yes
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm existence of instance id."
+ assert:
+ that:
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ always:
+ - name: "Terminate cpu_options instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml
new file mode 100644
index 0000000000..a69dfe9f86
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml
@@ -0,0 +1,57 @@
+- block:
+ - name: "Make instance in a default subnet of the VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-default-vpc"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_group: "default"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: in_default_vpc
+
+ - name: "Make instance in a default subnet of the VPC(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-default-vpc-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_group: "default"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-default-vpc"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-default-vpc-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Terminate instances"
+ ec2_instance:
+ state: absent
+ instance_ids: "{{ in_default_vpc.instance_ids }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+
+ always:
+ - name: "Terminate vpc_tests instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml
new file mode 100644
index 0000000000..5bfdc086e7
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml
@@ -0,0 +1,41 @@
+- block:
+ - name: "Make EBS optimized instance in the testing subnet of the test VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ ebs_optimized: true
+ instance_type: t3.nano
+ wait: false
+ register: ebs_opt_in_vpc
+
+ - name: "Get ec2 instance info"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc"
+ register: ebs_opt_instance_info
+
+ - name: "Assert instance is ebs_optimized"
+ assert:
+ that:
+ - "{{ ebs_opt_instance_info.instances.0.ebs_optimized }}"
+
+ - name: "Terminate instances"
+ ec2_instance:
+ state: absent
+ instance_ids: "{{ ebs_opt_in_vpc.instance_ids }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+
+ always:
+ - name: "Terminate ebs_optimzed instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml
new file mode 100644
index 0000000000..1b6c79e0d9
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml
@@ -0,0 +1,93 @@
+- name: "remove Instances"
+ ec2_instance:
+ state: absent
+ filters:
+ vpc-id: "{{ testing_vpc.vpc.id }}"
+ wait: yes
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove ENIs"
+ ec2_eni_info:
+ filters:
+ vpc-id: "{{ testing_vpc.vpc.id }}"
+ register: enis
+
+- name: "delete all ENIs"
+ ec2_eni:
+ state: absent
+ eni_id: "{{ item.id }}"
+ until: removed is not failed
+ with_items: "{{ enis.network_interfaces }}"
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove the security group"
+ ec2_group:
+ state: absent
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove routing rules"
+ ec2_vpc_route_table:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ testing_subnet_a.subnet.id }}"
+ - "{{ testing_subnet_b.subnet.id }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove internet gateway"
+ ec2_vpc_igw:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove subnet A"
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_a_cidr }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove subnet B"
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_b_cidr }}"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
+
+- name: "remove the VPC"
+ ec2_vpc_net:
+ state: absent
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: Ansible Testing VPC
+ tenancy: default
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/env_setup.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/env_setup.yml
new file mode 100644
index 0000000000..6c76b7bf79
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/env_setup.yml
@@ -0,0 +1,79 @@
+- run_once: '{{ setup_run_once | default("no") | bool }}'
+ block:
+ - name: "fetch AZ availability"
+ aws_az_info:
+ register: az_info
+ - name: "Assert that we have multiple AZs available to us"
+ assert:
+ that: az_info.availability_zones | length >= 2
+
+ - name: "pick AZs"
+ set_fact:
+ subnet_a_az: '{{ az_info.availability_zones[0].zone_name }}'
+ subnet_b_az: '{{ az_info.availability_zones[1].zone_name }}'
+
+ - name: "Create VPC for use in testing"
+ ec2_vpc_net:
+ state: present
+ name: "{{ vpc_name }}"
+ cidr_block: "{{ vpc_cidr }}"
+ tags:
+ Name: Ansible ec2_instance Testing VPC
+ tenancy: default
+ register: testing_vpc
+
+ - name: "Create internet gateway for use in testing"
+ ec2_vpc_igw:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ register: igw
+
+ - name: "Create default subnet in zone A"
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_a_cidr }}"
+ az: "{{ subnet_a_az }}"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-a"
+ register: testing_subnet_a
+
+ - name: "Create secondary subnet in zone B"
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ cidr: "{{ subnet_b_cidr }}"
+ az: "{{ subnet_b_az }}"
+ resource_tags:
+ Name: "{{ resource_prefix }}-subnet-b"
+ register: testing_subnet_b
+
+ - name: "create routing rules"
+ ec2_vpc_route_table:
+ state: present
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ tags:
+ created: "{{ resource_prefix }}-route"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ subnets:
+ - "{{ testing_subnet_a.subnet.id }}"
+ - "{{ testing_subnet_b.subnet.id }}"
+
+ - name: "create a security group with the vpc"
+ ec2_group:
+ state: present
+ name: "{{ resource_prefix }}-sg"
+ description: a security group for ansible tests
+ vpc_id: "{{ testing_vpc.vpc.id }}"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ register: sg
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml
new file mode 100644
index 0000000000..2625977f41
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml
@@ -0,0 +1,129 @@
+- block:
+ # Make custom ENIs and attach via the `network` parameter
+ - ec2_eni:
+ state: present
+ delete_on_termination: true
+ subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: eni_a
+
+ - ec2_eni:
+ state: present
+ delete_on_termination: true
+ subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: eni_b
+
+ - ec2_eni:
+ state: present
+ delete_on_termination: true
+ subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ security_groups:
+ - "{{ sg.group_id }}"
+ register: eni_c
+
+ - ec2_key:
+ name: "{{ resource_prefix }}_test_key"
+
+ - name: "Make instance in the testing subnet created in the test VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-eni-vpc"
+ key_name: "{{ resource_prefix }}_test_key"
+ network:
+ interfaces:
+ - id: "{{ eni_a.interface.id }}"
+ image_id: "{{ ec2_ami_image }}"
+ availability_zone: '{{ subnet_b_az }}'
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: in_test_vpc
+
+ - name: "Gather {{ resource_prefix }}-test-eni-vpc info"
+ ec2_instance_info:
+ filters:
+ "tag:Name": '{{ resource_prefix }}-test-eni-vpc'
+ register: in_test_vpc_instance
+
+ - assert:
+ that:
+ - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"'
+ - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1'
+
+ - name: "Add a second interface"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-eni-vpc"
+ network:
+ interfaces:
+ - id: "{{ eni_a.interface.id }}"
+ - id: "{{ eni_b.interface.id }}"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: add_interface
+ until: add_interface is not failed
+ ignore_errors: yes
+ retries: 10
+
+ - name: "Make instance in the testing subnet created in the test VPC(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-eni-vpc-checkmode"
+ key_name: "{{ resource_prefix }}_test_key"
+ network:
+ interfaces:
+ - id: "{{ eni_c.interface.id }}"
+ image_id: "{{ ec2_ami_image }}"
+ availability_zone: '{{ subnet_b_az }}'
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-eni-vpc"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-eni-vpc-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm existence of instance id."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ always:
+ - name: "Terminate external_resource_attach instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
+
+ - ec2_key:
+ state: absent
+ name: "{{ resource_prefix }}_test_key"
+ ignore_errors: yes
+
+ - ec2_eni:
+ state: absent
+ eni_id: '{{ item.interface.id }}'
+ ignore_errors: yes
+ with_items:
+ - '{{ eni_a }}'
+ - '{{ eni_b }}'
+ - '{{ eni_c }}'
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/find_ami.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/find_ami.yml
new file mode 100644
index 0000000000..5c0e61f84c
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/find_ami.yml
@@ -0,0 +1,15 @@
+- run_once: '{{ setup_run_once | default("no") | bool }}'
+ block:
+ - name: "Find AMI to use"
+ run_once: yes
+ ec2_ami_info:
+ owners: 'amazon'
+ filters:
+ name: '{{ ec2_ami_name }}'
+ register: ec2_amis
+ - name: "Set fact with latest AMI"
+ run_once: yes
+ vars:
+ latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
+ set_fact:
+ ec2_ami_image: '{{ latest_ami.image_id }}'
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml
new file mode 100644
index 0000000000..6e29b74674
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml
@@ -0,0 +1,127 @@
+- block:
+ - name: "Create IAM role for test"
+ iam_role:
+ state: present
+ name: "ansible-test-sts-{{ resource_prefix }}-test-policy"
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonEC2ContainerServiceRole
+ register: iam_role
+
+ - name: "Create second IAM role for test"
+ iam_role:
+ state: present
+ name: "ansible-test-sts-{{ resource_prefix }}-test-policy-2"
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonEC2ContainerServiceRole
+ register: iam_role_2
+
+ - name: "wait 10 seconds for roles to become available"
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: "Make instance with an instance_role"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-instance-role"
+ image_id: "{{ ec2_ami_image }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ instance_role: "ansible-test-sts-{{ resource_prefix }}-test-policy"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: instance_with_role
+
+ - assert:
+ that:
+ - 'instance_with_role.instances[0].iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
+
+ - name: "Make instance with an instance_role(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-instance-role-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ instance_role: "{{ iam_role.arn.replace(':role/', ':instance-profile/') }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-instance-role"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-instance-role-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Update instance with new instance_role"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-instance-role"
+ image_id: "{{ ec2_ami_image }}"
+ security_groups: "{{ sg.group_id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ instance_role: "{{ iam_role_2.arn.replace(':role/', ':instance-profile/') }}"
+ vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: instance_with_updated_role
+
+ - name: "wait 10 seconds for role update to complete"
+ wait_for:
+ timeout: 10
+ delegate_to: localhost
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-instance-role"
+ register: updates_instance_info
+
+ - assert:
+ that:
+ - 'updates_instance_info.instances[0].iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
+ - 'updates_instance_info.instances[0].instance_id == instance_with_role.instances[0].instance_id'
+
+ always:
+ - name: "Terminate iam_instance_role instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
+
+ - name: "Delete IAM role for test"
+ iam_role:
+ state: absent
+ name: "{{ item }}"
+ assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
+ create_instance_profile: yes
+ managed_policy:
+ - AmazonEC2ContainerServiceRole
+ loop:
+ - "ansible-test-sts-{{ resource_prefix }}-test-policy"
+ - "ansible-test-sts-{{ resource_prefix }}-test-policy-2"
+ register: removed
+ until: removed is not failed
+ ignore_errors: yes
+ retries: 10
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml
new file mode 100644
index 0000000000..418d7ef3e8
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml
@@ -0,0 +1,68 @@
+- block:
+ - name: "New instance and don't wait for it to complete"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-no-wait"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: false
+ instance_type: "{{ ec2_instance_type }}"
+ register: in_test_vpc
+
+ - assert:
+ that:
+ - in_test_vpc is not failed
+ - in_test_vpc is changed
+ - in_test_vpc.instances is not defined
+ - in_test_vpc.instance_ids is defined
+ - in_test_vpc.instance_ids | length > 0
+
+ - name: "New instance and don't wait for it to complete ( check mode )"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-no-wait-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ wait: false
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "Facts for ec2 test instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-no-wait"
+ register: real_instance_fact
+ until: real_instance_fact.instances | length > 0
+ retries: 10
+
+ - name: "Facts for checkmode ec2 test instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-no-wait-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ real_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Terminate instances"
+ ec2_instance:
+ state: absent
+ instance_ids: "{{ in_test_vpc.instance_ids }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+
+ always:
+ - name: "Terminate instance_no_wait instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/main.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/main.yml
new file mode 100644
index 0000000000..e10aebcefe
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/main.yml
@@ -0,0 +1,48 @@
+---
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
+#
+# Please make sure you tag your instances with
+# tags:
+# "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+# And delete them based off that tag at the end of your specific set of tests
+#
+# ###############################################################################
+#
+# A Note about ec2 environment variable name preference:
+# - EC2_URL -> AWS_URL
+# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
+# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
+# - EC2_REGION -> AWS_REGION
+#
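+#
+# A sketch of the cleanup convention the per-host task files already follow
+# (see e.g. block_devices.yml): an always section that terminates anything
+# carrying the per-test tag, so parallel runs do not leak instances:
+#
+#   always:
+#     - ec2_instance:
+#         state: absent
+#         filters:
+#           "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+#         wait: yes
+#         ignore_errors: yes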
+
+- name: "Wrap up all tests and setup AWS credentials"
+ module_defaults:
+ group/aws:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token | default(omit) }}"
+ region: "{{ aws_region }}"
+ block:
+ - debug:
+ msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}"
+ - include_tasks: '{{ inventory_hostname }}.yml'
+ - debug:
+ msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}"
+
+ always:
+ - set_fact:
+ _role_complete: True
+ - vars:
+ completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}'
+ hosts_in_play: '{{ ansible_play_hosts_all | length }}'
+ debug:
+ msg: "{{ completed_hosts }} of {{ hosts_in_play }} complete"
+ - include_tasks: env_cleanup.yml
+ vars:
+ completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}'
+ hosts_in_play: '{{ ansible_play_hosts_all | length }}'
+ when:
+ - aws_cleanup
+ - completed_hosts == hosts_in_play
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml
new file mode 100644
index 0000000000..d38b53f76f
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml
@@ -0,0 +1,158 @@
+- block:
+ - name: "Make instance in the testing subnet created in the test VPC"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ network:
+ source_dest_check: false
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ register: in_test_vpc
+
+ - name: "Make instance in the testing subnet created in the test VPC(check mode)"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ network:
+ source_dest_check: false
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "Try to re-make the instance, hopefully this shows changed=False"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image }}"
+ user_data: |
+ #cloud-config
+ package_upgrade: true
+ package_update: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Something: else
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ register: remake_in_test_vpc
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that: not remake_in_test_vpc.changed
+ - name: "check that instance IDs match anyway"
+ assert:
+ that: 'remake_in_test_vpc.instance_ids[0] == in_test_vpc.instance_ids[0]'
+ - name: "check that source_dest_check was set to false"
+ assert:
+ that: 'not remake_in_test_vpc.instances[0].source_dest_check'
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Alter it by adding tags"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Another: thing
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ register: add_another_tag
+
+ - ec2_instance_info:
+ instance_ids: "{{ add_another_tag.instance_ids }}"
+ register: check_tags
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that:
+ - check_tags.instances[0].tags.Another == 'thing'
+ - check_tags.instances[0].tags.Something == 'else'
+
+ - name: "Purge a tag"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-basic-vpc-create"
+ image_id: "{{ ec2_ami_image }}"
+ purge_tags: true
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ Another: thing
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+
+ - ec2_instance_info:
+ instance_ids: "{{ add_another_tag.instance_ids }}"
+ register: check_tags
+
+ - name: "Remaking the same instance resulted in no changes"
+ assert:
+ that:
+ - "'Something' not in check_tags.instances[0].tags"
+
+ - name: "check that subnet-default public IP rule was followed"
+ assert:
+ that:
+ - check_tags.instances[0].public_dns_name == ""
+ - check_tags.instances[0].private_ip_address.startswith(subnet_b_startswith)
+ - check_tags.instances[0].subnet_id == testing_subnet_b.subnet.id
+ - name: "check that tags were applied"
+ assert:
+ that:
+ - check_tags.instances[0].tags.Name.startswith(resource_prefix)
+ - "'{{ check_tags.instances[0].state.name }}' in ['pending', 'running']"
+
+ - name: "Terminate instance"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: false
+ register: result
+ - assert:
+ that: result.changed
+
+ always:
+ - name: "Terminate tags_and_vpc_settings instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/termination_protection.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/termination_protection.yml
new file mode 100644
index 0000000000..e2d3728f48
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/termination_protection.yml
@@ -0,0 +1,101 @@
+- block:
+ - name: "Make termination-protected instance in the testing subnet created in the test VPC"
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-test-protected-instance-in-vpc"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ wait: yes
+ register: in_test_vpc
+
+ - name: "Make termination-protected instance in the testing subnet created in the test VPC(check mode)"
+ ec2_instance:
+ state: running
+ name: "{{ resource_prefix }}-test-protected-instance-in-vpc-checkmode"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ check_mode: yes
+
+ - name: "fact presented ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-protected-instance-in-vpc"
+ "instance-state-name": "running"
+ register: presented_instance_fact
+
+ - name: "fact checkmode ec2 instance"
+ ec2_instance_info:
+ filters:
+ "tag:Name": "{{ resource_prefix }}-test-protected-instance-in-vpc-checkmode"
+ register: checkmode_instance_fact
+
+ - name: "Confirm whether the check mode is working normally."
+ assert:
+ that:
+ - "{{ presented_instance_fact.instances | length }} > 0"
+ - "'{{ presented_instance_fact.instances.0.state.name }}' in ['running', 'pending']"
+ - "{{ checkmode_instance_fact.instances | length }} == 0"
+
+ - name: "Try to terminate the instance"
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-test-protected-instance-in-vpc"
+ image_id: "{{ ec2_ami_image }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ security_groups: "{{ sg.group_id }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: true
+ instance_type: "{{ ec2_instance_type }}"
+ register: bad_terminate
+ ignore_errors: yes
+
+ - name: "Cannot terminate protected instance"
+ assert:
+ that:
+ - bad_terminate is failed
+
+ - name: "Alter termination protection setting"
+ ec2_instance:
+ state: present
+ name: "{{ resource_prefix }}-test-protected-instance-in-vpc"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ termination_protection: false
+ instance_type: "{{ ec2_instance_type }}"
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+
+ - name: "Try to terminate the instance again (should work)"
+ ec2_instance:
+ state: absent
+ name: "{{ resource_prefix }}-test-protected-instance-in-vpc"
+ image_id: "{{ ec2_ami_image }}"
+ vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
+ instance_type: "{{ ec2_instance_type }}"
+ wait: false
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: terminate_results
+
+ - assert:
+ that: terminate_results is not failed
+
+ always:
+ - name: "Terminate termination_protection instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/version_fail.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/version_fail.yml
new file mode 100644
index 0000000000..67370ebe37
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/version_fail.yml
@@ -0,0 +1,29 @@
+- block:
+ - name: "create t3.nano with cpu options (fails gracefully)"
+ ec2_instance:
+ state: present
+ name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-ec2"
+ image_id: "{{ ec2_ami_image }}"
+ instance_type: "t3.nano"
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+ tags:
+ TestId: "{{ ec2_instance_tag_TestId }}"
+ register: ec2_instance_cpu_options_creation
+ ignore_errors: yes
+
+ - name: "check that graceful error message is returned when creation with cpu_options and old botocore"
+ assert:
+ that:
+ - ec2_instance_cpu_options_creation.failed
+ - 'ec2_instance_cpu_options_creation.msg == "cpu_options is only supported with botocore >= 1.10.16"'
+
+ always:
+ - name: "Terminate version_fail instances"
+ ec2_instance:
+ state: absent
+ filters:
+ "tag:TestId": "{{ ec2_instance_tag_TestId }}"
+ wait: yes
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml
new file mode 100644
index 0000000000..ae5bd78500
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml
@@ -0,0 +1,30 @@
+---
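+# Builds a throwaway virtualenv pinned to botocore < 1.10.16 and re-runs the
+# cpu_options test (version_fail.yml) with that interpreter, confirming that
+# ec2_instance fails gracefully with the "cpu_options is only supported with
+# botocore >= 1.10.16" message instead of succeeding.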
+- include_role:
+ name: 'setup_remote_tmp_dir'
+
+- set_fact:
+ virtualenv: "{{ remote_tmp_dir }}/virtualenv"
+ virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
+
+- set_fact:
+ virtualenv_interpreter: "{{ virtualenv }}/bin/python"
+
+- pip:
+ name: "virtualenv"
+
+- pip:
+ name:
+ - 'botocore<1.10.16'
+ - boto3
+ - coverage
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: version_fail.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- file:
+ state: absent
+ path: "{{ virtualenv }}"
diff --git a/test/integration/targets/incidental_ec2_instance/runme.sh b/test/integration/targets/incidental_ec2_instance/runme.sh
new file mode 100755
index 0000000000..aa324772bb
--- /dev/null
+++ b/test/integration/targets/incidental_ec2_instance/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+#
+# Beware: most of our tests here are run in parallel.
+# To add new tests you'll need to add a new host to the inventory and a matching
+# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
+
+
+set -eux
+
+export ANSIBLE_ROLES_PATH=../
+
+ansible-playbook main.yml -i inventory "$@"
diff --git a/test/integration/targets/incidental_hcloud_server/aliases b/test/integration/targets/incidental_hcloud_server/aliases
new file mode 100644
index 0000000000..6c43c27cf9
--- /dev/null
+++ b/test/integration/targets/incidental_hcloud_server/aliases
@@ -0,0 +1,2 @@
+cloud/hcloud
+shippable/hcloud/incidental
diff --git a/test/integration/targets/incidental_hcloud_server/defaults/main.yml b/test/integration/targets/incidental_hcloud_server/defaults/main.yml
new file mode 100644
index 0000000000..b9a9a8df7b
--- /dev/null
+++ b/test/integration/targets/incidental_hcloud_server/defaults/main.yml
@@ -0,0 +1,5 @@
+# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+---
+hcloud_prefix: "tests"
+hcloud_server_name: "{{hcloud_prefix}}-integration"
diff --git a/test/integration/targets/incidental_hcloud_server/tasks/main.yml b/test/integration/targets/incidental_hcloud_server/tasks/main.yml
new file mode 100644
index 0000000000..945df73020
--- /dev/null
+++ b/test/integration/targets/incidental_hcloud_server/tasks/main.yml
@@ -0,0 +1,565 @@
+# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+---
+- name: setup
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: absent
+ register: result
+- name: verify setup
+ assert:
+ that:
+ - result is success
+- name: test missing required parameters on create server
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ register: result
+ ignore_errors: yes
+- name: verify fail test missing required parameters on create server
+ assert:
+ that:
+ - result is failed
+ - 'result.msg == "missing required arguments: server_type, image"'
+
+- name: test create server with check mode
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ server_type: cx11
+ image: ubuntu-18.04
+ state: present
+ register: result
+ check_mode: yes
+- name: verify create server with check mode
+ assert:
+ that:
+ - result is changed
+
+- name: test create server
+ hcloud_server:
+ name: "{{ hcloud_server_name}}"
+ server_type: cx11
+ image: ubuntu-18.04
+ state: started
+ register: main_server
+- name: verify create server
+ assert:
+ that:
+ - main_server is changed
+ - main_server.hcloud_server.name == "{{ hcloud_server_name }}"
+ - main_server.hcloud_server.server_type == "cx11"
+ - main_server.hcloud_server.status == "running"
+ - main_server.root_password != ""
+
+- name: test create server idempotence
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: started
+ register: result
+- name: verify create server idempotence
+ assert:
+ that:
+ - result is not changed
+
+- name: test stop server with check mode
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: stopped
+ register: result
+ check_mode: yes
+- name: verify stop server with check mode
+ assert:
+ that:
+ - result is changed
+ - result.hcloud_server.status == "running"
+
+- name: test stop server
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: stopped
+ register: result
+- name: verify stop server
+ assert:
+ that:
+ - result is changed
+ - result.hcloud_server.status == "off"
+
+- name: test start server with check mode
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: started
+ register: result
+ check_mode: true
+- name: verify start server with check mode
+ assert:
+ that:
+ - result is changed
+
+- name: test start server
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: started
+ register: result
+- name: verify start server
+ assert:
+ that:
+ - result is changed
+ - result.hcloud_server.status == "running"
+
+- name: test start server idempotence
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: started
+ register: result
+- name: verify start server idempotence
+ assert:
+ that:
+ - result is not changed
+ - result.hcloud_server.status == "running"
+
+- name: test stop server by its id
+ hcloud_server:
+ id: "{{ main_server.hcloud_server.id }}"
+ state: stopped
+ register: result
+- name: verify stop server by its id
+ assert:
+ that:
+ - result is changed
+ - result.hcloud_server.status == "off"
+
+- name: test resize server running without force
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ server_type: "cx21"
+ state: present
+ register: result
+ check_mode: true
+- name: verify test resize server running without force
+ assert:
+ that:
+ - result is changed
+ - result.hcloud_server.server_type == "cx11"
+
+- name: test resize server with check mode
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ server_type: "cx21"
+ state: stopped
+ register: result
+ check_mode: true
+- name: verify resize server with check mode
+ assert:
+ that:
+ - result is changed
+
+- name: test resize server without disk
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ server_type: "cx21"
+ state: stopped
+ register: result
+- name: verify resize server without disk
+ assert:
+ that:
+ - result is changed
+ - result.hcloud_server.server_type == "cx21"
+
+- name: test resize server idempotence
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ server_type: "cx21"
+ state: stopped
+ register: result
+- name: verify resize server idempotence
+ assert:
+ that:
+ - result is not changed
+
+- name: test resize server to smaller plan
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ server_type: "cx11"
+ state: stopped
+ register: result
+- name: verify resize server to smaller plan
+ assert:
+ that:
+ - result is changed
+ - result.hcloud_server.server_type == "cx11"
+
+- name: test resize server with disk
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ server_type: "cx21"
+ upgrade_disk: true
+ state: stopped
+ register: result
+- name: verify resize server with disk
+ assert:
+ that:
+ - result is changed
+ - result.hcloud_server.server_type == "cx21"
+
+- name: test enable backups with check mode
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ backups: true
+ state: stopped
+ register: result
+ check_mode: true
+- name: verify enable backups with check mode
+ assert:
+ that:
+ - result is changed
+
+- name: test enable backups
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ backups: true
+ state: stopped
+ register: result
+- name: verify enable backups
+ assert:
+ that:
+ - result is changed
+ - result.hcloud_server.backup_window != ""
+
+- name: test enable backups idempotence
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ backups: true
+ state: stopped
+ register: result
+- name: verify enable backups idempotence
+ assert:
+ that:
+ - result is not changed
+ - result.hcloud_server.backup_window != ""
+
+- name: test rebuild server
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ image: ubuntu-18.04
+ state: rebuild
+ register: result_after_test
+- name: verify rebuild server
+ assert:
+ that:
+ - result_after_test is changed
+ - result.hcloud_server.id == result_after_test.hcloud_server.id
+
+- name: test rebuild server with check mode
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ image: ubuntu-18.04
+ state: rebuild
+ register: result_after_test
+ check_mode: true
+- name: verify rebuild server with check mode
+ assert:
+ that:
+ - result_after_test is changed
+
+- name: test update server protection both protection arguments are required
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ delete_protection: true
+ state: present
+ register: result_after_test
+ ignore_errors: true
+- name: verify update server protection both protection arguments are required
+ assert:
+ that:
+ - result_after_test is failed
+ - 'result_after_test.msg == "parameters are required together: delete_protection, rebuild_protection"'
+
+- name: test update server protection fails if they are not the same
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ delete_protection: true
+ rebuild_protection: false
+ state: present
+ register: result_after_test
+ ignore_errors: true
+- name: verify update server protection fails if they are not the same
+ assert:
+ that:
+ - result_after_test is failed
+
+- name: test update server protection
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ delete_protection: true
+ rebuild_protection: true
+ state: present
+ register: result_after_test
+ ignore_errors: true
+- name: verify update server protection
+ assert:
+ that:
+ - result_after_test is changed
+ - result_after_test.hcloud_server.delete_protection is sameas true
+ - result_after_test.hcloud_server.rebuild_protection is sameas true
+
+- name: test server without protection set to be idempotent
+ hcloud_server:
+ name: "{{hcloud_server_name}}"
+ register: result_after_test
+- name: verify test server without protection set to be idempotent
+ assert:
+ that:
+ - result_after_test is not changed
+ - result_after_test.hcloud_server.delete_protection is sameas true
+ - result_after_test.hcloud_server.rebuild_protection is sameas true
+
+- name: test delete server fails if it is protected
+ hcloud_server:
+ name: "{{hcloud_server_name}}"
+ state: absent
+ ignore_errors: yes
+ register: result
+- name: verify delete server fails if it is protected
+ assert:
+ that:
+ - result is failed
+ - 'result.msg == "server deletion is protected"'
+
+- name: test rebuild server fails if it is protected
+ hcloud_server:
+ name: "{{hcloud_server_name}}"
+ image: ubuntu-18.04
+ state: rebuild
+ ignore_errors: yes
+ register: result
+- name: verify rebuild server fails if it is protected
+ assert:
+ that:
+ - result is failed
+ - 'result.msg == "server rebuild is protected"'
+
+- name: test remove server protection
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ delete_protection: false
+ rebuild_protection: false
+ state: present
+ register: result_after_test
+ ignore_errors: true
+- name: verify remove server protection
+ assert:
+ that:
+ - result_after_test is changed
+ - result_after_test.hcloud_server.delete_protection is sameas false
+ - result_after_test.hcloud_server.rebuild_protection is sameas false
+
+- name: absent server
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: absent
+ register: result
+- name: verify absent server
+ assert:
+ that:
+ - result is success
+
+- name: test create server with ssh key
+ hcloud_server:
+ name: "{{ hcloud_server_name}}"
+ server_type: cx11
+ image: "ubuntu-18.04"
+ ssh_keys:
+ - ci@ansible.hetzner.cloud
+ state: started
+ register: main_server
+- name: verify create server with ssh key
+ assert:
+ that:
+ - main_server is changed
+ - main_server.hcloud_server.name == "{{ hcloud_server_name }}"
+ - main_server.hcloud_server.server_type == "cx11"
+ - main_server.hcloud_server.status == "running"
+ - main_server.root_password != ""
+
+- name: absent server
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: absent
+ register: result
+- name: verify absent server
+ assert:
+ that:
+ - result is success
+
+- name: test create server with rescue_mode
+ hcloud_server:
+ name: "{{ hcloud_server_name}}"
+ server_type: cx11
+ image: "ubuntu-18.04"
+ ssh_keys:
+ - ci@ansible.hetzner.cloud
+ rescue_mode: "linux64"
+ state: started
+ register: main_server
+- name: verify create server with rescue_mode
+ assert:
+ that:
+ - main_server is changed
+ - main_server.hcloud_server.name == "{{ hcloud_server_name }}"
+ - main_server.hcloud_server.server_type == "cx11"
+ - main_server.hcloud_server.status == "running"
+ - main_server.root_password != ""
+ - main_server.hcloud_server.rescue_enabled is sameas true
+
+- name: absent server
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: absent
+ register: result
+- name: verify absent server
+ assert:
+ that:
+ - result is success
+
+- name: setup server
+ hcloud_server:
+ name: "{{ hcloud_server_name}}"
+ server_type: cx11
+ image: ubuntu-18.04
+ state: started
+ register: main_server
+- name: verify setup server
+ assert:
+ that:
+ - main_server is changed
+ - main_server.hcloud_server.name == "{{ hcloud_server_name }}"
+ - main_server.hcloud_server.server_type == "cx11"
+ - main_server.hcloud_server.status == "running"
+ - main_server.root_password != ""
+
+- name: test activate rescue mode with check_mode
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ rescue_mode: "linux64"
+ ssh_keys:
+ - ci@ansible.hetzner.cloud
+ state: present
+ register: main_server
+ check_mode: true
+- name: verify activate rescue mode with check_mode
+ assert:
+ that:
+ - main_server is changed
+
+- name: test activate rescue mode
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ rescue_mode: "linux64"
+ ssh_keys:
+ - ci@ansible.hetzner.cloud
+ state: present
+ register: main_server
+- name: verify activate rescue mode
+ assert:
+ that:
+ - main_server is changed
+ - main_server.hcloud_server.rescue_enabled is sameas true
+
+- name: test disable rescue mode
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ ssh_keys:
+ - ci@ansible.hetzner.cloud
+ state: present
+ register: main_server
+- name: verify disable rescue mode
+ assert:
+ that:
+ - main_server is changed
+ - main_server.hcloud_server.rescue_enabled is sameas false
+
+- name: test activate rescue mode without ssh keys
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ rescue_mode: "linux64"
+ state: present
+ register: main_server
+- name: verify activate rescue mode without ssh keys
+ assert:
+ that:
+ - main_server is changed
+ - main_server.hcloud_server.rescue_enabled is sameas true
+
+- name: cleanup
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: absent
+ register: result
+- name: verify cleanup
+ assert:
+ that:
+ - result is success
+
+- name: test create server with labels
+ hcloud_server:
+ name: "{{ hcloud_server_name}}"
+ server_type: cx11
+ image: "ubuntu-18.04"
+ ssh_keys:
+ - ci@ansible.hetzner.cloud
+ labels:
+ key: value
+ mylabel: "val123"
+ state: started
+ register: main_server
+- name: verify create server with labels
+ assert:
+ that:
+ - main_server is changed
+ - main_server.hcloud_server.labels.key == "value"
+ - main_server.hcloud_server.labels.mylabel == "val123"
+
+- name: test update server with labels
+ hcloud_server:
+ name: "{{ hcloud_server_name}}"
+ server_type: cx11
+ image: "ubuntu-18.04"
+ ssh_keys:
+ - ci@ansible.hetzner.cloud
+ labels:
+ key: other
+ mylabel: "val123"
+ state: started
+ register: main_server
+- name: verify update server with labels
+ assert:
+ that:
+ - main_server is changed
+ - main_server.hcloud_server.labels.key == "other"
+ - main_server.hcloud_server.labels.mylabel == "val123"
+
+- name: test update server with labels in other order
+ hcloud_server:
+ name: "{{ hcloud_server_name}}"
+ server_type: cx11
+ image: "ubuntu-18.04"
+ ssh_keys:
+ - ci@ansible.hetzner.cloud
+ labels:
+ mylabel: "val123"
+ key: other
+ state: started
+ register: main_server
+- name: verify update server with labels in other order
+ assert:
+ that:
+ - main_server is not changed
+
+- name: cleanup with labels
+ hcloud_server:
+ name: "{{ hcloud_server_name }}"
+ state: absent
+ register: result
+- name: verify cleanup
+ assert:
+ that:
+ - result is success
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/aliases b/test/integration/targets/incidental_inventory_aws_ec2/aliases
new file mode 100644
index 0000000000..29f60feb44
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/incidental
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml
new file mode 100644
index 0000000000..8680c38d01
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/create_inventory_config.yml
@@ -0,0 +1,11 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ vars:
+ template_name: "../templates/{{ template | default('inventory.yml') }}"
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_ec2.yml
+ content: "{{ lookup('template', template_name) }}"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml
new file mode 100644
index 0000000000..f67fff1a93
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/empty_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: write inventory config file
+ copy:
+ dest: ../test.aws_ec2.yml
+ content: ""
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml
new file mode 100644
index 0000000000..07b0eec4c5
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/populate_cache.yml
@@ -0,0 +1,64 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ # Create new host, add it to inventory and then terminate it without updating the cache
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ no_log: yes
+
+ - name: create a new host
+ ec2:
+ image: '{{ image_id }}'
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml
new file mode 100644
index 0000000000..8a9b88937f
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/setup.yml
@@ -0,0 +1,62 @@
+- name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ no_log: yes
+
+- name: get image ID to create an instance
+ ec2_ami_info:
+ filters:
+ architecture: x86_64
+ owner-id: '125523088429'
+ virtualization-type: hvm
+ root-device-type: ebs
+ name: 'Fedora-Atomic-27*'
+ <<: *aws_connection_info
+ register: fedora_images
+
+- set_fact:
+ image_id: '{{ fedora_images.images.0.image_id }}'
+
+- name: create a VPC to work in
+ ec2_vpc_net:
+ cidr_block: 10.10.0.0/24
+ state: present
+ name: '{{ resource_prefix }}_setup'
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ <<: *aws_connection_info
+ register: setup_vpc
+
+- set_fact:
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+
+- name: create a subnet to use for creating an ec2 instance
+ ec2_vpc_subnet:
+ az: '{{ aws_region }}a'
+ tags: '{{ resource_prefix }}_setup'
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ cidr: 10.10.0.0/24
+ state: present
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ <<: *aws_connection_info
+ register: setup_subnet
+
+- set_fact:
+ subnet_id: '{{ setup_subnet.subnet.id }}'
+
+- name: create a security group to use for creating an ec2 instance
+ ec2_group:
+ name: '{{ resource_prefix }}_setup'
+ description: 'created by Ansible integration tests'
+ state: present
+ vpc_id: '{{ setup_vpc.vpc.id }}'
+ <<: *aws_connection_info
+ register: setup_sg
+
+- set_fact:
+ sg_id: '{{ setup_sg.group_id }}'
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml
new file mode 100644
index 0000000000..4c8240e46d
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/tear_down.yml
@@ -0,0 +1,39 @@
+- name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ no_log: yes
+
+- name: remove setup security group
+ ec2_group:
+ name: '{{ resource_prefix }}_setup'
+ description: 'created by Ansible integration tests'
+ state: absent
+ vpc_id: '{{ vpc_id }}'
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+- name: remove setup subnet
+ ec2_vpc_subnet:
+ az: '{{ aws_region }}a'
+ tags: '{{ resource_prefix }}_setup'
+ vpc_id: '{{ vpc_id }}'
+ cidr: 10.10.0.0/24
+ state: absent
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ <<: *aws_connection_info
+ ignore_errors: yes
+
+- name: remove setup VPC
+ ec2_vpc_net:
+ cidr_block: 10.10.0.0/24
+ state: absent
+ name: '{{ resource_prefix }}_setup'
+ resource_tags:
+ Name: '{{ resource_prefix }}_setup'
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
new file mode 100644
index 0000000000..cc1b9a5a5e
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml
@@ -0,0 +1,9 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert inventory was not populated by aws_ec2 inventory plugin
+ assert:
+ that:
+ - "'aws_ec2' not in groups"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml
new file mode 100644
index 0000000000..d83cb0bfe6
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_inventory_cache.yml
@@ -0,0 +1,18 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: assert cache was used to populate inventory
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+
+ - meta: refresh_inventory
+
+ - name: assert refresh_inventory updated the cache
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml
new file mode 100644
index 0000000000..73a67db065
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory.yml
@@ -0,0 +1,91 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ # Create new host, refresh inventory, remove host, refresh inventory
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ no_log: yes
+
+ - name: create a new host
+ ec2:
+ image: '{{ image_id }}'
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+ - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
new file mode 100644
index 0000000000..fdeeeeff42
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml
@@ -0,0 +1,79 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: no
+ environment: "{{ ansible_test.environment }}"
+ tasks:
+
+ - block:
+
+ # Create VPC, subnet, security group, and find image_id to create instance
+
+ - include_tasks: setup.yml
+
+ # Create new host, refresh inventory
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: '{{ aws_access_key }}'
+ aws_secret_key: '{{ aws_secret_key }}'
+ security_token: '{{ security_token }}'
+ region: '{{ aws_region }}'
+ no_log: yes
+
+ - name: create a new host
+ ec2:
+ image: '{{ image_id }}'
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ tag1: value1
+ tag2: value2
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ sg_id }}'
+ vpc_subnet_id: '{{ subnet_id }}'
+ <<: *aws_connection_info
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: register the keyed sg group name
+ set_fact:
+ sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}"
+
+ - name: register one of the keyed tag groups name
+ set_fact:
+ tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}"
+
+ - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars
+ assert:
+ that:
+ # There are 9 groups: all, ungrouped, aws_ec2, sg keyed group, 3 tag keyed group (one per tag), arch keyed group, constructed group
+ - "groups | length == 9"
+ - "groups[tag_group_name] | length == 1"
+ - "groups[sg_group_name] | length == 1"
+ - "groups.arch_x86_64 | length == 1"
+ - "groups.tag_with_name_key | length == 1"
+ - vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2'
+
+ always:
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: "{{ sg_id }}"
+ vpc_subnet_id: "{{ subnet_id }}"
+ <<: *aws_connection_info
+ ignore_errors: yes
+ when: setup_instance is defined
+
+ - include_tasks: tear_down.yml
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml
new file mode 100644
index 0000000000..6b46599b5b
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/playbooks/test_refresh_inventory.yml
@@ -0,0 +1,74 @@
+- name: test updating inventory
+ block:
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ - name: set connection information for all tasks
+ set_fact:
+ aws_connection_info: &aws_connection_info
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ no_log: yes
+
+ - name: create a new host
+ ec2:
+ image: "{{ images[aws_region] }}"
+ exact_count: 1
+ count_tag:
+ Name: '{{ resource_prefix }}'
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ instance_type: t2.micro
+ wait: yes
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ <<: *aws_connection_info
+ register: setup_instance
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory and is no longer empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "groups.aws_ec2 | length == 1"
+ - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
+
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ <<: *aws_connection_info
+
+ - meta: refresh_inventory
+
+ - name: assert group was populated with inventory but is empty
+ assert:
+ that:
+ - "'aws_ec2' in groups"
+ - "not groups.aws_ec2"
+
+ always:
+ - name: remove setup ec2 instance
+ ec2:
+ instance_type: t2.micro
+ instance_ids: '{{ setup_instance.instance_ids }}'
+ state: absent
+ wait: yes
+ instance_tags:
+ Name: '{{ resource_prefix }}'
+ group_id: '{{ setup_sg.group_id }}'
+ vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
+ <<: *aws_connection_info
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/runme.sh b/test/integration/targets/incidental_inventory_aws_ec2/runme.sh
new file mode 100755
index 0000000000..916f7e8f7a
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/runme.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# ensure test config is empty
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY_ENABLED=aws_ec2
+
+# test with default inventory file
+ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
+
+export ANSIBLE_INVENTORY=test.aws_ec2.yml
+
+# test empty inventory config
+ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
+
+# generate inventory config and test using it
+ansible-playbook playbooks/create_inventory_config.yml "$@"
+ansible-playbook playbooks/test_populating_inventory.yml "$@"
+
+# generate inventory config with caching and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml'" "$@"
+ansible-playbook playbooks/populate_cache.yml "$@"
+ansible-playbook playbooks/test_inventory_cache.yml "$@"
+
+# remove inventory cache
+rm -r aws_ec2_cache_dir/
+
+# generate inventory config with constructed features and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
+
+# cleanup inventory config
+ansible-playbook playbooks/empty_inventory_config.yml "$@"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml
new file mode 100644
index 0000000000..942edb309b
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory.yml
@@ -0,0 +1,12 @@
+plugin: aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+aws_security_token: '{{ security_token }}'
+regions:
+ - '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
+hostnames:
+ - tag:Name
+ - dns-name
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml
new file mode 100644
index 0000000000..e35bf9010b
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_cache.yml
@@ -0,0 +1,12 @@
+plugin: aws_ec2
+cache: True
+cache_plugin: jsonfile
+cache_connection: aws_ec2_cache_dir
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+aws_security_token: '{{ security_token }}'
+regions:
+ - '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml
new file mode 100644
index 0000000000..6befb4e339
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/templates/inventory_with_constructed.yml
@@ -0,0 +1,20 @@
+plugin: aws_ec2
+aws_access_key_id: '{{ aws_access_key }}'
+aws_secret_access_key: '{{ aws_secret_key }}'
+aws_security_token: '{{ security_token }}'
+regions:
+ - '{{ aws_region }}'
+filters:
+ tag:Name:
+ - '{{ resource_prefix }}'
+keyed_groups:
+ - key: 'security_groups|json_query("[].group_id")'
+ prefix: 'security_groups'
+ - key: 'tags'
+ prefix: 'tag'
+ - prefix: 'arch'
+ key: "architecture"
+compose:
+ test_compose_var_sum: tags.tag1 + tags.tag2
+groups:
+ tag_with_name_key: "'Name' in (tags | list)"
diff --git a/test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml b/test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_aws_ec2/test.aws_ec2.yml
diff --git a/test/integration/targets/incidental_inventory_foreman/aliases b/test/integration/targets/incidental_inventory_foreman/aliases
new file mode 100644
index 0000000000..c28a056e81
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_foreman/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/incidental
+cloud/foreman
+destructive
diff --git a/test/integration/targets/incidental_inventory_foreman/ansible.cfg b/test/integration/targets/incidental_inventory_foreman/ansible.cfg
new file mode 100644
index 0000000000..63e24c4bd0
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_foreman/ansible.cfg
@@ -0,0 +1,5 @@
+[defaults]
+inventory = test-config.foreman.yaml
+
+[inventory]
+enable_plugins = foreman
diff --git a/test/integration/targets/incidental_inventory_foreman/inspect_cache.yml b/test/integration/targets/incidental_inventory_foreman/inspect_cache.yml
new file mode 100644
index 0000000000..c91f4c3868
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_foreman/inspect_cache.yml
@@ -0,0 +1,31 @@
+---
+- hosts: localhost
+ vars:
+ foreman_stub_host: "{{ lookup('env', 'FOREMAN_HOST') }}"
+ foreman_stub_port: "{{ lookup('env', 'FOREMAN_PORT') }}"
+ foreman_stub_api_path: /api/v2
+ cached_hosts_key: "http://{{ foreman_stub_host }}:{{ foreman_stub_port }}{{ foreman_stub_api_path }}/hosts"
+ tasks:
+ - name: verify a cache file was created
+ find:
+ path:
+ - ./foreman_cache
+ register: matching_files
+
+ - assert:
+ that:
+ - matching_files.matched == 1
+ - name: read the cached inventory
+ set_fact:
+ contents: "{{ lookup('file', matching_files.files.0.path) }}"
+
+ - name: extract all the host names
+ set_fact:
+ cached_hosts: "{{ contents[cached_hosts_key] | json_query('[*].name') }}"
+
+ - assert:
+ that:
+ "'{{ item }}' in cached_hosts"
+ loop:
+ - "v6.example-780.com"
+ - "c4.j1.y5.example-487.com"
diff --git a/test/integration/targets/incidental_inventory_foreman/runme.sh b/test/integration/targets/incidental_inventory_foreman/runme.sh
new file mode 100755
index 0000000000..ba94a9360f
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_foreman/runme.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
+
+set -euo pipefail
+
+export ANSIBLE_INVENTORY
+export ANSIBLE_PYTHON_INTERPRETER
+
+unset ANSIBLE_INVENTORY
+unset ANSIBLE_PYTHON_INTERPRETER
+
+export ANSIBLE_CONFIG=ansible.cfg
+export FOREMAN_HOST="${FOREMAN_HOST:-localhost}"
+export FOREMAN_PORT="${FOREMAN_PORT:-8080}"
+FOREMAN_CONFIG=test-config.foreman.yaml
+
+# Set inventory caching environment variables to populate a jsonfile cache
+export ANSIBLE_INVENTORY_CACHE=True
+export ANSIBLE_INVENTORY_CACHE_PLUGIN=jsonfile
+export ANSIBLE_INVENTORY_CACHE_CONNECTION=./foreman_cache
+
+# flag for checking whether cleanup has already fired
+_is_clean=
+
+function _cleanup() {
+ [[ -n "$_is_clean" ]] && return # don't double-clean
+ echo Cleanup: removing $FOREMAN_CONFIG...
+ rm -vf "$FOREMAN_CONFIG"
+ unset ANSIBLE_CONFIG
+ unset FOREMAN_HOST
+ unset FOREMAN_PORT
+ unset FOREMAN_CONFIG
+ _is_clean=1
+}
+trap _cleanup INT TERM EXIT
+
+cat > "$FOREMAN_CONFIG" <<FOREMAN_YAML
+plugin: foreman
+url: http://${FOREMAN_HOST}:${FOREMAN_PORT}
+user: ansible-tester
+password: secure
+validate_certs: False
+FOREMAN_YAML
+
+ansible-playbook test_foreman_inventory.yml --connection=local "$@"
+ansible-playbook inspect_cache.yml --connection=local "$@"
+
+# remove inventory cache
+rm -r ./foreman_cache
diff --git a/test/integration/targets/incidental_inventory_foreman/test_foreman_inventory.yml b/test/integration/targets/incidental_inventory_foreman/test_foreman_inventory.yml
new file mode 100644
index 0000000000..d5eeed4f8b
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_foreman/test_foreman_inventory.yml
@@ -0,0 +1,59 @@
+---
+- hosts: localhost
+ vars:
+ foreman_stub_host: "{{ lookup('env', 'FOREMAN_HOST') }}"
+ foreman_stub_port: "{{ lookup('env', 'FOREMAN_PORT') }}"
+ foreman_stub_api_path: /api/v2
+ foreman_stub_host_uri: "http://{{ foreman_stub_host }}:{{ foreman_stub_port }}"
+ foreman_stub_api_uri: "{{ foreman_stub_host_uri }}{{ foreman_stub_api_path }}"
+ foreman_stub_heartbeat_uri: "{{ foreman_stub_host_uri }}/ping"
+ tasks:
+ - debug:
+ msg: >-
+ Foreman host: {{ foreman_stub_host }} |
+ Foreman port: {{ foreman_stub_port }} |
+ API path: {{ foreman_stub_api_path }} |
+ Foreman API URL: {{ foreman_stub_api_uri }}
+
+ - name: Wait for Foreman API stub to come up online
+ wait_for:
+ host: "{{ foreman_stub_host }}"
+ port: "{{ foreman_stub_port }}"
+ state: started
+
+ # smoke test that flask app is serving
+ - name: Smoke test HTTP response from Foreman stub
+ uri:
+ url: "{{ foreman_stub_heartbeat_uri }}"
+ return_content: yes
+ register: heartbeat_resp
+ failed_when: >
+ heartbeat_resp.json.status != 'ok' or heartbeat_resp.json.response != 'pong'
+
+ #### Testing start
+ - name: >
+ Check that there are 'foreman_pgagne_sats' and 'foreman_base'
+ groups present in inventory
+ assert:
+ that: >
+ '{{ item }}' in groups
+ with_items:
+ - foreman_pgagne_sats
+ - foreman_base
+
+ - name: Check that host are in appropriate groups
+ assert:
+ that: >
+ '{{ item.key }}' in groups['{{ item.value }}']
+ with_dict:
+ v6.example-780.com: foreman_base
+ c4.j1.y5.example-487.com: ungrouped
+
+ - name: Check host UUIDs
+ assert:
+ that: >
+ hostvars['{{ item.key }}']['foreman_subscription_facet_attributes']['uuid'] == '{{ item.value }}'
+ with_dict:
+ v6.example-780.com: 2c72fa49-995a-4bbf-bda0-684c7048ad9f
+ c4.j1.y5.example-487.com: 0a494b6e-7e90-4ed2-8edc-43a41436a242
+ #### Testing end
diff --git a/test/integration/targets/incidental_inventory_vmware_vm_inventory/aliases b/test/integration/targets/incidental_inventory_vmware_vm_inventory/aliases
new file mode 100644
index 0000000000..420e0cddd5
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_vmware_vm_inventory/aliases
@@ -0,0 +1,3 @@
+shippable/vcenter/incidental
+cloud/vcenter
+destructive
diff --git a/test/integration/targets/incidental_inventory_vmware_vm_inventory/ansible.cfg b/test/integration/targets/incidental_inventory_vmware_vm_inventory/ansible.cfg
new file mode 100644
index 0000000000..158f5849fa
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_vmware_vm_inventory/ansible.cfg
@@ -0,0 +1,8 @@
+[defaults]
+inventory = test-config.vmware.yaml
+
+[inventory]
+enable_plugins = vmware_vm_inventory
+cache = True
+cache_plugin = jsonfile
+cache_connection = inventory_cache
diff --git a/test/integration/targets/incidental_inventory_vmware_vm_inventory/runme.sh b/test/integration/targets/incidental_inventory_vmware_vm_inventory/runme.sh
new file mode 100755
index 0000000000..e220624a59
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_vmware_vm_inventory/runme.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+
+[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
+
+set -euo pipefail
+
+# Required to differentiate between Python 2 and 3 environments
+PYTHON=${ANSIBLE_TEST_PYTHON_INTERPRETER:-python}
+
+export ANSIBLE_CONFIG=ansible.cfg
+export VMWARE_SERVER="${VCENTER_HOSTNAME}"
+export VMWARE_USERNAME="${VCENTER_USERNAME}"
+export VMWARE_PASSWORD="${VCENTER_PASSWORD}"
+port=5000
+VMWARE_CONFIG=test-config.vmware.yaml
+inventory_cache="$(pwd)/inventory_cache"
+
+cat > "$VMWARE_CONFIG" <<VMWARE_YAML
+plugin: vmware_vm_inventory
+strict: False
+validate_certs: False
+with_tags: False
+VMWARE_YAML
+
+cleanup() {
+ echo "Cleanup"
+ if [ -f "${VMWARE_CONFIG}" ]; then
+ rm -f "${VMWARE_CONFIG}"
+ fi
+ if [ -d "${inventory_cache}" ]; then
+ echo "Removing ${inventory_cache}"
+ rm -rf "${inventory_cache}"
+ fi
+ echo "Done"
+ exit 0
+}
+
+trap cleanup INT TERM EXIT
+
+echo "DEBUG: Using ${VCENTER_HOSTNAME} with username ${VCENTER_USERNAME} and password ${VCENTER_PASSWORD}"
+
+echo "Kill all previous instances"
+curl "http://${VCENTER_HOSTNAME}:${port}/killall" > /dev/null 2>&1
+
+echo "Start new VCSIM server"
+curl "http://${VCENTER_HOSTNAME}:${port}/spawn?datacenter=1&cluster=1&folder=0" > /dev/null 2>&1
+
+echo "Debugging new instances"
+curl "http://${VCENTER_HOSTNAME}:${port}/govc_find"
+
+# Get inventory
+ansible-inventory -i ${VMWARE_CONFIG} --list
+
+echo "Check if cache is working for inventory plugin"
+if [ ! -n "$(find "${inventory_cache}" -maxdepth 1 -name 'vmware_vm_inventory_*' -print -quit)" ]; then
+ echo "Cache directory not found. Please debug"
+ exit 1
+fi
+echo "Cache is working"
+
+# Get inventory using YAML
+ansible-inventory -i ${VMWARE_CONFIG} --list --yaml
+
+# Install TOML for --toml
+# capture the grep status explicitly so a missing package does not abort the script under 'set -e'
+TOML_TEST_RESULT=0
+${PYTHON} -m pip freeze | grep toml > /dev/null 2>&1 || TOML_TEST_RESULT=$?
+if [ $TOML_TEST_RESULT -ne 0 ]; then
+ echo "Installing TOML package"
+ ${PYTHON} -m pip install toml
+else
+ echo "TOML package already exists, skipping installation"
+fi
+
+# Get inventory using TOML
+TOML_INVENTORY_LIST_RESULT=0
+ansible-inventory -i ${VMWARE_CONFIG} --list --toml || TOML_INVENTORY_LIST_RESULT=$?
+if [ $TOML_INVENTORY_LIST_RESULT -ne 0 ]; then
+ echo "Inventory plugin failed to list inventory host using --toml, please debug"
+ exit 1
+fi
+
+# Test playbook with given inventory
+ansible-playbook -i ${VMWARE_CONFIG} test_vmware_vm_inventory.yml --connection=local "$@"
diff --git a/test/integration/targets/incidental_inventory_vmware_vm_inventory/test_vmware_vm_inventory.yml b/test/integration/targets/incidental_inventory_vmware_vm_inventory/test_vmware_vm_inventory.yml
new file mode 100644
index 0000000000..88e2be9bd1
--- /dev/null
+++ b/test/integration/targets/incidental_inventory_vmware_vm_inventory/test_vmware_vm_inventory.yml
@@ -0,0 +1,24 @@
+# Test code for the vmware guest dynamic plugin module
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+---
+- name: Test VMware Guest Dynamic Inventory Plugin
+ hosts: localhost
+ tasks:
+ - name: store the vcenter container ip
+ set_fact:
+ vcsim: "{{ lookup('env', 'VCENTER_HOSTNAME') }}"
+
+ - name: Check that there are 'all' and 'otherGuest' groups present in inventory
+ assert:
+ that: "'{{ item }}' in {{ groups.keys() | list }}"
+ with_items:
+ - all
+ - otherGuest
+
+ - name: Check if Hostname and other details are populated in hostvars
+ assert:
+ that:
+ - hostvars[item].name is defined
+ with_items: "{{ groups['all'] }}"
diff --git a/test/integration/targets/incidental_k8s/README.md b/test/integration/targets/incidental_k8s/README.md
new file mode 100644
index 0000000000..7cb72b16e2
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/README.md
@@ -0,0 +1,23 @@
+Wait tests
+----------
+
+Wait tests require at least one node, and don't work on the normal k8s
+openshift-origin container as provided by `ansible-test --docker -v k8s`.
+
+minikube, Kubernetes from Docker, or any other Kubernetes service will
+suffice.
+
+If kubectl is already using the right config file and context, you can
+just do
+
+```
+cd test/integration/targets/incidental_k8s
+./runme.sh -vv
+```
+
+Otherwise, set one or both of `K8S_AUTH_KUBECONFIG` and `K8S_AUTH_CONTEXT`
+and use the same command, as in the sketch below.
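+
+A minimal sketch of that workflow; the kubeconfig path and context name here
+are placeholders, not values shipped with this repository:
+
+```
+export K8S_AUTH_KUBECONFIG=~/.kube/config
+export K8S_AUTH_CONTEXT=my-test-context
+cd test/integration/targets/incidental_k8s
+./runme.sh -vv
+```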
+
+
+
+
diff --git a/test/integration/targets/incidental_k8s/aliases b/test/integration/targets/incidental_k8s/aliases
new file mode 100644
index 0000000000..79d2b306a4
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/aliases
@@ -0,0 +1,2 @@
+cloud/openshift
+shippable/cloud/incidental
diff --git a/test/integration/targets/incidental_k8s/defaults/main.yml b/test/integration/targets/incidental_k8s/defaults/main.yml
new file mode 100644
index 0000000000..68fde7c412
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/defaults/main.yml
@@ -0,0 +1,32 @@
+recreate_crd_default_merge_expectation: recreate_crd is not failed
+
+k8s_pod_metadata:
+ labels:
+ app: "{{ k8s_pod_name }}"
+
+k8s_pod_spec:
+ containers:
+ - image: "{{ k8s_pod_image }}"
+ imagePullPolicy: Always
+ name: "{{ k8s_pod_name }}"
+ command: "{{ k8s_pod_command }}"
+ readinessProbe:
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - /bin/true
+ resources:
+ limits:
+ cpu: "100m"
+ memory: "100Mi"
+ ports: "{{ k8s_pod_ports }}"
+
+k8s_pod_command: []
+
+k8s_pod_ports: []
+
+k8s_pod_template:
+ metadata: "{{ k8s_pod_metadata }}"
+ spec: "{{ k8s_pod_spec }}"
+
+k8s_openshift: yes
diff --git a/test/integration/targets/incidental_k8s/files/crd-resource.yml b/test/integration/targets/incidental_k8s/files/crd-resource.yml
new file mode 100644
index 0000000000..9804d4d14e
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/files/crd-resource.yml
@@ -0,0 +1,20 @@
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+ name: acme-crt
+spec:
+ secretName: acme-crt-secret
+ dnsNames:
+ - foo.example.com
+ - bar.example.com
+ acme:
+ config:
+ - ingressClass: nginx
+ domains:
+ - foo.example.com
+ - bar.example.com
+ issuerRef:
+ name: letsencrypt-prod
+ # We can reference ClusterIssuers by changing the kind here.
+ # The default value is Issuer (i.e. a locally namespaced Issuer)
+ kind: Issuer
diff --git a/test/integration/targets/incidental_k8s/files/kuard-extra-property.yml b/test/integration/targets/incidental_k8s/files/kuard-extra-property.yml
new file mode 100644
index 0000000000..2d5b799434
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/files/kuard-extra-property.yml
@@ -0,0 +1,21 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: kuard
+ name: kuard
+ namespace: default
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: kuard
+ unwanted: value
+ template:
+ metadata:
+ labels:
+ app: kuard
+ spec:
+ containers:
+ - image: gcr.io/kuar-demo/kuard-amd64:1
+ name: kuard
diff --git a/test/integration/targets/incidental_k8s/files/kuard-invalid-type.yml b/test/integration/targets/incidental_k8s/files/kuard-invalid-type.yml
new file mode 100644
index 0000000000..7a5f73d910
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/files/kuard-invalid-type.yml
@@ -0,0 +1,20 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: kuard
+ name: kuard
+ namespace: default
+spec:
+ replicas: hello
+ selector:
+ matchLabels:
+ app: kuard
+ template:
+ metadata:
+ labels:
+ app: kuard
+ spec:
+ containers:
+ - image: gcr.io/kuar-demo/kuard-amd64:1
+ name: kuard
diff --git a/test/integration/targets/incidental_k8s/files/setup-crd.yml b/test/integration/targets/incidental_k8s/files/setup-crd.yml
new file mode 100644
index 0000000000..a8e2d51e80
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/files/setup-crd.yml
@@ -0,0 +1,14 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: certificates.certmanager.k8s.io
+spec:
+ group: certmanager.k8s.io
+ version: v1alpha1
+ scope: Namespaced
+ names:
+ kind: Certificate
+ plural: certificates
+ shortNames:
+ - cert
+ - certs
diff --git a/test/integration/targets/incidental_k8s/meta/main.yml b/test/integration/targets/incidental_k8s/meta/main.yml
new file mode 100644
index 0000000000..1810d4bec9
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/test/integration/targets/incidental_k8s/tasks/append_hash.yml b/test/integration/targets/incidental_k8s/tasks/append_hash.yml
new file mode 100644
index 0000000000..876e876a29
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/append_hash.yml
@@ -0,0 +1,68 @@
+- block:
+ - name: Ensure that append_hash namespace exists
+ k8s:
+ kind: Namespace
+ name: append-hash
+
+ - name: create k8s_resource variable
+ set_fact:
+ k8s_resource:
+ metadata:
+ name: config-map-test
+ namespace: append-hash
+ apiVersion: v1
+ kind: ConfigMap
+ data:
+ hello: world
+
+ - name: Create config map
+ k8s:
+ definition: "{{ k8s_resource }}"
+ append_hash: yes
+ register: k8s_configmap1
+
+ - name: check configmap is created with a hash
+ assert:
+ that:
+ - k8s_configmap1 is changed
+ - k8s_configmap1.result.metadata.name != 'config-map-test'
+ - k8s_configmap1.result.metadata.name[:-10] == 'config-map-test-'
+
+ - name: recreate same config map
+ k8s:
+ definition: "{{ k8s_resource }}"
+ append_hash: yes
+ register: k8s_configmap2
+
+ - name: check configmaps are different
+ assert:
+ that:
+ - k8s_configmap2 is not changed
+ - k8s_configmap1.result.metadata.name == k8s_configmap2.result.metadata.name
+
+ - name: add key to config map
+ k8s:
+ definition:
+ metadata:
+ name: config-map-test
+ namespace: append-hash
+ apiVersion: v1
+ kind: ConfigMap
+ data:
+ hello: world
+ another: value
+ append_hash: yes
+ register: k8s_configmap3
+
+ - name: check configmaps are different
+ assert:
+ that:
+ - k8s_configmap3 is changed
+ - k8s_configmap1.result.metadata.name != k8s_configmap3.result.metadata.name
+
+ always:
+ - name: ensure that namespace is removed
+ k8s:
+ kind: Namespace
+ name: append-hash
+ state: absent
diff --git a/test/integration/targets/incidental_k8s/tasks/apply.yml b/test/integration/targets/incidental_k8s/tasks/apply.yml
new file mode 100644
index 0000000000..cf51123124
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/apply.yml
@@ -0,0 +1,277 @@
+- block:
+ - python_requirements_info:
+ dependencies:
+ - openshift
+ - kubernetes
+
+ - set_fact:
+ apply_namespace: apply
+
+ - name: ensure namespace exists
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ apply_namespace }}"
+
+ - name: add a configmap
+ k8s:
+ name: "apply-configmap"
+ namespace: "{{ apply_namespace }}"
+ definition:
+ kind: ConfigMap
+ apiVersion: v1
+ data:
+ one: "1"
+ two: "2"
+ three: "3"
+ apply: yes
+ register: k8s_configmap
+
+ - name: check configmap was created
+ assert:
+ that:
+ - k8s_configmap is changed
+ - k8s_configmap.result.metadata.annotations|default(False)
+
+ - name: add same configmap again
+ k8s:
+ definition:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "apply-configmap"
+ namespace: "{{ apply_namespace }}"
+ data:
+ one: "1"
+ two: "2"
+ three: "3"
+ apply: yes
+ register: k8s_configmap_2
+
+ - name: check nothing changed
+ assert:
+ that:
+ - k8s_configmap_2 is not changed
+
+ - name: add same configmap again with check mode on
+ k8s:
+ definition:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "apply-configmap"
+ namespace: "{{ apply_namespace }}"
+ data:
+ one: "1"
+ two: "2"
+ three: "3"
+ apply: yes
+ check_mode: yes
+ register: k8s_configmap_check
+
+ - name: check nothing changed
+ assert:
+ that:
+ - k8s_configmap_check is not changed
+
+ - name: add same configmap again but using name and namespace args
+ k8s:
+ name: "apply-configmap"
+ namespace: "{{ apply_namespace }}"
+ definition:
+ kind: ConfigMap
+ apiVersion: v1
+ data:
+ one: "1"
+ two: "2"
+ three: "3"
+ apply: yes
+ register: k8s_configmap_2a
+
+ - name: check nothing changed
+ assert:
+ that:
+ - k8s_configmap_2a is not changed
+
+ - name: update configmap
+ k8s:
+ definition:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "apply-configmap"
+ namespace: "{{ apply_namespace }}"
+ data:
+ one: "1"
+ three: "3"
+ four: "4"
+ apply: yes
+ register: k8s_configmap_3
+
+ - name: ensure that configmap has been correctly updated
+ assert:
+ that:
+ - k8s_configmap_3 is changed
+ - "'four' in k8s_configmap_3.result.data"
+ - "'two' not in k8s_configmap_3.result.data"
+
+ - name: add a service
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8080
+ targetPort: 8080
+ type: NodePort
+ apply: yes
+ register: k8s_service
+
+ - name: add exactly same service
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8080
+ targetPort: 8080
+ type: NodePort
+ apply: yes
+ register: k8s_service_2
+
+ - name: check nothing changed
+ assert:
+ that:
+ - k8s_service_2 is not changed
+
+ - name: change service ports
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8081
+ targetPort: 8081
+ type: NodePort
+ apply: yes
+ register: k8s_service_3
+
+ - name: check ports are correct
+ assert:
+ that:
+ - k8s_service_3 is changed
+ - k8s_service_3.result.spec.ports | length == 1
+ - k8s_service_3.result.spec.ports[0].port == 8081
+
+ - name: insert new service port
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: mesh
+ port: 8080
+ targetPort: 8080
+ - name: http
+ port: 8081
+ targetPort: 8081
+ type: NodePort
+ apply: yes
+ register: k8s_service_4
+
+ - name: check ports are correct
+ assert:
+ that:
+ - k8s_service_4 is changed
+ - k8s_service_4.result.spec.ports | length == 2
+ - k8s_service_4.result.spec.ports[0].port == 8080
+ - k8s_service_4.result.spec.ports[1].port == 8081
+
+ - name: remove new service port (check mode)
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8081
+ targetPort: 8081
+ type: NodePort
+ apply: yes
+ check_mode: yes
+ register: k8s_service_check
+
+ - name: check ports are correct
+ assert:
+ that:
+ - k8s_service_check is changed
+ - k8s_service_check.result.spec.ports | length == 1
+ - k8s_service_check.result.spec.ports[0].port == 8081
+
+ - name: remove new service port
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apply-svc
+ namespace: "{{ apply_namespace }}"
+ spec:
+ selector:
+ app: whatever
+ ports:
+ - name: http
+ port: 8081
+ targetPort: 8081
+ type: NodePort
+ apply: yes
+ register: k8s_service_5
+
+ - name: check ports are correct
+ assert:
+ that:
+ - k8s_service_5 is changed
+ - k8s_service_5.result.spec.ports | length == 1
+ - k8s_service_5.result.spec.ports[0].port == 8081
+
+ always:
+ - name: remove namespace
+ k8s:
+ kind: Namespace
+ name: "{{ apply_namespace }}"
+ state: absent
diff --git a/test/integration/targets/incidental_k8s/tasks/crd.yml b/test/integration/targets/incidental_k8s/tasks/crd.yml
new file mode 100644
index 0000000000..c9e47632f9
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/crd.yml
@@ -0,0 +1,71 @@
+# TODO: This is the only way I could get the kubeconfig, I don't know why. Running the lookup outside of debug seems to return an empty string
+#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
+# register: kubeconfig
+
+# Kubernetes resources
+
+- block:
+ - name: Create a namespace
+ k8s:
+ name: crd
+ kind: Namespace
+
+ - name: install custom resource definitions
+ k8s:
+ definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}"
+
+ - name: pause 5 seconds to avoid race condition
+ pause:
+ seconds: 5
+
+ - name: create custom resource definition
+ k8s:
+ definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
+ namespace: crd
+ apply: "{{ create_crd_with_apply | default(omit) }}"
+ register: create_crd
+
+ - name: patch custom resource definition
+ k8s:
+ definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
+ namespace: crd
+ register: recreate_crd
+ ignore_errors: yes
+
+ - name: assert that recreating crd is as expected
+ assert:
+ that:
+ - recreate_crd_default_merge_expectation
+
+ - block:
+ - name: recreate custom resource definition with merge_type
+ k8s:
+ definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
+ merge_type: merge
+ namespace: crd
+ register: recreate_crd_with_merge
+
+ - name: recreate custom resource definition with merge_type list
+ k8s:
+ definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
+ merge_type:
+ - strategic-merge
+ - merge
+ namespace: crd
+ register: recreate_crd_with_merge_list
+ when: recreate_crd is successful
+
+
+ - name: remove crd
+ k8s:
+ definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
+ namespace: crd
+ state: absent
+
+ always:
+ - name: remove crd namespace
+ k8s:
+ kind: Namespace
+ name: crd
+ state: absent
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_k8s/tasks/delete.yml b/test/integration/targets/incidental_k8s/tasks/delete.yml
new file mode 100644
index 0000000000..fef6e5e9f6
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/delete.yml
@@ -0,0 +1,101 @@
+- name: ensure that there are actually some nodes
+ k8s_info:
+ kind: Node
+ register: nodes
+
+- block:
+ - set_fact:
+ delete_namespace: delete
+
+ - name: ensure namespace exists
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ delete_namespace }}"
+
+ - name: add a daemonset
+ k8s:
+ definition:
+ apiVersion: extensions/v1beta1
+ kind: DaemonSet
+ metadata:
+ name: delete-daemonset
+ namespace: "{{ delete_namespace }}"
+ spec:
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ wait_timeout: 180
+ vars:
+ k8s_pod_name: delete-ds
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
+ register: ds
+
+ - name: check that daemonset wait worked
+ assert:
+ that:
+ - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
+
+ - name: check if pods exist
+ k8s_info:
+ namespace: "{{ delete_namespace }}"
+ kind: Pod
+ label_selectors:
+ - "app={{ k8s_pod_name }}"
+ vars:
+ k8s_pod_name: delete-ds
+ register: pods_create
+
+ - name: assert that there are pods
+ assert:
+ that:
+ - pods_create.resources
+
+ - name: remove the daemonset
+ k8s:
+ kind: DaemonSet
+ name: delete-daemonset
+ namespace: "{{ delete_namespace }}"
+ state: absent
+ wait: yes
+
+ - name: show status of pods
+ k8s_info:
+ namespace: "{{ delete_namespace }}"
+ kind: Pod
+ label_selectors:
+ - "app={{ k8s_pod_name }}"
+ vars:
+ k8s_pod_name: delete-ds
+
+ - name: wait for background deletion
+ pause:
+ seconds: 30
+
+ - name: check if pods still exist
+ k8s_info:
+ namespace: "{{ delete_namespace }}"
+ kind: Pod
+ label_selectors:
+ - "app={{ k8s_pod_name }}"
+ vars:
+ k8s_pod_name: delete-ds
+ register: pods_delete
+
+ - name: assert that deleting the daemonset deleted the pods
+ assert:
+ that:
+ - not pods_delete.resources
+
+ always:
+ - name: remove namespace
+ k8s:
+ kind: Namespace
+ name: "{{ delete_namespace }}"
+ state: absent
+
+ when: (nodes.resources | length) > 0
diff --git a/test/integration/targets/incidental_k8s/tasks/full_test.yml b/test/integration/targets/incidental_k8s/tasks/full_test.yml
new file mode 100644
index 0000000000..fdf3d700dc
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/full_test.yml
@@ -0,0 +1,375 @@
+# TODO: This is the only way I could get the kubeconfig, I don't know why. Running the lookup outside of debug seems to return an empty string
+#- debug: msg={{ lookup('env', 'K8S_AUTH_KUBECONFIG') }}
+# register: kubeconfig
+
+# Kubernetes resources
+
+- include_tasks: delete.yml
+- include_tasks: apply.yml
+- include_tasks: waiter.yml
+
+- block:
+ - name: Create a namespace
+ k8s:
+ name: testing
+ kind: Namespace
+ register: output
+
+ - name: show output
+ debug:
+ var: output
+
+ - name: Setting validate_certs to true causes a failure
+ k8s:
+ name: testing
+ kind: Namespace
+ validate_certs: yes
+ ignore_errors: yes
+ register: output
+
+ - name: assert that validate_certs caused a failure (and therefore was correctly translated to verify_ssl)
+ assert:
+ that:
+ - output is failed
+
+ - name: k8s_info works with empty resources
+ k8s_info:
+ kind: Deployment
+ namespace: testing
+ api_version: extensions/v1beta1
+ register: k8s_info
+
+ - name: assert that k8s_info is in correct format
+ assert:
+ that:
+ - "'resources' in k8s_info"
+ - not k8s_info.resources
+
+ - name: Create a service
+ k8s:
+ state: present
+ resource_definition: &svc
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: web
+ namespace: testing
+ labels:
+ app: galaxy
+ service: web
+ spec:
+ selector:
+ app: galaxy
+ service: web
+ ports:
+ - protocol: TCP
+ targetPort: 8000
+ name: port-8000-tcp
+ port: 8000
+ register: output
+
+ - name: show output
+ debug:
+ var: output
+
+ - name: Create the service again
+ k8s:
+ state: present
+ resource_definition: *svc
+ register: output
+
+ - name: Service creation should be idempotent
+ assert:
+ that: not output.changed
+
+ - name: Create a ConfigMap
+ k8s:
+ kind: ConfigMap
+ name: test-force-update
+ namespace: testing
+ definition:
+ data:
+ key: value
+
+ - name: Force update ConfigMap
+ k8s:
+ kind: ConfigMap
+ name: test-force-update
+ namespace: testing
+ definition:
+ data:
+ key: newvalue
+ force: yes
+
+ - name: Create PVC
+ k8s:
+ state: present
+ inline: &pvc
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: elastic-volume
+ namespace: testing
+ spec:
+ resources:
+ requests:
+ storage: 5Gi
+ accessModes:
+ - ReadWriteOnce
+
+ - name: Show output
+ debug:
+ var: output
+
+ - name: Create the PVC again
+ k8s:
+ state: present
+ inline: *pvc
+
+ - name: PVC creation should be idempotent
+ assert:
+ that: not output.changed
+
+ - name: Create deployment
+ k8s:
+ state: present
+ inline: &deployment
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: elastic
+ labels:
+ app: galaxy
+ service: elastic
+ namespace: testing
+ spec:
+ template:
+ metadata:
+ labels:
+ app: galaxy
+ service: elastic
+ spec:
+ containers:
+ - name: elastic
+ volumeMounts:
+ - mountPath: /usr/share/elasticsearch/data
+ name: elastic-volume
+ command: ['elasticsearch']
+ image: 'ansible/galaxy-elasticsearch:2.4.6'
+ volumes:
+ - name: elastic-volume
+ persistentVolumeClaim:
+ claimName: elastic-volume
+ replicas: 1
+ strategy:
+ type: RollingUpdate
+ register: output
+
+ - name: Show output
+ debug:
+ var: output
+
+ - name: Create deployment again
+ k8s:
+ state: present
+ inline: *deployment
+ register: output
+
+ - name: Deployment creation should be idempotent
+ assert:
+ that: not output.changed
+
+ - debug:
+ var: k8s_openshift
+
+ - include: openshift.yml
+ when: k8s_openshift | bool
+
+ ### Type tests
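+    # The k8s module accepts `definition` as an inline dict, as a YAML string
+    # (including multi-document strings), or as a list of resource dicts; the
+    # tasks below exercise each of these forms.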
+ - name: Create a namespace from a string
+ k8s:
+ definition: |+
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing1
+
+ - name: Namespace should exist
+ k8s_info:
+ kind: Namespace
+ api_version: v1
+ name: testing1
+ register: k8s_info_testing1
+ failed_when: not k8s_info_testing1.resources or k8s_info_testing1.resources[0].status.phase != "Active"
+
+ - name: Create resources from a multidocument yaml string
+ k8s:
+ definition: |+
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing2
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing3
+
+ - name: Lookup namespaces
+ k8s_info:
+ api_version: v1
+ kind: Namespace
+ name: "{{ item }}"
+ loop:
+ - testing2
+ - testing3
+ register: k8s_namespaces
+
+ - name: Resources should exist
+ assert:
+ that: item.resources[0].status.phase == 'Active'
+ loop: "{{ k8s_namespaces.results }}"
+
+ - name: Delete resources from a multidocument yaml string
+ k8s:
+ state: absent
+ definition: |+
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing2
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing3
+
+ - name: Lookup namespaces
+ k8s_info:
+ api_version: v1
+ kind: Namespace
+ name: "{{ item }}"
+ loop:
+ - testing2
+ - testing3
+ register: k8s_namespaces
+
+ - name: Resources should not exist
+ assert:
+ that:
+ - not item.resources or item.resources[0].status.phase == "Terminating"
+ loop: "{{ k8s_namespaces.results }}"
+
+ - name: Create resources from a list
+ k8s:
+ definition:
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing4
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing5
+
+ - name: Lookup namespaces
+ k8s_info:
+ api_version: v1
+ kind: Namespace
+ name: "{{ item }}"
+ loop:
+ - testing4
+ - testing5
+ register: k8s_namespaces
+
+ - name: Resources should exist
+ assert:
+ that: item.resources[0].status.phase == 'Active'
+ loop: "{{ k8s_namespaces.results }}"
+
+ - name: Delete resources from a list
+ k8s:
+ state: absent
+ definition:
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing4
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing5
+
+ - k8s_info:
+ api_version: v1
+ kind: Namespace
+ name: "{{ item }}"
+ loop:
+ - testing4
+ - testing5
+ register: k8s_info
+
+ - name: Resources are terminating if still in results
+ assert:
+ that: not item.resources or item.resources[0].status.phase == "Terminating"
+ loop: "{{ k8s_info.results }}"
+
+ - name: Create resources from a yaml string ending with ---
+ k8s:
+ definition: |+
+ ---
+ kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing6
+ ---
+
+ - name: Namespace should exist
+ k8s_info:
+ kind: Namespace
+ api_version: v1
+ name: testing6
+ register: k8s_info_testing6
+ failed_when: not k8s_info_testing6.resources or k8s_info_testing6.resources[0].status.phase != "Active"
+
+ - include_tasks: crd.yml
+ - include_tasks: lists.yml
+ - include_tasks: append_hash.yml
+
+ always:
+ - name: Delete all namespaces
+ k8s:
+ state: absent
+ definition:
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing1
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing2
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing3
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing4
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing5
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: testing6
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_k8s/tasks/lists.yml b/test/integration/targets/incidental_k8s/tasks/lists.yml
new file mode 100644
index 0000000000..4d464df734
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/lists.yml
@@ -0,0 +1,140 @@
+---
+
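+# These tasks exercise *List kinds: the items of a ConfigMapList (and of a
+# generic v1 List) are created and removed by the k8s module as individual
+# resources, which the tasks below verify with label and name lookups.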
+- name: Ensure testing1 namespace exists
+ k8s:
+ api_version: v1
+ kind: Namespace
+ name: testing1
+
+- block:
+ - name: Create configmaps
+ k8s:
+ namespace: testing1
+ definition:
+ apiVersion: v1
+ kind: ConfigMapList
+ items: '{{ configmaps }}'
+
+ - name: Get ConfigMaps
+ k8s_info:
+ api_version: v1
+ kind: ConfigMap
+ namespace: testing1
+ label_selectors:
+ - app=test
+ register: cms
+
+ - name: All three configmaps should exist
+ assert:
+ that: item.data.a is defined
+ with_items: '{{ cms.resources }}'
+
+ - name: Delete configmaps
+ k8s:
+ state: absent
+ namespace: testing1
+ definition:
+ apiVersion: v1
+ kind: ConfigMapList
+ items: '{{ configmaps }}'
+
+ - name: Get ConfigMaps
+ k8s_info:
+ api_version: v1
+ kind: ConfigMap
+ namespace: testing1
+ label_selectors:
+ - app=test
+ register: cms
+
+ - name: All three configmaps should not exist
+ assert:
+ that: not cms.resources
+ vars:
+ configmaps:
+ - metadata:
+ name: list-example-1
+ labels:
+ app: test
+ data:
+ a: first
+ - metadata:
+ name: list-example-2
+ labels:
+ app: test
+ data:
+ a: second
+ - metadata:
+ name: list-example-3
+ labels:
+ app: test
+ data:
+ a: third
+
+- block:
+ - name: Create list of arbitrary resources
+ k8s:
+ namespace: testing1
+ definition:
+ apiVersion: v1
+ kind: List
+ namespace: testing1
+ items: '{{ resources }}'
+
+ - name: Get the created resources
+ k8s_info:
+ api_version: '{{ item.apiVersion }}'
+ kind: '{{ item.kind }}'
+ namespace: testing1
+ name: '{{ item.metadata.name }}'
+ register: list_resources
+ with_items: '{{ resources }}'
+
+ - name: All resources should exist
+ assert:
+ that: ((list_resources.results | sum(attribute="resources", start=[])) | length) == (resources | length)
+
+ - name: Delete list of arbitrary resources
+ k8s:
+ state: absent
+ namespace: testing1
+ definition:
+ apiVersion: v1
+ kind: List
+ namespace: testing1
+ items: '{{ resources }}'
+
+ - name: Get the resources
+ k8s_info:
+ api_version: '{{ item.apiVersion }}'
+ kind: '{{ item.kind }}'
+ namespace: testing1
+ name: '{{ item.metadata.name }}'
+ register: list_resources
+ with_items: '{{ resources }}'
+
+ - name: The resources should not exist
+ assert:
+ that: not ((list_resources.results | sum(attribute="resources", start=[])) | length)
+ vars:
+ resources:
+ - apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: list-example-4
+ data:
+ key: value
+ - apiVersion: v1
+ kind: Service
+ metadata:
+ name: list-example-svc
+ labels:
+ app: test
+ spec:
+ selector:
+ app: test
+ ports:
+ - protocol: TCP
+ targetPort: 8000
+ name: port-8000-tcp
+ port: 8000
diff --git a/test/integration/targets/incidental_k8s/tasks/main.yml b/test/integration/targets/incidental_k8s/tasks/main.yml
new file mode 100644
index 0000000000..bb11bbc686
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/main.yml
@@ -0,0 +1,92 @@
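+# These tasks run the k8s test suites inside throwaway virtualenvs so that
+# several combinations of client libraries can be exercised: openshift without
+# kubernetes-validate, openshift with kubernetes-validate, an old openshift
+# release that should fail gracefully, and finally the full test suite.
+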
+- set_fact:
+ virtualenv: "{{ remote_tmp_dir }}/virtualenv"
+ virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
+
+- set_fact:
+ virtualenv_interpreter: "{{ virtualenv }}/bin/python"
+
+- pip:
+ name: virtualenv
+
+# Test graceful failure for missing kubernetes-validate
+
+- pip:
+ name:
+ - openshift>=0.9.2
+ - coverage
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: validate_not_installed.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+
+- file:
+ path: "{{ virtualenv }}"
+ state: absent
+ no_log: yes
+
+# Test validate with kubernetes-validate
+
+- pip:
+ name:
+ - kubernetes-validate==1.12.0
+ - openshift>=0.9.2
+ - coverage
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: validate_installed.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+ playbook_namespace: ansible-test-k8s-validate
+
+- file:
+ path: "{{ virtualenv }}"
+ state: absent
+ no_log: yes
+
+# Test graceful failure for older versions of openshift
+
+- pip:
+ name:
+ - openshift==0.6.0
+ - kubernetes==6.0.0
+ - coverage
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: older_openshift_fail.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+ recreate_crd_default_merge_expectation: recreate_crd is failed
+ playbook_namespace: ansible-test-k8s-older-openshift
+
+- file:
+ path: "{{ virtualenv }}"
+ state: absent
+ no_log: yes
+
+# Run full test suite
+
+- pip:
+ name:
+ - openshift>=0.9.2
+ - coverage
+ virtualenv: "{{ virtualenv }}"
+ virtualenv_command: "{{ virtualenv_command }}"
+ virtualenv_site_packages: no
+
+- include_tasks: full_test.yml
+ vars:
+ ansible_python_interpreter: "{{ virtualenv_interpreter }}"
+ create_crd_with_apply: no
+ playbook_namespace: ansible-test-k8s-full
+
+- file:
+ path: "{{ virtualenv }}"
+ state: absent
+ no_log: yes
diff --git a/test/integration/targets/incidental_k8s/tasks/older_openshift_fail.yml b/test/integration/targets/incidental_k8s/tasks/older_openshift_fail.yml
new file mode 100644
index 0000000000..2acf3d2175
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/older_openshift_fail.yml
@@ -0,0 +1,69 @@
+ - python_requirements_info:
+ dependencies:
+ - openshift==0.6.0
+ - kubernetes==6.0.0
+
+ # append_hash
+ - name: use append_hash with ConfigMap
+ k8s:
+ definition:
+ metadata:
+ name: config-map-test
+ namespace: "{{ playbook_namespace }}"
+ apiVersion: v1
+ kind: ConfigMap
+ data:
+ hello: world
+ append_hash: yes
+ ignore_errors: yes
+ register: k8s_append_hash
+
+ - name: assert that append_hash fails gracefully
+ assert:
+ that:
+ - k8s_append_hash is failed
+ - "'Failed to import the required Python library (openshift >= 0.7.2)' in k8s_append_hash.msg"
+ - "'. This is required for append_hash.' in k8s_append_hash.msg"
+
+ # validate
+ - name: attempt to use validate with older openshift
+ k8s:
+ definition:
+ metadata:
+ name: config-map-test
+ namespace: "{{ playbook_namespace }}"
+ apiVersion: v1
+ kind: ConfigMap
+ data:
+ hello: world
+ validate:
+ fail_on_error: yes
+ ignore_errors: yes
+ register: k8s_validate
+
+ - name: assert that validate fails gracefully
+ assert:
+ that:
+ - k8s_validate is failed
+ - "k8s_validate.msg == 'openshift >= 0.8.0 is required for validate'"
+
+ # apply
+ - name: attempt to use apply with older openshift
+ k8s:
+ definition:
+ metadata:
+ name: config-map-test
+ namespace: "{{ playbook_namespace }}"
+ apiVersion: v1
+ kind: ConfigMap
+ data:
+ hello: world
+ apply: yes
+ ignore_errors: yes
+ register: k8s_apply
+
+ - name: assert that apply fails gracefully
+ assert:
+ that:
+ - k8s_apply is failed
+ - "k8s_apply.msg.startswith('Failed to import the required Python library (openshift >= 0.9.2)')"
diff --git a/test/integration/targets/incidental_k8s/tasks/openshift.yml b/test/integration/targets/incidental_k8s/tasks/openshift.yml
new file mode 100644
index 0000000000..f4a9006119
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/openshift.yml
@@ -0,0 +1,61 @@
+# OpenShift Resources
+- name: Create a project
+ k8s:
+ name: testing
+ kind: Project
+ api_version: v1
+ apply: no
+ register: output
+
+- name: show output
+ debug:
+ var: output
+
+- name: Create deployment config
+ k8s:
+ state: present
+ inline: &dc
+ apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: elastic
+ labels:
+ app: galaxy
+ service: elastic
+ namespace: testing
+ spec:
+ template:
+ metadata:
+ labels:
+ app: galaxy
+ service: elastic
+ spec:
+ containers:
+ - name: elastic
+ volumeMounts:
+ - mountPath: /usr/share/elasticsearch/data
+ name: elastic-volume
+ command: ['elasticsearch']
+ image: 'ansible/galaxy-elasticsearch:2.4.6'
+ volumes:
+ - name: elastic-volume
+ persistentVolumeClaim:
+ claimName: elastic-volume
+ replicas: 1
+ strategy:
+ type: Rolling
+ register: output
+
+- name: Show output
+ debug:
+ var: output
+
+- name: Create deployment config again
+ k8s:
+ state: present
+ inline: *dc
+ register: output
+
+- name: DC creation should be idempotent
+ assert:
+ that: not output.changed
diff --git a/test/integration/targets/incidental_k8s/tasks/validate_installed.yml b/test/integration/targets/incidental_k8s/tasks/validate_installed.yml
new file mode 100644
index 0000000000..224bd2eb68
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/validate_installed.yml
@@ -0,0 +1,125 @@
+- block:
+ - name: Create a namespace
+ k8s:
+ name: "{{ playbook_namespace }}"
+ kind: Namespace
+
+ - copy:
+ src: files
+ dest: "{{ remote_tmp_dir }}"
+
+ - name: incredibly simple ConfigMap
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: hello
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: yes
+ register: k8s_with_validate
+
+ - name: assert that k8s_with_validate succeeds
+ assert:
+ that:
+ - k8s_with_validate is successful
+
+ - name: extra property does not fail without strict
+ k8s:
+ src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml"
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: yes
+ strict: no
+
+ - name: extra property fails with strict
+ k8s:
+ src: "{{ remote_tmp_dir }}/files/kuard-extra-property.yml"
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: yes
+ strict: yes
+ ignore_errors: yes
+ register: extra_property
+
+ - name: check that extra property fails with strict
+ assert:
+ that:
+ - extra_property is failed
+
+ - name: invalid type fails at validation stage
+ k8s:
+ src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml"
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: yes
+ strict: no
+ ignore_errors: yes
+ register: invalid_type
+
+ - name: check that invalid type fails
+ assert:
+ that:
+ - invalid_type is failed
+
+ - name: invalid type fails with warnings when fail_on_error is False
+ k8s:
+ src: "{{ remote_tmp_dir }}/files/kuard-invalid-type.yml"
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: no
+ strict: no
+ ignore_errors: yes
+ register: invalid_type_no_fail
+
+ - name: check that invalid type fails
+ assert:
+ that:
+ - invalid_type_no_fail is failed
+
+ - name: setup custom resource definition
+ k8s:
+ src: "{{ remote_tmp_dir }}/files/setup-crd.yml"
+
+ - name: wait a few seconds
+ pause:
+ seconds: 5
+
+ - name: add custom resource definition
+ k8s:
+ src: "{{ remote_tmp_dir }}/files/crd-resource.yml"
+ namespace: "{{ playbook_namespace }}"
+ validate:
+ fail_on_error: yes
+ strict: yes
+ register: unknown_kind
+
+ - name: check that unknown kind warns
+ assert:
+ that:
+ - unknown_kind is successful
+ - "'warnings' in unknown_kind"
+
+ always:
+ - name: remove custom resource
+ k8s:
+ definition: "{{ lookup('file', role_path + '/files/crd-resource.yml') }}"
+ namespace: "{{ playbook_namespace }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: remove custom resource definitions
+ k8s:
+ definition: "{{ lookup('file', role_path + '/files/setup-crd.yml') }}"
+ state: absent
+
+ - name: Delete namespace
+ k8s:
+ state: absent
+ definition:
+ - kind: Namespace
+ apiVersion: v1
+ metadata:
+ name: "{{ playbook_namespace }}"
+ ignore_errors: yes
diff --git a/test/integration/targets/incidental_k8s/tasks/validate_not_installed.yml b/test/integration/targets/incidental_k8s/tasks/validate_not_installed.yml
new file mode 100644
index 0000000000..ecd17f7ea9
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/validate_not_installed.yml
@@ -0,0 +1,23 @@
+ - python_requirements_info:
+ dependencies:
+ - openshift
+ - kubernetes
+ - kubernetes-validate
+
+ - k8s:
+ definition:
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: hello
+ namespace: default
+ validate:
+ fail_on_error: yes
+ ignore_errors: yes
+ register: k8s_no_validate
+
+ - name: assert that k8s_no_validate fails gracefully
+ assert:
+ that:
+ - k8s_no_validate is failed
+ - "k8s_no_validate.msg == 'kubernetes-validate python library is required to validate resources'"
diff --git a/test/integration/targets/incidental_k8s/tasks/waiter.yml b/test/integration/targets/incidental_k8s/tasks/waiter.yml
new file mode 100644
index 0000000000..757d7899ac
--- /dev/null
+++ b/test/integration/targets/incidental_k8s/tasks/waiter.yml
@@ -0,0 +1,355 @@
+- name: ensure that there are actually some nodes
+ k8s_info:
+ kind: Node
+ register: nodes
+
+- block:
+ - set_fact:
+ wait_namespace: wait
+
+ - name: ensure namespace exists
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ wait_namespace }}"
+
+ - name: add a simple pod
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "{{ k8s_pod_name }}"
+ namespace: "{{ wait_namespace }}"
+ spec: "{{ k8s_pod_spec }}"
+ wait: yes
+ vars:
+ k8s_pod_name: wait-pod
+ k8s_pod_image: alpine:3.8
+ k8s_pod_command:
+ - sleep
+ - "10000"
+ register: wait_pod
+ ignore_errors: yes
+
+ - name: assert that pod creation succeeded
+ assert:
+ that:
+ - wait_pod is successful
+
+ - name: add a daemonset
+ k8s:
+ definition:
+ apiVersion: extensions/v1beta1
+ kind: DaemonSet
+ metadata:
+ name: wait-daemonset
+ namespace: "{{ wait_namespace }}"
+ spec:
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ wait_sleep: 3
+ wait_timeout: 180
+ vars:
+ k8s_pod_name: wait-ds
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
+ register: ds
+
+ - name: check that daemonset wait worked
+ assert:
+ that:
+ - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
+
+ - name: update a daemonset in check_mode
+ k8s:
+ definition:
+ apiVersion: extensions/v1beta1
+ kind: DaemonSet
+ metadata:
+ name: wait-daemonset
+ namespace: "{{ wait_namespace }}"
+ spec:
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ updateStrategy:
+ type: RollingUpdate
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ wait_sleep: 3
+ wait_timeout: 180
+ vars:
+ k8s_pod_name: wait-ds
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
+ register: update_ds_check_mode
+
+ - name: check that check_mode returned changed
+ assert:
+ that:
+ - update_ds_check_mode is changed
+
+ - name: update a daemonset
+ k8s:
+ definition:
+ apiVersion: extensions/v1beta1
+ kind: DaemonSet
+ metadata:
+ name: wait-daemonset
+ namespace: "{{ wait_namespace }}"
+ spec:
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ updateStrategy:
+ type: RollingUpdate
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ wait_sleep: 3
+ wait_timeout: 180
+ vars:
+ k8s_pod_name: wait-ds
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3
+ register: ds
+
+ - name: get updated pods
+ k8s_info:
+ api_version: v1
+ kind: Pod
+ namespace: "{{ wait_namespace }}"
+ label_selectors:
+ - app=wait-ds
+ register: updated_ds_pods
+
+ - name: check that daemonset wait worked
+ assert:
+ that:
+ - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
+ - updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3")
+
+ - name: add a crashing pod
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "{{ k8s_pod_name }}"
+ namespace: "{{ wait_namespace }}"
+ spec: "{{ k8s_pod_spec }}"
+ wait: yes
+ wait_sleep: 1
+ wait_timeout: 30
+ vars:
+ k8s_pod_name: wait-crash-pod
+ k8s_pod_image: alpine:3.8
+ k8s_pod_command:
+ - /bin/false
+ register: crash_pod
+ ignore_errors: yes
+
+ - name: check that task failed
+ assert:
+ that:
+ - crash_pod is failed
+
+ - name: use a non-existent image
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ name: "{{ k8s_pod_name }}"
+ namespace: "{{ wait_namespace }}"
+ spec: "{{ k8s_pod_spec }}"
+ wait: yes
+ wait_sleep: 1
+ wait_timeout: 30
+ vars:
+ k8s_pod_name: wait-no-image-pod
+ k8s_pod_image: i_made_this_up:and_this_too
+ register: no_image_pod
+ ignore_errors: yes
+
+ - name: check that task failed
+ assert:
+ that:
+ - no_image_pod is failed
+
+ - name: add a deployment
+ k8s:
+ definition:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: wait-deploy
+ namespace: "{{ wait_namespace }}"
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ vars:
+ k8s_pod_name: wait-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+
+ register: deploy
+
+ - name: check that deployment wait worked
+ assert:
+ that:
+ - deploy.result.status.availableReplicas == deploy.result.status.replicas
+
+ - name: update a deployment
+ k8s:
+ definition:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: wait-deploy
+ namespace: "{{ wait_namespace }}"
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ vars:
+ k8s_pod_name: wait-deploy
+ k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
+ k8s_pod_ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ register: update_deploy
+
+ - name: get updated pods
+ k8s_info:
+ api_version: v1
+ kind: Pod
+ namespace: "{{ wait_namespace }}"
+ label_selectors:
+ - app=wait-deploy
+ register: updated_deploy_pods
+
+ - name: check that deployment wait worked
+ assert:
+ that:
+ - deploy.result.status.availableReplicas == deploy.result.status.replicas
+ - updated_deploy_pods.resources[0].spec.containers[0].image.endswith(":2")
+
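+    # The wait_condition below makes the module block until the Deployment's
+    # "Progressing" condition reports status "Unknown" with reason
+    # "DeploymentPaused", which is how Kubernetes represents a paused rollout.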
+ - name: pause a deployment
+ k8s:
+ definition:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: wait-deploy
+ namespace: "{{ wait_namespace }}"
+ spec:
+ paused: True
+ apply: no
+ wait: yes
+ wait_condition:
+ type: Progressing
+ status: Unknown
+ reason: DeploymentPaused
+ register: pause_deploy
+
+ - name: check that paused deployment wait worked
+ assert:
+ that:
+ - condition.reason == "DeploymentPaused"
+ - condition.status == "Unknown"
+ vars:
+ condition: '{{ pause_deploy.result.status.conditions | json_query("[?type==`Progressing`]") | first }}'
+
+ - name: add a service based on the deployment
+ k8s:
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: wait-svc
+ namespace: "{{ wait_namespace }}"
+ spec:
+ selector:
+ app: "{{ k8s_pod_name }}"
+ ports:
+ - port: 8080
+ targetPort: 8080
+ protocol: TCP
+ wait: yes
+ vars:
+ k8s_pod_name: wait-deploy
+ register: service
+
+ - name: assert that waiting for service works
+ assert:
+ that:
+ - service is successful
+
+ - name: add a crashing deployment
+ k8s:
+ definition:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: wait-crash-deploy
+ namespace: "{{ wait_namespace }}"
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: "{{ k8s_pod_name }}"
+ template: "{{ k8s_pod_template }}"
+ wait: yes
+ vars:
+ k8s_pod_name: wait-crash-deploy
+ k8s_pod_image: alpine:3.8
+ k8s_pod_command:
+ - /bin/false
+ register: wait_crash_deploy
+ ignore_errors: yes
+
+ - name: check that task failed
+ assert:
+ that:
+ - wait_crash_deploy is failed
+
+ - name: remove Pod with very short timeout
+ k8s:
+ api_version: v1
+ kind: Pod
+ name: wait-pod
+ namespace: "{{ wait_namespace }}"
+ state: absent
+ wait: yes
+ wait_sleep: 2
+ wait_timeout: 5
+ ignore_errors: yes
+ register: short_wait_remove_pod
+
+ - name: check that task failed
+ assert:
+ that:
+ - short_wait_remove_pod is failed
+
+ always:
+ - name: remove namespace
+ k8s:
+ kind: Namespace
+ name: "{{ wait_namespace }}"
+ state: absent
+
+ when: (nodes.resources | length) > 0
diff --git a/test/integration/targets/incidental_nios_prepare_tests/aliases b/test/integration/targets/incidental_nios_prepare_tests/aliases
new file mode 100644
index 0000000000..136c05e0d0
--- /dev/null
+++ b/test/integration/targets/incidental_nios_prepare_tests/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_nios_prepare_tests/tasks/main.yml b/test/integration/targets/incidental_nios_prepare_tests/tasks/main.yml
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/integration/targets/incidental_nios_prepare_tests/tasks/main.yml
diff --git a/test/integration/targets/incidental_nios_txt_record/aliases b/test/integration/targets/incidental_nios_txt_record/aliases
new file mode 100644
index 0000000000..dfb77b8152
--- /dev/null
+++ b/test/integration/targets/incidental_nios_txt_record/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/incidental
+cloud/nios
+destructive
diff --git a/test/integration/targets/incidental_nios_txt_record/defaults/main.yaml b/test/integration/targets/incidental_nios_txt_record/defaults/main.yaml
new file mode 100644
index 0000000000..ebf6ffc903
--- /dev/null
+++ b/test/integration/targets/incidental_nios_txt_record/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
\ No newline at end of file
diff --git a/test/integration/targets/incidental_nios_txt_record/meta/main.yaml b/test/integration/targets/incidental_nios_txt_record/meta/main.yaml
new file mode 100644
index 0000000000..c7c538f4e7
--- /dev/null
+++ b/test/integration/targets/incidental_nios_txt_record/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_nios_prepare_tests
diff --git a/test/integration/targets/incidental_nios_txt_record/tasks/main.yml b/test/integration/targets/incidental_nios_txt_record/tasks/main.yml
new file mode 100644
index 0000000000..e15b4c55db
--- /dev/null
+++ b/test/integration/targets/incidental_nios_txt_record/tasks/main.yml
@@ -0,0 +1 @@
+- include: nios_txt_record_idempotence.yml
diff --git a/test/integration/targets/incidental_nios_txt_record/tasks/nios_txt_record_idempotence.yml b/test/integration/targets/incidental_nios_txt_record/tasks/nios_txt_record_idempotence.yml
new file mode 100644
index 0000000000..3b7357afaf
--- /dev/null
+++ b/test/integration/targets/incidental_nios_txt_record/tasks/nios_txt_record_idempotence.yml
@@ -0,0 +1,80 @@
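+# Each nios_txt_record operation below is run twice: the first run is expected
+# to report "changed" and the second run is not, which checks that the module
+# behaves idempotently.
+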
+- name: cleanup the parent object
+ nios_zone:
+ name: ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create the parent object
+ nios_zone:
+ name: ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+
+- name: cleanup txt record
+ nios_txt_record:
+ name: txt.ansible.com
+ text: mytext
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create txt record
+ nios_txt_record:
+ name: txt.ansible.com
+ text: mytext
+ state: present
+ provider: "{{ nios_provider }}"
+ register: txt_create1
+
+- name: create txt record
+ nios_txt_record:
+ name: txt.ansible.com
+ text: mytext
+ state: present
+ provider: "{{ nios_provider }}"
+ register: txt_create2
+
+- assert:
+ that:
+ - "txt_create1.changed"
+ - "not txt_create2.changed"
+
+- name: add a comment to an existing txt record
+ nios_txt_record:
+ name: txt.ansible.com
+ text: mytext
+ state: present
+ comment: mycomment
+ provider: "{{ nios_provider }}"
+ register: txt_update1
+
+- name: add a comment to an existing txt record
+ nios_txt_record:
+ name: txt.ansible.com
+ text: mytext
+ state: present
+ comment: mycomment
+ provider: "{{ nios_provider }}"
+ register: txt_update2
+
+- name: remove a txt record from the system
+ nios_txt_record:
+ name: txt.ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: txt_delete1
+
+- name: remove a txt record from the system
+ nios_txt_record:
+ name: txt.ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: txt_delete2
+
+- assert:
+ that:
+ - "txt_create1.changed"
+ - "not txt_create2.changed"
+ - "txt_update1.changed"
+ - "not txt_update2.changed"
+ - "txt_delete1.changed"
+ - "not txt_delete2.changed"
diff --git a/test/integration/targets/incidental_script_inventory_vmware_inventory/aliases b/test/integration/targets/incidental_script_inventory_vmware_inventory/aliases
new file mode 100644
index 0000000000..420e0cddd5
--- /dev/null
+++ b/test/integration/targets/incidental_script_inventory_vmware_inventory/aliases
@@ -0,0 +1,3 @@
+shippable/vcenter/incidental
+cloud/vcenter
+destructive
diff --git a/test/integration/targets/incidental_script_inventory_vmware_inventory/runme.sh b/test/integration/targets/incidental_script_inventory_vmware_inventory/runme.sh
new file mode 100755
index 0000000000..d0d6cd540b
--- /dev/null
+++ b/test/integration/targets/incidental_script_inventory_vmware_inventory/runme.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
+
+set -euo pipefail
+
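+# This wrapper backs up any existing vmware_inventory.ini, writes one pointing
+# at the simulated vCenter used by the test environment, resets the simulator
+# through its HTTP control API and then runs the inventory script via
+# ansible-playbook.
+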
+contrib_dir=$(pwd)
+
+echo "DEBUG: using ${contrib_dir}"
+
+export ANSIBLE_CONFIG=ansible.cfg
+export VMWARE_SERVER="${VCENTER_HOSTNAME}"
+export VMWARE_USERNAME="${VCENTER_USERNAME}"
+export VMWARE_PASSWORD="${VCENTER_PASSWORD}"
+
+VMWARE_CONFIG=${contrib_dir}/vmware_inventory.ini
+
+
+trap cleanup INT TERM EXIT
+
+# Back up any existing inventory config file before overwriting it
+if [ -f "${VMWARE_CONFIG}" ];
+then
+ echo "DEBUG: Creating backup of ${VMWARE_CONFIG}"
+ cp "${VMWARE_CONFIG}" "${VMWARE_CONFIG}.bk"
+fi
+
+cat > "${VMWARE_CONFIG}" <<VMWARE_INI
+[vmware]
+server=${VMWARE_SERVER}
+username=${VMWARE_USERNAME}
+password=${VMWARE_PASSWORD}
+validate_certs=False
+VMWARE_INI
+
+function cleanup {
+    # Restore the original inventory config file
+ if [ -f "${VMWARE_CONFIG}.bk" ]; then
+ echo "DEBUG: Cleanup ${VMWARE_CONFIG}"
+ mv "${VMWARE_CONFIG}.bk" "${VMWARE_CONFIG}"
+ fi
+}
+
+echo "DEBUG: Using ${VCENTER_HOSTNAME} with username ${VCENTER_USERNAME} and password ${VCENTER_PASSWORD}"
+
+echo "Kill all previous instances"
+curl "http://${VCENTER_HOSTNAME}:5000/killall" > /dev/null 2>&1
+
+echo "Start new VCSIM server"
+curl "http://${VCENTER_HOSTNAME}:5000/spawn?datacenter=1&cluster=1&folder=0" > /dev/null 2>&1
+
+echo "Debugging new instances"
+curl "http://${VCENTER_HOSTNAME}:5000/govc_find"
+
+# Get inventory
+ansible-playbook -i ./vmware_inventory.sh "./test_vmware_inventory.yml" --connection=local "$@"
+
+echo "DEBUG: Done"
diff --git a/test/integration/targets/incidental_script_inventory_vmware_inventory/test_vmware_inventory.yml b/test/integration/targets/incidental_script_inventory_vmware_inventory/test_vmware_inventory.yml
new file mode 100644
index 0000000000..035d1d16ec
--- /dev/null
+++ b/test/integration/targets/incidental_script_inventory_vmware_inventory/test_vmware_inventory.yml
@@ -0,0 +1,18 @@
+# Test code for the vmware guest contrib inventory
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+---
+- name: Test VMware guest contrib inventory script
+ hosts: localhost
+ tasks:
+ - name: store the vcenter container ip
+ set_fact:
+ vcsim: "{{ lookup('env', 'VCENTER_HOSTNAME') }}"
+
+    - name: Check that groups are present in the inventory
+ assert:
+ that:
+ - "'{{item}}' in groups"
+ with_items:
+ - all
diff --git a/test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.ini b/test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.ini
new file mode 100644
index 0000000000..f94570f891
--- /dev/null
+++ b/test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.ini
@@ -0,0 +1,127 @@
+# Ansible VMware external inventory script settings
+
+[vmware]
+
+# The resolvable hostname or IP address of the vSphere server
+server=vcenter
+
+# The port for the vsphere API
+#port=443
+
+# The username with access to the vsphere API. This setting
+# may also be defined via the VMWARE_USERNAME environment variable.
+username=administrator@vsphere.local
+
+# The password for the vsphere API. This setting
+# may also be defined via the VMWARE_PASSWORD environment variable.
+password=vmware
+
+# Verify the server's SSL certificate
+#validate_certs = True
+
+# Specify the number of seconds to use the inventory cache before it is
+# considered stale. If not defined, defaults to 0 seconds.
+#cache_max_age = 3600
+
+
+# Specify the directory used for storing the inventory cache. If not defined,
+# caching will be disabled.
+#cache_path = ~/.cache/ansible
+
+
+# Max object level refers to the level of recursion the script will delve into
+# the objects returned from pyvmomi to find serializable facts. The default
+# level of 1 is sufficient for most tasks and is the most performant.
+# Beware that deeper recursion can exceed python's recursion limit (causing a
+# traceback), make the script sluggish and return huge blobs of facts.
+# If you do not know what you are doing, leave this set to 1.
+#max_object_level=1
+
+
+# Lower the keynames for facts to make addressing them easier.
+#lower_var_keys=True
+
+
+# Don't retrieve and process some VMware attribute keys
+# The default values sanitize the inventory metadata and slightly improve
+# performance by removing uncommon group attributes.
+#skip_keys = declaredalarmstate,disabledmethod,dynamicproperty,dynamictype,environmentbrowser,managedby,parent,childtype,resourceconfig
+
+
+# Host alias for objects in the inventory. VMware allows duplicate VM names,
+# so names cannot be considered unique. Use this setting to alter the alias
+# returned for the hosts. Any attribute of the guest can be used to build
+# this alias. The default combines the config name and the config uuid and
+# expects that the ansible_host will be set by the host_pattern.
+#alias_pattern={{ config.name + '_' + config.uuid }}
+
+
+# Host pattern is the value set for ansible_host and ansible_ssh_host, which
+# needs to be a hostname or IP address that the Ansible control host can reach.
+#host_pattern={{ guest.ipaddress }}
+
+
+# Host filters are a comma separated list of jinja patterns to remove
+# non-matching hosts from the final result.
+# EXAMPLES:
+# host_filters={{ config.guestid == 'rhel7_64Guest' }}
+# host_filters={{ config.cpuhotremoveenabled != False }},{{ runtime.maxmemoryusage >= 512 }}
+# host_filters={{ runtime.powerstate == "poweredOn" }}
+# host_filters={{ guest.gueststate == "notRunning" }}
+# The default filter keeps virtual machines whose runtime powerstate equals "poweredOn". (Changed in version 2.5)
+# Unlike "guest.gueststate", checking the runtime powerstate does not require VMware tools to be installed.
+#host_filters={{ runtime.powerstate == "poweredOn" }}
+
+
+
+# Groupby patterns enable the user to create groups via any possible jinja
+# expression. The resulting value will be the group name and the host will be
+# added to that group. Be careful not to write expressions that simply return
+# True/False, because those values will become the literal group name. The
+# patterns can be comma delimited to create as many groups as necessary.
+#groupby_patterns={{ guest.guestid }},{{ 'templates' if config.template else 'guests'}}
+
+# Group by custom fields will use VMware custom fields to generate hostgroups
+# based on {{ custom_field_group_prefix }} + field_name + _ + field_value
+# Setting groupby_custom_field to True enables this feature.
+# If a custom field value is comma separated, multiple groups are created.
+# Warning: This requires max_object_level to be set to 2 or greater.
+#groupby_custom_field = False
+
+# You can customize prefix used by custom field hostgroups generation here.
+# vmware_tag_ prefix is the default and consistent with ec2_tag_
+#custom_field_group_prefix = vmware_tag_
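+
+# For illustration (hypothetical field): with the defaults above, a VM whose
+# custom field "environment" is set to "prod" would be placed in the group
+# vmware_tag_environment_prod.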
+
+# You can blacklist custom fields so that they are not included in the
+# groupby_custom_field option. This is useful when you have custom fields that
+# have values that are unique to individual hosts. Timestamps for example.
+# The groupby_custom_field_excludes option should be a comma separated list of custom
+# field keys to be blacklisted.
+#groupby_custom_field_excludes=<custom_field_1>,<custom_field_2>,<custom_field_3>
+
+# The script attempts to recurse into virtualmachine objects and serialize
+# all available data. The serialization is comprehensive but slow. If the
+# vcenter environment is large and the desired properties are known, create
+# a 'properties' section in this config and make an arbitrary list of
+# key=value settings where the value is a path to a specific property.
+# If this feature is enabled, be sure to fetch every property that is used
+# in the jinja expressions defined above. For performance tuning, reduce
+# the number of properties to the smallest amount possible and limit the
+# use of properties that are not direct attributes of vim.VirtualMachine
+#[properties]
+#prop01=name
+#prop02=config.cpuHotAddEnabled
+#prop03=config.cpuHotRemoveEnabled
+#prop04=config.instanceUuid
+#prop05=config.hardware.numCPU
+#prop06=config.template
+#prop07=config.name
+#prop08=guest.hostName
+#prop09=guest.ipAddress
+#prop10=guest.guestId
+#prop11=guest.guestState
+#prop12=runtime.maxMemoryUsage
+# In order to populate `customValue` (virtual machine's custom attributes) inside hostvars,
+# uncomment following property. Please see - https://github.com/ansible/ansible/issues/41395
+#prop13=customValue
diff --git a/test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.py b/test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.py
new file mode 100755
index 0000000000..0271110c96
--- /dev/null
+++ b/test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.py
@@ -0,0 +1,793 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C): 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Requirements
+# - pyvmomi >= 6.0.0.2016.4
+
+# TODO:
+# * more jq examples
+# * optional folder hierarchy
+
+"""
+$ jq '._meta.hostvars[].config' data.json | head
+{
+ "alternateguestname": "",
+ "instanceuuid": "5035a5cd-b8e8-d717-e133-2d383eb0d675",
+ "memoryhotaddenabled": false,
+ "guestfullname": "Red Hat Enterprise Linux 7 (64-bit)",
+ "changeversion": "2016-05-16T18:43:14.977925Z",
+ "uuid": "4235fc97-5ddb-7a17-193b-9a3ac97dc7b4",
+ "cpuhotremoveenabled": false,
+ "vpmcenabled": false,
+ "firmware": "bios",
+"""
+
+from __future__ import print_function
+
+import atexit
+import datetime
+import itertools
+import json
+import os
+import re
+import ssl
+import sys
+import uuid
+from time import time
+
+from jinja2 import Environment
+
+from ansible.module_utils.six import integer_types, PY3
+from ansible.module_utils.six.moves import configparser
+
+try:
+ import argparse
+except ImportError:
+    sys.exit('Error: This inventory script requires the "argparse" python module. Please install it or upgrade to python-2.7')
+
+try:
+ from pyVmomi import vim, vmodl
+ from pyVim.connect import SmartConnect, Disconnect
+except ImportError:
+    sys.exit("ERROR: This inventory script requires the 'pyVmomi' Python module, which could not be loaded")
+
+
+def regex_match(s, pattern):
+ '''Custom filter for regex matching'''
+ reg = re.compile(pattern)
+ if reg.match(s):
+ return True
+ else:
+ return False
+
+
+def select_chain_match(inlist, key, pattern):
+ '''Get a key from a list of dicts, squash values to a single list, then filter'''
+ outlist = [x[key] for x in inlist]
+ outlist = list(itertools.chain(*outlist))
+ outlist = [x for x in outlist if regex_match(x, pattern)]
+ return outlist
+
+
+class VMwareMissingHostException(Exception):
+ pass
+
+
+class VMWareInventory(object):
+ __name__ = 'VMWareInventory'
+
+ guest_props = False
+ instances = []
+ debug = False
+ load_dumpfile = None
+ write_dumpfile = None
+ maxlevel = 1
+ lowerkeys = True
+ config = None
+ cache_max_age = None
+ cache_path_cache = None
+ cache_path_index = None
+ cache_dir = None
+ server = None
+ port = None
+ username = None
+ password = None
+ validate_certs = True
+ host_filters = []
+ skip_keys = []
+ groupby_patterns = []
+ groupby_custom_field_excludes = []
+
+ safe_types = [bool, str, float, None] + list(integer_types)
+ iter_types = [dict, list]
+
+ bad_types = ['Array', 'disabledMethod', 'declaredAlarmState']
+
+ vimTableMaxDepth = {
+ "vim.HostSystem": 2,
+ "vim.VirtualMachine": 2,
+ }
+
+ custom_fields = {}
+
+ # use jinja environments to allow for custom filters
+ env = Environment()
+ env.filters['regex_match'] = regex_match
+ env.filters['select_chain_match'] = select_chain_match
+
+ # translation table for attributes to fetch for known vim types
+
+ vimTable = {
+ vim.Datastore: ['_moId', 'name'],
+ vim.ResourcePool: ['_moId', 'name'],
+ vim.HostSystem: ['_moId', 'name'],
+ }
+
+ @staticmethod
+ def _empty_inventory():
+ return {"_meta": {"hostvars": {}}}
+
+ def __init__(self, load=True):
+ self.inventory = VMWareInventory._empty_inventory()
+
+ if load:
+ # Read settings and parse CLI arguments
+ self.parse_cli_args()
+ self.read_settings()
+
+ # Check the cache
+ cache_valid = self.is_cache_valid()
+
+ # Handle Cache
+ if self.args.refresh_cache or not cache_valid:
+ self.do_api_calls_update_cache()
+ else:
+ self.debugl('loading inventory from cache')
+ self.inventory = self.get_inventory_from_cache()
+
+ def debugl(self, text):
+ if self.args.debug:
+ try:
+ text = str(text)
+ except UnicodeEncodeError:
+ text = text.encode('utf-8')
+ print('%s %s' % (datetime.datetime.now(), text))
+
+ def show(self):
+ # Data to print
+ self.debugl('dumping results')
+ data_to_print = None
+ if self.args.host:
+ data_to_print = self.get_host_info(self.args.host)
+ elif self.args.list:
+ # Display list of instances for inventory
+ data_to_print = self.inventory
+ return json.dumps(data_to_print, indent=2)
+
+ def is_cache_valid(self):
+ ''' Determines if the cache files have expired, or if it is still valid '''
+
+ valid = False
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + self.cache_max_age) > current_time:
+ valid = True
+
+ return valid
+
+ def do_api_calls_update_cache(self):
+ ''' Get instances and cache the data '''
+ self.inventory = self.instances_to_inventory(self.get_instances())
+ self.write_to_cache(self.inventory)
+
+ def write_to_cache(self, data):
+ ''' Dump inventory to json file '''
+ with open(self.cache_path_cache, 'w') as f:
+ f.write(json.dumps(data, indent=2))
+
+ def get_inventory_from_cache(self):
+ ''' Read in jsonified inventory '''
+
+ jdata = None
+ with open(self.cache_path_cache, 'r') as f:
+ jdata = f.read()
+ return json.loads(jdata)
+
+ def read_settings(self):
+ ''' Reads the settings from the vmware_inventory.ini file '''
+
+ scriptbasename = __file__
+ scriptbasename = os.path.basename(scriptbasename)
+ scriptbasename = scriptbasename.replace('.py', '')
+
+ defaults = {'vmware': {
+ 'server': '',
+ 'port': 443,
+ 'username': '',
+ 'password': '',
+ 'validate_certs': True,
+ 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename),
+ 'cache_name': 'ansible-vmware',
+ 'cache_path': '~/.ansible/tmp',
+ 'cache_max_age': 3600,
+ 'max_object_level': 1,
+ 'skip_keys': 'declaredalarmstate,'
+ 'disabledmethod,'
+ 'dynamicproperty,'
+ 'dynamictype,'
+ 'environmentbrowser,'
+ 'managedby,'
+ 'parent,'
+ 'childtype,'
+ 'resourceconfig',
+ 'alias_pattern': '{{ config.name + "_" + config.uuid }}',
+ 'host_pattern': '{{ guest.ipaddress }}',
+ 'host_filters': '{{ runtime.powerstate == "poweredOn" }}',
+ 'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
+ 'lower_var_keys': True,
+ 'custom_field_group_prefix': 'vmware_tag_',
+ 'groupby_custom_field_excludes': '',
+ 'groupby_custom_field': False}
+ }
+
+ if PY3:
+ config = configparser.ConfigParser()
+ else:
+ config = configparser.SafeConfigParser()
+
+ # where is the config?
+ vmware_ini_path = os.environ.get('VMWARE_INI_PATH', defaults['vmware']['ini_path'])
+ vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
+ config.read(vmware_ini_path)
+
+ if 'vmware' not in config.sections():
+ config.add_section('vmware')
+
+ # apply defaults
+ for k, v in defaults['vmware'].items():
+ if not config.has_option('vmware', k):
+ config.set('vmware', k, str(v))
+
+ # where is the cache?
+ self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
+ if self.cache_dir and not os.path.exists(self.cache_dir):
+ os.makedirs(self.cache_dir)
+
+ # set the cache filename and max age
+ cache_name = config.get('vmware', 'cache_name')
+ self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
+ self.debugl('cache path is %s' % self.cache_path_cache)
+ self.cache_max_age = int(config.getint('vmware', 'cache_max_age'))
+
+ # mark the connection info
+ self.server = os.environ.get('VMWARE_SERVER', config.get('vmware', 'server'))
+ self.debugl('server is %s' % self.server)
+ self.port = int(os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
+ self.username = os.environ.get('VMWARE_USERNAME', config.get('vmware', 'username'))
+ self.debugl('username is %s' % self.username)
+ self.password = os.environ.get('VMWARE_PASSWORD', config.get('vmware', 'password', raw=True))
+ self.validate_certs = os.environ.get('VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs'))
+ if self.validate_certs in ['no', 'false', 'False', False]:
+ self.validate_certs = False
+
+ self.debugl('cert validation is %s' % self.validate_certs)
+
+ # behavior control
+ self.maxlevel = int(config.get('vmware', 'max_object_level'))
+ self.debugl('max object level is %s' % self.maxlevel)
+ self.lowerkeys = config.get('vmware', 'lower_var_keys')
+ if type(self.lowerkeys) != bool:
+ if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
+ self.lowerkeys = True
+ else:
+ self.lowerkeys = False
+ self.debugl('lower keys is %s' % self.lowerkeys)
+ self.skip_keys = list(config.get('vmware', 'skip_keys').split(','))
+ self.debugl('skip keys is %s' % self.skip_keys)
+ temp_host_filters = list(config.get('vmware', 'host_filters').split('}},'))
+ for host_filter in temp_host_filters:
+ host_filter = host_filter.rstrip()
+ if host_filter != "":
+ if not host_filter.endswith("}}"):
+ host_filter += "}}"
+ self.host_filters.append(host_filter)
+ self.debugl('host filters are %s' % self.host_filters)
+
+ temp_groupby_patterns = list(config.get('vmware', 'groupby_patterns').split('}},'))
+ for groupby_pattern in temp_groupby_patterns:
+ groupby_pattern = groupby_pattern.rstrip()
+ if groupby_pattern != "":
+ if not groupby_pattern.endswith("}}"):
+ groupby_pattern += "}}"
+ self.groupby_patterns.append(groupby_pattern)
+ self.debugl('groupby patterns are %s' % self.groupby_patterns)
+ temp_groupby_custom_field_excludes = config.get('vmware', 'groupby_custom_field_excludes')
+ self.groupby_custom_field_excludes = [x.strip('"') for x in [y.strip("'") for y in temp_groupby_custom_field_excludes.split(",")]]
+ self.debugl('groupby exclude strings are %s' % self.groupby_custom_field_excludes)
+
+ # Special feature to disable the brute force serialization of the
+ # virtual machine objects. The key name for these properties does not
+ # matter because the values are just items for a larger list.
+ if config.has_section('properties'):
+ self.guest_props = []
+ for prop in config.items('properties'):
+ self.guest_props.append(prop[1])
+
+ # save the config
+ self.config = config
+
+ def parse_cli_args(self):
+ ''' Command line argument processing '''
+
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on PyVmomi')
+ parser.add_argument('--debug', action='store_true', default=False,
+ help='show debug info')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List instances (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all the variables about a specific instance')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests to VSphere (default: False - use cache files)')
+ parser.add_argument('--max-instances', default=None, type=int,
+ help='maximum number of instances to retrieve')
+ self.args = parser.parse_args()
+
+ def get_instances(self):
+ ''' Get a list of vm instances with pyvmomi '''
+ kwargs = {'host': self.server,
+ 'user': self.username,
+ 'pwd': self.password,
+ 'port': int(self.port)}
+
+ if self.validate_certs and hasattr(ssl, 'SSLContext'):
+ context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ context.verify_mode = ssl.CERT_REQUIRED
+ context.check_hostname = True
+ kwargs['sslContext'] = context
+ elif self.validate_certs and not hasattr(ssl, 'SSLContext'):
+ sys.exit('pyVim does not support changing verification mode with python < 2.7.9. Either update '
+ 'python or use validate_certs=false.')
+ elif not self.validate_certs and hasattr(ssl, 'SSLContext'):
+ context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ context.verify_mode = ssl.CERT_NONE
+ context.check_hostname = False
+ kwargs['sslContext'] = context
+ elif not self.validate_certs and not hasattr(ssl, 'SSLContext'):
+            # Python < 2.7.9 or RHEL/CentOS < 7.4
+ pass
+
+ return self._get_instances(kwargs)
+
+ def _get_instances(self, inkwargs):
+ ''' Make API calls '''
+ instances = []
+ si = None
+ try:
+ si = SmartConnect(**inkwargs)
+ except ssl.SSLError as connection_error:
+ if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and self.validate_certs:
+ sys.exit("Unable to connect to ESXi server due to %s, "
+ "please specify validate_certs=False and try again" % connection_error)
+
+ except Exception as exc:
+ self.debugl("Unable to connect to ESXi server due to %s" % exc)
+ sys.exit("Unable to connect to ESXi server due to %s" % exc)
+
+ self.debugl('retrieving all instances')
+ if not si:
+ sys.exit("Could not connect to the specified host using specified "
+ "username and password")
+ atexit.register(Disconnect, si)
+ content = si.RetrieveContent()
+
+ # Create a search container for virtualmachines
+ self.debugl('creating containerview for virtualmachines')
+ container = content.rootFolder
+ viewType = [vim.VirtualMachine]
+ recursive = True
+ containerView = content.viewManager.CreateContainerView(container, viewType, recursive)
+ children = containerView.view
+ for child in children:
+ # If requested, limit the total number of instances
+ if self.args.max_instances:
+ if len(instances) >= self.args.max_instances:
+ break
+ instances.append(child)
+ self.debugl("%s total instances in container view" % len(instances))
+
+ if self.args.host:
+ instances = [x for x in instances if x.name == self.args.host]
+
+ instance_tuples = []
+ for instance in instances:
+ if self.guest_props:
+ ifacts = self.facts_from_proplist(instance)
+ else:
+ ifacts = self.facts_from_vobj(instance)
+ instance_tuples.append((instance, ifacts))
+ self.debugl('facts collected for all instances')
+
+ try:
+ cfm = content.customFieldsManager
+ if cfm is not None and cfm.field:
+ for f in cfm.field:
+ if not f.managedObjectType or f.managedObjectType == vim.VirtualMachine:
+ self.custom_fields[f.key] = f.name
+ self.debugl('%d custom fields collected' % len(self.custom_fields))
+ except vmodl.RuntimeFault as exc:
+ self.debugl("Unable to gather custom fields due to %s" % exc.msg)
+ except IndexError as exc:
+ self.debugl("Unable to gather custom fields due to %s" % exc)
+
+ return instance_tuples
+
+ def instances_to_inventory(self, instances):
+ ''' Convert a list of vm objects into a json compliant inventory '''
+ self.debugl('re-indexing instances based on ini settings')
+ inventory = VMWareInventory._empty_inventory()
+ inventory['all'] = {}
+ inventory['all']['hosts'] = []
+ for idx, instance in enumerate(instances):
+ # make a unique id for this object to avoid vmware's
+ # numerous uuid's which aren't all unique.
+ thisid = str(uuid.uuid4())
+ idata = instance[1]
+
+ # Put it in the inventory
+ inventory['all']['hosts'].append(thisid)
+ inventory['_meta']['hostvars'][thisid] = idata.copy()
+ inventory['_meta']['hostvars'][thisid]['ansible_uuid'] = thisid
+
+ # Make a map of the uuid to the alias the user wants
+ name_mapping = self.create_template_mapping(
+ inventory,
+ self.config.get('vmware', 'alias_pattern')
+ )
+
+ # Make a map of the uuid to the ssh hostname the user wants
+ host_mapping = self.create_template_mapping(
+ inventory,
+ self.config.get('vmware', 'host_pattern')
+ )
+
+ # Reset the inventory keys
+ for k, v in name_mapping.items():
+
+ if not host_mapping or k not in host_mapping:
+ continue
+
+ # set ansible_host (2.x)
+ try:
+ inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k]
+ # 1.9.x backwards compliance
+ inventory['_meta']['hostvars'][k]['ansible_ssh_host'] = host_mapping[k]
+ except Exception:
+ continue
+
+ if k == v:
+ continue
+
+ # add new key
+ inventory['all']['hosts'].append(v)
+ inventory['_meta']['hostvars'][v] = inventory['_meta']['hostvars'][k]
+
+ # cleanup old key
+ inventory['all']['hosts'].remove(k)
+ inventory['_meta']['hostvars'].pop(k, None)
+
+ self.debugl('pre-filtered hosts:')
+ for i in inventory['all']['hosts']:
+ self.debugl(' * %s' % i)
+ # Apply host filters
+ for hf in self.host_filters:
+ if not hf:
+ continue
+ self.debugl('filter: %s' % hf)
+ filter_map = self.create_template_mapping(inventory, hf, dtype='boolean')
+ for k, v in filter_map.items():
+ if not v:
+ # delete this host
+ inventory['all']['hosts'].remove(k)
+ inventory['_meta']['hostvars'].pop(k, None)
+
+ self.debugl('post-filter hosts:')
+ for i in inventory['all']['hosts']:
+ self.debugl(' * %s' % i)
+
+ # Create groups
+ for gbp in self.groupby_patterns:
+ groupby_map = self.create_template_mapping(inventory, gbp)
+ for k, v in groupby_map.items():
+ if v not in inventory:
+ inventory[v] = {}
+ inventory[v]['hosts'] = []
+ if k not in inventory[v]['hosts']:
+ inventory[v]['hosts'].append(k)
+
+ if self.config.get('vmware', 'groupby_custom_field'):
+ for k, v in inventory['_meta']['hostvars'].items():
+ if 'customvalue' in v:
+ for tv in v['customvalue']:
+ newkey = None
+ field_name = self.custom_fields[tv['key']] if tv['key'] in self.custom_fields else tv['key']
+ if field_name in self.groupby_custom_field_excludes:
+ continue
+ values = []
+ keylist = map(lambda x: x.strip(), tv['value'].split(','))
+ for kl in keylist:
+ try:
+ newkey = "%s%s_%s" % (self.config.get('vmware', 'custom_field_group_prefix'), str(field_name), kl)
+ newkey = newkey.strip()
+ except Exception as e:
+ self.debugl(e)
+ values.append(newkey)
+ for tag in values:
+ if not tag:
+ continue
+ if tag not in inventory:
+ inventory[tag] = {}
+ inventory[tag]['hosts'] = []
+ if k not in inventory[tag]['hosts']:
+ inventory[tag]['hosts'].append(k)
+
+ return inventory
+
+ def create_template_mapping(self, inventory, pattern, dtype='string'):
+ ''' Return a hash of uuid to templated string from pattern '''
+ mapping = {}
+ for k, v in inventory['_meta']['hostvars'].items():
+ t = self.env.from_string(pattern)
+ newkey = None
+ try:
+ newkey = t.render(v)
+ newkey = newkey.strip()
+ except Exception as e:
+ self.debugl(e)
+ if not newkey:
+ continue
+ elif dtype == 'integer':
+ newkey = int(newkey)
+ elif dtype == 'boolean':
+ if newkey.lower() == 'false':
+ newkey = False
+ elif newkey.lower() == 'true':
+ newkey = True
+ elif dtype == 'string':
+ pass
+ mapping[k] = newkey
+ return mapping
+
+ def facts_from_proplist(self, vm):
+ '''Get specific properties instead of serializing everything'''
+
+ rdata = {}
+ for prop in self.guest_props:
+ self.debugl('getting %s property for %s' % (prop, vm.name))
+ key = prop
+ if self.lowerkeys:
+ key = key.lower()
+
+ if '.' not in prop:
+ # props without periods are direct attributes of the parent
+ vm_property = getattr(vm, prop)
+ if isinstance(vm_property, vim.CustomFieldsManager.Value.Array):
+ temp_vm_property = []
+ for vm_prop in vm_property:
+ temp_vm_property.append({'key': vm_prop.key,
+ 'value': vm_prop.value})
+ rdata[key] = temp_vm_property
+ else:
+ rdata[key] = vm_property
+ else:
+ # props with periods are subkeys of parent attributes
+ parts = prop.split('.')
+ total = len(parts) - 1
+
+ # pointer to the current object
+ val = None
+ # pointer to the current result key
+ lastref = rdata
+
+ for idx, x in enumerate(parts):
+
+ if isinstance(val, dict):
+ if x in val:
+ val = val.get(x)
+ elif x.lower() in val:
+ val = val.get(x.lower())
+ else:
+ # if the val wasn't set yet, get it from the parent
+ if not val:
+ try:
+ val = getattr(vm, x)
+ except AttributeError as e:
+ self.debugl(e)
+ else:
+ # in a subkey, get the subprop from the previous attrib
+ try:
+ val = getattr(val, x)
+ except AttributeError as e:
+ self.debugl(e)
+
+ # make sure it serializes
+ val = self._process_object_types(val)
+
+ # lowercase keys if requested
+ if self.lowerkeys:
+ x = x.lower()
+
+ # change the pointer or set the final value
+ if idx != total:
+ if x not in lastref:
+ lastref[x] = {}
+ lastref = lastref[x]
+ else:
+ lastref[x] = val
+ if self.args.debug:
+ self.debugl("For %s" % vm.name)
+ for key in list(rdata.keys()):
+ if isinstance(rdata[key], dict):
+ for ikey in list(rdata[key].keys()):
+ self.debugl("Property '%s.%s' has value '%s'" % (key, ikey, rdata[key][ikey]))
+ else:
+ self.debugl("Property '%s' has value '%s'" % (key, rdata[key]))
+ return rdata
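+
+ # NOTE (illustrative, not part of the original script): a guest_props entry
+ # such as "config.name" is split on '.', walked attribute by attribute from
+ # the VM object, and stored nested as rdata['config']['name'], while a plain
+ # entry such as "name" is stored flat as rdata['name']. The property names
+ # used here are hypothetical examples.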
+
+ def facts_from_vobj(self, vobj, level=0):
+ ''' Traverse a VM object and return a json compliant data structure '''
+
+ # pyvmomi objects are not yet serializable, but may be one day ...
+ # https://github.com/vmware/pyvmomi/issues/21
+
+ # WARNING:
+ # Accessing an object attribute will trigger a SOAP call to the remote.
+ # Increasing the attributes collected or the depth of recursion greatly
+ # increases runtime duration and potentially memory+network utilization.
+
+ if level == 0:
+ try:
+ self.debugl("get facts for %s" % vobj.name)
+ except Exception as e:
+ self.debugl(e)
+
+ rdata = {}
+
+ methods = dir(vobj)
+ methods = [str(x) for x in methods if not x.startswith('_')]
+ methods = [x for x in methods if x not in self.bad_types]
+ methods = [x for x in methods if not x.lower() in self.skip_keys]
+ methods = sorted(methods)
+
+ for method in methods:
+ # Attempt to get the method, skip on fail
+ try:
+ methodToCall = getattr(vobj, method)
+ except Exception as e:
+ continue
+
+ # Skip callable methods
+ if callable(methodToCall):
+ continue
+
+ if self.lowerkeys:
+ method = method.lower()
+
+ rdata[method] = self._process_object_types(
+ methodToCall,
+ thisvm=vobj,
+ inkey=method,
+ )
+
+ return rdata
+
+ def _process_object_types(self, vobj, thisvm=None, inkey='', level=0):
+ ''' Serialize an object '''
+ rdata = {}
+
+ if type(vobj).__name__ in self.vimTableMaxDepth and level >= self.vimTableMaxDepth[type(vobj).__name__]:
+ return rdata
+
+ if vobj is None:
+ rdata = None
+ elif type(vobj) in self.vimTable:
+ rdata = {}
+ for key in self.vimTable[type(vobj)]:
+ try:
+ rdata[key] = getattr(vobj, key)
+ except Exception as e:
+ self.debugl(e)
+
+ elif issubclass(type(vobj), str) or isinstance(vobj, str):
+ if vobj.isalnum():
+ rdata = vobj
+ else:
+ rdata = vobj.encode('utf-8').decode('utf-8')
+ elif issubclass(type(vobj), bool) or isinstance(vobj, bool):
+ rdata = vobj
+ elif issubclass(type(vobj), integer_types) or isinstance(vobj, integer_types):
+ rdata = vobj
+ elif issubclass(type(vobj), float) or isinstance(vobj, float):
+ rdata = vobj
+ elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple):
+ rdata = []
+ try:
+ vobj = sorted(vobj)
+ except Exception:
+ pass
+
+ for idv, vii in enumerate(vobj):
+ if level + 1 <= self.maxlevel:
+ vid = self._process_object_types(
+ vii,
+ thisvm=thisvm,
+ inkey=inkey + '[' + str(idv) + ']',
+ level=(level + 1)
+ )
+
+ if vid:
+ rdata.append(vid)
+
+ elif issubclass(type(vobj), dict):
+ pass
+
+ elif issubclass(type(vobj), object):
+ methods = dir(vobj)
+ methods = [str(x) for x in methods if not x.startswith('_')]
+ methods = [x for x in methods if x not in self.bad_types]
+ methods = [x for x in methods if not inkey + '.' + x.lower() in self.skip_keys]
+ methods = sorted(methods)
+
+ for method in methods:
+ # Attempt to get the method, skip on fail
+ try:
+ methodToCall = getattr(vobj, method)
+ except Exception as e:
+ continue
+
+ if callable(methodToCall):
+ continue
+
+ if self.lowerkeys:
+ method = method.lower()
+ if level + 1 <= self.maxlevel:
+ try:
+ rdata[method] = self._process_object_types(
+ methodToCall,
+ thisvm=thisvm,
+ inkey=inkey + '.' + method,
+ level=(level + 1)
+ )
+ except vim.fault.NoPermission:
+ self.debugl("Skipping method %s (NoPermission)" % method)
+ else:
+ pass
+
+ return rdata
+
+ def get_host_info(self, host):
+ ''' Return hostvars for a single host '''
+
+ if host in self.inventory['_meta']['hostvars']:
+ return self.inventory['_meta']['hostvars'][host]
+ elif self.args.host and self.inventory['_meta']['hostvars']:
+ match = None
+ for k, v in self.inventory['_meta']['hostvars'].items():
+ if self.inventory['_meta']['hostvars'][k]['name'] == self.args.host:
+ match = k
+ break
+ if match:
+ return self.inventory['_meta']['hostvars'][match]
+ else:
+ raise VMwareMissingHostException('%s not found' % host)
+ else:
+ raise VMwareMissingHostException('%s not found' % host)
+
+
+if __name__ == "__main__":
+ # Run the script
+ print(VMWareInventory().show())
diff --git a/test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.sh b/test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.sh
new file mode 100755
index 0000000000..b6399f14fb
--- /dev/null
+++ b/test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+python.py "./vmware_inventory.py" "$@"
diff --git a/test/integration/targets/incidental_setup_ec2/aliases b/test/integration/targets/incidental_setup_ec2/aliases
new file mode 100644
index 0000000000..136c05e0d0
--- /dev/null
+++ b/test/integration/targets/incidental_setup_ec2/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_setup_ec2/defaults/main.yml b/test/integration/targets/incidental_setup_ec2/defaults/main.yml
new file mode 100644
index 0000000000..fb1f88b1ec
--- /dev/null
+++ b/test/integration/targets/incidental_setup_ec2/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+resource_prefix: 'ansible-testing-'
diff --git a/test/integration/targets/incidental_setup_ec2/tasks/common.yml b/test/integration/targets/incidental_setup_ec2/tasks/common.yml
new file mode 100644
index 0000000000..bf23f539a9
--- /dev/null
+++ b/test/integration/targets/incidental_setup_ec2/tasks/common.yml
@@ -0,0 +1,119 @@
+---
+
+# ============================================================
+- name: test with no parameters
+ action: "{{module_name}}"
+ register: result
+ ignore_errors: true
+
+- name: assert failure when called with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "missing required arguments: name"'
+
+# ============================================================
+- name: test with only name
+ action: "{{module_name}} name={{ec2_key_name}}"
+ register: result
+ ignore_errors: true
+
+- name: assert failure when called with only 'name'
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg == "Either region or ec2_url must be specified"'
+
+# ============================================================
+- name: test invalid region parameter
+ action: "{{module_name}} name='{{ec2_key_name}}' region='asdf querty 1234'"
+ register: result
+ ignore_errors: true
+
+- name: assert invalid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("value of region must be one of:")'
+
+# ============================================================
+- name: test valid region parameter
+ action: "{{module_name}} name='{{ec2_key_name}}' region='{{ec2_region}}'"
+ register: result
+ ignore_errors: true
+
+- name: assert valid region parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test environment variable EC2_REGION
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_REGION: '{{ec2_region}}'
+ register: result
+ ignore_errors: true
+
+- name: assert environment variable EC2_REGION
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test invalid ec2_url parameter
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_URL: bogus.example.com
+ register: result
+ ignore_errors: true
+
+- name: assert invalid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test valid ec2_url parameter
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_URL: '{{ec2_url}}'
+ register: result
+ ignore_errors: true
+
+- name: assert valid ec2_url parameter
+ assert:
+ that:
+ - 'result.failed'
+ - 'result.msg.startswith("No handler was ready to authenticate.")'
+
+# ============================================================
+- name: test credentials from environment
+ action: "{{module_name}} name='{{ec2_key_name}}'"
+ environment:
+ EC2_REGION: '{{ec2_region}}'
+ EC2_ACCESS_KEY: bogus_access_key
+ EC2_SECRET_KEY: bogus_secret_key
+ register: result
+ ignore_errors: true
+
+- name: assert credentials from environment
+ assert:
+ that:
+ - 'result.failed'
+ - '"EC2ResponseError: 401 Unauthorized" in result.msg'
+
+# ============================================================
+- name: test credential parameters
+ action: "{{module_name}} name='{{ec2_key_name}}' ec2_region='{{ec2_region}}' ec2_access_key=bogus_access_key ec2_secret_key=bogus_secret_key"
+ register: result
+ ignore_errors: true
+
+- name: assert credential parameters
+ assert:
+ that:
+ - 'result.failed'
+ - '"EC2ResponseError: 401 Unauthorized" in result.msg'
diff --git a/test/integration/targets/incidental_setup_ec2/vars/main.yml b/test/integration/targets/incidental_setup_ec2/vars/main.yml
new file mode 100644
index 0000000000..3d7209ef1b
--- /dev/null
+++ b/test/integration/targets/incidental_setup_ec2/vars/main.yml
@@ -0,0 +1,3 @@
+---
+ec2_url: ec2.amazonaws.com
+ec2_region: us-east-1
diff --git a/test/integration/targets/incidental_sts_assume_role/aliases b/test/integration/targets/incidental_sts_assume_role/aliases
new file mode 100644
index 0000000000..29f60feb44
--- /dev/null
+++ b/test/integration/targets/incidental_sts_assume_role/aliases
@@ -0,0 +1,2 @@
+cloud/aws
+shippable/aws/incidental
diff --git a/test/integration/targets/incidental_sts_assume_role/meta/main.yml b/test/integration/targets/incidental_sts_assume_role/meta/main.yml
new file mode 100644
index 0000000000..aa8ab19226
--- /dev/null
+++ b/test/integration/targets/incidental_sts_assume_role/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - incidental_setup_ec2
diff --git a/test/integration/targets/incidental_sts_assume_role/tasks/main.yml b/test/integration/targets/incidental_sts_assume_role/tasks/main.yml
new file mode 100644
index 0000000000..345454932f
--- /dev/null
+++ b/test/integration/targets/incidental_sts_assume_role/tasks/main.yml
@@ -0,0 +1,384 @@
+---
+# tasks file for sts_assume_role
+
+- block:
+
+ # ============================================================
+ # TODO create simple ansible sts_get_caller_identity module
+ - blockinfile:
+ path: "{{ output_dir }}/sts.py"
+ create: yes
+ block: |
+ #!/usr/bin/env python
+ import boto3
+ sts = boto3.client('sts')
+ response = sts.get_caller_identity()
+ print(response['Account'])
+
+ - name: get the aws account id
+ command: "{{ ansible_python.executable }} '{{ output_dir }}/sts.py'"
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
+ AWS_SESSION_TOKEN: "{{ security_token }}"
+ register: result
+
+ - name: register account id
+ set_fact:
+ aws_account: "{{ result.stdout | replace('\n', '') }}"
+
+ # ============================================================
+ - name: create test iam role
+ iam_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ name: "ansible-test-sts-{{ resource_prefix }}"
+ assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
+ create_instance_profile: False
+ managed_policy:
+ - arn:aws:iam::aws:policy/IAMReadOnlyAccess
+ state: present
+ register: test_role
+
+ # ============================================================
+ - name: pause to ensure role exists before using
+ pause:
+ seconds: 30
+
+ # ============================================================
+ - name: test with no parameters
+ sts_assume_role:
+ register: result
+ ignore_errors: true
+
+ - name: assert with no parameters
+ assert:
+ that:
+ - 'result.failed'
+ - "'missing required arguments:' in result.msg"
+
+ # ============================================================
+ - name: test with empty parameters
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region}}"
+ role_arn:
+ role_session_name:
+ policy:
+ duration_seconds:
+ external_id:
+ mfa_token:
+ mfa_serial_number:
+ register: result
+ ignore_errors: true
+
+ - name: assert with empty parameters
+ assert:
+ that:
+ - 'result.failed'
+ - "'Missing required parameter in input:' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert with empty parameters
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must have length greater than or equal to 20' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test with only 'role_arn' parameter
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ register: result
+ ignore_errors: true
+
+ - name: assert with only 'role_arn' parameter
+ assert:
+ that:
+ - 'result.failed'
+ - "'missing required arguments: role_session_name' in result.msg"
+
+ # ============================================================
+ - name: test with only 'role_session_name' parameter
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ role_session_name: "AnsibleTest"
+ register: result
+ ignore_errors: true
+
+ - name: assert with only 'role_session_name' parameter
+ assert:
+ that:
+ - 'result.failed'
+ - "'missing required arguments: role_arn' in result.msg"
+
+ # ============================================================
+ - name: test assume role with invalid policy
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: "AnsibleTest"
+ policy: "invalid policy"
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid policy
+ assert:
+ that:
+ - 'result.failed'
+ - "'The policy is not in the valid JSON format.' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assume role with invalid policy
+ assert:
+ that:
+ - 'result.failed'
+ - "'The policy is not in the valid JSON format.' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assume role with invalid duration seconds
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region}}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: AnsibleTest
+ duration_seconds: invalid duration
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid duration seconds
+ assert:
+ that:
+ - result is failed
+ - 'result.msg is search("argument \w+ is of type <.*> and we were unable to convert to int: <.*> cannot be converted to an int")'
+
+ # ============================================================
+ - name: test assume role with invalid external id
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region}}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: AnsibleTest
+ external_id: invalid external id
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid external id
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assume role with invalid external id
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assume role with invalid mfa serial number
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region}}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: AnsibleTest
+ mfa_serial_number: invalid serial number
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid mfa serial number
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assume role with invalid mfa serial number
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assume role with invalid mfa token code
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region}}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: AnsibleTest
+ mfa_token: invalid token code
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid mfa token code
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assume role with invalid mfa token code
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must satisfy regular expression pattern:' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assume role with invalid role_arn
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region}}"
+ role_arn: invalid role arn
+ role_session_name: AnsibleTest
+ register: result
+ ignore_errors: true
+
+ - name: assert assume role with invalid role_arn
+ assert:
+ that:
+ - result.failed
+ - "'Invalid length for parameter RoleArn' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assume role with invalid role_arn
+ assert:
+ that:
+ - 'result.failed'
+ - "'Member must have length greater than or equal to 20' in result.module_stderr"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assuming a non-existent sts role
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region}}"
+ role_arn: "arn:aws:iam::123456789:role/non-existing-role"
+ role_session_name: "AnsibleTest"
+ register: result
+ ignore_errors: true
+
+ - name: assert assuming a non-existent sts role fails
+ assert:
+ that:
+ - 'result.failed'
+ - "'is not authorized to perform: sts:AssumeRole' in result.msg"
+ when: result.module_stderr is not defined
+
+ - name: assert assuming a non-existent sts role fails
+ assert:
+ that:
+ - 'result.failed'
+ - "'is not authorized to perform: sts:AssumeRole' in result.msg"
+ when: result.module_stderr is defined
+
+ # ============================================================
+ - name: test assume role
+ sts_assume_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ region: "{{ aws_region }}"
+ role_arn: "{{ test_role.iam_role.arn }}"
+ role_session_name: AnsibleTest
+ register: assumed_role
+
+ - name: assert assume role
+ assert:
+ that:
+ - 'not assumed_role.failed'
+ - "'sts_creds' in assumed_role"
+ - "'access_key' in assumed_role.sts_creds"
+ - "'secret_key' in assumed_role.sts_creds"
+ - "'session_token' in assumed_role.sts_creds"
+
+ # ============================================================
+ - name: test that assumed credentials have IAM read-only access
+ iam_role:
+ aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ assumed_role.sts_creds.session_token }}"
+ region: "{{ aws_region}}"
+ name: "ansible-test-sts-{{ resource_prefix }}"
+ assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
+ create_instance_profile: False
+ state: present
+ register: result
+
+ - name: assert assumed role with privileged action (expect changed=false)
+ assert:
+ that:
+ - 'not result.failed'
+ - 'not result.changed'
+ - "'iam_role' in result"
+
+ # ============================================================
+ - name: test assumed role with unprivileged action
+ iam_role:
+ aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ assumed_role.sts_creds.session_token }}"
+ region: "{{ aws_region}}"
+ name: "ansible-test-sts-{{ resource_prefix }}-new"
+ assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
+ state: present
+ register: result
+ ignore_errors: true
+
+ - name: assert assumed role with unprivileged action (expect failure)
+ assert:
+ that:
+ - 'result.failed'
+ - "'is not authorized to perform: iam:CreateRole' in result.msg"
+ # runs on Python2
+ when: result.module_stderr is not defined
+
+ - name: assert assumed role with unprivileged action (expect failure)
+ assert:
+ that:
+ - 'result.failed'
+ - "'is not authorized to perform: iam:CreateRole' in result.module_stderr"
+ # runs on Python3
+ when: result.module_stderr is defined
+
+ # ============================================================
+ always:
+
+ - name: delete test iam role
+ iam_role:
+ aws_access_key: "{{ aws_access_key }}"
+ aws_secret_key: "{{ aws_secret_key }}"
+ security_token: "{{ security_token }}"
+ name: "ansible-test-sts-{{ resource_prefix }}"
+ assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
+ managed_policy:
+ - arn:aws:iam::aws:policy/IAMReadOnlyAccess
+ state: absent
diff --git a/test/integration/targets/incidental_sts_assume_role/templates/policy.json.j2 b/test/integration/targets/incidental_sts_assume_role/templates/policy.json.j2
new file mode 100644
index 0000000000..559562fd91
--- /dev/null
+++ b/test/integration/targets/incidental_sts_assume_role/templates/policy.json.j2
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::{{ aws_account }}:root"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+} \ No newline at end of file
diff --git a/test/integration/targets/incidental_tower_credential_type/aliases b/test/integration/targets/incidental_tower_credential_type/aliases
new file mode 100644
index 0000000000..7e198b4a51
--- /dev/null
+++ b/test/integration/targets/incidental_tower_credential_type/aliases
@@ -0,0 +1,2 @@
+cloud/tower
+shippable/tower/incidental
diff --git a/test/integration/targets/incidental_tower_credential_type/tasks/main.yml b/test/integration/targets/incidental_tower_credential_type/tasks/main.yml
new file mode 100644
index 0000000000..9d7cc74e73
--- /dev/null
+++ b/test/integration/targets/incidental_tower_credential_type/tasks/main.yml
@@ -0,0 +1,23 @@
+---
+- name: Add Tower credential type
+ tower_credential_type:
+ description: Credential type for Test
+ name: test-credential-type
+ kind: cloud
+ inputs: '{"fields": [{"type": "string", "id": "username", "label": "Username"}, {"secret": True, "type": "string", "id": "password", "label": "Password"}], "required": ["username", "password"]}'
+ injectors: '{"extra_vars": {"test": "foo"}}'
+ register: result
+
+- assert:
+ that:
+ - "result is changed"
+
+- name: Remove a Tower credential type
+ tower_credential_type:
+ name: test-credential-type
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - "result is changed"
diff --git a/test/integration/targets/incidental_tower_receive/aliases b/test/integration/targets/incidental_tower_receive/aliases
new file mode 100644
index 0000000000..7e198b4a51
--- /dev/null
+++ b/test/integration/targets/incidental_tower_receive/aliases
@@ -0,0 +1,2 @@
+cloud/tower
+shippable/tower/incidental
diff --git a/test/integration/targets/incidental_tower_receive/tasks/main.yml b/test/integration/targets/incidental_tower_receive/tasks/main.yml
new file mode 100644
index 0000000000..9c22e6f7a7
--- /dev/null
+++ b/test/integration/targets/incidental_tower_receive/tasks/main.yml
@@ -0,0 +1,17 @@
+- name: Export all Tower assets
+ tower_receive:
+ all: True
+ register: result
+
+- assert:
+ that:
+ - "result is successful"
+
+- name: Extract names from output
+ set_fact:
+ object_names: "{{ result.assets | map(attribute='name') | list }}"
+
+- assert:
+ that:
+ - "result is successful"
+ - "'Default' in object_names"
diff --git a/test/integration/targets/incidental_vmware_guest/aliases b/test/integration/targets/incidental_vmware_guest/aliases
new file mode 100644
index 0000000000..d9ee32bcc6
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/aliases
@@ -0,0 +1,3 @@
+shippable/vcenter/incidental
+cloud/vcenter
+needs/target/incidental_vmware_prepare_tests
diff --git a/test/integration/targets/incidental_vmware_guest/defaults/main.yml b/test/integration/targets/incidental_vmware_guest/defaults/main.yml
new file mode 100644
index 0000000000..dfb0fd65dc
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/defaults/main.yml
@@ -0,0 +1,33 @@
+vmware_guest_test_playbooks:
+ - boot_firmware_d1_c1_f0.yml
+ - cdrom_d1_c1_f0.yml
+ - check_mode.yml
+ - clone_customize_guest_test.yml
+ - clone_d1_c1_f0.yml
+ - clone_resize_disks.yml
+ - clone_with_convert.yml
+ - create_d1_c1_f0.yml
+ - create_guest_invalid_d1_c1_f0.yml
+ - create_nw_d1_c1_f0.yml
+ - create_rp_d1_c1_f0.yml
+ - delete_vm.yml
+ - disk_mode_d1_c1_f0.yml
+ - disk_size_d1_c1_f0.yml
+ - disk_type_d1_c1_f0.yml
+ - linked_clone_d1_c1_f0.yml
+ - mac_address_d1_c1_f0.yml
+ - max_connections.yml
+ - mem_reservation.yml
+ - network_negative_test.yml
+ - network_with_device.yml
+# Currently, VCSIM doesn't support DVPG (as portkeys are not available) so commenting this test
+# - network_with_dvpg.yml
+# - network_with_portgroup.yml
+ - non_existent_vm_ops.yml
+ - poweroff_d1_c1_f0.yml
+ - poweroff_d1_c1_f1.yml
+# - template_d1_c1_f0.yml
+ - vapp_d1_c1_f0.yml
+ - reconfig_vm_to_latest_version.yml
+ - remove_vm_from_inventory.yml
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/boot_firmware_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/boot_firmware_d1_c1_f0.yml
new file mode 100644
index 0000000000..aade1494e3
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/boot_firmware_d1_c1_f0.yml
@@ -0,0 +1,117 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: create new VMs with boot_firmware as 'bios'
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ boot_firmware: "bios"
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: clone_d1_c1_f0
+
+- debug: var=clone_d1_c1_f0
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_d1_c1_f0 is changed
+
+# VCSIM does not recognize an existing VM's boot firmware
+- when: vcsim is not defined
+ block:
+ - name: create new VMs again with boot_firmware as 'bios'
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm2
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ boot_firmware: "bios"
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: clone_d1_c1_f0
+ - debug: var=clone_d1_c1_f0
+ - name: assert that changes were made
+ assert:
+ that:
+ - clone_d1_c1_f0 is changed
+
+- name: create new VMs with boot_firmware as 'efi'
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm3
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ boot_firmware: "efi"
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: clone_d1_c1_f0
+
+- debug: var=clone_d1_c1_f0
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_d1_c1_f0 is changed
+
+# VCSIM does not recognize an existing VM's boot firmware
+- when: vcsim is not defined
+ block:
+ - name: create new VMs again with boot_firmware as 'efi'
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm3
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ boot_firmware: "efi"
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: clone_d1_c1_f0
+ - debug: var=clone_d1_c1_f0
+ - name: assert that changes were not made
+ assert:
+ that:
+ - not (clone_d1_c1_f0 is changed)
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/cdrom_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/cdrom_d1_c1_f0.yml
new file mode 100644
index 0000000000..467cf25d1a
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/cdrom_d1_c1_f0.yml
@@ -0,0 +1,269 @@
+- name: Create VM with CDROM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: vm
+ name: test_vm1
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ resource_pool: Resources
+ guest_id: centos64Guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ scsi: paravirtual
+ disk:
+ - size_mb: 128
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ cdrom:
+ type: iso
+ iso_path: "[{{ ro_datastore }}] centos.iso"
+ register: cdrom_vm
+
+- debug: var=cdrom_vm
+
+- name: assert the VM was created
+ assert:
+ that:
+ - "cdrom_vm.changed == true"
+
+- name: Update CDROM to iso for the new VM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "vm"
+ name: test_vm1
+ datastore: "{{ rw_datastore }}"
+ datacenter: "{{ dc1 }}"
+ cdrom:
+ type: iso
+ iso_path: "[{{ ro_datastore }}] fedora.iso"
+ state: present
+ register: cdrom_vm
+
+- debug: var=cdrom_vm
+
+- name: assert the VM was changed
+ assert:
+ that:
+ - "cdrom_vm.changed == true"
+
+- name: Update CDROM to client for the new VM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: vm
+ name: test_vm1
+ datacenter: "{{ dc1 }}"
+ cdrom:
+ type: client
+ state: present
+ register: cdrom_vm
+
+- debug: var=cdrom_vm
+
+- name: assert the VM was changed
+ assert:
+ that:
+ - "cdrom_vm.changed == true"
+
+- name: clone vm
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm2
+ template: test_vm1
+ datacenter: "{{ dc1 }}"
+ state: poweredoff
+ folder: vm
+ convert: thin
+
+- name: Update CDROM to none for the new VM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: vm
+ name: test_vm2
+ datacenter: "{{ dc1 }}"
+ cdrom:
+ type: none
+ state: present
+ register: cdrom_vm
+
+- debug: var=cdrom_vm
+
+- name: assert the VM was changed
+ assert:
+ that:
+ - "cdrom_vm.changed == true"
+
+- name: Create VM with multiple disks and a CDROM - GitHub issue 38679
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "{{ f0 }}"
+ name: test_vm3
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ resource_pool: Resources
+ guest_id: centos64Guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ scsi: paravirtual
+ disk:
+ - size_mb: 128
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ - size_mb: 128
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ - size_mb: 128
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ cdrom:
+ type: iso
+ iso_path: "[{{ ro_datastore }}] fedora.iso"
+ register: cdrom_vm
+
+- debug: var=cdrom_vm
+
+- name: assert the VM was created
+ assert:
+ that:
+ - "cdrom_vm.changed == true"
+
+- name: Create VM with multiple CDROMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: vm
+ name: test_vm1
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ resource_pool: Resources
+ guest_id: centos64Guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ scsi: paravirtual
+ disk:
+ - size_mb: 128
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ cdrom:
+ - controller_type: ide
+ controller_number: 0
+ unit_number: 0
+ type: iso
+ iso_path: "[{{ ro_datastore }}] centos.iso"
+ - controller_type: ide
+ controller_number: 0
+ unit_number: 1
+ type: client
+ - controller_number: 1
+ unit_number: 0
+ type: none
+ - controller_number: 1
+ unit_number: 1
+ type: client
+ register: cdrom_vm
+
+- debug: var=cdrom_vm
+
+- name: assert the VM was created
+ assert:
+ that:
+ - "cdrom_vm.changed == true"
+
+- name: Remove the last 2 CDROMs and update the first 2 for the new VM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: vm
+ name: test_vm1
+ datacenter: "{{ dc1 }}"
+ cdrom:
+ - controller_type: ide
+ controller_number: 0
+ unit_number: 0
+ type: client
+ - controller_type: ide
+ controller_number: 0
+ unit_number: 1
+ type: iso
+ iso_path: "[{{ ro_datastore }}] fedora.iso"
+ - controller_type: ide
+ controller_number: 1
+ unit_number: 0
+ state: absent
+ - controller_type: ide
+ controller_number: 1
+ unit_number: 1
+ state: absent
+ state: present
+ register: cdrom_vm
+
+- debug: var=cdrom_vm
+
+- name: assert the VM was changed
+ assert:
+ that:
+ - "cdrom_vm.changed == true"
+
+# VCSIM fails with an InvalidSpec exception here while a real vCenter passes this testcase,
+# so it only runs when vcsim is not defined.
+- when: vcsim is not defined
+ block:
+ - name: Again create VM with multiple disks and a CDROM - GitHub issue 38679
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "{{ f0 }}"
+ name: test_vm3
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ resource_pool: Resources
+ guest_id: centos64Guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ scsi: paravirtual
+ disk:
+ - size_mb: 128
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ - size_mb: 128
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ - size_mb: 128
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ cdrom:
+ type: iso
+ iso_path: "[{{ ro_datastore }}] base.iso"
+ register: cdrom_vm
+ - debug: var=cdrom_vm
+ - name: assert the VM was created
+ assert:
+ that:
+ - cdrom_vm is changed
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/check_mode.yml b/test/integration/targets/incidental_vmware_guest/tasks/check_mode.yml
new file mode 100644
index 0000000000..d3f6f22634
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/check_mode.yml
@@ -0,0 +1,60 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: Perform all operations in check mode
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "{{ virtual_machines[0].name }}"
+ datacenter: "{{ dc1 }}"
+ state: "{{ item }}"
+ with_items:
+ - absent
+ - present
+ - poweredoff
+ - poweredon
+ - restarted
+ - suspended
+ - shutdownguest
+ - rebootguest
+ register: check_mode_state
+ check_mode: yes
+
+- debug:
+ var: check_mode_state
+
+- name: assert that changes were made
+ assert:
+ that:
+ - "check_mode_state.results|map(attribute='changed')|unique|list == [true]"
+ - "check_mode_state.results|map(attribute='vm_name')|unique|list == [ virtual_machines[0].name ]"
+
+- name: Perform all operations on a non-existent VM in check mode
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: non_existent_vm
+ datacenter: "{{ dc1 }}"
+ state: "{{ item }}"
+ with_items:
+ - present
+ - poweredoff
+ - poweredon
+ - restarted
+ - suspended
+ register: check_mode_state
+ check_mode: yes
+
+- debug:
+ var: check_mode_state
+
+- name: assert that changes were made
+ assert:
+ that:
+ - "check_mode_state.results|map(attribute='changed')|unique|list == [true]"
+ - "check_mode_state.results|map(attribute='desired_operation')|unique|list == ['deploy_vm']"
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/clone_customize_guest_test.yml b/test/integration/targets/incidental_vmware_guest/tasks/clone_customize_guest_test.yml
new file mode 100644
index 0000000000..f40848298c
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/clone_customize_guest_test.yml
@@ -0,0 +1,47 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2019, Pavan Bidkar <pbidkar@vmware.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: clone vm from template and customize GOS
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ template: "{{ virtual_machines[0].name }}"
+ datacenter: "{{ dc1 }}"
+ state: poweredoff
+ folder: "{{ virtual_machines[0].folder }}"
+ convert: thin
+ register: clone_customize
+
+- debug:
+ var: clone_customize
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_customize is changed
+
+- name: clone vm from template and customize GOS again
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ template: "{{ virtual_machines[0].name }}"
+ datacenter: "{{ dc1 }}"
+ state: poweredoff
+ folder: "{{ virtual_machines[0].folder }}"
+ convert: thin
+ register: clone_customize_again
+
+- debug:
+ var: clone_customize_again
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - not (clone_customize_again is changed)
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/clone_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/clone_d1_c1_f0.yml
new file mode 100644
index 0000000000..3b3d2ad543
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/clone_d1_c1_f0.yml
@@ -0,0 +1,101 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2017, James Tanner <tanner.jc@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: create new linked clone without specifying snapshot_src
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ template: "{{ virtual_machines[0].name }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ folder: "{{ f0 }}"
+ linked_clone: True
+ register: linked_clone_d1_c1_f0
+ ignore_errors: True
+
+- debug:
+ var: linked_clone_d1_c1_f0
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - not (linked_clone_d1_c1_f0 is changed)
+
+- name: create new linked clone without specifying linked_clone
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm2
+ template: "{{ virtual_machines[0].name }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ folder: "{{ f0 }}"
+ snapshot_src: "snap_shot1"
+ register: linked_clone_d1_c1_f0
+ ignore_errors: True
+
+- debug:
+ var: linked_clone_d1_c1_f0
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - not (linked_clone_d1_c1_f0 is changed)
+
+# TODO: VCSIM: snapshot is not supported in current vcsim
+#
+#- name: create new linked clone with linked_clone and snapshot_src
+# vmware_guest:
+# validate_certs: False
+# hostname: "{{ vcenter_hostname }}"
+# username: "{{ vcenter_username }}"
+# password: "{{ vcenter_password }}"
+# name: "{{ 'new_vm_' + item|basename }}"
+# template: "{{ item|basename }}"
+# guest_id: centos64Guest
+# datacenter: "{{ (item|basename).split('_')[0] }}"
+# folder: "{{ item|dirname }}"
+# snapshot_src: "snap_shot1"
+# linked_clone: True
+# with_items: "{{ vmlist['json'] }}"
+# register: linked_clone_d1_c1_f0
+# ignore_errors: True
+
+#- debug: var=linked_clone_d1_c1_f0
+
+#- name: assert that changes were made
+# assert:
+# that:
+# - "linked_clone_d1_c1_f0.results|map(attribute='changed')|unique|list == [true]"
+
+# TODO: VCSIM: snapshot is not supported in current vcsim
+#
+#- name: create new linked clone with linked_clone and snapshot_src again
+# vmware_guest:
+# validate_certs: False
+# hostname: "{{ vcenter_hostname }}"
+# username: "{{ vcenter_username }}"
+# password: "{{ vcenter_password }}"
+# name: "{{ 'new_vm_' + item|basename }}"
+# template: "{{ item|basename }}"
+# guest_id: centos64Guest
+# datacenter: "{{ (item|basename).split('_')[0] }}"
+# folder: "{{ item|dirname }}"
+# snapshot_src: "snap_shot1"
+# linked_clone: True
+# with_items: "{{ vmlist['json'] }}"
+# register: linked_clone_d1_c1_f0
+# ignore_errors: True
+
+#- debug: var=linked_clone_d1_c1_f0
+
+#- name: assert that changes were not made
+# assert:
+# that:
+# - "linked_clone_d1_c1_f0.results|map(attribute='changed')|unique|list == [false]"
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/clone_resize_disks.yml b/test/integration/targets/incidental_vmware_guest/tasks/clone_resize_disks.yml
new file mode 100644
index 0000000000..70004d161b
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/clone_resize_disks.yml
@@ -0,0 +1,77 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2019, Noe Gonzalez <noe.a.gonzalez@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- when: vcsim is not defined
+ block:
+ - name: create new VM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: clone_resize_disks_original
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ folder: "{{ f0 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ guest_id: centos7_64Guest
+ disk:
+ - size_gb: 1
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+
+ - name: convert to VM template
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: clone_resize_disks_original
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ folder: "{{ f0 }}"
+ is_template: True
+
+ - name: clone template and modify disks
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: clone_resize_disks_clone
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ folder: "{{ f0 }}"
+ disk:
+ - size_gb: 2
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ - size_gb: 3
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ template: clone_resize_disks_original
+ state: poweredoff
+ register: l_clone_template_modify_disks
+
+ - assert:
+ that:
+ - l_clone_template_modify_disks.changed | bool
+
+ - name: delete VM clone & original template
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "{{ item }}"
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ folder: "{{ f0 }}"
+ state: absent
+ with_items:
+ - clone_resize_disks_original
+ - clone_resize_disks_clone
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/clone_with_convert.yml b/test/integration/targets/incidental_vmware_guest/tasks/clone_with_convert.yml
new file mode 100644
index 0000000000..bf3abfae36
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/clone_with_convert.yml
@@ -0,0 +1,66 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Christophe FERREIRA <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: clone vm from template and convert to thin
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ template: "{{ virtual_machines[0].name }}"
+ datacenter: "{{ dc1 }}"
+ state: poweredoff
+ folder: "{{ virtual_machines[0].folder }}"
+ convert: thin
+ register: clone_thin
+
+- debug: var=clone_thin
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_thin is changed
+
+- name: clone vm from template and convert to thick
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm2
+ template: "{{ virtual_machines[0].name }}"
+ datacenter: "{{ dc1 }}"
+ state: poweredoff
+ folder: "{{ virtual_machines[0].folder }}"
+ convert: thick
+ register: clone_thick
+
+- debug: var=clone_thick
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_thick is changed
+
+- name: clone vm from template and convert to eagerzeroedthick
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm3
+ template: "{{ virtual_machines[0].name }}"
+ datacenter: "{{ dc1 }}"
+ state: poweredoff
+ folder: "{{ virtual_machines[0].folder }}"
+ convert: eagerzeroedthick
+ register: clone_eagerzeroedthick
+
+- debug: var=clone_eagerzeroedthick
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_eagerzeroedthick is changed
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/create_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/create_d1_c1_f0.yml
new file mode 100644
index 0000000000..b4ee85002d
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/create_d1_c1_f0.yml
@@ -0,0 +1,164 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2017, James Tanner <tanner.jc@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: create new VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ num_cpu_cores_per_socket: 1
+ memory_mb: 128
+ hotadd_memory: true
+ hotadd_cpu: false
+ # vcsim does not support these settings, so they are
+ # commented out for now.
+ # memory_reservation: 128
+ # memory_reservation_lock: False
+ # nested_virt: True
+ # hotremove_cpu: True
+ # mem_limit: 8096
+ # mem_reservation: 4096
+ # cpu_limit: 8096
+ # cpu_reservation: 4096
+ max_connections: 10
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: '{{ f0 }}'
+ register: clone_d1_c1_f0
+
+- debug: var=clone_d1_c1_f0
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_d1_c1_f0 is changed
+
+- name: create the VM again
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ num_cpu_cores_per_socket: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: '{{ f0 }}'
+ register: clone_d1_c1_f0_recreate
+
+- debug: var=clone_d1_c1_f0_recreate
+
+- name: assert that no changes were made after re-creating
+ assert:
+ that:
+ - not (clone_d1_c1_f0_recreate is changed)
+
+- name: modify the new VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 2
+ memory_mb: 128
+ state: present
+ folder: '{{ f0 }}'
+ register: clone_d1_c1_f0_modify
+
+- debug: var=clone_d1_c1_f0_modify
+
+- name: assert that changes were made with modification
+ assert:
+ that:
+ - clone_d1_c1_f0_modify is changed
+
+- name: re-modify the new VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 2
+ memory_mb: 128
+ state: present
+ folder: '{{ f0 }}'
+ register: clone_d1_c1_f0_remodify
+
+- debug: var=clone_d1_c1_f0_remodify
+
+- name: assert that no changes were made when re-modified
+ assert:
+ that:
+ - not (clone_d1_c1_f0_remodify is changed)
+
+- name: delete the new VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ #guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ state: absent
+ folder: '{{ f0 }}'
+ register: clone_d1_c1_f0_delete
+
+- debug: var=clone_d1_c1_f0_delete
+
+- name: assert that changes were made with deletion
+ assert:
+ that:
+ - clone_d1_c1_f0_delete is changed
+
+- name: re-delete the new VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ #guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ state: absent
+ folder: '{{ f0 }}'
+ register: clone_d1_c1_f0_redelete
+
+- debug: var=clone_d1_c1_f0_redelete
+
+- name: assert that no changes were made with redeletion
+ assert:
+ that:
+ - not (clone_d1_c1_f0_redelete is changed)
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/create_guest_invalid_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/create_guest_invalid_d1_c1_f0.yml
new file mode 100644
index 0000000000..8a0b6468b6
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/create_guest_invalid_d1_c1_f0.yml
@@ -0,0 +1,32 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- when: vcsim is not defined
+ block:
+ - name: create new virtual machine with invalid guest id
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: invalid_vm
+ guest_id: "invalid_guest_id"
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: present
+ folder: "{{ f0 }}"
+ register: invalid_guest_0001_d1_c1_f0
+ ignore_errors: yes
+ - debug: var=invalid_guest_0001_d1_c1_f0
+ - name: assert that changes were not made
+ assert:
+ that:
+ - "not (invalid_guest_0001_d1_c1_f0 is changed)"
+ - "'configSpec.guestId' in invalid_guest_0001_d1_c1_f0['msg']"
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/create_nw_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/create_nw_d1_c1_f0.yml
new file mode 100644
index 0000000000..2a22dce2f6
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/create_nw_d1_c1_f0.yml
@@ -0,0 +1,38 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: create new VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - name: 'VM Network'
+ device_type: vmxnet3
+ ip: 192.168.10.1
+ netmask: 255.255.255.0
+ wake_on_lan: True
+ start_connected: True
+ allow_guest_control: True
+ state: poweredoff
+ folder: F0
+ register: clone_d1_c1_f0
+
+- debug: var=clone_d1_c1_f0
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_d1_c1_f0 is changed
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/create_rp_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/create_rp_d1_c1_f0.yml
new file mode 100644
index 0000000000..30319b78d7
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/create_rp_d1_c1_f0.yml
@@ -0,0 +1,205 @@
+# Create one with the defaults
+- name: create new VM with default resource pool
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: F0
+ register: clone_rp_d1_c1_f0
+
+- debug: var=clone_rp_d1_c1_f0
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_rp_d1_c1_f0 is changed
+
+- name: delete the new VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ #guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ state: absent
+ folder: F0
+ register: clone_rp_d1_c1_f0_delete
+
+- debug: var=clone_rp_d1_c1_f0_delete
+
+- name: assert that changes were made with deletion
+ assert:
+ that:
+ - clone_rp_d1_c1_f0_delete is changed
+
+# now create with just a cluster
+- name: create new VM with default resource pool in cluster
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: F0
+ register: clone_rpc_d1_c1_f0
+
+- debug: var=clone_rpc_d1_c1_f0
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_rpc_d1_c1_f0 is changed
+
+- name: delete the new VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ #guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ state: absent
+ folder: F0
+ register: clone_rpc_d1_c1_f0_delete
+
+- debug: var=clone_rpc_d1_c1_f0_delete
+
+- name: assert that changes were made with deletion
+ assert:
+ that:
+ - clone_rpc_d1_c1_f0_delete is changed
+
+# now create with a specific resource pool
+- name: create new VM with specific resource pool in cluster
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ resource_pool: DC0_C0_RP1
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: F0
+ register: clone_rpcp_d1_c1_f0
+
+- debug: var=clone_rpcp_d1_c1_f0
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_rpcp_d1_c1_f0 is changed
+
+- name: delete the new VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ #guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ state: absent
+ folder: F0
+ register: clone_rpcp_d1_c1_f0_delete
+
+- debug: var=clone_rpcp_d1_c1_f0_delete
+
+- name: assert that changes were made with deletion
+ assert:
+ that:
+ - clone_rpcp_d1_c1_f0_delete is changed
+
+# now create with a specific host
+- name: create new VM with specific host
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ esxi_hostname: '{{ esxi1 }}'
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: F0
+ register: clone_rph_d1_c1_f0
+
+- debug: var=clone_rph_d1_c1_f0
+
+- name: assert that changes were made
+ assert:
+ that:
+ - clone_rph_d1_c1_f0 is changed
+
+- name: delete the new VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ #template: "{{ item|basename }}"
+ #guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ state: absent
+ folder: F0
+ register: clone_rph_d1_c1_f0_delete
+
+- debug: var=clone_rph_d1_c1_f0_delete
+
+- name: assert that changes were made with deletion
+ assert:
+ that:
+ - clone_rph_d1_c1_f0_delete is changed
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/delete_vm.yml b/test/integration/targets/incidental_vmware_guest/tasks/delete_vm.yml
new file mode 100644
index 0000000000..600df0fc50
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/delete_vm.yml
@@ -0,0 +1,22 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: Delete VM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: nothinghere
+ datacenter: "{{ dc1 }}"
+ state: absent
+ register: delete_vm
+ ignore_errors: yes
+
+- debug: var=delete_vm
+
+- name: assert that no changes were made
+ assert:
+ that:
+ - "not delete_vm.changed"
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/disk_mode_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/disk_mode_d1_c1_f0.yml
new file mode 100644
index 0000000000..7a14d11981
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/disk_mode_d1_c1_f0.yml
@@ -0,0 +1,89 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: create new VMs with invalid disk mode
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: eagerzeroedthick
+ datastore: "{{ rw_datastore }}"
+ disk_mode: 'invalid_disk_mode'
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: test_vm1
+ ignore_errors: True
+
+- debug: var=test_vm1
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - not(test_vm1 is changed)
+
+- name: create new VMs with valid disk mode
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: eagerzeroedthick
+ datastore: "{{ rw_datastore }}"
+ disk_mode: 'independent_persistent'
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: test_vm1_2
+
+- debug: var=test_vm1_2
+
+- name: assert that changes were made
+ assert:
+ that:
+ - test_vm1_2 is changed
+
+#TODO: vcsim does not support reconfiguration of disk mode, fails with types.InvalidDeviceSpec
+- when: vcsim is not defined
+ block:
+ - name: create new VMs with valid disk mode again
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: eagerzeroedthick
+ datastore: "{{ rw_datastore }}"
+ disk_mode: 'independent_persistent'
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: test_vm1_2
+ - debug: var=test_vm1_2
+ - name: assert that changes were not made
+ assert:
+ that:
+ - not (test_vm1_2 is changed)
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/disk_size_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/disk_size_d1_c1_f0.yml
new file mode 100644
index 0000000000..c40f19e624
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/disk_size_d1_c1_f0.yml
@@ -0,0 +1,31 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: create new VMs with invalid disk size
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 0gb
+ type: eagerzeroedthick
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: disk_size_d1_c1_f0
+ ignore_errors: True
+
+- debug: var=disk_size_d1_c1_f0
+
+- name: assert that no changes were made
+ assert:
+ that:
+ - not (disk_size_d1_c1_f0 is changed)
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/disk_type_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/disk_type_d1_c1_f0.yml
new file mode 100644
index 0000000000..3bd4089597
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/disk_type_d1_c1_f0.yml
@@ -0,0 +1,33 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: create new VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: eagerzeroedthick
+ datastore: "{{ rw_datastore }}"
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: poweredoff
+ folder: F0
+ register: disk_type_d1_c1_f0
+
+- debug: var=disk_type_d1_c1_f0
+
+- name: assert that changes were made
+ assert:
+ that:
+ - disk_type_d1_c1_f0 is changed
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/linked_clone_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/linked_clone_d1_c1_f0.yml
new file mode 100644
index 0000000000..702ca59da7
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/linked_clone_d1_c1_f0.yml
@@ -0,0 +1,100 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: create new linked clone without specifying snapshot_src
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "{{ 'new_vm_' + item.name }}"
+ template: "{{ item.name }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ folder: "{{ f0 }}"
+ linked_clone: True
+ with_items: "{{ virtual_machines }}"
+ register: linked_clone_d1_c1_f0
+ ignore_errors: True
+
+- debug: var=linked_clone_d1_c1_f0
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - "linked_clone_d1_c1_f0.results|map(attribute='changed')|unique|list == [false]"
+
+- name: create new linked clone without specifying linked_clone
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "{{ 'new_vm_' + item.name }}"
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ folder: "{{ f0 }}"
+ snapshot_src: "snap_shot1"
+ with_items: "{{ virtual_machines }}"
+ register: linked_clone_d1_c1_f0
+ ignore_errors: True
+
+- debug: var=linked_clone_d1_c1_f0
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - "linked_clone_d1_c1_f0.results|map(attribute='changed')|unique|list == [false]"
+
+# TODO: VCSIM: snapshot is not supported in current vcsim
+#
+#- name: create new linked clone with linked_clone and snapshot_src
+# vmware_guest:
+# validate_certs: False
+# hostname: "{{ vcsim }}"
+# username: "{{ vcsim_instance['json']['username'] }}"
+# password: "{{ vcsim_instance['json']['password'] }}"
+# name: "{{ 'new_vm_' + item|basename }}"
+# template: "{{ item|basename }}"
+# guest_id: centos64Guest
+# datacenter: "{{ (item|basename).split('_')[0] }}"
+# folder: "{{ item|dirname }}"
+# snapshot_src: "snap_shot1"
+# linked_clone: True
+# with_items: "{{ vmlist['json'] }}"
+# register: linked_clone_d1_c1_f0
+# ignore_errors: True
+
+#- debug: var=linked_clone_d1_c1_f0
+
+#- name: assert that changes were made
+# assert:
+# that:
+# - "linked_clone_d1_c1_f0.results|map(attribute='changed')|unique|list == [true]"
+
+# TODO: VCSIM: snapshot is not supported in current vcsim
+#
+#- name: create new linked clone with linked_clone and snapshot_src again
+# vmware_guest:
+# validate_certs: False
+# hostname: "{{ vcsim }}"
+# username: "{{ vcsim_instance['json']['username'] }}"
+# password: "{{ vcsim_instance['json']['password'] }}"
+# name: "{{ 'new_vm_' + item|basename }}"
+# template: "{{ item|basename }}"
+# guest_id: centos64Guest
+# datacenter: "{{ (item|basename).split('_')[0] }}"
+# folder: "{{ item|dirname }}"
+# snapshot_src: "snap_shot1"
+# linked_clone: True
+# with_items: "{{ vmlist['json'] }}"
+# register: linked_clone_d1_c1_f0
+# ignore_errors: True
+
+#- debug: var=linked_clone_d1_c1_f0
+
+#- name: assert that changes were not made
+# assert:
+# that:
+# - "linked_clone_d1_c1_f0.results|map(attribute='changed')|unique|list == [false]"
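The loop assertions in linked_clone_d1_c1_f0.yml do not inspect each loop iteration individually; they chain Jinja2 filters over the registered results list so a single expression proves that no iteration reported a change. A minimal sketch of the same pattern, assuming a loop result registered as loop_result (a hypothetical name), would be:

- name: assert that no iteration reported a change
  assert:
    that:
      # map() pulls the 'changed' flag out of every item in results,
      # unique() collapses duplicates, so [false] means nothing changed
      - "loop_result.results | map(attribute='changed') | unique | list == [false]"

The equivalent "every iteration changed" check compares against [true], as the commented-out vcsim tasks above do.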
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/mac_address_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/mac_address_d1_c1_f0.yml
new file mode 100644
index 0000000000..6e5a8d9217
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/mac_address_d1_c1_f0.yml
@@ -0,0 +1,37 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: create new VMs with manual MAC address
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - name: VM Network
+ ip: 192.168.10.12
+ netmask: 255.255.255.0
+ gateway: 192.168.10.254
+ mac: aa:bb:cc:dd:aa:42
+ state: poweredoff
+ folder: vm
+ register: clone_d1_c1_f0
+
+- debug: var=clone_d1_c1_f0
+
+- name: assert that the manual MAC address was set
+ assert:
+ that:
+ - "clone_d1_c1_f0['instance']['hw_eth0']['addresstype'] == 'manual'"
+ - "clone_d1_c1_f0['instance']['hw_eth0']['macaddress'] == 'aa:bb:cc:dd:aa:42'"
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/main.yml b/test/integration/targets/incidental_vmware_guest/tasks/main.yml
new file mode 100644
index 0000000000..ef064dc5c1
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/main.yml
@@ -0,0 +1,19 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2017, James Tanner <tanner.jc@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- import_role:
+ name: incidental_vmware_prepare_tests
+ vars:
+ setup_attach_host: true
+ setup_datacenter: true
+ setup_datastore: true
+ setup_dvswitch: true
+ setup_resource_pool: true
+ setup_virtualmachines: true
+ setup_dvs_portgroup: true
+
+- include_tasks: run_test_playbook.yml
+ with_items: '{{ vmware_guest_test_playbooks }}'
+ loop_control:
+ loop_var: test_playbook
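main.yml drives every scenario through one loop: run_test_playbook.yml is included once per entry of vmware_guest_test_playbooks, with loop_var exposing the current file name as test_playbook. The variable itself is expected to come from the prepare role's defaults; an illustrative (not authoritative) value listing a few of the task files added in this change might look like:

vmware_guest_test_playbooks:
  - poweroff_d1_c1_f0.yml
  - disk_mode_d1_c1_f0.yml
  - delete_vm.yml

Each included file then runs inside the block/always wrapper in run_test_playbook.yml, which removes test_vm1 through test_vm3 afterwards regardless of the test outcome.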
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/max_connections.yml b/test/integration/targets/incidental_vmware_guest/tasks/max_connections.yml
new file mode 100644
index 0000000000..d9ab6b6621
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/max_connections.yml
@@ -0,0 +1,45 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- when: vcsim is not defined
+ block:
+ - &add_mk_conn
+ name: Create new VM with max_connections as 4
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ max_connections: 4
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: present
+ folder: "{{ f0 }}"
+ register: mk_conn_result_0001
+
+ - debug: var=mk_conn_result_0001
+
+ - name: Assert that changes were made
+ assert:
+ that:
+ - mk_conn_result_0001 is changed
+
+ - <<: *add_mk_conn
+ name: Create new VM again with max_connections as 4
+ register: mk_conn_result_0002
+
+ - debug: var=mk_conn_result_0002
+
+ - name: Assert that changes were not made
+ assert:
+ that:
+ - not (mk_conn_result_0002 is changed)
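max_connections.yml (like mem_reservation.yml below) verifies idempotency by re-running an identical task: the first task is captured with a YAML anchor (&add_mk_conn) and the rerun merges it back in with <<: *add_mk_conn, overriding only name and register. A minimal, self-contained sketch of the same construct, using hypothetical names, is:

- &create_demo_vm
  name: Create the VM
  vmware_guest:
    validate_certs: False
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    datacenter: "{{ dc1 }}"
    folder: "{{ f0 }}"
    name: demo_vm            # hypothetical VM name
    guest_id: centos64Guest
    state: present
  register: first_run

- <<: *create_demo_vm        # merge the anchored task, override selected keys
  name: Create the VM again (should report no change)
  register: second_run

- assert:
    that:
      - first_run is changed
      - not (second_run is changed)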
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/mem_reservation.yml b/test/integration/targets/incidental_vmware_guest/tasks/mem_reservation.yml
new file mode 100644
index 0000000000..a5a9762f62
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/mem_reservation.yml
@@ -0,0 +1,125 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- when: vcsim is not defined
+ block:
+ - &add_mem_reserve
+ name: Create new VMs with mem_reservation as 0
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ mem_reservation: 0
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: present
+ folder: "{{ virtual_machines[0].folder }}"
+ register: mem_reserve_result_0001
+
+ - debug: var=mem_reserve_result_0001
+
+ - name: Assert that changes were made
+ assert:
+ that:
+ - mem_reserve_result_0001 is changed
+
+ - <<: *add_mem_reserve
+ name: Create the same VM again with mem_reservation as 0
+ register: mem_reserve_result_0002
+
+ - debug: var=mem_reserve_result_0002
+
+ - name: Assert that changes were not made
+ assert:
+ that:
+ - not (mem_reserve_result_0002 is changed)
+
+ - &add_memory_reserve
+ name: Create new VM with memory_reservation as 0
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm2
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ memory_reservation: 0
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: present
+ folder: "{{ virtual_machines[0].folder }}"
+ register: memory_reserve_result_0003
+
+ - debug: var=memory_reserve_result_0003
+
+ - name: Assert that changes were made
+ assert:
+ that:
+ - memory_reserve_result_0003 is changed
+
+ - <<: *add_memory_reserve
+ name: Create the same VM again with memory_reservation as 0
+ register: memory_reserve_result_0004
+
+ - debug: var=memory_reserve_result_0004
+
+ - name: Assert that changes were not made
+ assert:
+ that:
+ - not (memory_reserve_result_0004 is changed)
+
+ - &no_memory_reserve
+ name: Create new VMs without memory_reservation or mem_reservation
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm3
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ memory_reservation: 0
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: present
+ folder: "{{ virtual_machines[0].folder }}"
+ register: no_memory_reserve_result_0005
+
+ - debug: var=no_memory_reserve_result_0005
+
+ - name: Assert that changes were made
+ assert:
+ that:
+ - no_memory_reserve_result_0005 is changed
+
+ - <<: *no_memory_reserve
+ name: Create the same VM again without memory_reservation or mem_reservation
+ register: no_memory_reserve_result_0006
+
+ - debug: var=no_memory_reserve_result_0006
+
+ - name: Assert that changes were not made
+ assert:
+ that:
+ - not (no_memory_reserve_result_0006 is changed)
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/network_negative_test.yml b/test/integration/targets/incidental_vmware_guest/tasks/network_negative_test.yml
new file mode 100644
index 0000000000..768e06b042
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/network_negative_test.yml
@@ -0,0 +1,339 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- debug: var=f0
+
+- name: create new VMs with non-existent network
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: new_vm
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size: 3mb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - name: "Non existent VM"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: non_existent_network
+ ignore_errors: yes
+
+- debug: var=non_existent_network
+
+- name: assert that no changes were made
+ assert:
+ that:
+ - not (non_existent_network is changed)
+ - "'does not exist' in non_existent_network.msg"
+
+- name: create new VMs with network and with only IP
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: new_vm
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size: 3mb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - name: "VM Network"
+ type: static
+ ip: 10.10.10.10
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: no_netmask
+ ignore_errors: yes
+
+- debug: var=no_netmask
+
+- name: assert that no changes were made
+ assert:
+ that:
+ - "not no_netmask.changed"
+ - "\"'netmask' is required if 'ip' is specified under VM network list.\" in no_netmask.msg"
+
+- name: create new VMs with network and with only netmask
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: new_vm
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size: 3mb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - name: "VM Network"
+ type: static
+ netmask: 255.255.255.0
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: no_ip
+ ignore_errors: yes
+
+- debug: var=no_ip
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - "not no_ip.changed"
+ - "\"'ip' is required if 'netmask' is specified under VM network list.\" in no_ip.msg"
+
+- name: create new VMs with network and without network name
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: new_vm
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size: 3mb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - ip: 10.10.10.10
+ netmask: 255.255.255
+ type: static
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: no_network_name
+ ignore_errors: yes
+
+- debug: var=no_network_name
+
+- name: assert that no changes were made
+ assert:
+ that:
+ - "not no_network_name.changed"
+ - "\"Please specify at least a network name or a VLAN name under VM network list.\" in no_network_name.msg"
+
+- name: create new VMs with a non-existent VLAN name
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: new_vm
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size: 3mb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - vlan: non_existing_vlan
+ ip: 10.10.10.10
+ netmask: 255.255.255
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: no_network
+ ignore_errors: yes
+
+- debug: var=no_network
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - "not no_network.changed"
+ - "\"VLAN 'non_existing_vlan' does not exist.\" in no_network.msg"
+
+- name: create new VMs with invalid device type
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: new_vm
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size: 3mb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - name: "VM Network"
+ ip: 10.10.10.10
+ netmask: 255.255.255
+ device_type: abc
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: invalid_device_type
+ ignore_errors: yes
+
+- debug: var=invalid_device_type
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - "not invalid_device_type.changed"
+ - "\"Device type specified 'abc' is not valid.\" in invalid_device_type.msg"
+
+- name: create new VMs with invalid device MAC address
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: new_vm
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size: 3mb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - name: "VM Network"
+ ip: 10.10.10.10
+ netmask: 255.255.255
+ device_type: e1000
+ mac: abcdef
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: invalid_mac
+ ignore_errors: yes
+
+- debug: var=invalid_mac
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - "not invalid_mac.changed"
+ - "\"Device MAC address 'abcdef' is invalid.\" in invalid_mac.msg"
+
+- name: create new VMs with invalid network type
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: new_vm
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size: 3mb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - name: "VM Network"
+ ip: 10.10.10.10
+ netmask: 255.255.255
+ device_type: e1000
+ mac: 01:23:45:67:89:ab
+ type: aaaaa
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: invalid_network_type
+ ignore_errors: yes
+
+- debug: var=invalid_network_type
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - "not invalid_network_type.changed"
+ - "\"Network type 'aaaaa' is not a valid parameter.\" in invalid_network_type.msg"
+
+- name: create new VMs with IP, netmask and network type as "DHCP"
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: new_vm
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size: 3mb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - name: "VM Network"
+ ip: 10.10.10.10
+ netmask: 255.255.255
+ device_type: e1000
+ mac: 01:23:45:67:89:ab
+ type: dhcp
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: invalid_dhcp_network_type
+ ignore_errors: yes
+
+- debug: var=invalid_dhcp_network_type
+
+- name: assert that changes were not made
+ assert:
+ that:
+ - "not invalid_dhcp_network_type.changed"
+ - "\"Static IP information provided for network\" in invalid_dhcp_network_type.msg"
+
+- name: create new VMs with no network type, which defaults the network type to "DHCP"
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size: 3mb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - name: "VM Network"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: no_network_type
+ ignore_errors: yes
+
+- debug: var=no_network_type
+
+- name: assert that changes were made
+ assert:
+ that:
+ - "no_network_type.changed"
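Every negative case in network_negative_test.yml follows the same shape: call the module with one deliberately bad parameter, keep the play alive with ignore_errors, then assert on both the changed flag and the error text. A stripped-down sketch of that pattern, with a hypothetical register name, is:

- name: attempt to create a VM on a network that does not exist
  vmware_guest:
    validate_certs: False
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    name: bad_vm                   # hypothetical VM name
    guest_id: centos64Guest
    datacenter: "{{ dc1 }}"
    networks:
      - name: "no_such_network"    # deliberately invalid network name
    state: poweredoff
    folder: "{{ f0 }}"
  register: bad_network_result
  ignore_errors: yes               # let the play continue so the failure can be inspected

- assert:
    that:
      - not (bad_network_result is changed)
      - "'does not exist' in bad_network_result.msg"

The message substrings asserted here come from the module's own error strings, so a wording change in vmware_guest would surface as a test failure.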
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/network_with_device.yml b/test/integration/targets/incidental_vmware_guest/tasks/network_with_device.yml
new file mode 100644
index 0000000000..64b591adb7
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/network_with_device.yml
@@ -0,0 +1,60 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Testcase to check #38605
+- name: Deploy first VM
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: False
+ datacenter: "{{ dc1 }}"
+ state: poweredon
+ folder: "{{ f0 }}"
+ name: test_vm1
+ disk:
+ - size: 10mb
+ datastore: "{{ rw_datastore }}"
+ guest_id: rhel7_64guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ networks:
+ - name: 'VM Network'
+ device_type: "vmxnet3"
+ register: vm_result
+
+- debug: var=vm_result
+
+- assert:
+ that:
+ - "vm_result.changed"
+
+- name: Deploy VM again
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: False
+ datacenter: "{{ dc1 }}"
+ state: poweredon
+ folder: "{{ f0 }}"
+ name: test_vm1
+ disk:
+ - size: 10mb
+ datastore: "{{ rw_datastore }}"
+ guest_id: rhel7_64guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ networks:
+ - name: 'VM Network'
+ device_type: "vmxnet3"
+ register: vm_result_again
+
+- debug: var=vm_result_again
+
+- assert:
+ that:
+ - not (vm_result_again is changed)
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/network_with_dvpg.yml b/test/integration/targets/incidental_vmware_guest/tasks/network_with_dvpg.yml
new file mode 100644
index 0000000000..fba25aeee5
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/network_with_dvpg.yml
@@ -0,0 +1,152 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Clone from existing VM with DVPG
+- when: vcsim is not defined
+ block:
+ - name: Deploy VM from template
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ datacenter: "{{ dc1 }}"
+ state: poweredon
+ folder: "{{ f0 }}"
+ template: "{{ virtual_machines[0].name }}"
+ name: test_vm1
+ disk:
+ - size: 1gb
+ datastore: "{{ rw_datastore }}"
+ guest_id: rhel7_64guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ networks:
+ - name: '{{ dvpg1 }}'
+ register: no_vm_result
+ - debug: var=no_vm_result
+ - assert:
+ that:
+ - no_vm_result is changed
+
+ # New clone with DVPG
+ - name: Deploy new VM with DVPG
+ vmware_guest:
+ esxi_hostname: "{{ esxi_hosts[0] }}"
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ datacenter: "{{ dc1 }}"
+ state: poweredon
+ folder: "{{ f0 }}"
+ name: test_vm2
+ disk:
+ - size: 1gb
+ datastore: "{{ rw_datastore }}"
+ guest_id: rhel7_64guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ networks:
+ - name: '{{ dvpg1 }}'
+ dvswitch_name: "{{ dvswitch1 }}"
+ register: no_vm_result
+ - debug: var=no_vm_result
+ - assert:
+ that:
+ - no_vm_result is changed
+
+ - name: Deploy same VM again
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ datacenter: "{{ dc1 }}"
+ state: poweredon
+ folder: "{{ f0 }}"
+ name: test_vm2
+ disk:
+ - size: 1gb
+ datastore: "{{ rw_datastore }}"
+ guest_id: rhel7_64guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ networks:
+ - name: '{{ dvpg1 }}'
+ register: no_vm_result
+ - debug: var=no_vm_result
+ - assert:
+ that:
+ - not (no_vm_result is changed)
+
+ - name: Deploy new VM with DVPG with slash in name
+ vmware_guest:
+ esxi_hostname: "{{ esxi_hosts[0] }}"
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ datacenter: "{{ dc1 }}"
+ state: poweredon
+ folder: "{{ f0 }}"
+ name: test_vm3
+ disk:
+ - size: 1gb
+ datastore: "{{ rw_datastore }}"
+ guest_id: rhel7_64guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ networks:
+ - name: '{{ dvpg_with_slash }}'
+ dvswitch_name: "{{ dvswitch1 }}"
+ register: no_vm_result
+ - debug: var=no_vm_result
+ - assert:
+ that:
+ - no_vm_result is changed
+
+ - name: Deploy same VM again
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ datacenter: "{{ dc1 }}"
+ state: poweredon
+ folder: "{{ f0 }}"
+ name: test_vm3
+ disk:
+ - size: 1gb
+ datastore: "{{ rw_datastore }}"
+ guest_id: rhel7_64guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ networks:
+ - name: '{{ dvpg_with_slash }}'
+ register: no_vm_result
+ - debug: var=no_vm_result
+ - assert:
+ that:
+ - not (no_vm_result is changed)
+ always:
+ - when: vcsim is not defined
+ name: Remove VM to free the portgroup
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ name: '{{ item }}'
+ force: yes
+ state: absent
+ with_items:
+ - test_vm1
+ - test_vm2
+ - test_vm3
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/network_with_portgroup.yml b/test/integration/targets/incidental_vmware_guest/tasks/network_with_portgroup.yml
new file mode 100644
index 0000000000..c9eb80186a
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/network_with_portgroup.yml
@@ -0,0 +1,47 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>, Tim Steinbach <tim@nequissimus.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: Add portgroup
+ vmware_dvs_portgroup:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ portgroup_name: "portgroup_network"
+ switch_name: "{{ dvswitch1 }}"
+ vlan_id: "1"
+ num_ports: 2
+ portgroup_type: earlyBinding
+ state: present
+ register: dvsportgroup
+- debug: var=dvsportgroup
+- name: create new VMs with portgroup
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size: 3mb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ networks:
+ - name: portgroup_network
+ switch_name: "{{ dvswitch1 }}"
+ hardware:
+ num_cpus: 1
+ memory_mb: 128
+ state: poweredoff
+ folder: "{{ f0 }}"
+ register: vm_with_portgroup
+ ignore_errors: no
+
+- debug: var=vm_with_portgroup
+
+- assert:
+ that:
+ - vm_with_portgroup is changed
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/non_existent_vm_ops.yml b/test/integration/targets/incidental_vmware_guest/tasks/non_existent_vm_ops.yml
new file mode 100644
index 0000000000..0815fdf9c6
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/non_existent_vm_ops.yml
@@ -0,0 +1,23 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: Perform operation on non-existent VM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "non_existent_vm"
+ datacenter: "{{ dc1 }}"
+ folder: "{{ f0 }}"
+ state: poweredoff
+ register: non_existent_vm_ops
+ ignore_errors: yes
+- debug: var=non_existent_vm_ops
+- name: assert that changes were not made
+ assert:
+ that:
+ - not (non_existent_vm_ops is changed)
+ - "'msg' in non_existent_vm_ops"
+ - "'Unable to find the datastore with given parameters.' in non_existent_vm_ops.msg"
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/poweroff_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/poweroff_d1_c1_f0.yml
new file mode 100644
index 0000000000..6bddf568f7
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/poweroff_d1_c1_f0.yml
@@ -0,0 +1,27 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2017, James Tanner <tanner.jc@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: set existing VMs to the poweredoff state
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "{{ item.name }}"
+ state: poweredoff
+ with_items: "{{ virtual_machines }}"
+ register: poweroff_d1_c1_f0
+
+- debug:
+ var: poweroff_d1_c1_f0
+
+- name: make sure the VMs are powered off
+ assert:
+ that:
+ - poweroff_d1_c1_f0.results[0].instance.hw_power_status == "poweredOff"
+
+- name: make sure no changes were made (the VMs are already off)
+ assert:
+ that:
+ - poweroff_d1_c1_f0.results[0].changed|bool == false
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/poweroff_d1_c1_f1.yml b/test/integration/targets/incidental_vmware_guest/tasks/poweroff_d1_c1_f1.yml
new file mode 100644
index 0000000000..cb5ada078f
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/poweroff_d1_c1_f1.yml
@@ -0,0 +1,22 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2017, James Tanner <tanner.jc@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# https://github.com/ansible/ansible/issues/25011
+# Sending "-folders 1" to vcsim nests the datacenter under
+# the folder so that the path prefix is no longer /vm
+#
+# /F0/DC0/vm/F0/DC0_H0_VM0
+
+- name: set state to poweredoff on all VMs
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "{{ item.name }}"
+ state: poweredoff
+ folder: "{{ item.folder }}"
+ with_items: "{{ virtual_machines }}"
+ register: poweroff_d1_c1_f1
+- debug: var=poweroff_d1_c1_f1
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/reconfig_vm_to_latest_version.yml b/test/integration/targets/incidental_vmware_guest/tasks/reconfig_vm_to_latest_version.yml
new file mode 100644
index 0000000000..619090dc0b
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/reconfig_vm_to_latest_version.yml
@@ -0,0 +1,73 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2019, Pavan Bidkar <pbidkar@vmware.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Skipping the idempotency test until the reconfigure_vm() become_method issue is fixed
+
+- name: Create VM with hardware version 12
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos7_64Guest
+ datacenter: "{{ dc1 }}"
+ folder: "{{ f0 }}"
+ datastore: '{{ rw_datastore }}'
+ hardware:
+ num_cpus: 4
+ memory_mb: 1028
+ version: 12
+ state: present
+ register: create_vm_with_version_12
+
+- name: assert that changes were made
+ assert:
+ that:
+ - create_vm_with_version_12 is changed
+
+- name: Deploy New VM with latest hardware version
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm2
+ guest_id: centos7_64Guest
+ datacenter: "{{ dc1 }}"
+ folder: "{{ f0 }}"
+ datastore: '{{ rw_datastore }}'
+ hardware:
+ num_cpus: 4
+ memory_mb: 1028
+ version: latest
+ state: present
+ register: deploy_vm_to_latest
+
+- name: assert that changes were made
+ assert:
+ that:
+ - deploy_vm_to_latest is changed
+
+- name: Upgrade VM to latest version
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos7_64Guest
+ datacenter: "{{ dc1 }}"
+ folder: "{{ f0 }}"
+ datastore: '{{ rw_datastore }}'
+ hardware:
+ num_cpus: 4
+ memory_mb: 1028
+ version: latest
+ state: present
+ register: upgrade_vm
+
+- name: assert that changes were made
+ assert:
+ that:
+ - upgrade_vm is changed
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/remove_vm_from_inventory.yml b/test/integration/targets/incidental_vmware_guest/tasks/remove_vm_from_inventory.yml
new file mode 100644
index 0000000000..74da57d4b0
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/remove_vm_from_inventory.yml
@@ -0,0 +1,61 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2019, Pavan Bidkar <pbidkar@vmware.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: Create VM to unregister
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ guest_id: centos64Guest
+ datacenter: "{{ dc1 }}"
+ folder: F0
+ hardware:
+ num_cpus: 1
+ num_cpu_cores_per_socket: 1
+ memory_mb: 128
+ disk:
+ - size: 1gb
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ state: present
+ register: create_vm_for_test
+
+- name: assert that changes were made
+ assert:
+ that:
+ - create_vm_for_test is changed
+
+- name: Remove VM from Inventory
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ delete_from_inventory: True
+ state: absent
+ register: remove_vm_from_inventory
+
+- name: assert that changes were made
+ assert:
+ that:
+ - remove_vm_from_inventory is changed
+
+- name: Remove VM again from Inventory
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: test_vm1
+ delete_from_inventory: True
+ state: absent
+ register: remove_again_vm_from_inventory
+
+- name: assert that no changes were made
+ assert:
+ that:
+ - not (remove_again_vm_from_inventory is changed)
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/run_test_playbook.yml b/test/integration/targets/incidental_vmware_guest/tasks/run_test_playbook.yml
new file mode 100644
index 0000000000..d60dc394e1
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/run_test_playbook.yml
@@ -0,0 +1,17 @@
+- block:
+ - include_tasks: '{{ test_playbook }}'
+ always:
+ - name: Remove VM
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+# cluster: "{{ ccr1 }}"
+ name: '{{ item }}'
+ force: yes
+ state: absent
+ with_items:
+ - test_vm1
+ - test_vm2
+ - test_vm3
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/template_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/template_d1_c1_f0.yml
new file mode 100644
index 0000000000..55e27c9ded
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/template_d1_c1_f0.yml
@@ -0,0 +1,105 @@
+- name: Create VMs with the poweredoff state
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ dc1 }}"
+ validate_certs: no
+ folder: '{{ f0 }}'
+ name: 'test_vm1'
+ state: poweredoff
+ guest_id: debian8_64Guest
+ disk:
+ - size_gb: 1
+ type: thin
+ datastore: '{{ rw_datastore }}'
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ scsi: paravirtual
+ cdrom:
+ type: iso
+ iso_path: "[{{ ro_datastore }}] fedora.iso"
+ networks:
+ - name: VM Network
+
+- name: ensure that VM1 is not flagged as a template
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ dc1 }}"
+ folder: "{{ virtual_machines[1].folder }}"
+ name: "{{ virtual_machines[1].name }}"
+ state: present
+ is_template: False
+ register: no_template_initial
+
+- debug: var=no_template_initial
+
+- name: ensure no changes were made
+ assert:
+ that:
+ - not (no_template_initial is changed)
+
+- name: convert VM1 to template
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ dc1 }}"
+ folder: "{{ virtual_machines[1].folder }}"
+ name: "{{ virtual_machines[1].name }}"
+ state: present
+ is_template: True
+ register: convert_to_template
+
+- debug: var=convert_to_template
+
+- name: ensure that changes were made
+ assert:
+ that:
+ - convert_to_template is changed
+
+- name: make double sure that VM1 is a template
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ dc1 }}"
+ folder: "{{ virtual_machines[1].folder }}"
+ name: "{{ virtual_machines[1].name }}"
+ state: present
+ is_template: True
+ register: still_templates
+
+- debug: var=still_templates
+
+- name: ensure that no changes were made
+ assert:
+ that:
+ - not (still_templates is changed)
+
+# To avoid the following vcsim error: VirtualMachine:vm-67 does not implement: MarkAsVirtualMachine
+- when: vcsim is not defined
+ block:
+ - name: convert the template back to a VM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ dc1 }}"
+ folder: "{{ virtual_machines[1].folder }}"
+ name: "{{ virtual_machines[1].name }}"
+ state: present
+ is_template: False
+ register: revert_to_vm
+ - debug: var=revert_to_vm
+ - name: ensure that changes were made
+ assert:
+ that:
+ - revert_to_vm is changed
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/vapp_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/vapp_d1_c1_f0.yml
new file mode 100644
index 0000000000..bd83f97db6
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/vapp_d1_c1_f0.yml
@@ -0,0 +1,100 @@
+# Test code for the vmware_guest module.
+# Copyright: (c) 2018, goshkis
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- &vapp_new_vm
+ name: Create test VM with vApp settings
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "vm"
+ name: test_vm1
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ resource_pool: Resources
+ guest_id: centos64Guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ scsi: paravirtual
+ disk:
+ - size_mb: 128
+ type: thin
+ datastore: "{{ rw_datastore }}"
+ vapp_properties:
+ - id: prop_id1
+ category: category
+ label: prop_label1
+ type: string
+ value: prop_value1
+ - id: prop_id2
+ category: category
+ label: prop_label2
+ type: string
+ value: prop_value2
+ register: vapp_vm
+
+- debug: var=vapp_vm
+
+- name: assert the vApp properties were created
+ assert:
+ that:
+ - "vapp_vm.failed == false"
+ - "vapp_vm.changed == true"
+
+- when: vcsim is not defined
+ block:
+ - <<: *vapp_new_vm
+ name: Try to create the same VM with the same vApp settings
+ register: vapp_vm_no_change
+
+ - debug: var=vapp_vm_no_change
+
+ - name: Assert that vApp properties were not changed
+ assert:
+ that:
+ - "vapp_vm_no_change.failed == false"
+ - "not vapp_vm_no_change.changed"
+
+- &vapp_edit_vm
+ name: Edit one vApp property and remove another
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "vm"
+ name: test_vm1
+ datacenter: "{{ dc1 }}"
+ vapp_properties:
+ - id: prop_id1
+ operation: remove
+ - id: prop_id2
+ value: prop_value3
+ state: present
+ register: vapp_vm
+
+- debug: var=vapp_vm
+
+- name: assert the VM was changed
+ assert:
+ that:
+ - "vapp_vm.failed == false"
+ - "vapp_vm.changed == true"
+
+- when: vcsim is not defined
+ block:
+ - <<: *vapp_edit_vm
+ name: Try to edit VM with vApp settings
+ register: vapp_vm_no_change_edit
+
+ - debug: var=vapp_vm_no_change_edit
+
+ - name: assert the VM was not changed
+ assert:
+ that:
+ - "vapp_vm_no_change_edit.failed == false"
+ - "vapp_vm_no_change_edit.changed == false"
diff --git a/test/integration/targets/incidental_vmware_guest/tasks/windows_vbs_d1_c1_f0.yml b/test/integration/targets/incidental_vmware_guest/tasks/windows_vbs_d1_c1_f0.yml
new file mode 100644
index 0000000000..88a5757bf1
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest/tasks/windows_vbs_d1_c1_f0.yml
@@ -0,0 +1,93 @@
+- name: Create Windows 10 VM with VBS enabled
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "{{ f0 }}"
+ name: vbs-Test
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ resource_pool: Resources
+ guest_id: windows9_64Guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ virt_based_security: True
+ version: 14
+ boot_firmware: efi
+ scsi: paravirtual
+ disk:
+ - size_mb: 128
+ type: thin
+ datastore: '{{ rw_datastore }}'
+ cdrom:
+ type: client
+ register: vbs_vm
+
+- debug: var=vbs_vm
+
+- name: assert the VM was created
+ assert:
+ that:
+ - "vbs_vm.failed == false"
+ - "vbs_vm.changed == true"
+
+- name: Create Windows Server 2016 VM without VBS enabled
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "{{ f0 }}"
+ name: vbs-Test2
+ datacenter: "{{ dc1 }}"
+ cluster: "{{ ccr1 }}"
+ resource_pool: Resources
+ guest_id: windows9Server64Guest
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ version: 14
+ boot_firmware: efi
+ scsi: paravirtual
+ disk:
+ - size_mb: 128
+ type: thin
+ datastore: '{{ rw_datastore }}'
+ cdrom:
+ type: client
+ register: vbs_vm
+
+- debug: var=vbs_vm
+
+- name: assert the VM was created
+ assert:
+ that:
+ - "vbs_vm.failed == false"
+ - "vbs_vm.changed == true"
+
+- name: Enable VBS for Windows Server 2016 VM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ folder: "{{ f0 }}"
+ name: vbs-Test2
+ datacenter: "{{ dc1 }}"
+ disk:
+ - size_mb: 256
+ type: thin
+ datastore: '{{ rw_datastore }}'
+ hardware:
+ virt_based_security: True
+ state: present
+ register: vbs_vm
+
+- debug: var=vbs_vm
+
+- name: assert the VM was changed
+ assert:
+ that:
+ - vbs_vm is changed
diff --git a/test/integration/targets/incidental_vmware_guest_custom_attributes/aliases b/test/integration/targets/incidental_vmware_guest_custom_attributes/aliases
new file mode 100644
index 0000000000..0eb73d761d
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest_custom_attributes/aliases
@@ -0,0 +1,3 @@
+cloud/vcenter
+shippable/vcenter/incidental
+needs/target/incidental_vmware_prepare_tests
diff --git a/test/integration/targets/incidental_vmware_guest_custom_attributes/tasks/main.yml b/test/integration/targets/incidental_vmware_guest_custom_attributes/tasks/main.yml
new file mode 100644
index 0000000000..c9f6bdb41f
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_guest_custom_attributes/tasks/main.yml
@@ -0,0 +1,110 @@
+# Test code for the vmware_guest_custom_attributes module.
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# TODO: Current pinned version of vcsim does not support custom fields
+# commenting testcase below
+- import_role:
+ name: incidental_vmware_prepare_tests
+ vars:
+ setup_attach_host: true
+ setup_datastore: true
+ setup_virtualmachines: true
+- name: Add custom attribute to the given virtual machine
+ vmware_guest_custom_attributes:
+ validate_certs: False
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: "{{ dc1 }}"
+ name: "{{ virtual_machines[0].name }}"
+ folder: "{{ virtual_machines[0].folder }}"
+ state: present
+ attributes:
+ - name: 'sample_1'
+ value: 'sample_1_value'
+ - name: 'sample_2'
+ value: 'sample_2_value'
+ - name: 'sample_3'
+ value: 'sample_3_value'
+ register: guest_info_0001
+
+- debug: var=guest_info_0001
+
+- assert:
+ that:
+ - guest_info_0001 is changed
+
+- name: Add custom attribute to the given virtual machine again
+ vmware_guest_custom_attributes:
+ validate_certs: False
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: "{{ dc1 }}"
+ name: "{{ virtual_machines[0].name }}"
+ folder: "{{ virtual_machines[0].folder }}"
+ state: present
+ attributes:
+ - name: 'sample_1'
+ value: 'sample_1_value'
+ - name: 'sample_2'
+ value: 'sample_2_value'
+ - name: 'sample_3'
+ value: 'sample_3_value'
+ register: guest_info_0002
+
+- debug: var=guest_info_0002
+
+- assert:
+ that:
+ - not (guest_info_0002 is changed)
+
+- name: Remove custom attribute from the given virtual machine
+ vmware_guest_custom_attributes:
+ validate_certs: False
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: "{{ dc1 }}"
+ name: "{{ virtual_machines[0].name }}"
+ folder: "{{ virtual_machines[0].folder }}"
+ state: absent
+ attributes:
+ - name: 'sample_1'
+ - name: 'sample_2'
+ - name: 'sample_3'
+ register: guest_info_0004
+
+- debug: msg="{{ guest_info_0004 }}"
+
+- assert:
+ that:
+ - "guest_info_0004.changed"
+
+# TODO: vcsim returns duplicate values, so removing custom attributes
+# results in a change. vCenter shows the correct behavior. Skipping this
+# until it is supported by vcsim.
+- when: vcsim is not defined
+ block:
+ - name: Remove custom attribute from the given virtual machine again
+ vmware_guest_custom_attributes:
+ validate_certs: False
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ datacenter: "{{ dc1 }}"
+ name: "{{ virtual_machines[0].name }}"
+ folder: "{{ virtual_machines[0].folder }}"
+ state: absent
+ attributes:
+ - name: 'sample_1'
+ - name: 'sample_2'
+ - name: 'sample_3'
+ register: guest_info_0005
+
+ - debug: var=guest_info_0005
+
+ - assert:
+ that:
+ - not (guest_info_0005 is changed)
diff --git a/test/integration/targets/incidental_vmware_host_hyperthreading/aliases b/test/integration/targets/incidental_vmware_host_hyperthreading/aliases
new file mode 100644
index 0000000000..0eb73d761d
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_host_hyperthreading/aliases
@@ -0,0 +1,3 @@
+cloud/vcenter
+shippable/vcenter/incidental
+needs/target/incidental_vmware_prepare_tests
diff --git a/test/integration/targets/incidental_vmware_host_hyperthreading/tasks/main.yml b/test/integration/targets/incidental_vmware_host_hyperthreading/tasks/main.yml
new file mode 100644
index 0000000000..4d9e5f7608
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_host_hyperthreading/tasks/main.yml
@@ -0,0 +1,92 @@
+# Test code for the vmware_host_hyperthreading module.
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Hyperthreading optimization is not available for hosts in vcsim
+- import_role:
+ name: incidental_vmware_prepare_tests
+ vars:
+ setup_attach_host: true
+
+
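+# The first task registers its result and uses ignore_errors so that the
+# remaining checks only run when the host actually supports hyperthreading.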
+- name: Enable Hyperthreading everywhere
+ vmware_host_hyperthreading:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ state: enabled
+ register: enable_hyperthreading_everywhere
+ ignore_errors: true
+
+- when: enable_hyperthreading_everywhere is succeeded
+ block:
+
+ - name: Disable Hyperthreading for a given host
+ vmware_host_hyperthreading:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: '{{ esxi1 }}'
+ validate_certs: no
+ state: disabled
+ register: host_hyperthreading_info
+
+ - debug: var=host_hyperthreading_info
+
+ - assert:
+ that:
+ - host_hyperthreading_info is defined
+ - host_hyperthreading_info.changed
+
+ - name: Disable Hyperthreading for a given host in check mode
+ vmware_host_hyperthreading:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ esxi_hostname: '{{ esxi1 }}'
+ validate_certs: no
+ state: disabled
+ register: host_hyperthreading_info_check_mode
+ check_mode: yes
+
+ - debug: var=host_hyperthreading_info_check_mode
+
+ - assert:
+ that:
+ - host_hyperthreading_info_check_mode is defined
+
+ - name: Disable Hyperthreading for all hosts in given cluster
+ vmware_host_hyperthreading:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ cluster_name: "{{ ccr1 }}"
+ validate_certs: no
+ state: disabled
+ register: host_hyperthreading_info
+
+ - debug: var=host_hyperthreading_info
+
+ - assert:
+ that:
+ - host_hyperthreading_info is defined
+ - host_hyperthreading_info is changed
+
+ - name: Enable Hyperthreading for all hosts in given cluster in check mode
+ vmware_host_hyperthreading:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ cluster_name: "{{ ccr1 }}"
+ validate_certs: no
+ state: enabled
+ register: host_hyperthreading_info_check_mode
+ check_mode: yes
+
+ - debug: var=host_hyperthreading_info_check_mode
+
+ - assert:
+ that:
+ - host_hyperthreading_info_check_mode is defined
+ - host_hyperthreading_info_check_mode.changed
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/aliases b/test/integration/targets/incidental_vmware_prepare_tests/aliases
new file mode 100644
index 0000000000..136c05e0d0
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/meta/main.yml b/test/integration/targets/incidental_vmware_prepare_tests/meta/main.yml
new file mode 100644
index 0000000000..61d3ffe4f9
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/meta/main.yml
@@ -0,0 +1,2 @@
+---
+allow_duplicates: true
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/init_real_lab.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/init_real_lab.yml
new file mode 100644
index 0000000000..9450084fd1
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/init_real_lab.yml
@@ -0,0 +1,29 @@
+---
+- include_tasks: teardown_with_esxi.yml
+ when: esxi_hosts|length > 0
+- include_tasks: teardown.yml
+
+- when: setup_esxi_instance is not defined
+ block:
+ - include_tasks: setup_datacenter.yml
+ - include_tasks: setup_cluster.yml
+ - include_tasks: setup_attach_hosts.yml
+ when: setup_attach_host is defined
+ - include_tasks: setup_datastore.yml
+ when: setup_datastore is defined
+ - include_tasks: setup_virtualmachines.yml
+ when: setup_virtualmachines is defined
+ - include_tasks: setup_switch.yml
+ when: setup_switch is defined
+ - include_tasks: setup_dvswitch.yml
+ when: setup_dvswitch is defined
+ - include_tasks: setup_resource_pool.yml
+ when: setup_resource_pool is defined
+ - include_tasks: setup_category.yml
+ when: setup_category is defined
+ - include_tasks: setup_tag.yml
+ when: setup_tag is defined
+ - include_tasks: setup_content_library.yml
+ when: setup_content_library is defined
+ - include_tasks: setup_dvs_portgroup.yml
+ when: setup_dvs_portgroup is defined
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/init_vcsim.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/init_vcsim.yml
new file mode 100644
index 0000000000..bfb9ef6db7
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/init_vcsim.yml
@@ -0,0 +1,44 @@
+---
+- name: load vars
+ include_vars:
+ file: vcsim.yml
+
+- name: kill vcsim
+ uri:
+ url: http://{{ vcsim }}:5000/killall
+
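+# The vcsim control service spawns a fresh simulator; the query string below
+# roughly describes the inventory to create (one cluster, one extra folder and
+# two datastores).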
+- name: start vcsim (all dressed)
+ uri:
+ url: http://{{ vcsim }}:5000/spawn?cluster=1&folder=1&ds=2
+ register: vcsim_instance
+ when: setup_esxi_instance is not defined
+
+- name: start vcsim (ESXi only)
+ uri:
+ url: http://{{ vcsim }}:5000/spawn?esx=1
+ register: vcsim_instance
+ when: setup_esxi_instance is defined
+
+# - name: get a list of Hosts from vcsim
+# uri:
+# url: http://{{ vcsim }}:5000/govc_find?filter=H
+# register: vcsim_host_list
+
+# - name: get a list of Hosts from vcsim
+# uri:
+# url: http://{{ vcsim }}:5000/govc_find?filter=F
+# register: vcsim_host_folder
+# - debug: var=vcsim_host_folder
+
+- set_fact:
+ vcenter_hostname: "{{ vcsim }}"
+ vcenter_username: "user"
+ vcenter_password: "pass"
+
+
+- name: set state to poweroff on all VMs
+ vmware_guest:
+ name: "{{ item.name }}"
+ state: poweredoff
+ with_items: "{{ virtual_machines + virtual_machines_in_cluster }}"
+ register: poweroff_d1_c1_f0
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/main.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/main.yml
new file mode 100644
index 0000000000..91a6b9ca09
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+
+- name: load vmware common vars
+ include_vars:
+ file: common.yml
+
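+# Pick the vars file that matches the lab topology: vCenter without ESXi hosts,
+# one ESXi host, or two ESXi hosts (see vars/vcenter_*.yml).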
+- when: vcsim is not defined
+ block:
+ - when: esxi1_hostname is not defined and esxi2_hostname is not defined
+ include_vars:
+ file: vcenter_only.yml
+
+ - when: esxi1_hostname is defined and esxi2_hostname is not defined
+ include_vars:
+ file: vcenter_1esxi.yml
+
+ - when: esxi1_hostname is defined and esxi2_hostname is defined
+ include_vars:
+ file: vcenter_2esxi.yml
+
+ - when: vcsim is not defined
+ include_tasks: init_real_lab.yml
+
+- when: vcsim is defined
+ include_tasks: init_vcsim.yml
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_attach_hosts.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_attach_hosts.yml
new file mode 100644
index 0000000000..6ca19b95b5
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_attach_hosts.yml
@@ -0,0 +1,30 @@
+- fail:
+ msg: "No ESXi hosts defined. esxi_hosts is empty."
+ when: "esxi_hosts|length == 0"
+
+- name: Add ESXi Hosts to vCenter
+ vmware_host:
+ datacenter_name: '{{ dc1 }}'
+ cluster_name: '{{ ccr1 }}'
+ esxi_hostname: '{{ item }}'
+ esxi_username: '{{ esxi_user }}'
+ esxi_password: '{{ esxi_password }}'
+ state: add_or_reconnect
+ with_items: "{{ esxi_hosts }}"
+
+- name: Disable the Maintenance Mode
+ vmware_maintenancemode:
+ esxi_hostname: '{{ item }}'
+ state: absent
+ with_items: "{{ esxi_hosts }}"
+
+- name: Add Management Network VM Portgroup
+ vmware_portgroup:
+ hostname: '{{ item }}'
+ username: '{{ esxi_user }}'
+ password: '{{ esxi_password }}'
+ esxi_hostname: '{{ item }}'
+ switch: "vSwitch0"
+ portgroup: VM Network
+ validate_certs: no
+ with_items: "{{ esxi_hosts }}"
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_category.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_category.yml
new file mode 100644
index 0000000000..bfd680172c
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_category.yml
@@ -0,0 +1,5 @@
+- name: Create a category for cluster
+ vmware_category:
+ category_name: '{{ cluster_category }}'
+ category_description: '{{ cluster_category }} description'
+ state: present
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_cluster.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_cluster.yml
new file mode 100644
index 0000000000..81653a5377
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_cluster.yml
@@ -0,0 +1,10 @@
+- name: Create Cluster
+ vmware_cluster:
+ datacenter_name: '{{ dc1 }}'
+ cluster_name: '{{ ccr1 }}'
+
+- name: Enable DRS on Cluster
+ vmware_cluster_drs:
+ datacenter_name: '{{ dc1 }}'
+ cluster_name: '{{ ccr1 }}'
+ enable_drs: yes
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_content_library.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_content_library.yml
new file mode 100644
index 0000000000..26affd53fa
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_content_library.yml
@@ -0,0 +1,7 @@
+- name: Create Content Library
+ vmware_content_library_manager:
+ library_name: test-content-lib
+ library_description: 'Library created by the prepare_vmware_tests role'
+ library_type: local
+ datastore_name: '{{ rw_datastore }}'
+ state: present
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_datacenter.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_datacenter.yml
new file mode 100644
index 0000000000..a5f3eafe51
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_datacenter.yml
@@ -0,0 +1,11 @@
+- name: Create Datacenter
+ vmware_datacenter:
+ datacenter_name: '{{ dc1 }}'
+ state: present
+
+- name: Create a VM folder in the given Datacenter
+ vcenter_folder:
+ datacenter: '{{ dc1 }}'
+ folder_name: '{{ f0 }}'
+ folder_type: vm
+ state: present
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_datastore.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_datastore.yml
new file mode 100644
index 0000000000..5a62816016
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_datastore.yml
@@ -0,0 +1,42 @@
+---
+- name: Mount the NFS (ro_datastore) datastore on the ESXi hosts
+ vmware_host_datastore:
+ hostname: '{{ item }}'
+ username: '{{ esxi_user }}'
+ password: '{{ esxi_password }}'
+ datastore_name: '{{ ro_datastore }}'
+ datastore_type: '{{ infra.datastores[ro_datastore].type }}'
+ nfs_server: '{{ infra.datastores[ro_datastore].server }}'
+ nfs_path: '{{ infra.datastores[ro_datastore].path }}'
+ nfs_ro: '{{ infra.datastores[ro_datastore].ro }}'
+ state: present
+ validate_certs: no
+ with_items: "{{ esxi_hosts }}"
+
+- name: Mount the NFS (rw_datastore) datastore on the ESXi hosts
+ vmware_host_datastore:
+ hostname: '{{ item }}'
+ username: '{{ esxi_user }}'
+ password: '{{ esxi_password }}'
+ datastore_name: '{{ rw_datastore }}'
+ datastore_type: '{{ infra.datastores[rw_datastore].type }}'
+ nfs_server: '{{ infra.datastores[rw_datastore].server }}'
+ nfs_path: '{{ infra.datastores[rw_datastore].path }}'
+ nfs_ro: '{{ infra.datastores[rw_datastore].ro }}'
+ state: present
+ validate_certs: no
+ with_items: "{{ esxi_hosts }}"
+
+- vmware_host_scanhba:
+ refresh_storage: true
+ cluster_name: '{{ ccr1 }}'
+
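+# vCenter can take a moment to report the freshly mounted datastores, so poll
+# until both NFS datastores show up in the datastore info.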
+- name: Wait for vCenter to refresh the datastore list
+ vmware_datastore_info:
+ validate_certs: false
+ cluster: '{{ ccr1 }}'
+ register: setup_datastore_datastore_info
+ failed_when: setup_datastore_datastore_info.datastores|selectattr('type', 'equalto', 'NFS')|list|length != 2
+ until: setup_datastore_datastore_info is succeeded
+ retries: 60
+ delay: 1
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_dvs_portgroup.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_dvs_portgroup.yml
new file mode 100644
index 0000000000..c0e14fe539
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_dvs_portgroup.yml
@@ -0,0 +1,18 @@
+---
+- name: create basic DVS portgroup
+ vmware_dvs_portgroup:
+ switch_name: "{{ dvswitch1 }}"
+ portgroup_name: '{{ dvpg1 }}'
+ vlan_id: 0
+ num_ports: 32
+ portgroup_type: earlyBinding
+ state: present
+
+- name: Create the DVS PG with slash in name
+ vmware_dvs_portgroup:
+ portgroup_name: '{{ dvpg_with_slash }}'
+ switch_name: '{{ dvswitch1 }}'
+ vlan_id: 0
+ num_ports: 120
+ portgroup_type: earlyBinding
+ state: present
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_dvswitch.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_dvswitch.yml
new file mode 100644
index 0000000000..ba36a46196
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_dvswitch.yml
@@ -0,0 +1,20 @@
+---
+- name: Create the DVSwitch
+ vmware_dvswitch:
+ datacenter_name: '{{ dc1 }}'
+ switch_name: '{{ dvswitch1 }}'
+ switch_version: 6.0.0
+ mtu: 9000
+ uplink_quantity: 2
+ discovery_proto: lldp
+ discovery_operation: both
+ state: present
+- name: Attach the hosts to the DVSwitch
+ vmware_dvs_host:
+ esxi_hostname: "{{ item }}"
+ switch_name: '{{ dvswitch1 }}'
+ vmnics:
+ - vmnic1
+ state: present
+ with_items: "{{ esxi_hosts }}"
+ when: setup_attach_host is defined
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_resource_pool.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_resource_pool.yml
new file mode 100644
index 0000000000..060785d17a
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_resource_pool.yml
@@ -0,0 +1,15 @@
+---
+- name: Add resource pool to vCenter
+ vmware_resource_pool:
+ datacenter: '{{ dc1 }}'
+ cluster: '{{ ccr1 }}'
+ resource_pool: DC0_C0_RP1
+ mem_shares: normal
+ mem_limit: -1
+ mem_reservation: 0
+ mem_expandable_reservations: yes
+ cpu_shares: normal
+ cpu_limit: -1
+ cpu_reservation: 0
+ cpu_expandable_reservations: yes
+ state: present
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_switch.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_switch.yml
new file mode 100644
index 0000000000..c63a28c52b
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_switch.yml
@@ -0,0 +1,7 @@
+---
+- name: Add a VMware vSwitch to each ESXi host
+ vmware_vswitch:
+ esxi_hostname: '{{ item }}'
+ switch_name: "{{ switch1 }}"
+ state: present
+ with_items: "{{ esxi_hosts }}"
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_tag.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_tag.yml
new file mode 100644
index 0000000000..31b495c0f4
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_tag.yml
@@ -0,0 +1,15 @@
+- name: Get Category facts
+ vmware_category_info:
+ register: cat_info
+
+- name: Get Category id for {{ cluster_category }}
+ set_fact:
+ cluster_category_id: "{{ cat_info.tag_category_info[0].category_id }}"
+
+- name: Create a tag for cluster
+ vmware_tag:
+ category_id: '{{ cluster_category_id }}'
+ tag_name: '{{ cluster_tag }}'
+ tag_description: '{{ cluster_tag }} Description'
+ state: present
+ when: cluster_category_id is defined
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_virtualmachines.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_virtualmachines.yml
new file mode 100644
index 0000000000..8368f4b813
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/setup_virtualmachines.yml
@@ -0,0 +1,46 @@
+---
+- name: Create VMs
+ vmware_guest:
+ datacenter: "{{ dc1 }}"
+ folder: '{{ item.folder }}'
+ name: '{{ item.name }}'
+ state: poweredoff
+ guest_id: debian8_64Guest
+ disk:
+ - size_gb: 1
+ type: thin
+ datastore: '{{ rw_datastore }}'
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ scsi: paravirtual
+ cdrom:
+ type: iso
+ iso_path: "[{{ ro_datastore }}] fedora.iso"
+ networks:
+ - name: VM Network
+ with_items: '{{ virtual_machines }}'
+
+
+- name: Create VMs in cluster
+ vmware_guest:
+ datacenter: "{{ dc1 }}"
+ folder: '{{ item.folder }}'
+ cluster: '{{ item.cluster }}'
+ name: '{{ item.name }}'
+ state: poweredoff
+ guest_id: debian8_64Guest
+ disk:
+ - size_gb: 1
+ type: thin
+ datastore: '{{ rw_datastore }}'
+ hardware:
+ memory_mb: 128
+ num_cpus: 1
+ scsi: paravirtual
+ cdrom:
+ type: iso
+ iso_path: "[{{ ro_datastore }}] fedora.iso"
+ networks:
+ - name: VM Network
+ with_items: '{{ virtual_machines_in_cluster }}'
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/teardown.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/teardown.yml
new file mode 100644
index 0000000000..aba390592e
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/teardown.yml
@@ -0,0 +1,24 @@
+---
+- name: Delete the datastore clusters from the datacenter
+ vmware_datastore_cluster:
+ datacenter_name: "{{ dc1 }}"
+ datastore_cluster_name: '{{ item }}'
+ state: absent
+ with_items:
+ - DSC1
+ - DSC2
+ ignore_errors: yes
+
+- name: Remove the datacenter
+ vmware_datacenter:
+ datacenter_name: '{{ item }}'
+ state: absent
+ when: vcsim is not defined
+ with_items:
+ - '{{ dc1 }}'
+ - datacenter_0001
+
+- name: kill vcsim
+ uri:
+ url: "http://{{ vcsim }}:5000/killall"
+ when: vcsim is defined
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/tasks/teardown_with_esxi.yml b/test/integration/targets/incidental_vmware_prepare_tests/tasks/teardown_with_esxi.yml
new file mode 100644
index 0000000000..2707847ef2
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/tasks/teardown_with_esxi.yml
@@ -0,0 +1,96 @@
+---
+- name: Clean up the firewall rules
+ vmware_host_firewall_manager:
+ cluster_name: '{{ ccr1 }}'
+ rules:
+ - name: vvold
+ enabled: False
+ - name: CIMHttpServer
+ enabled: True
+ allowed_hosts:
+ all_ip: True
+ - name: NFC
+ enabled: True
+ allowed_hosts:
+ all_ip: True
+ ignore_errors: yes
+
+- name: Remove the VMs prepared by the prepare_vmware_tests role
+ vmware_guest:
+ name: "{{ item.name }}"
+ force: yes
+ state: absent
+ with_items: '{{ virtual_machines + virtual_machines_in_cluster }}'
+
+- name: Remove the test_vm* VMs
+ vmware_guest:
+ name: "{{ item }}"
+ force: yes
+ state: absent
+ with_items:
+ - test_vm1
+ - test_vm2
+ - test_vm3
+
+- name: Remove the DVS portgroups
+ vmware_dvs_portgroup:
+ switch_name: "{{ dvswitch1 }}"
+ portgroup_name: '{{ item }}'
+ vlan_id: 0
+ num_ports: 32
+ portgroup_type: earlyBinding
+ state: absent
+ loop:
+ - DC0_DVPG0
+ - DVPG/1
+ ignore_errors: yes
+
+- name: Remove the DVSwitch
+ vmware_dvswitch:
+ datacenter_name: '{{ dc1 }}'
+ state: absent
+ switch_name: '{{ item }}'
+ loop:
+ - '{{ dvswitch1 }}'
+ - dvswitch_0001
+ - dvswitch_0002
+ ignore_errors: yes
+
+- name: Remove the vSwitches
+ vmware_vswitch:
+ hostname: '{{ item }}'
+ username: '{{ esxi_user }}'
+ password: '{{ esxi_password }}'
+ switch_name: "{{ switch1 }}"
+ state: absent
+ with_items: "{{ esxi_hosts }}"
+ ignore_errors: yes
+
+- name: Remove ESXi Hosts from vCenter
+ vmware_host:
+ datacenter_name: '{{ dc1 }}'
+ cluster_name: '{{ ccr1 }}'
+ esxi_hostname: '{{ item }}'
+ esxi_username: '{{ esxi_user }}'
+ esxi_password: '{{ esxi_password }}'
+ state: absent
+ with_items: "{{ esxi_hosts }}"
+ ignore_errors: yes
+
+- name: Unmount NFS datastores from the ESXi hosts (1/2)
+ vmware_host_datastore:
+ hostname: '{{ item }}'
+ username: '{{ esxi_user }}'
+ password: '{{ esxi_password }}'
+ datastore_name: '{{ ro_datastore }}'
+ state: absent
+ with_items: "{{ esxi_hosts }}"
+
+- name: Unmount NFS datastores from the ESXi hosts (2/2)
+ vmware_host_datastore:
+ hostname: '{{ item }}'
+ username: '{{ esxi_user }}'
+ password: '{{ esxi_password }}'
+ datastore_name: '{{ rw_datastore }}'
+ state: absent
+ with_items: "{{ esxi_hosts }}"
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/vars/common.yml b/test/integration/targets/incidental_vmware_prepare_tests/vars/common.yml
new file mode 100644
index 0000000000..bfe5a30fb5
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/vars/common.yml
@@ -0,0 +1,12 @@
+---
+dc1: DC0
+ccr1: DC0_C0
+f0: F0
+switch1: switch1
+esxi1: '{{ esxi_hosts[0] }}'
+esxi2: '{{ esxi_hosts[1] }}'
+esxi3: '{{ esxi_hosts[2] }}'
+dvswitch1: DVS0
+esxi_user: root
+dvpg1: DC0_DVPG0
+dvpg_with_slash: DVPG/1
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_1esxi.yml b/test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_1esxi.yml
new file mode 100644
index 0000000000..45e94331f9
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_1esxi.yml
@@ -0,0 +1,33 @@
+---
+esxi_hosts:
+ - esxi1.test
+rw_datastore: rw_datastore
+ro_datastore: ro_datastore
+esxi_password: '{{ esxi1_password }}'
+esxi_user: '{{ esxi1_username }}'
+infra:
+ datastores:
+ rw_datastore:
+ type: nfs
+ server: datastore.test
+ path: /srv/share/vms
+ ro: false
+ ro_datastore:
+ type: nfs
+ server: datastore.test
+ path: /srv/share/isos
+ ro: true
+virtual_machines:
+ - name: DC0_H0_VM0
+ folder: '{{ f0 }}'
+ - name: DC0_H0_VM1
+ folder: '{{ f0 }}'
+virtual_machines_in_cluster:
+ - name: DC0_C0_RP0_VM0
+ folder: '{{ f0 }}'
+ cluster: '{{ ccr1 }}'
+ - name: DC0_C0_RP0_VM1
+ folder: '{{ f0 }}'
+ cluster: '{{ ccr1 }}'
+cluster_tag: test_cluster_tag_0001
+cluster_category: test_cluster_cat_0001
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_2esxi.yml b/test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_2esxi.yml
new file mode 100644
index 0000000000..6b0203b4ac
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_2esxi.yml
@@ -0,0 +1,34 @@
+---
+esxi_hosts:
+ - esxi1.test
+ - esxi2.test
+rw_datastore: rw_datastore
+ro_datastore: ro_datastore
+esxi_password: '{{ esxi1_password }}'
+esxi_user: '{{ esxi1_username }}'
+infra:
+ datastores:
+ rw_datastore:
+ type: nfs
+ server: datastore.test
+ path: /srv/share/vms
+ ro: false
+ ro_datastore:
+ type: nfs
+ server: datastore.test
+ path: /srv/share/isos
+ ro: true
+virtual_machines:
+ - name: DC0_H0_VM0
+ folder: '{{ f0 }}'
+ - name: DC0_H0_VM1
+ folder: '{{ f0 }}'
+virtual_machines_in_cluster:
+ - name: DC0_C0_RP0_VM0
+ folder: '{{ f0 }}'
+ cluster: '{{ ccr1 }}'
+ - name: DC0_C0_RP0_VM1
+ folder: '{{ f0 }}'
+ cluster: '{{ ccr1 }}'
+cluster_tag: test_cluster_tag_0001
+cluster_category: test_cluster_cat_0001
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_only.yml b/test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_only.yml
new file mode 100644
index 0000000000..58560b5619
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/vars/vcenter_only.yml
@@ -0,0 +1,6 @@
+---
+esxi_hosts: []
+infra:
+virtual_machines: []
+virtual_machines_in_cluster: []
+#esxi_password: '' \ No newline at end of file
diff --git a/test/integration/targets/incidental_vmware_prepare_tests/vars/vcsim.yml b/test/integration/targets/incidental_vmware_prepare_tests/vars/vcsim.yml
new file mode 100644
index 0000000000..ee783c1038
--- /dev/null
+++ b/test/integration/targets/incidental_vmware_prepare_tests/vars/vcsim.yml
@@ -0,0 +1,19 @@
+---
+esxi_hosts:
+ - DC0_C0_H0
+ - DC0_C0_H1
+ - DC0_C0_H2
+esxi_password: 'pass'
+esxi_user: 'user'
+rw_datastore: LocalDS_0
+ro_datastore: LocalDS_1
+virtual_machines:
+ - name: DC0_H0_VM0
+ folder: /F0/DC0/vm/F0
+ - name: DC0_H0_VM1
+ folder: /F0/DC0/vm/F0
+virtual_machines_in_cluster:
+ - name: DC0_C0_RP0_VM0
+ cluster: '{{ ccr1 }}'
+ - name: DC0_C0_RP0_VM1
+ cluster: '{{ ccr1 }}'
diff --git a/test/lib/ansible_test/_internal/sanity/integration_aliases.py b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
index 9f30f612a6..ee9adb11f8 100644
--- a/test/lib/ansible_test/_internal/sanity/integration_aliases.py
+++ b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
@@ -227,6 +227,7 @@ class IntegrationAliasesTest(SanityVersionNeutral):
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['cloud/%s/' % cloud], include=True, directories=False, errors=False)),
find=self.format_shippable_group_alias(cloud, 'cloud'),
+ find_incidental=['shippable/%s/incidental/' % cloud, 'shippable/cloud/incidental/'],
)
return messages
@@ -246,17 +247,23 @@ class IntegrationAliasesTest(SanityVersionNeutral):
return messages
- def check_ci_group(self, targets, find):
+ def check_ci_group(self, targets, find, find_incidental=None):
"""
:type targets: tuple[CompletionTarget]
:type find: str
+ :type find_incidental: list[str] | None
:rtype: list[SanityMessage]
"""
all_paths = set(target.path for target in targets)
supported_paths = set(target.path for target in filter_targets(targets, [find], include=True, directories=False, errors=False))
unsupported_paths = set(target.path for target in filter_targets(targets, [self.UNSUPPORTED], include=True, directories=False, errors=False))
- unassigned_paths = all_paths - supported_paths - unsupported_paths
+ if find_incidental:
+ incidental_paths = set(target.path for target in filter_targets(targets, find_incidental, include=True, directories=False, errors=False))
+ else:
+ incidental_paths = set()
+
+ unassigned_paths = all_paths - supported_paths - unsupported_paths - incidental_paths
conflicting_paths = supported_paths & unsupported_paths
unassigned_message = 'missing alias `%s` or `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
diff --git a/test/sanity/ignore.txt b/test/sanity/ignore.txt
index 8b3d25440a..67e273a1fe 100644
--- a/test/sanity/ignore.txt
+++ b/test/sanity/ignore.txt
@@ -8120,6 +8120,8 @@ test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py future-i
test/integration/targets/ignore_unreachable/fake_connectors/bad_exec.py metaclass-boilerplate
test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py future-import-boilerplate
test/integration/targets/ignore_unreachable/fake_connectors/bad_put_file.py metaclass-boilerplate
+test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.py future-import-boilerplate
+test/integration/targets/incidental_script_inventory_vmware_inventory/vmware_inventory.py metaclass-boilerplate
test/integration/targets/inventory_kubevirt/inventory_diff.py future-import-boilerplate
test/integration/targets/inventory_kubevirt/inventory_diff.py metaclass-boilerplate
test/integration/targets/inventory_kubevirt/server.py future-import-boilerplate
@@ -8241,6 +8243,25 @@ test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt test-constr
test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py use-compat-six
test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1 pslint:PSCustomUseLiteralPath
test/lib/ansible_test/_data/setup/windows-httptester.ps1 pslint:PSCustomUseLiteralPath
+test/support/integration/plugins/module_utils/ansible_tower.py future-import-boilerplate
+test/support/integration/plugins/module_utils/ansible_tower.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/azure_rm_common.py future-import-boilerplate
+test/support/integration/plugins/module_utils/azure_rm_common.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/azure_rm_common_rest.py future-import-boilerplate
+test/support/integration/plugins/module_utils/azure_rm_common_rest.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/cloud.py future-import-boilerplate
+test/support/integration/plugins/module_utils/cloud.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/common/network.py future-import-boilerplate
+test/support/integration/plugins/module_utils/common/network.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/compat/ipaddress.py future-import-boilerplate
+test/support/integration/plugins/module_utils/compat/ipaddress.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/compat/ipaddress.py no-unicode-literals
+test/support/integration/plugins/module_utils/k8s/common.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/k8s/raw.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/net_tools/nios/api.py future-import-boilerplate
+test/support/integration/plugins/module_utils/net_tools/nios/api.py metaclass-boilerplate
+test/support/integration/plugins/module_utils/network/common/utils.py future-import-boilerplate
+test/support/integration/plugins/module_utils/network/common/utils.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/netconf.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/doc_fragments/network_agnostic.py future-import-boilerplate
diff --git a/test/support/integration/plugins/cache/jsonfile.py b/test/support/integration/plugins/cache/jsonfile.py
new file mode 100644
index 0000000000..80b16f55b5
--- /dev/null
+++ b/test/support/integration/plugins/cache/jsonfile.py
@@ -0,0 +1,63 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ cache: jsonfile
+ short_description: JSON formatted files.
+ description:
+ - This cache uses JSON formatted, per host, files saved to the filesystem.
+ version_added: "1.9"
+ author: Ansible Core (@ansible-core)
+ options:
+ _uri:
+ required: True
+ description:
+ - Path in which the cache plugin will save the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the JSON files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
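+
+# A minimal ansible.cfg snippet that would exercise this plugin, assuming it is
+# reachable on the cache plugin path (the ini keys are the ones documented above):
+#
+#   [defaults]
+#   fact_caching = jsonfile
+#   fact_caching_connection = /tmp/ansible_facts_cache
+#   fact_caching_timeout = 86400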
+
+import codecs
+import json
+
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by json files.
+ """
+
+ def _load(self, filepath):
+ # Valid JSON is always UTF-8 encoded.
+ with codecs.open(filepath, 'r', encoding='utf-8') as f:
+ return json.load(f, cls=AnsibleJSONDecoder)
+
+ def _dump(self, value, filepath):
+ with codecs.open(filepath, 'w', encoding='utf-8') as f:
+ f.write(json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4))
diff --git a/test/support/integration/plugins/filter/json_query.py b/test/support/integration/plugins/filter/json_query.py
new file mode 100644
index 0000000000..d1da71b476
--- /dev/null
+++ b/test/support/integration/plugins/filter/json_query.py
@@ -0,0 +1,53 @@
+# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+
+try:
+ import jmespath
+ HAS_LIB = True
+except ImportError:
+ HAS_LIB = False
+
+
+def json_query(data, expr):
+ '''Query data using jmespath query language ( http://jmespath.org ). Example:
+ - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
+ '''
+ if not HAS_LIB:
+ raise AnsibleError('You need to install "jmespath" prior to running '
+ 'json_query filter')
+
+ try:
+ return jmespath.search(expr, data)
+ except jmespath.exceptions.JMESPathError as e:
+ raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
+ except Exception as e:
+ # For older jmespath, we can get ValueError and TypeError without much info.
+ raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+ return {
+ 'json_query': json_query
+ }
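+
+# Illustrative usage in a play (the variable names are only examples): given a
+# registered `result` that holds a list of instance dicts, a task such as
+#   - debug:
+#       msg: "{{ result.instances | json_query('[].instance_id') }}"
+# returns only the instance IDs.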
diff --git a/test/support/integration/plugins/inventory/aws_ec2.py b/test/support/integration/plugins/inventory/aws_ec2.py
new file mode 100644
index 0000000000..09c42cf99b
--- /dev/null
+++ b/test/support/integration/plugins/inventory/aws_ec2.py
@@ -0,0 +1,760 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: aws_ec2
+ plugin_type: inventory
+ short_description: EC2 inventory source
+ requirements:
+ - boto3
+ - botocore
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ description:
+ - Get inventory hosts from Amazon Web Services EC2.
+ - Uses a YAML configuration file that ends with C(aws_ec2.(yml|yaml)).
+ notes:
+ - If no credentials are provided and the control node has an associated IAM instance profile then the
+ role will be used for authentication.
+ author:
+ - Sloane Hertel (@s-hertel)
+ options:
+ aws_profile:
+ description: The AWS profile
+ type: str
+ aliases: [ boto_profile ]
+ env:
+ - name: AWS_DEFAULT_PROFILE
+ - name: AWS_PROFILE
+ aws_access_key:
+ description: The AWS access key to use.
+ type: str
+ aliases: [ aws_access_key_id ]
+ env:
+ - name: EC2_ACCESS_KEY
+ - name: AWS_ACCESS_KEY
+ - name: AWS_ACCESS_KEY_ID
+ aws_secret_key:
+ description: The AWS secret key that corresponds to the access key.
+ type: str
+ aliases: [ aws_secret_access_key ]
+ env:
+ - name: EC2_SECRET_KEY
+ - name: AWS_SECRET_KEY
+ - name: AWS_SECRET_ACCESS_KEY
+ aws_security_token:
+ description: The AWS security token if using temporary access and secret keys.
+ type: str
+ env:
+ - name: EC2_SECURITY_TOKEN
+ - name: AWS_SESSION_TOKEN
+ - name: AWS_SECURITY_TOKEN
+ plugin:
+ description: Token that ensures this is a source file for the plugin.
+ required: True
+ choices: ['aws_ec2']
+ iam_role_arn:
+ description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
+ credentials with enough privilege to perform the AssumeRole action.
+ version_added: '2.9'
+ regions:
+ description:
+ - A list of regions in which to describe EC2 instances.
+ - If empty (the default), this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
+ type: list
+ default: []
+ hostnames:
+ description:
+ - A list in order of precedence for hostname variables.
+ - You can use the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
+ type: list
+ default: []
+ filters:
+ description:
+ - A dictionary of filter value pairs.
+ - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
+ type: dict
+ default: {}
+ include_extra_api_calls:
+ description:
+ - Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
+ - Spot instances may be persistent and instances may have associated events.
+ type: bool
+ default: False
+ version_added: '2.8'
+ strict_permissions:
+ description:
+ - By default if a 403 (Forbidden) error code is encountered this plugin will fail.
+ - You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
+ type: bool
+ default: True
+ use_contrib_script_compatible_sanitization:
+ description:
+ - By default this plugin uses a general group name sanitization to create safe and usable group names for use in Ansible.
+ This option allows you to override that, to ease migration from the old inventory script, and
+ matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
+ To replicate the behavior of ``replace_dash_in_groups = True`` with constructed groups,
+ you will need to replace hyphens with underscores via the regex_replace filter for those entries.
+ - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
+ otherwise the core engine will just apply the standard sanitization on top.
+ - This is not the default because such names can break certain functionality, since group names
+ end up being used as Python identifiers and not all characters are valid in them.
+ type: bool
+ default: False
+ version_added: '2.8'
+'''
+
+EXAMPLES = '''
+# Minimal example using environment vars or instance role credentials
+# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
+plugin: aws_ec2
+regions:
+ - us-east-1
+
+# Example using filters, ignoring permission errors, and specifying the hostname precedence
+plugin: aws_ec2
+boto_profile: aws_profile
+# Populate inventory with instances in these regions
+regions:
+ - us-east-1
+ - us-east-2
+filters:
+ # All instances with their `Environment` tag set to `dev`
+ tag:Environment: dev
+ # All dev and QA hosts
+ tag:Environment:
+ - dev
+ - qa
+ instance.group-id: sg-xxxxxxxx
+# Ignores 403 errors rather than failing
+strict_permissions: False
+# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
+# inventory_hostname use compose (see example below).
+hostnames:
+ - tag:Name=Tag1,Name=Tag2 # Return specific hosts only
+ - tag:CustomDNSName
+ - dns-name
+ - private-ip-address
+
+# Example using constructed features to create groups and set ansible_host
+plugin: aws_ec2
+regions:
+ - us-east-1
+ - us-west-1
+# keyed_groups may be used to create custom groups
+strict: False
+keyed_groups:
+ # Add e.g. x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'architecture'
+ # Add hosts to tag_Name_Value groups for each Name/Value tag pair
+ - prefix: tag
+ key: tags
+ # Add hosts to e.g. instance_type_z3_tiny
+ - prefix: instance_type
+ key: instance_type
+ # Create security_groups_sg_abcd1234 group for each SG
+ - key: 'security_groups|json_query("[].group_id")'
+ prefix: 'security_groups'
+ # Create a group for each value of the Application tag
+ - key: tags.Application
+ separator: ''
+ # Create a group per region e.g. aws_region_us_east_2
+ - key: placement.region
+ prefix: aws_region
+ # Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
+ - key: tags['Role']
+ prefix: foo
+ parent_group: "project"
+# Set individual variables with compose
+compose:
+ # Use the private IP address to connect to the host
+ # (note: this does not modify inventory_hostname, which is set via I(hostnames))
+ ansible_host: private_ip_address
+'''
+
+import re
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.display import Display
+from ansible.module_utils.six import string_types
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ raise AnsibleError('The ec2 dynamic inventory plugin requires boto3 and botocore.')
+
+display = Display()
+
+# The mappings give an array of keys to get from the filter name to the value
+# returned by boto3's EC2 describe_instances method.
+
+instance_meta_filter_to_boto_attr = {
+ 'group-id': ('Groups', 'GroupId'),
+ 'group-name': ('Groups', 'GroupName'),
+ 'network-interface.attachment.instance-owner-id': ('OwnerId',),
+ 'owner-id': ('OwnerId',),
+ 'requester-id': ('RequesterId',),
+ 'reservation-id': ('ReservationId',),
+}
+
+instance_data_filter_to_boto_attr = {
+ 'affinity': ('Placement', 'Affinity'),
+ 'architecture': ('Architecture',),
+ 'availability-zone': ('Placement', 'AvailabilityZone'),
+ 'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
+ 'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
+ 'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
+ 'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
+ 'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
+ 'client-token': ('ClientToken',),
+ 'dns-name': ('PublicDnsName',),
+ 'host-id': ('Placement', 'HostId'),
+ 'hypervisor': ('Hypervisor',),
+ 'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
+ 'image-id': ('ImageId',),
+ 'instance-id': ('InstanceId',),
+ 'instance-lifecycle': ('InstanceLifecycle',),
+ 'instance-state-code': ('State', 'Code'),
+ 'instance-state-name': ('State', 'Name'),
+ 'instance-type': ('InstanceType',),
+ 'instance.group-id': ('SecurityGroups', 'GroupId'),
+ 'instance.group-name': ('SecurityGroups', 'GroupName'),
+ 'ip-address': ('PublicIpAddress',),
+ 'kernel-id': ('KernelId',),
+ 'key-name': ('KeyName',),
+ 'launch-index': ('AmiLaunchIndex',),
+ 'launch-time': ('LaunchTime',),
+ 'monitoring-state': ('Monitoring', 'State'),
+ 'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
+ 'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
+ 'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
+ 'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
+ 'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
+ 'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+ 'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
+ 'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
+ 'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
+ 'network-interface.attachment.instance-id': ('InstanceId',),
+ 'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
+ 'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
+ 'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
+ 'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
+ 'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
+ 'network-interface.description': ('NetworkInterfaces', 'Description'),
+ 'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
+ 'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
+ 'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
+ 'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
+ 'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
+ 'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
+ 'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
+ # 'network-interface.requester-id': (),
+ 'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
+ 'network-interface.status': ('NetworkInterfaces', 'Status'),
+ 'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
+ 'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
+ 'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
+ 'placement-group-name': ('Placement', 'GroupName'),
+ 'platform': ('Platform',),
+ 'private-dns-name': ('PrivateDnsName',),
+ 'private-ip-address': ('PrivateIpAddress',),
+ 'product-code': ('ProductCodes', 'ProductCodeId'),
+ 'product-code.type': ('ProductCodes', 'ProductCodeType'),
+ 'ramdisk-id': ('RamdiskId',),
+ 'reason': ('StateTransitionReason',),
+ 'root-device-name': ('RootDeviceName',),
+ 'root-device-type': ('RootDeviceType',),
+ 'source-dest-check': ('SourceDestCheck',),
+ 'spot-instance-request-id': ('SpotInstanceRequestId',),
+ 'state-reason-code': ('StateReason', 'Code'),
+ 'state-reason-message': ('StateReason', 'Message'),
+ 'subnet-id': ('SubnetId',),
+ 'tag': ('Tags',),
+ 'tag-key': ('Tags',),
+ 'tag-value': ('Tags',),
+ 'tenancy': ('Placement', 'Tenancy'),
+ 'virtualization-type': ('VirtualizationType',),
+ 'vpc-id': ('VpcId',),
+}
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'aws_ec2'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+
+ self.group_prefix = 'aws_ec2_'
+
+ # credentials
+ self.boto_profile = None
+ self.aws_secret_access_key = None
+ self.aws_access_key_id = None
+ self.aws_security_token = None
+ self.iam_role_arn = None
+
+ def _compile_values(self, obj, attr):
+ '''
+ :param obj: A list or dict of instance attributes
+ :param attr: A key
+ :return The value(s) found via the attr
+ '''
+ if obj is None:
+ return
+
+ temp_obj = []
+
+ if isinstance(obj, list) or isinstance(obj, tuple):
+ for each in obj:
+ value = self._compile_values(each, attr)
+ if value:
+ temp_obj.append(value)
+ else:
+ temp_obj = obj.get(attr)
+
+ has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)])
+ if has_indexes and len(temp_obj) == 1:
+ return temp_obj[0]
+
+ return temp_obj
+
+ def _get_boto_attr_chain(self, filter_name, instance):
+ '''
+ :param filter_name: The filter
+ :param instance: instance dict returned by boto3 ec2 describe_instances()
+ '''
+ allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
+ if filter_name not in allowed_filters:
+ raise AnsibleError("Invalid filter '%s' provided; filter must be one of %s." % (filter_name,
+ allowed_filters))
+ if filter_name in instance_data_filter_to_boto_attr:
+ boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
+ else:
+ boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
+
+ instance_value = instance
+ for attribute in boto_attr_list:
+ instance_value = self._compile_values(instance_value, attribute)
+ return instance_value
+
+ def _get_credentials(self):
+ '''
+ :return A dictionary of boto client credentials
+ '''
+ boto_params = {}
+ for credential in (('aws_access_key_id', self.aws_access_key_id),
+ ('aws_secret_access_key', self.aws_secret_access_key),
+ ('aws_session_token', self.aws_security_token)):
+ if credential[1]:
+ boto_params[credential[0]] = credential[1]
+
+ return boto_params
+
+ def _get_connection(self, credentials, region='us-east-1'):
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ return connection
+
+ def _boto3_assume_role(self, credentials, region):
+ """
+ Assume an IAM role passed by iam_role_arn parameter
+
+ :return: a dict containing the credentials of the assumed role
+ """
+
+ iam_role_arn = self.iam_role_arn
+
+ try:
+ sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
+ sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
+ return dict(
+ aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
+ aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
+ aws_session_token=sts_session['Credentials']['SessionToken']
+ )
+ except botocore.exceptions.ClientError as e:
+ raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
+
+ def _boto3_conn(self, regions):
+ '''
+ :param regions: A list of regions to create a boto3 client
+
+ Generator that yields a boto3 client and the region
+ '''
+
+ credentials = self._get_credentials()
+ iam_role_arn = self.iam_role_arn
+
+ if not regions:
+ try:
+ # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
+ client = self._get_connection(credentials)
+ resp = client.describe_regions()
+ regions = [x['RegionName'] for x in resp.get('Regions', [])]
+ except botocore.exceptions.NoRegionError:
+ # the above seems to fail depending on the boto3 version; ignore it and try something else
+ pass
+
+ # fallback to local list hardcoded in boto3 if still no regions
+ if not regions:
+ session = boto3.Session()
+ regions = session.get_available_regions('ec2')
+
+ # I give up, now you MUST give me regions
+ if not regions:
+ raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')
+
+ for region in regions:
+ connection = self._get_connection(credentials, region)
+ try:
+ if iam_role_arn is not None:
+ assumed_credentials = self._boto3_assume_role(credentials, region)
+ else:
+ assumed_credentials = credentials
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ if self.boto_profile:
+ try:
+ connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ else:
+ raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
+ yield connection, region
+
+ def _get_instances_by_region(self, regions, filters, strict_permissions):
+ '''
+ :param regions: a list of regions in which to describe instances
+ :param filters: a list of boto3 filter dictionaries
+ :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+ :return A list of instance dictionaries
+ '''
+ all_instances = []
+
+ for connection, region in self._boto3_conn(regions):
+ try:
+ # By default find non-terminated/terminating instances
+ if not any([f['Name'] == 'instance-state-name' for f in filters]):
+ filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
+ paginator = connection.get_paginator('describe_instances')
+ reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
+ instances = []
+ for r in reservations:
+ new_instances = r['Instances']
+ for instance in new_instances:
+ instance.update(self._get_reservation_details(r))
+ if self.get_option('include_extra_api_calls'):
+ instance.update(self._get_event_set_and_persistence(connection, instance['InstanceId'], instance.get('SpotInstanceRequestId')))
+ instances.extend(new_instances)
+ except botocore.exceptions.ClientError as e:
+ if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
+ instances = []
+ else:
+ raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+ except botocore.exceptions.BotoCoreError as e:
+ raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+
+ all_instances.extend(instances)
+
+ return sorted(all_instances, key=lambda x: x['InstanceId'])
+
+ def _get_reservation_details(self, reservation):
+ return {
+ 'OwnerId': reservation['OwnerId'],
+ 'RequesterId': reservation.get('RequesterId', ''),
+ 'ReservationId': reservation['ReservationId']
+ }
+
+ def _get_event_set_and_persistence(self, connection, instance_id, spot_instance):
+ host_vars = {'Events': '', 'Persistent': False}
+ try:
+ kwargs = {'InstanceIds': [instance_id]}
+ host_vars['Events'] = connection.describe_instance_status(**kwargs)['InstanceStatuses'][0].get('Events', '')
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if not self.get_option('strict_permissions'):
+ pass
+ else:
+ raise AnsibleError("Failed to describe instance status: %s" % to_native(e))
+ if spot_instance:
+ try:
+ kwargs = {'SpotInstanceRequestIds': [spot_instance]}
+ host_vars['Persistent'] = bool(
+ connection.describe_spot_instance_requests(**kwargs)['SpotInstanceRequests'][0].get('Type') == 'persistent'
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ if not self.get_option('strict_permissions'):
+ pass
+ else:
+ raise AnsibleError("Failed to describe spot instance requests: %s" % to_native(e))
+ return host_vars
+
+ def _get_tag_hostname(self, preference, instance):
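+ # e.g. a preference of 'tag:Name=web' yields the hostname 'Name_web' when the
+ # instance carries that tag, while 'tag:Name' yields the value of the Name tag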
+ tag_hostnames = preference.split('tag:', 1)[1]
+ if ',' in tag_hostnames:
+ tag_hostnames = tag_hostnames.split(',')
+ else:
+ tag_hostnames = [tag_hostnames]
+ tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
+ for v in tag_hostnames:
+ if '=' in v:
+ tag_name, tag_value = v.split('=')
+ if tags.get(tag_name) == tag_value:
+ return to_text(tag_name) + "_" + to_text(tag_value)
+ else:
+ tag_value = tags.get(v)
+ if tag_value:
+ return to_text(tag_value)
+ return None
+
+ def _get_hostname(self, instance, hostnames):
+ '''
+ :param instance: an instance dict returned by boto3 ec2 describe_instances()
+ :param hostnames: a list of hostname destination variables in order of preference
+ :return the preferred identifier for the host
+ '''
+ if not hostnames:
+ hostnames = ['dns-name', 'private-dns-name']
+
+ hostname = None
+ for preference in hostnames:
+ if 'tag' in preference:
+ if not preference.startswith('tag:'):
+ raise AnsibleError("To name a host by tags name_value, use 'tag:name=value'.")
+ hostname = self._get_tag_hostname(preference, instance)
+ else:
+ hostname = self._get_boto_attr_chain(preference, instance)
+ if hostname:
+ break
+ if hostname:
+ if ':' in to_text(hostname):
+ return self._sanitize_group_name((to_text(hostname)))
+ else:
+ return to_text(hostname)
+
+ def _query(self, regions, filters, strict_permissions):
+ '''
+ :param regions: a list of regions to query
+ :param filters: a list of boto3 filter dictionaries
+ :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+ '''
+ return {'aws_ec2': self._get_instances_by_region(regions, filters, strict_permissions)}
+
+ def _populate(self, groups, hostnames):
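+ # `groups` maps a group name to a list of instance dicts (here the single
+ # 'aws_ec2' group built by _query); each host is added to it and to 'all'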
+ for group in groups:
+ group = self.inventory.add_group(group)
+ self._add_hosts(hosts=groups[group], group=group, hostnames=hostnames)
+ self.inventory.add_child('all', group)
+
+ def _add_hosts(self, hosts, group, hostnames):
+ '''
+ :param hosts: a list of hosts to be added to a group
+ :param group: the name of the group to which the hosts belong
+ :param hostnames: a list of hostname destination variables in order of preference
+ '''
+ for host in hosts:
+ hostname = self._get_hostname(host, hostnames)
+
+ host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
+ host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
+
+ # Allow easier grouping by region
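+ # e.g. an availability zone of 'us-east-1a' yields the region 'us-east-1'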
+ host['placement']['region'] = host['placement']['availability_zone'][:-1]
+
+ if not hostname:
+ continue
+ self.inventory.add_host(hostname, group=group)
+ for hostvar, hostval in host.items():
+ self.inventory.set_variable(hostname, hostvar, hostval)
+
+ # Use constructed if applicable
+
+ strict = self.get_option('strict')
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
+
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
+
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
+
+ def _set_credentials(self):
+ '''
+        Set the boto credentials for this plugin from the configured options, falling back to the botocore session credentials.
+ '''
+
+ self.boto_profile = self.get_option('aws_profile')
+ self.aws_access_key_id = self.get_option('aws_access_key')
+ self.aws_secret_access_key = self.get_option('aws_secret_key')
+ self.aws_security_token = self.get_option('aws_security_token')
+ self.iam_role_arn = self.get_option('iam_role_arn')
+
+ if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+ session = botocore.session.get_session()
+ try:
+ credentials = session.get_credentials().get_frozen_credentials()
+ except AttributeError:
+ pass
+ else:
+ self.aws_access_key_id = credentials.access_key
+ self.aws_secret_access_key = credentials.secret_key
+ self.aws_security_token = credentials.token
+
+ if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
+ raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
+ "inventory configuration file or set them as environment variables.")
+
+ def verify_file(self, path):
+ '''
+        :param path: the path to the inventory config file
+        :return True if the file name matches an accepted pattern, else False
+ '''
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
+ return True
+ display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self._read_config_data(path)
+
+ if self.get_option('use_contrib_script_compatible_sanitization'):
+ self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
+
+ self._set_credentials()
+
+ # get user specifications
+ regions = self.get_option('regions')
+ filters = ansible_dict_to_boto3_filter_list(self.get_option('filters'))
+ hostnames = self.get_option('hostnames')
+ strict_permissions = self.get_option('strict_permissions')
+
+ cache_key = self.get_cache_key(path)
+ # false when refresh_cache or --flush-cache is used
+ if cache:
+ # get the user-specified directive
+ cache = self.get_option('cache')
+
+ # Generate inventory
+ cache_needs_update = False
+ if cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # if cache expires or cache file doesn't exist
+ cache_needs_update = True
+
+ if not cache or cache_needs_update:
+ results = self._query(regions, filters, strict_permissions)
+
+ self._populate(results, hostnames)
+
+ # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
+ # when the user is using caching, update the cached inventory
+ if cache_needs_update or (not cache and self.get_option('cache')):
+ self._cache[cache_key] = results
+
+ @staticmethod
+ def _legacy_script_compatible_group_sanitization(name):
+
+ # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
+ regex = re.compile(r"[^A-Za-z0-9\_\-]")
+
+ return regex.sub('_', name)
+
+
+def ansible_dict_to_boto3_filter_list(filters_dict):
+
+ """ Convert an Ansible dict of filters to list of dicts that boto3 can use
+ Args:
+ filters_dict (dict): Dict of AWS filters.
+ Basic Usage:
+ >>> filters = {'some-aws-id': 'i-01234567'}
+ >>> ansible_dict_to_boto3_filter_list(filters)
+        [
+            {
+                'Name': 'some-aws-id',
+                'Values': ['i-01234567']
+            }
+        ]
+ Returns:
+ List: List of AWS filters and their values
+ [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': [
+ 'i-01234567',
+ ]
+ }
+ ]
+ """
+
+ filters_list = []
+ for k, v in filters_dict.items():
+ filter_dict = {'Name': k}
+ if isinstance(v, string_types):
+ filter_dict['Values'] = [v]
+ else:
+ filter_dict['Values'] = v
+
+ filters_list.append(filter_dict)
+
+ return filters_list
+
+
+def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
+
+ """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
+ Args:
+ tags_list (list): List of dicts representing AWS tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
+ >>> boto3_tag_list_to_ansible_dict(tags_list)
+        {
+            'MyTagKey': 'MyTagValue'
+        }
+ Returns:
+ Dict: Dict of key:value pairs representing AWS tags
+ {
+ 'MyTagKey': 'MyTagValue',
+ }
+ """
+
+ if tag_name_key_name and tag_value_key_name:
+ tag_candidates = {tag_name_key_name: tag_value_key_name}
+ else:
+ tag_candidates = {'key': 'value', 'Key': 'Value'}
+
+ if not tags_list:
+ return {}
+ for k, v in tag_candidates.items():
+ if k in tags_list[0] and v in tags_list[0]:
+ return dict((tag[k], tag[v]) for tag in tags_list)
+ raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
diff --git a/test/support/integration/plugins/inventory/foreman.py b/test/support/integration/plugins/inventory/foreman.py
new file mode 100644
index 0000000000..43073f81ad
--- /dev/null
+++ b/test/support/integration/plugins/inventory/foreman.py
@@ -0,0 +1,295 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>, Daniel Lobato Garcia <dlobatog@redhat.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: foreman
+ plugin_type: inventory
+ short_description: foreman inventory source
+ version_added: "2.6"
+ requirements:
+ - requests >= 1.1
+ description:
+ - Get inventory hosts from the foreman service.
+ - "Uses a configuration file as an inventory source, it must end in ``.foreman.yml`` or ``.foreman.yaml`` and has a ``plugin: foreman`` entry."
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ options:
+ plugin:
+            description: the name of this plugin, it should always be set to 'foreman' for this plugin to recognize it as its own.
+ required: True
+ choices: ['foreman']
+ url:
+ description: url to foreman
+ default: 'http://localhost:3000'
+ env:
+ - name: FOREMAN_SERVER
+ version_added: "2.8"
+ user:
+ description: foreman authentication user
+ required: True
+ env:
+ - name: FOREMAN_USER
+ version_added: "2.8"
+ password:
+ description: foreman authentication password
+ required: True
+ env:
+ - name: FOREMAN_PASSWORD
+ version_added: "2.8"
+ validate_certs:
+ description: verify SSL certificate if using https
+ type: boolean
+ default: False
+ group_prefix:
+ description: prefix to apply to foreman groups
+ default: foreman_
+ vars_prefix:
+ description: prefix to apply to host variables, does not include facts nor params
+ default: foreman_
+ want_facts:
+ description: Toggle, if True the plugin will retrieve host facts from the server
+ type: boolean
+ default: False
+ want_params:
+ description: Toggle, if true the inventory will retrieve 'all_parameters' information as host vars
+ type: boolean
+ default: False
+ want_hostcollections:
+ description: Toggle, if true the plugin will create Ansible groups for host collections
+ type: boolean
+ default: False
+ version_added: '2.10'
+ want_ansible_ssh_host:
+ description: Toggle, if true the plugin will populate the ansible_ssh_host variable to explicitly specify the connection target
+ type: boolean
+ default: False
+ version_added: '2.10'
+
+'''
+
+EXAMPLES = '''
+# my.foreman.yml
+plugin: foreman
+url: http://localhost:2222
+user: ansible-tester
+password: secure
+validate_certs: False
+'''
+
+from distutils.version import LooseVersion
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name, Constructable
+
+# 3rd party imports
+try:
+ import requests
+ if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
+ raise ImportError
+except ImportError:
+ raise AnsibleError('This script requires python-requests 1.1 as a minimum version')
+
+from requests.auth import HTTPBasicAuth
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable, Constructable):
+ ''' Host inventory parser for ansible using foreman as source. '''
+
+ NAME = 'foreman'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ # from config
+ self.foreman_url = None
+
+ self.session = None
+ self.cache_key = None
+ self.use_cache = None
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('foreman.yaml', 'foreman.yml')):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not ending in "foreman.yaml" nor "foreman.yml"')
+ return valid
+
+ def _get_session(self):
+ if not self.session:
+ self.session = requests.session()
+ self.session.auth = HTTPBasicAuth(self.get_option('user'), to_bytes(self.get_option('password')))
+ self.session.verify = self.get_option('validate_certs')
+ return self.session
+
+ def _get_json(self, url, ignore_errors=None):
+
+ if not self.use_cache or url not in self._cache.get(self.cache_key, {}):
+
+ if self.cache_key not in self._cache:
+ self._cache[self.cache_key] = {url: ''}
+
+ results = []
+ s = self._get_session()
+ params = {'page': 1, 'per_page': 250}
+ while True:
+ ret = s.get(url, params=params)
+ if ignore_errors and ret.status_code in ignore_errors:
+ break
+ ret.raise_for_status()
+ json = ret.json()
+
+ # process results
+ # FIXME: This assumes 'return type' matches a specific query,
+                #        it will break if we expand the queries and they don't have different types
+                if 'results' not in json:
+                    # /hosts/:id does not have a 'results' key
+ results = json
+ break
+ elif isinstance(json['results'], MutableMapping):
+ # /facts are returned as dict in 'results'
+ results = json['results']
+ break
+ else:
+                    # /hosts returns 'results' as a list of hosts; the response is paginated
+ results = results + json['results']
+
+ # check for end of paging
+ if len(results) >= json['subtotal']:
+ break
+ if len(json['results']) == 0:
+                        self.display.warning("Did not make any progress during loop. Expected %d, got %d" % (json['subtotal'], len(results)))
+ break
+
+ # get next page
+ params['page'] += 1
+
+ self._cache[self.cache_key][url] = results
+
+ return self._cache[self.cache_key][url]
+
+ def _get_hosts(self):
+ return self._get_json("%s/api/v2/hosts" % self.foreman_url)
+
+ def _get_all_params_by_id(self, hid):
+ url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
+ ret = self._get_json(url, [404])
+ if not ret or not isinstance(ret, MutableMapping) or not ret.get('all_parameters', False):
+ return {}
+ return ret.get('all_parameters')
+
+ def _get_facts_by_id(self, hid):
+ url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
+ return self._get_json(url)
+
+ def _get_host_data_by_id(self, hid):
+ url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
+ return self._get_json(url)
+
+ def _get_facts(self, host):
+ """Fetch all host facts of the host"""
+
+ ret = self._get_facts_by_id(host['id'])
+ if len(ret.values()) == 0:
+ facts = {}
+ elif len(ret.values()) == 1:
+ facts = list(ret.values())[0]
+ else:
+ raise ValueError("More than one set of facts returned for '%s'" % host)
+ return facts
+
+ def _populate(self):
+
+ for host in self._get_hosts():
+
+ if host.get('name'):
+ host_name = self.inventory.add_host(host['name'])
+
+ # create directly mapped groups
+ group_name = host.get('hostgroup_title', host.get('hostgroup_name'))
+ if group_name:
+ group_name = to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group_name.lower().replace(" ", "")))
+ group_name = self.inventory.add_group(group_name)
+ self.inventory.add_child(group_name, host_name)
+
+ # set host vars from host info
+ try:
+ for k, v in host.items():
+ if k not in ('name', 'hostgroup_title', 'hostgroup_name'):
+ try:
+ self.inventory.set_variable(host_name, self.get_option('vars_prefix') + k, v)
+ except ValueError as e:
+ self.display.warning("Could not set host info hostvar for %s, skipping %s: %s" % (host, k, to_text(e)))
+ except ValueError as e:
+ self.display.warning("Could not get host info for %s, skipping: %s" % (host_name, to_text(e)))
+
+ # set host vars from params
+ if self.get_option('want_params'):
+ for p in self._get_all_params_by_id(host['id']):
+ try:
+ self.inventory.set_variable(host_name, p['name'], p['value'])
+ except ValueError as e:
+ self.display.warning("Could not set hostvar %s to '%s' for the '%s' host, skipping: %s" %
+ (p['name'], to_native(p['value']), host, to_native(e)))
+
+ # set host vars from facts
+ if self.get_option('want_facts'):
+ self.inventory.set_variable(host_name, 'foreman_facts', self._get_facts(host))
+
+ # create group for host collections
+ if self.get_option('want_hostcollections'):
+ host_data = self._get_host_data_by_id(host['id'])
+ hostcollections = host_data.get('host_collections')
+ if hostcollections:
+ # Create Ansible groups for host collections
+ for hostcollection in hostcollections:
+ try:
+ hostcollection_group = to_safe_group_name('%shostcollection_%s' % (self.get_option('group_prefix'),
+ hostcollection['name'].lower().replace(" ", "")))
+ hostcollection_group = self.inventory.add_group(hostcollection_group)
+ self.inventory.add_child(hostcollection_group, host_name)
+ except ValueError as e:
+ self.display.warning("Could not create groups for host collections for %s, skipping: %s" % (host_name, to_text(e)))
+
+ # put ansible_ssh_host as hostvar
+ if self.get_option('want_ansible_ssh_host'):
+ for key in ('ip', 'ipv4', 'ipv6'):
+ if host.get(key):
+ try:
+ self.inventory.set_variable(host_name, 'ansible_ssh_host', host[key])
+ break
+ except ValueError as e:
+ self.display.warning("Could not set hostvar ansible_ssh_host to '%s' for the '%s' host, skipping: %s" %
+ (host[key], host_name, to_text(e)))
+
+ strict = self.get_option('strict')
+
+ hostvars = self.inventory.get_host(host_name).get_vars()
+ self._set_composite_vars(self.get_option('compose'), hostvars, host_name, strict)
+ self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host_name, strict)
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host_name, strict)
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # read config from file, this sets 'options'
+ self._read_config_data(path)
+
+ # get connection host
+ self.foreman_url = self.get_option('url')
+ self.cache_key = self.get_cache_key(path)
+ self.use_cache = cache and self.get_option('cache')
+
+ # actually populate inventory
+ self._populate()
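As a rough standalone illustration of the pagination pattern that _get_json implements above, the sketch below walks the Foreman hosts API with requests; the URL and credentials are placeholders:

    import requests
    from requests.auth import HTTPBasicAuth

    def fetch_all_hosts(base_url, user, password, per_page=250):
        # Mirror the plugin: request pages of 'per_page' records until 'subtotal' is reached.
        session = requests.Session()
        session.auth = HTTPBasicAuth(user, password)
        results = []
        params = {'page': 1, 'per_page': per_page}
        while True:
            ret = session.get('%s/api/v2/hosts' % base_url, params=params)
            ret.raise_for_status()
            data = ret.json()
            results += data['results']
            if len(results) >= data['subtotal'] or not data['results']:
                break
            params['page'] += 1
        return results

    # hosts = fetch_all_hosts('https://foreman.example.com', 'ansible-tester', 'secure')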
diff --git a/test/support/integration/plugins/inventory/vmware_vm_inventory.py b/test/support/integration/plugins/inventory/vmware_vm_inventory.py
new file mode 100644
index 0000000000..816b6471c5
--- /dev/null
+++ b/test/support/integration/plugins/inventory/vmware_vm_inventory.py
@@ -0,0 +1,477 @@
+#
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: vmware_vm_inventory
+ plugin_type: inventory
+ short_description: VMware Guest inventory source
+ version_added: "2.7"
+ author:
+ - Abhijeet Kasurde (@Akasurde)
+ description:
+ - Get virtual machines as inventory hosts from VMware environment.
+ - Uses any file which ends with vmware.yml, vmware.yaml, vmware_vm_inventory.yml, or vmware_vm_inventory.yaml as a YAML configuration file.
+ - The inventory_hostname is always the 'Name' and UUID of the virtual machine. UUID is added as VMware allows virtual machines with the same name.
+ extends_documentation_fragment:
+ - inventory_cache
+ requirements:
+ - "Python >= 2.7"
+ - "PyVmomi"
+ - "requests >= 2.3"
+ - "vSphere Automation SDK - For tag feature"
+ - "vCloud Suite SDK - For tag feature"
+ options:
+ hostname:
+ description: Name of vCenter or ESXi server.
+ required: True
+ env:
+ - name: VMWARE_HOST
+ - name: VMWARE_SERVER
+ username:
+ description: Name of vSphere admin user.
+ required: True
+ env:
+ - name: VMWARE_USER
+ - name: VMWARE_USERNAME
+ password:
+ description: Password of vSphere admin user.
+ required: True
+ env:
+ - name: VMWARE_PASSWORD
+ port:
+ description: Port number used to connect to vCenter or ESXi Server.
+ default: 443
+ env:
+ - name: VMWARE_PORT
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
+ default: True
+ type: boolean
+ env:
+ - name: VMWARE_VALIDATE_CERTS
+ with_tags:
+ description:
+ - Include tags and associated virtual machines.
+ - Requires 'vSphere Automation SDK' library to be installed on the given controller machine.
+        - Please refer to the following URL for installation steps
+ - 'https://code.vmware.com/web/sdk/65/vsphere-automation-python'
+ default: False
+ type: boolean
+ properties:
+ description:
+ - Specify the list of VMware schema properties associated with the VM.
+ - These properties will be populated in hostvars of the given VM.
+ - Each value in the list specifies the path to a specific property in VM object.
+ type: list
+ default: [ 'name', 'config.cpuHotAddEnabled', 'config.cpuHotRemoveEnabled',
+ 'config.instanceUuid', 'config.hardware.numCPU', 'config.template',
+ 'config.name', 'guest.hostName', 'guest.ipAddress',
+ 'guest.guestId', 'guest.guestState', 'runtime.maxMemoryUsage',
+ 'customValue'
+ ]
+ version_added: "2.9"
+'''
+
+EXAMPLES = '''
+# Sample configuration file for VMware Guest dynamic inventory
+ plugin: vmware_vm_inventory
+ strict: False
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: False
+ with_tags: True
+
+# Gather minimum set of properties for VMware guest
+ plugin: vmware_vm_inventory
+ strict: False
+ hostname: 10.65.223.31
+ username: administrator@vsphere.local
+ password: Esxi@123$%
+ validate_certs: False
+ with_tags: False
+ properties:
+ - 'name'
+ - 'guest.ipAddress'
+'''
+
+import ssl
+import atexit
+from ansible.errors import AnsibleError, AnsibleParserError
+
+try:
+ # requests is required for exception handling of the ConnectionError
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from pyVim import connect
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+try:
+ from com.vmware.vapi.std_client import DynamicID
+ from vmware.vapi.vsphere.client import create_vsphere_client
+ HAS_VSPHERE = True
+except ImportError:
+ HAS_VSPHERE = False
+
+
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
+
+
+class BaseVMwareInventory:
+ def __init__(self, hostname, username, password, port, validate_certs, with_tags):
+ self.hostname = hostname
+ self.username = username
+ self.password = password
+ self.port = port
+ self.with_tags = with_tags
+ self.validate_certs = validate_certs
+ self.content = None
+ self.rest_content = None
+
+ def do_login(self):
+ """
+ Check requirements and do login
+ """
+ self.check_requirements()
+ self.content = self._login()
+ if self.with_tags:
+ self.rest_content = self._login_vapi()
+
+ def _login_vapi(self):
+ """
+ Login to vCenter API using REST call
+ Returns: connection object
+
+ """
+ session = requests.Session()
+ session.verify = self.validate_certs
+ if not self.validate_certs:
+ # Disable warning shown at stdout
+ requests.packages.urllib3.disable_warnings()
+
+ server = self.hostname
+ if self.port:
+ server += ":" + str(self.port)
+ client = create_vsphere_client(server=server,
+ username=self.username,
+ password=self.password,
+ session=session)
+ if client is None:
+ raise AnsibleError("Failed to login to %s using %s" % (server, self.username))
+ return client
+
+ def _login(self):
+ """
+ Login to vCenter or ESXi server
+ Returns: connection object
+
+ """
+ if self.validate_certs and not hasattr(ssl, 'SSLContext'):
+ raise AnsibleError('pyVim does not support changing verification mode with python < 2.7.9. Either update '
+ 'python or set validate_certs to false in configuration YAML file.')
+
+ ssl_context = None
+ if not self.validate_certs and hasattr(ssl, 'SSLContext'):
+ ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ ssl_context.verify_mode = ssl.CERT_NONE
+
+ service_instance = None
+ try:
+ service_instance = connect.SmartConnect(host=self.hostname, user=self.username,
+ pwd=self.password, sslContext=ssl_context,
+ port=self.port)
+ except vim.fault.InvalidLogin as e:
+ raise AnsibleParserError("Unable to log on to vCenter or ESXi API at %s:%s as %s: %s" % (self.hostname, self.port, self.username, e.msg))
+ except vim.fault.NoPermission as e:
+ raise AnsibleParserError("User %s does not have required permission"
+ " to log on to vCenter or ESXi API at %s:%s : %s" % (self.username, self.hostname, self.port, e.msg))
+ except (requests.ConnectionError, ssl.SSLError) as e:
+ raise AnsibleParserError("Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" % (self.hostname, self.port, e))
+ except vmodl.fault.InvalidRequest as e:
+ # Request is malformed
+ raise AnsibleParserError("Failed to get a response from server %s:%s as "
+ "request is malformed: %s" % (self.hostname, self.port, e.msg))
+ except Exception as e:
+ raise AnsibleParserError("Unknown error while connecting to vCenter or ESXi API at %s:%s : %s" % (self.hostname, self.port, e))
+
+ if service_instance is None:
+ raise AnsibleParserError("Unknown error while connecting to vCenter or ESXi API at %s:%s" % (self.hostname, self.port))
+
+ atexit.register(connect.Disconnect, service_instance)
+ return service_instance.RetrieveContent()
+
+ def check_requirements(self):
+ """ Check all requirements for this inventory are satisified"""
+ if not HAS_REQUESTS:
+ raise AnsibleParserError('Please install "requests" Python module as this is required'
+ ' for VMware Guest dynamic inventory plugin.')
+ elif not HAS_PYVMOMI:
+ raise AnsibleParserError('Please install "PyVmomi" Python module as this is required'
+ ' for VMware Guest dynamic inventory plugin.')
+ if HAS_REQUESTS:
+ # Pyvmomi 5.5 and onwards requires requests 2.3
+ # https://github.com/vmware/pyvmomi/blob/master/requirements.txt
+ required_version = (2, 3)
+ requests_version = requests.__version__.split(".")[:2]
+ try:
+ requests_major_minor = tuple(map(int, requests_version))
+ except ValueError:
+ raise AnsibleParserError("Failed to parse 'requests' library version.")
+
+ if requests_major_minor < required_version:
+ raise AnsibleParserError("'requests' library version should"
+ " be >= %s, found: %s." % (".".join([str(w) for w in required_version]),
+ requests.__version__))
+
+ if not HAS_VSPHERE and self.with_tags:
+ raise AnsibleError("Unable to find 'vSphere Automation SDK' Python library which is required."
+                               " Please refer to this URL for installation steps"
+ " - https://code.vmware.com/web/sdk/65/vsphere-automation-python")
+
+ if not all([self.hostname, self.username, self.password]):
+ raise AnsibleError("Missing one of the following : hostname, username, password. Please read "
+ "the documentation for more information.")
+
+ def _get_managed_objects_properties(self, vim_type, properties=None):
+ """
+ Look up a Managed Object Reference in vCenter / ESXi Environment
+        :param vim_type: Type of vim object e.g., for datacenter - vim.Datacenter
+ :param properties: List of properties related to vim object e.g. Name
+ :return: local content object
+ """
+ # Get Root Folder
+ root_folder = self.content.rootFolder
+
+ if properties is None:
+ properties = ['name']
+
+ # Create Container View with default root folder
+ mor = self.content.viewManager.CreateContainerView(root_folder, [vim_type], True)
+
+ # Create Traversal spec
+ traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
+ name="traversal_spec",
+ path='view',
+ skip=False,
+ type=vim.view.ContainerView
+ )
+
+ # Create Property Spec
+ property_spec = vmodl.query.PropertyCollector.PropertySpec(
+            type=vim_type,  # Type of object to be retrieved
+ all=False,
+ pathSet=properties
+ )
+
+ # Create Object Spec
+ object_spec = vmodl.query.PropertyCollector.ObjectSpec(
+ obj=mor,
+ skip=True,
+ selectSet=[traversal_spec]
+ )
+
+ # Create Filter Spec
+ filter_spec = vmodl.query.PropertyCollector.FilterSpec(
+ objectSet=[object_spec],
+ propSet=[property_spec],
+ reportMissingObjectsInResults=False
+ )
+
+ return self.content.propertyCollector.RetrieveContents([filter_spec])
+
+ @staticmethod
+ def _get_object_prop(vm, attributes):
+ """Safely get a property or return None"""
+ result = vm
+ for attribute in attributes:
+ try:
+ result = getattr(result, attribute)
+ except (AttributeError, IndexError):
+ return None
+ return result
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable):
+
+ NAME = 'vmware_vm_inventory'
+
+ def verify_file(self, path):
+ """
+ Verify plugin configuration file and mark this plugin active
+ Args:
+ path: Path of configuration YAML file
+ Returns: True if everything is correct, else False
+ """
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('vmware.yaml', 'vmware.yml', 'vmware_vm_inventory.yaml', 'vmware_vm_inventory.yml')):
+ valid = True
+
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+ """
+ Parses the inventory file
+ """
+ super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
+
+ cache_key = self.get_cache_key(path)
+
+ config_data = self._read_config_data(path)
+
+ # set _options from config data
+ self._consume_options(config_data)
+
+ self.pyv = BaseVMwareInventory(
+ hostname=self.get_option('hostname'),
+ username=self.get_option('username'),
+ password=self.get_option('password'),
+ port=self.get_option('port'),
+ with_tags=self.get_option('with_tags'),
+ validate_certs=self.get_option('validate_certs')
+ )
+
+ self.pyv.do_login()
+
+ self.pyv.check_requirements()
+
+ source_data = None
+ if cache:
+ cache = self.get_option('cache')
+
+ update_cache = False
+ if cache:
+ try:
+ source_data = self._cache[cache_key]
+ except KeyError:
+ update_cache = True
+
+ using_current_cache = cache and not update_cache
+ cacheable_results = self._populate_from_source(source_data, using_current_cache)
+
+ if update_cache:
+ self._cache[cache_key] = cacheable_results
+
+ def _populate_from_cache(self, source_data):
+ """ Populate cache using source data """
+ hostvars = source_data.pop('_meta', {}).get('hostvars', {})
+ for group in source_data:
+ if group == 'all':
+ continue
+ else:
+ self.inventory.add_group(group)
+ hosts = source_data[group].get('hosts', [])
+ for host in hosts:
+ self._populate_host_vars([host], hostvars.get(host, {}), group)
+ self.inventory.add_child('all', group)
+
+ def _populate_from_source(self, source_data, using_current_cache):
+ """
+ Populate inventory data from direct source
+
+ """
+ if using_current_cache:
+ self._populate_from_cache(source_data)
+ return source_data
+
+ cacheable_results = {'_meta': {'hostvars': {}}}
+ hostvars = {}
+ objects = self.pyv._get_managed_objects_properties(vim_type=vim.VirtualMachine,
+ properties=['name'])
+
+ if self.pyv.with_tags:
+ tag_svc = self.pyv.rest_content.tagging.Tag
+ tag_association = self.pyv.rest_content.tagging.TagAssociation
+
+ tags_info = dict()
+ tags = tag_svc.list()
+ for tag in tags:
+ tag_obj = tag_svc.get(tag)
+ tags_info[tag_obj.id] = tag_obj.name
+ if tag_obj.name not in cacheable_results:
+ cacheable_results[tag_obj.name] = {'hosts': []}
+ self.inventory.add_group(tag_obj.name)
+
+ for vm_obj in objects:
+ for vm_obj_property in vm_obj.propSet:
+                # VMware does not provide a way to uniquely identify a VM by its name
+                # i.e. there can be two virtual machines with the same name,
+                # so append "_" and the VMware UUID to make the inventory hostname unique
+                if not vm_obj.obj.config:
+                    # Sometimes orphaned VMs return no configuration
+ continue
+
+ current_host = vm_obj_property.val + "_" + vm_obj.obj.config.uuid
+
+ if current_host not in hostvars:
+ hostvars[current_host] = {}
+ self.inventory.add_host(current_host)
+
+ host_ip = vm_obj.obj.guest.ipAddress
+ if host_ip:
+ self.inventory.set_variable(current_host, 'ansible_host', host_ip)
+
+ self._populate_host_properties(vm_obj, current_host)
+
+                # Only gather tag-related facts if the vSphere Automation SDK is installed and with_tags is enabled.
+ if HAS_VSPHERE and self.pyv.with_tags:
+ # Add virtual machine to appropriate tag group
+ vm_mo_id = vm_obj.obj._GetMoId()
+ vm_dynamic_id = DynamicID(type='VirtualMachine', id=vm_mo_id)
+ attached_tags = tag_association.list_attached_tags(vm_dynamic_id)
+
+ for tag_id in attached_tags:
+ self.inventory.add_child(tags_info[tag_id], current_host)
+ cacheable_results[tags_info[tag_id]]['hosts'].append(current_host)
+
+ # Based on power state of virtual machine
+ vm_power = str(vm_obj.obj.summary.runtime.powerState)
+ if vm_power not in cacheable_results:
+ cacheable_results[vm_power] = {'hosts': []}
+ self.inventory.add_group(vm_power)
+ cacheable_results[vm_power]['hosts'].append(current_host)
+ self.inventory.add_child(vm_power, current_host)
+
+ # Based on guest id
+ vm_guest_id = vm_obj.obj.config.guestId
+ if vm_guest_id and vm_guest_id not in cacheable_results:
+ cacheable_results[vm_guest_id] = {'hosts': []}
+ self.inventory.add_group(vm_guest_id)
+ cacheable_results[vm_guest_id]['hosts'].append(current_host)
+ self.inventory.add_child(vm_guest_id, current_host)
+
+ for host in hostvars:
+ h = self.inventory.get_host(host)
+ cacheable_results['_meta']['hostvars'][h.name] = h.vars
+
+ return cacheable_results
+
+ def _populate_host_properties(self, vm_obj, current_host):
+ # Load VM properties in host_vars
+ vm_properties = self.get_option('properties') or []
+
+ field_mgr = self.pyv.content.customFieldsManager.field
+
+ for vm_prop in vm_properties:
+ if vm_prop == 'customValue':
+ for cust_value in vm_obj.obj.customValue:
+ self.inventory.set_variable(current_host,
+ [y.name for y in field_mgr if y.key == cust_value.key][0],
+ cust_value.value)
+ else:
+ vm_value = self.pyv._get_object_prop(vm_obj.obj, vm_prop.split("."))
+ self.inventory.set_variable(current_host, vm_prop, vm_value)
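The dotted 'properties' entries above (for example guest.ipAddress) are resolved by _get_object_prop, which simply walks an attribute chain and falls back to None. A standalone sketch with plain Python objects standing in for pyVmomi managed objects:

    class _Guest:
        ipAddress = '203.0.113.10'

    class _VM:
        name = 'web-01'
        guest = _Guest()

    def get_object_prop(obj, attributes):
        # Walk the attribute chain; any missing link yields None.
        result = obj
        for attribute in attributes:
            try:
                result = getattr(result, attribute)
            except AttributeError:
                return None
        return result

    vm = _VM()
    print(get_object_prop(vm, 'guest.ipAddress'.split('.')))  # 203.0.113.10
    print(get_object_prop(vm, 'config.uuid'.split('.')))      # None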
diff --git a/test/support/integration/plugins/module_utils/ansible_tower.py b/test/support/integration/plugins/module_utils/ansible_tower.py
new file mode 100644
index 0000000000..ef687a669c
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/ansible_tower.py
@@ -0,0 +1,113 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Wayne Witzel III <wayne@riotousliving.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import traceback
+
+TOWER_CLI_IMP_ERR = None
+try:
+ import tower_cli.utils.exceptions as exc
+ from tower_cli.utils import parser
+ from tower_cli.api import client
+
+ HAS_TOWER_CLI = True
+except ImportError:
+ TOWER_CLI_IMP_ERR = traceback.format_exc()
+ HAS_TOWER_CLI = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def tower_auth_config(module):
+ '''tower_auth_config attempts to load the tower-cli.cfg file
+ specified from the `tower_config_file` parameter. If found,
+    it returns the contents of the file as a dictionary, else
+ it will attempt to fetch values from the module params and
+ only pass those values that have been set.
+ '''
+ config_file = module.params.pop('tower_config_file', None)
+ if config_file:
+ if not os.path.exists(config_file):
+ module.fail_json(msg='file not found: %s' % config_file)
+ if os.path.isdir(config_file):
+ module.fail_json(msg='directory can not be used as config file: %s' % config_file)
+
+ with open(config_file, 'rb') as f:
+ return parser.string_to_dict(f.read())
+ else:
+ auth_config = {}
+ host = module.params.pop('tower_host', None)
+ if host:
+ auth_config['host'] = host
+ username = module.params.pop('tower_username', None)
+ if username:
+ auth_config['username'] = username
+ password = module.params.pop('tower_password', None)
+ if password:
+ auth_config['password'] = password
+ module.params.pop('tower_verify_ssl', None) # pop alias if used
+ verify_ssl = module.params.pop('validate_certs', None)
+ if verify_ssl is not None:
+ auth_config['verify_ssl'] = verify_ssl
+ return auth_config
+
+
+def tower_check_mode(module):
+ '''Execute check mode logic for Ansible Tower modules'''
+ if module.check_mode:
+ try:
+ result = client.get('/ping').json()
+ module.exit_json(changed=True, tower_version='{0}'.format(result['version']))
+ except (exc.ServerError, exc.ConnectionError, exc.BadRequest) as excinfo:
+ module.fail_json(changed=False, msg='Failed check mode: {0}'.format(excinfo))
+
+
+class TowerModule(AnsibleModule):
+ def __init__(self, argument_spec, **kwargs):
+ args = dict(
+ tower_host=dict(),
+ tower_username=dict(),
+ tower_password=dict(no_log=True),
+ validate_certs=dict(type='bool', aliases=['tower_verify_ssl']),
+ tower_config_file=dict(type='path'),
+ )
+ args.update(argument_spec)
+
+ mutually_exclusive = kwargs.get('mutually_exclusive', [])
+        mutually_exclusive.extend((
+            ('tower_config_file', 'tower_host'),
+            ('tower_config_file', 'tower_username'),
+            ('tower_config_file', 'tower_password'),
+            ('tower_config_file', 'validate_certs'),
+        ))
+        kwargs['mutually_exclusive'] = mutually_exclusive
+
+ super(TowerModule, self).__init__(argument_spec=args, **kwargs)
+
+ if not HAS_TOWER_CLI:
+ self.fail_json(msg=missing_required_lib('ansible-tower-cli'),
+ exception=TOWER_CLI_IMP_ERR)
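A brief aside on the mutually_exclusive handling in TowerModule.__init__ above: list.extend mutates in place and returns None, which is why the constraints are extended first and only then assigned back into kwargs. A two-line sketch of the pitfall:

    pairs = [('a', 'b')]
    assert pairs.extend([('c', 'd')]) is None   # extend() returns None, not the list
    assert pairs == [('a', 'b'), ('c', 'd')]    # the list itself now holds both pairs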
diff --git a/test/support/integration/plugins/module_utils/aws/core.py b/test/support/integration/plugins/module_utils/aws/core.py
new file mode 100644
index 0000000000..c4527b6deb
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/core.py
@@ -0,0 +1,335 @@
+#
+# Copyright 2017 Michael De La Rue | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""This module adds shared support for generic Amazon AWS modules
+
+**This code is not yet ready for use in user modules. As of 2017**
+**and through to 2018, the interface is likely to change**
+**aggressively as the exact correct interface for ansible AWS modules**
+**is identified. In particular, until this notice goes away or is**
+**changed, methods may disappear from the interface. Please don't**
+**publish modules using this except directly to the main Ansible**
+**development repository.**
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+ from ansible.module_utils.aws import AnsibleAWSModule
+    module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean,
+ mutually_exclusive=list1, required_together=list2)
+
+The 'AnsibleAWSModule' module provides similar, but more restricted,
+interfaces to the normal Ansible module. It also includes the
+additional methods for connecting to AWS using the standard module arguments
+
+ m.resource('lambda') # - get an AWS connection as a boto3 resource.
+
+or
+
+ m.client('sts') # - get an AWS connection as a boto3 client.
+
+To make use of AWSRetry easier, it can now be wrapped around any call from a
+module-created client. To add retries to a client, create a client:
+
+ m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+
+Any calls from that client can be made to use the decorator passed at call-time
+using the `aws_retry` argument. By default, no retries are used.
+
+ ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+ ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
+
+The call will be retried the specified number of times, so the calling functions
+don't need to be wrapped in the backoff decorator.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import logging
+import traceback
+from functools import wraps
+from distutils.version import LooseVersion
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ # Python 3
+ from io import StringIO
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn
+from ansible.module_utils.ec2 import get_aws_connection_info, get_aws_region
+
+# We will also export HAS_BOTO3 so end user modules can use it.
+__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code')
+
+
+class AnsibleAWSModule(object):
+ """An ansible module class for AWS modules
+
+    AnsibleAWSModule provides a class for building modules which
+ connect to Amazon Web Services. The interface is currently more
+ restricted than the basic module class with the aim that later the
+ basic module class can be reduced. If you find that any key
+ feature is missing please contact the author/Ansible AWS team
+ (available on #ansible-aws on IRC) to request the additional
+ features needed.
+ """
+ default_settings = {
+ "default_args": True,
+ "check_boto3": True,
+ "auto_retry": True,
+ "module_class": AnsibleModule
+ }
+
+ def __init__(self, **kwargs):
+ local_settings = {}
+ for key in AnsibleAWSModule.default_settings:
+ try:
+ local_settings[key] = kwargs.pop(key)
+ except KeyError:
+ local_settings[key] = AnsibleAWSModule.default_settings[key]
+ self.settings = local_settings
+
+ if local_settings["default_args"]:
+ # ec2_argument_spec contains the region so we use that; there's a patch coming which
+ # will add it to aws_argument_spec so if that's accepted then later we should change
+ # over
+ argument_spec_full = ec2_argument_spec()
+ try:
+ argument_spec_full.update(kwargs["argument_spec"])
+ except (TypeError, NameError):
+ pass
+ kwargs["argument_spec"] = argument_spec_full
+
+ self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)
+
+ if local_settings["check_boto3"] and not HAS_BOTO3:
+ self._module.fail_json(
+ msg=missing_required_lib('botocore or boto3'))
+
+ self.check_mode = self._module.check_mode
+ self._diff = self._module._diff
+ self._name = self._module._name
+
+ self._botocore_endpoint_log_stream = StringIO()
+ self.logger = None
+ if self.params.get('debug_botocore_endpoint_logs'):
+ self.logger = logging.getLogger('botocore.endpoint')
+ self.logger.setLevel(logging.DEBUG)
+ self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))
+
+ @property
+ def params(self):
+ return self._module.params
+
+ def _get_resource_action_list(self):
+ actions = []
+ for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
+ ln = ln.strip()
+ if not ln:
+ continue
+ found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln)
+ if found_operational_request:
+ operation_request = found_operational_request.group(0)[20:-1]
+ resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
+ actions.append("{0}:{1}".format(resource, operation_request))
+ return list(set(actions))
+
+ def exit_json(self, *args, **kwargs):
+ if self.params.get('debug_botocore_endpoint_logs'):
+ kwargs['resource_actions'] = self._get_resource_action_list()
+ return self._module.exit_json(*args, **kwargs)
+
+ def fail_json(self, *args, **kwargs):
+ if self.params.get('debug_botocore_endpoint_logs'):
+ kwargs['resource_actions'] = self._get_resource_action_list()
+ return self._module.fail_json(*args, **kwargs)
+
+ def debug(self, *args, **kwargs):
+ return self._module.debug(*args, **kwargs)
+
+ def warn(self, *args, **kwargs):
+ return self._module.warn(*args, **kwargs)
+
+ def deprecate(self, *args, **kwargs):
+ return self._module.deprecate(*args, **kwargs)
+
+ def boolean(self, *args, **kwargs):
+ return self._module.boolean(*args, **kwargs)
+
+ def md5(self, *args, **kwargs):
+ return self._module.md5(*args, **kwargs)
+
+ def client(self, service, retry_decorator=None):
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ conn = boto3_conn(self, conn_type='client', resource=service,
+ region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator)
+
+ def resource(self, service):
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
+ return boto3_conn(self, conn_type='resource', resource=service,
+ region=region, endpoint=ec2_url, **aws_connect_kwargs)
+
+ @property
+ def region(self, boto3=True):
+ return get_aws_region(self, boto3)
+
+ def fail_json_aws(self, exception, msg=None):
+ """call fail_json with processed exception
+
+ function for converting exceptions thrown by AWS SDK modules,
+ botocore, boto3 and boto, into nice error messages.
+ """
+ last_traceback = traceback.format_exc()
+
+ # to_native is trusted to handle exceptions that str() could
+ # convert to text.
+ try:
+ except_msg = to_native(exception.message)
+ except AttributeError:
+ except_msg = to_native(exception)
+
+ if msg is not None:
+ message = '{0}: {1}'.format(msg, except_msg)
+ else:
+ message = except_msg
+
+ try:
+ response = exception.response
+ except AttributeError:
+ response = None
+
+ failure = dict(
+ msg=message,
+ exception=last_traceback,
+ **self._gather_versions()
+ )
+
+ if response is not None:
+ failure.update(**camel_dict_to_snake_dict(response))
+
+ self.fail_json(**failure)
+
+ def _gather_versions(self):
+ """Gather AWS SDK (boto3 and botocore) dependency versions
+
+ Returns {'boto3_version': str, 'botocore_version': str}
+ Returns {} if neither are installed
+ """
+ if not HAS_BOTO3:
+ return {}
+ import boto3
+ import botocore
+ return dict(boto3_version=boto3.__version__,
+ botocore_version=botocore.__version__)
+
+ def boto3_at_least(self, desired):
+ """Check if the available boto3 version is greater than or equal to a desired version.
+
+ Usage:
+ if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
+ # conditionally fail on old boto3 versions if a specific feature is not supported
+ module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
+ """
+ existing = self._gather_versions()
+ return LooseVersion(existing['boto3_version']) >= LooseVersion(desired)
+
+ def botocore_at_least(self, desired):
+ """Check if the available botocore version is greater than or equal to a desired version.
+
+ Usage:
+ if not module.botocore_at_least('1.2.3'):
+ module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
+ if not module.botocore_at_least('1.5.3'):
+ module.warn('Botocore did not include waiters for Service X before 1.5.3. '
+ 'To wait until Service X resources are fully available, update botocore.')
+ """
+ existing = self._gather_versions()
+ return LooseVersion(existing['botocore_version']) >= LooseVersion(desired)
+
+
+class _RetryingBotoClientWrapper(object):
+ __never_wait = (
+ 'get_paginator', 'can_paginate',
+ 'get_waiter', 'generate_presigned_url',
+ )
+
+ def __init__(self, client, retry):
+ self.client = client
+ self.retry = retry
+
+ def _create_optional_retry_wrapper_function(self, unwrapped):
+ retrying_wrapper = self.retry(unwrapped)
+
+ @wraps(unwrapped)
+ def deciding_wrapper(aws_retry=False, *args, **kwargs):
+ if aws_retry:
+ return retrying_wrapper(*args, **kwargs)
+ else:
+ return unwrapped(*args, **kwargs)
+ return deciding_wrapper
+
+ def __getattr__(self, name):
+ unwrapped = getattr(self.client, name)
+ if name in self.__never_wait:
+ return unwrapped
+ elif callable(unwrapped):
+ wrapped = self._create_optional_retry_wrapper_function(unwrapped)
+ setattr(self, name, wrapped)
+ return wrapped
+ else:
+ return unwrapped
+
+
+def is_boto3_error_code(code, e=None):
+ """Check if the botocore exception is raised by a specific error code.
+
+ Returns ClientError if the error code matches, a dummy exception if it does not have an error code or does not match
+
+ Example:
+ try:
+ ec2.describe_instances(InstanceIds=['potato'])
+ except is_boto3_error_code('InvalidInstanceID.Malformed'):
+ # handle the error for that code case
+ except botocore.exceptions.ClientError as e:
+ # handle the generic error case for all other codes
+ """
+ from botocore.exceptions import ClientError
+ if e is None:
+ import sys
+ dummy, e, dummy = sys.exc_info()
+ if isinstance(e, ClientError) and e.response['Error']['Code'] == code:
+ return ClientError
+ return type('NeverEverRaisedException', (Exception,), {})
+
+
+def get_boto3_client_method_parameters(client, method_name, required=False):
+ op = client.meta.method_to_api_mapping.get(method_name)
+ input_shape = client._service_model.operation_model(op).input_shape
+ if not input_shape:
+ parameters = []
+ elif required:
+ parameters = list(input_shape.required_members)
+ else:
+ parameters = list(input_shape.members.keys())
+ return parameters
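The per-call retry opt-in provided by _RetryingBotoClientWrapper boils down to wrapping each client method twice and choosing the variant at call time. A self-contained sketch of that pattern, with a stand-in retry decorator instead of AWSRetry and a lambda instead of a real boto3 client method:

    from functools import wraps

    def dummy_retry(func):
        @wraps(func)
        def retried(*args, **kwargs):
            # A real decorator (e.g. AWSRetry.jittered_backoff) would retry on transient errors here.
            return func(*args, **kwargs)
        return retried

    def optional_retry(unwrapped, retry=dummy_retry):
        retrying = retry(unwrapped)

        @wraps(unwrapped)
        def deciding_wrapper(aws_retry=False, *args, **kwargs):
            return retrying(*args, **kwargs) if aws_retry else unwrapped(*args, **kwargs)
        return deciding_wrapper

    describe = optional_retry(lambda **kw: {'Reservations': []})
    describe(InstanceIds=['i-0123'])                  # plain call, no retries
    describe(aws_retry=True, InstanceIds=['i-0123'])  # routed through the retry decorator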
diff --git a/test/support/integration/plugins/module_utils/aws/iam.py b/test/support/integration/plugins/module_utils/aws/iam.py
new file mode 100644
index 0000000000..f05999aa37
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/iam.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import traceback
+
+try:
+ from botocore.exceptions import ClientError, NoCredentialsError
+except ImportError:
+ pass # caught by HAS_BOTO3
+
+from ansible.module_utils._text import to_native
+
+
+def get_aws_account_id(module):
+ """ Given AnsibleAWSModule instance, get the active AWS account ID
+
+    get_account_id tries to find out the account that we are working
+    on. It's not guaranteed that this will be easy so we try in
+    several different ways. Giving either IAM or STS privileges to
+ the account should be enough to permit this.
+ """
+ account_id = None
+ try:
+ sts_client = module.client('sts')
+ account_id = sts_client.get_caller_identity().get('Account')
+ # non-STS sessions may also get NoCredentialsError from this STS call, so
+ # we must catch that too and try the IAM version
+ except (ClientError, NoCredentialsError):
+ try:
+ iam_client = module.client('iam')
+ account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
+ except ClientError as e:
+ if (e.response['Error']['Code'] == 'AccessDenied'):
+ except_msg = to_native(e)
+ # don't match on `arn:aws` because of China region `arn:aws-cn` and similar
+                match = re.search(r"arn:\w+:iam::([0-9]{12,32}):\w+/", except_msg)
+                account_id = match.group(1) if match else None
+ if account_id is None:
+ module.fail_json_aws(e, msg="Could not get AWS account information")
+ except Exception as e:
+ module.fail_json(
+ msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
+ exception=traceback.format_exc()
+ )
+ if not account_id:
+ module.fail_json(msg="Failed while determining AWS account ID. Try allowing sts:GetCallerIdentity or iam:GetUser permissions.")
+ return to_native(account_id)
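A small illustration of the ARN parsing used in the AccessDenied fallback above; the error message text is fabricated for the example, and the pattern pulls the 12-digit account ID out of the ARN embedded in the message:

    import re

    msg = ("An error occurred (AccessDenied) when calling the GetUser operation: "
           "User: arn:aws:iam::123456789012:user/deployer is not authorized to perform: iam:GetUser")
    match = re.search(r"arn:\w+:iam::([0-9]{12,32}):\w+/", msg)
    print(match.group(1) if match else None)  # 123456789012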
diff --git a/test/support/integration/plugins/module_utils/aws/s3.py b/test/support/integration/plugins/module_utils/aws/s3.py
new file mode 100644
index 0000000000..2185869d49
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/s3.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # Handled by the calling module
+
+HAS_MD5 = True
+try:
+ from hashlib import md5
+except ImportError:
+ try:
+ from md5 import md5
+ except ImportError:
+ HAS_MD5 = False
+
+
+def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
+ if not HAS_MD5:
+ return None
+
+ if '-' in etag:
+ # Multi-part ETag; a hash of the hashes of each part.
+ parts = int(etag[1:-1].split('-')[1])
+ digests = []
+
+ s3_kwargs = dict(
+ Bucket=bucket,
+ Key=obj,
+ )
+ if version:
+ s3_kwargs['VersionId'] = version
+
+ with open(filename, 'rb') as f:
+ for part_num in range(1, parts + 1):
+ s3_kwargs['PartNumber'] = part_num
+ try:
+ head = s3.head_object(**s3_kwargs)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get head object")
+ digests.append(md5(f.read(int(head['ContentLength']))))
+
+ digest_squared = md5(b''.join(m.digest() for m in digests))
+ return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
+ else: # Compute the MD5 sum normally
+ return '"{0}"'.format(module.md5(filename))
diff --git a/test/support/integration/plugins/module_utils/aws/waiters.py b/test/support/integration/plugins/module_utils/aws/waiters.py
new file mode 100644
index 0000000000..25db598bcb
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/aws/waiters.py
@@ -0,0 +1,405 @@
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import botocore.waiter as core_waiter
+except ImportError:
+ pass # caught by HAS_BOTO3
+
+
+ec2_data = {
+ "version": 2,
+ "waiters": {
+ "InternetGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeInternetGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(InternetGateways) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidInternetGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "RouteTableExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeRouteTables",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(RouteTables[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidRouteTableID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SecurityGroupExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSecurityGroups",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(SecurityGroups[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidGroup.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SubnetExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "SubnetHasMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoMapPublic": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].MapPublicIpOnLaunch",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetHasAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": True,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetNoAssignIpv6": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "pathAll",
+ "expected": False,
+ "argument": "Subnets[].AssignIpv6AddressOnCreation",
+ "state": "success"
+ },
+ ]
+ },
+ "SubnetDeleted": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeSubnets",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(Subnets[]) > `0`",
+ "state": "retry"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidSubnetID.NotFound",
+ "state": "success"
+ },
+ ]
+ },
+ "VpnGatewayExists": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "length(VpnGateways[]) > `0`",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "InvalidVpnGatewayID.NotFound",
+ "state": "retry"
+ },
+ ]
+ },
+ "VpnGatewayDetached": {
+ "delay": 5,
+ "maxAttempts": 40,
+ "operation": "DescribeVpnGateways",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "VpnGateways[0].State == 'available'",
+ "state": "success"
+ },
+ ]
+ },
+ }
+}
+
+
+waf_data = {
+ "version": 2,
+ "waiters": {
+ "ChangeTokenInSync": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "GetChangeTokenStatus",
+ "acceptors": [
+ {
+ "matcher": "path",
+ "expected": True,
+ "argument": "ChangeTokenStatus == 'INSYNC'",
+ "state": "success"
+ },
+ {
+ "matcher": "error",
+ "expected": "WAFInternalErrorException",
+ "state": "retry"
+ }
+ ]
+ }
+ }
+}
+
+eks_data = {
+ "version": 2,
+ "waiters": {
+ "ClusterActive": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "path",
+ "argument": "cluster.status",
+ "expected": "ACTIVE"
+ },
+ {
+ "state": "retry",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ },
+ "ClusterDeleted": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeCluster",
+ "acceptors": [
+ {
+ "state": "retry",
+ "matcher": "path",
+ "argument": "cluster.status != 'DELETED'",
+ "expected": True
+ },
+ {
+ "state": "success",
+ "matcher": "error",
+ "expected": "ResourceNotFoundException"
+ }
+ ]
+ }
+ }
+}
+
+
+rds_data = {
+ "version": 2,
+ "waiters": {
+ "DBInstanceStopped": {
+ "delay": 20,
+ "maxAttempts": 60,
+ "operation": "DescribeDBInstances",
+ "acceptors": [
+ {
+ "state": "success",
+ "matcher": "pathAll",
+ "argument": "DBInstances[].DBInstanceStatus",
+ "expected": "stopped"
+ },
+ ]
+ }
+ }
+}
+
+
+def ec2_model(name):
+ ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data)
+ return ec2_models.get_waiter(name)
+
+
+def waf_model(name):
+ waf_models = core_waiter.WaiterModel(waiter_config=waf_data)
+ return waf_models.get_waiter(name)
+
+
+def eks_model(name):
+ eks_models = core_waiter.WaiterModel(waiter_config=eks_data)
+ return eks_models.get_waiter(name)
+
+
+def rds_model(name):
+ rds_models = core_waiter.WaiterModel(waiter_config=rds_data)
+ return rds_models.get_waiter(name)
+
+
+waiters_by_name = {
+ ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter(
+ 'internet_gateway_exists',
+ ec2_model('InternetGatewayExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_internet_gateways
+ )),
+ ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
+ 'route_table_exists',
+ ec2_model('RouteTableExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_route_tables
+ )),
+ ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
+ 'security_group_exists',
+ ec2_model('SecurityGroupExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_security_groups
+ )),
+ ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
+ 'subnet_exists',
+ ec2_model('SubnetExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
+ 'subnet_has_map_public',
+ ec2_model('SubnetHasMapPublic'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
+ 'subnet_no_map_public',
+ ec2_model('SubnetNoMapPublic'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
+ 'subnet_has_assign_ipv6',
+ ec2_model('SubnetHasAssignIpv6'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
+ 'subnet_no_assign_ipv6',
+ ec2_model('SubnetNoAssignIpv6'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
+ 'subnet_deleted',
+ ec2_model('SubnetDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_subnets
+ )),
+ ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
+ 'vpn_gateway_exists',
+ ec2_model('VpnGatewayExists'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpn_gateways
+ )),
+ ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter(
+ 'vpn_gateway_detached',
+ ec2_model('VpnGatewayDetached'),
+ core_waiter.NormalizedOperationMethod(
+ ec2.describe_vpn_gateways
+ )),
+ ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
+ 'change_token_in_sync',
+ waf_model('ChangeTokenInSync'),
+ core_waiter.NormalizedOperationMethod(
+ waf.get_change_token_status
+ )),
+ ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
+ 'change_token_in_sync',
+ waf_model('ChangeTokenInSync'),
+ core_waiter.NormalizedOperationMethod(
+ waf.get_change_token_status
+ )),
+ ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
+ 'cluster_active',
+ eks_model('ClusterActive'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_cluster
+ )),
+ ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
+ 'cluster_deleted',
+ eks_model('ClusterDeleted'),
+ core_waiter.NormalizedOperationMethod(
+ eks.describe_cluster
+ )),
+ ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
+ 'db_instance_stopped',
+ rds_model('DBInstanceStopped'),
+ core_waiter.NormalizedOperationMethod(
+ rds.describe_db_instances
+ )),
+}
+
+
+def get_waiter(client, waiter_name):
+ try:
+ return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
+ except KeyError:
+ raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
+ waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
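
For context, a minimal sketch of how a module might consume these waiters, assuming a boto3 EC2 client (boto3 names the generated client class 'EC2', which is what keys waiters_by_name); the import path and subnet ID below are illustrative only:

    import boto3

    # Hypothetical import path; use wherever this waiters module is vendored.
    from ansible.module_utils.aws.waiters import get_waiter

    ec2 = boto3.client('ec2')
    waiter = get_waiter(ec2, 'subnet_exists')
    # Polls DescribeSubnets every 5 seconds, up to 40 attempts, until the
    # subnet is returned (retrying on InvalidSubnetID.NotFound).
    waiter.wait(SubnetIds=['subnet-0123456789abcdef0'])  # illustrative ID
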
diff --git a/test/support/integration/plugins/module_utils/azure_rm_common.py b/test/support/integration/plugins/module_utils/azure_rm_common.py
new file mode 100644
index 0000000000..e995daa02e
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/azure_rm_common.py
@@ -0,0 +1,1473 @@
+# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
+# Chris Houseknecht, <house@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+import os
+import re
+import types
+import copy
+import inspect
+import traceback
+import json
+
+from os.path import expanduser
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+try:
+ from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+except Exception:
+ ANSIBLE_VERSION = 'unknown'
+from ansible.module_utils.six.moves import configparser
+import ansible.module_utils.six.moves.urllib.parse as urlparse
+
+AZURE_COMMON_ARGS = dict(
+ auth_source=dict(
+ type='str',
+ choices=['auto', 'cli', 'env', 'credential_file', 'msi']
+ ),
+ profile=dict(type='str'),
+ subscription_id=dict(type='str'),
+ client_id=dict(type='str', no_log=True),
+ secret=dict(type='str', no_log=True),
+ tenant=dict(type='str', no_log=True),
+ ad_user=dict(type='str', no_log=True),
+ password=dict(type='str', no_log=True),
+ cloud_environment=dict(type='str', default='AzureCloud'),
+ cert_validation_mode=dict(type='str', choices=['validate', 'ignore']),
+ api_profile=dict(type='str', default='latest'),
+ adfs_authority_url=dict(type='str', default=None)
+)
+
+AZURE_CREDENTIAL_ENV_MAPPING = dict(
+ profile='AZURE_PROFILE',
+ subscription_id='AZURE_SUBSCRIPTION_ID',
+ client_id='AZURE_CLIENT_ID',
+ secret='AZURE_SECRET',
+ tenant='AZURE_TENANT',
+ ad_user='AZURE_AD_USER',
+ password='AZURE_PASSWORD',
+ cloud_environment='AZURE_CLOUD_ENVIRONMENT',
+ cert_validation_mode='AZURE_CERT_VALIDATION_MODE',
+ adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
+)
+
+
+class SDKProfile(object): # pylint: disable=too-few-public-methods
+
+ def __init__(self, default_api_version, profile=None):
+ """Constructor.
+
+ :param str default_api_version: Default API version if not overridden by a profile. Nullable.
+ :param profile: A dict mapping operation group name to API version.
+ :type profile: dict[str, str]
+ """
+ self.profile = profile if profile is not None else {}
+ self.profile[None] = default_api_version
+
+ @property
+ def default_api_version(self):
+ return self.profile[None]
+
+
+# FUTURE: this should come from the SDK or an external location.
+# For now, we have to copy from azure-cli
+AZURE_API_PROFILES = {
+ 'latest': {
+ 'ContainerInstanceManagementClient': '2018-02-01-preview',
+ 'ComputeManagementClient': dict(
+ default_api_version='2018-10-01',
+ resource_skus='2018-10-01',
+ disks='2018-06-01',
+ snapshots='2018-10-01',
+ virtual_machine_run_commands='2018-10-01'
+ ),
+ 'NetworkManagementClient': '2018-08-01',
+ 'ResourceManagementClient': '2017-05-10',
+ 'StorageManagementClient': '2017-10-01',
+ 'WebSiteManagementClient': '2018-02-01',
+ 'PostgreSQLManagementClient': '2017-12-01',
+ 'MySQLManagementClient': '2017-12-01',
+ 'MariaDBManagementClient': '2019-03-01',
+ 'ManagementLockClient': '2016-09-01'
+ },
+ '2019-03-01-hybrid': {
+ 'StorageManagementClient': '2017-10-01',
+ 'NetworkManagementClient': '2017-10-01',
+ 'ComputeManagementClient': SDKProfile('2017-12-01', {
+ 'resource_skus': '2017-09-01',
+ 'disks': '2017-03-30',
+ 'snapshots': '2017-03-30'
+ }),
+ 'ManagementLinkClient': '2016-09-01',
+ 'ManagementLockClient': '2016-09-01',
+ 'PolicyClient': '2016-12-01',
+ 'ResourceManagementClient': '2018-05-01',
+ 'SubscriptionClient': '2016-06-01',
+ 'DnsManagementClient': '2016-04-01',
+ 'KeyVaultManagementClient': '2016-10-01',
+ 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
+ 'classic_administrators': '2015-06-01',
+ 'policy_assignments': '2016-12-01',
+ 'policy_definitions': '2016-12-01'
+ }),
+ 'KeyVaultClient': '2016-10-01',
+ 'azure.multiapi.storage': '2017-11-09',
+ 'azure.multiapi.cosmosdb': '2017-04-17'
+ },
+ '2018-03-01-hybrid': {
+ 'StorageManagementClient': '2016-01-01',
+ 'NetworkManagementClient': '2017-10-01',
+ 'ComputeManagementClient': SDKProfile('2017-03-30'),
+ 'ManagementLinkClient': '2016-09-01',
+ 'ManagementLockClient': '2016-09-01',
+ 'PolicyClient': '2016-12-01',
+ 'ResourceManagementClient': '2018-02-01',
+ 'SubscriptionClient': '2016-06-01',
+ 'DnsManagementClient': '2016-04-01',
+ 'KeyVaultManagementClient': '2016-10-01',
+ 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
+ 'classic_administrators': '2015-06-01'
+ }),
+ 'KeyVaultClient': '2016-10-01',
+ 'azure.multiapi.storage': '2017-04-17',
+ 'azure.multiapi.cosmosdb': '2017-04-17'
+ },
+ '2017-03-09-profile': {
+ 'StorageManagementClient': '2016-01-01',
+ 'NetworkManagementClient': '2015-06-15',
+ 'ComputeManagementClient': SDKProfile('2016-03-30'),
+ 'ManagementLinkClient': '2016-09-01',
+ 'ManagementLockClient': '2015-01-01',
+ 'PolicyClient': '2015-10-01-preview',
+ 'ResourceManagementClient': '2016-02-01',
+ 'SubscriptionClient': '2016-06-01',
+ 'DnsManagementClient': '2016-04-01',
+ 'KeyVaultManagementClient': '2016-10-01',
+ 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
+ 'classic_administrators': '2015-06-01'
+ }),
+ 'KeyVaultClient': '2016-10-01',
+ 'azure.multiapi.storage': '2015-04-05'
+ }
+}
+
+AZURE_TAG_ARGS = dict(
+ tags=dict(type='dict'),
+ append_tags=dict(type='bool', default=True),
+)
+
+AZURE_COMMON_REQUIRED_IF = [
+ ('log_mode', 'file', ['log_path'])
+]
+
+ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
+CLOUDSHELL_USER_AGENT_KEY = 'AZURE_HTTP_USER_AGENT'
+VSCODEEXT_USER_AGENT_KEY = 'VSCODEEXT_USER_AGENT'
+
+CIDR_PATTERN = re.compile(r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1"
+ r"[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))")
+
+AZURE_SUCCESS_STATE = "Succeeded"
+AZURE_FAILED_STATE = "Failed"
+
+HAS_AZURE = True
+HAS_AZURE_EXC = None
+HAS_AZURE_CLI_CORE = True
+HAS_AZURE_CLI_CORE_EXC = None
+
+HAS_MSRESTAZURE = True
+HAS_MSRESTAZURE_EXC = None
+
+try:
+ import importlib
+except ImportError:
+ # This passes the sanity import test, but does not provide a user friendly error message.
+ # Doing so would require catching Exception for all imports of Azure dependencies in modules and module_utils.
+ importlib = None
+
+try:
+ from packaging.version import Version
+ HAS_PACKAGING_VERSION = True
+ HAS_PACKAGING_VERSION_EXC = None
+except ImportError:
+ Version = None
+ HAS_PACKAGING_VERSION = False
+ HAS_PACKAGING_VERSION_EXC = traceback.format_exc()
+
+# NB: packaging issues sometimes cause msrestazure not to be installed; check it separately
+try:
+ from msrest.serialization import Serializer
+except ImportError:
+ HAS_MSRESTAZURE_EXC = traceback.format_exc()
+ HAS_MSRESTAZURE = False
+
+try:
+ from enum import Enum
+ from msrestazure.azure_active_directory import AADTokenCredentials
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_active_directory import MSIAuthentication
+ from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id
+ from msrestazure import azure_cloud
+ from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
+ from azure.mgmt.monitor.version import VERSION as monitor_client_version
+ from azure.mgmt.network.version import VERSION as network_client_version
+ from azure.mgmt.storage.version import VERSION as storage_client_version
+ from azure.mgmt.compute.version import VERSION as compute_client_version
+ from azure.mgmt.resource.version import VERSION as resource_client_version
+ from azure.mgmt.dns.version import VERSION as dns_client_version
+ from azure.mgmt.web.version import VERSION as web_client_version
+ from azure.mgmt.network import NetworkManagementClient
+ from azure.mgmt.resource.resources import ResourceManagementClient
+ from azure.mgmt.resource.subscriptions import SubscriptionClient
+ from azure.mgmt.storage import StorageManagementClient
+ from azure.mgmt.compute import ComputeManagementClient
+ from azure.mgmt.dns import DnsManagementClient
+ from azure.mgmt.monitor import MonitorManagementClient
+ from azure.mgmt.web import WebSiteManagementClient
+ from azure.mgmt.containerservice import ContainerServiceClient
+ from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements
+ from azure.mgmt.trafficmanager import TrafficManagerManagementClient
+ from azure.storage.cloudstorageaccount import CloudStorageAccount
+ from azure.storage.blob import PageBlobService, BlockBlobService
+ from adal.authentication_context import AuthenticationContext
+ from azure.mgmt.sql import SqlManagementClient
+ from azure.mgmt.servicebus import ServiceBusManagementClient
+ import azure.mgmt.servicebus.models as ServicebusModel
+ from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
+ from azure.mgmt.rdbms.mysql import MySQLManagementClient
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from azure.mgmt.containerregistry import ContainerRegistryManagementClient
+ from azure.mgmt.containerinstance import ContainerInstanceManagementClient
+ from azure.mgmt.loganalytics import LogAnalyticsManagementClient
+ import azure.mgmt.loganalytics.models as LogAnalyticsModels
+ from azure.mgmt.automation import AutomationClient
+ import azure.mgmt.automation.models as AutomationModel
+ from azure.mgmt.iothub import IotHubClient
+ from azure.mgmt.iothub import models as IoTHubModels
+ from msrest.service_client import ServiceClient
+ from msrestazure import AzureConfiguration
+ from msrest.authentication import Authentication
+ from azure.mgmt.resource.locks import ManagementLockClient
+except ImportError as exc:
+ Authentication = object
+ HAS_AZURE_EXC = traceback.format_exc()
+ HAS_AZURE = False
+
+from base64 import b64encode, b64decode
+from hashlib import sha256
+from hmac import HMAC
+from time import time
+
+try:
+ from urllib import (urlencode, quote_plus)
+except ImportError:
+ from urllib.parse import (urlencode, quote_plus)
+
+try:
+ from azure.cli.core.util import CLIError
+ from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
+ from azure.common.cloud import get_cli_active_cloud
+except ImportError:
+ HAS_AZURE_CLI_CORE = False
+ HAS_AZURE_CLI_CORE_EXC = None
+ CLIError = Exception
+
+
+def azure_id_to_dict(id):
+ pieces = re.sub(r'^\/', '', id).split('/')
+ result = {}
+ index = 0
+ while index < len(pieces) - 1:
+ result[pieces[index]] = pieces[index + 1]
+ index += 1
+ return result
+
+
+def format_resource_id(val, subscription_id, namespace, types, resource_group):
+ return resource_id(name=val,
+ resource_group=resource_group,
+ namespace=namespace,
+ type=types,
+ subscription=subscription_id) if not is_valid_resource_id(val) else val
+
+
+def normalize_location_name(name):
+ return name.replace(' ', '').lower()
+
+
+# FUTURE: either get this from the requirements file (if we can be sure it's always available at runtime)
+# or generate the requirements files from this so we only have one source of truth to maintain...
+AZURE_PKG_VERSIONS = {
+ 'StorageManagementClient': {
+ 'package_name': 'storage',
+ 'expected_version': '3.1.0'
+ },
+ 'ComputeManagementClient': {
+ 'package_name': 'compute',
+ 'expected_version': '4.4.0'
+ },
+ 'ContainerInstanceManagementClient': {
+ 'package_name': 'containerinstance',
+ 'expected_version': '0.4.0'
+ },
+ 'NetworkManagementClient': {
+ 'package_name': 'network',
+ 'expected_version': '2.3.0'
+ },
+ 'ResourceManagementClient': {
+ 'package_name': 'resource',
+ 'expected_version': '2.1.0'
+ },
+ 'DnsManagementClient': {
+ 'package_name': 'dns',
+ 'expected_version': '2.1.0'
+ },
+ 'WebSiteManagementClient': {
+ 'package_name': 'web',
+ 'expected_version': '0.41.0'
+ },
+ 'TrafficManagerManagementClient': {
+ 'package_name': 'trafficmanager',
+ 'expected_version': '0.50.0'
+ },
+} if HAS_AZURE else {}
+
+
+AZURE_MIN_RELEASE = '2.0.0'
+
+
+class AzureRMModuleBase(object):
+ def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
+ mutually_exclusive=None, required_together=None,
+ required_one_of=None, add_file_common_args=False, supports_check_mode=False,
+ required_if=None, supports_tags=True, facts_module=False, skip_exec=False):
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(AZURE_COMMON_ARGS)
+ if supports_tags:
+ merged_arg_spec.update(AZURE_TAG_ARGS)
+
+ if derived_arg_spec:
+ merged_arg_spec.update(derived_arg_spec)
+
+ merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
+ if required_if:
+ merged_required_if += required_if
+
+ self.module = AnsibleModule(argument_spec=merged_arg_spec,
+ bypass_checks=bypass_checks,
+ no_log=no_log,
+ mutually_exclusive=mutually_exclusive,
+ required_together=required_together,
+ required_one_of=required_one_of,
+ add_file_common_args=add_file_common_args,
+ supports_check_mode=supports_check_mode,
+ required_if=merged_required_if)
+
+ if not HAS_PACKAGING_VERSION:
+ self.fail(msg=missing_required_lib('packaging'),
+ exception=HAS_PACKAGING_VERSION_EXC)
+
+ if not HAS_MSRESTAZURE:
+ self.fail(msg=missing_required_lib('msrestazure'),
+ exception=HAS_MSRESTAZURE_EXC)
+
+ if not HAS_AZURE:
+ self.fail(msg=missing_required_lib('ansible[azure] (azure >= {0})'.format(AZURE_MIN_RELEASE)),
+ exception=HAS_AZURE_EXC)
+
+ self._network_client = None
+ self._storage_client = None
+ self._resource_client = None
+ self._compute_client = None
+ self._dns_client = None
+ self._web_client = None
+ self._marketplace_client = None
+ self._sql_client = None
+ self._mysql_client = None
+ self._mariadb_client = None
+ self._postgresql_client = None
+ self._containerregistry_client = None
+ self._containerinstance_client = None
+ self._containerservice_client = None
+ self._managedcluster_client = None
+ self._traffic_manager_management_client = None
+ self._monitor_client = None
+ self._resource = None
+ self._log_analytics_client = None
+ self._servicebus_client = None
+ self._automation_client = None
+ self._IoThub_client = None
+ self._lock_client = None
+
+ self.check_mode = self.module.check_mode
+ self.api_profile = self.module.params.get('api_profile')
+ self.facts_module = facts_module
+ # self.debug = self.module.params.get('debug')
+
+ # delegate auth to AzureRMAuth class (shared with all plugin types)
+ self.azure_auth = AzureRMAuth(fail_impl=self.fail, **self.module.params)
+
+ # common parameter validation
+ if self.module.params.get('tags'):
+ self.validate_tags(self.module.params['tags'])
+
+ if not skip_exec:
+ res = self.exec_module(**self.module.params)
+ self.module.exit_json(**res)
+
+ def check_client_version(self, client_type):
+ # Ensure the installed Azure SDK client package meets the minimum version expected for this client type.
+ package_version = AZURE_PKG_VERSIONS.get(client_type.__name__, None)
+ if package_version is not None:
+ client_name = package_version.get('package_name')
+ try:
+ client_module = importlib.import_module(client_type.__module__)
+ client_version = client_module.VERSION
+ except (RuntimeError, AttributeError):
+ # can't get at the module version for some reason, just fail silently...
+ return
+ expected_version = package_version.get('expected_version')
+ if Version(client_version) < Version(expected_version):
+ self.fail("Installed azure-mgmt-{0} client version is {1}. The minimum supported version is {2}. Try "
+ "`pip install ansible[azure]`".format(client_name, client_version, expected_version))
+ if Version(client_version) != Version(expected_version):
+ self.module.warn("Installed azure-mgmt-{0} client version is {1}. The expected version is {2}. Try "
+ "`pip install ansible[azure]`".format(client_name, client_version, expected_version))
+
+ def exec_module(self, **kwargs):
+ self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))
+
+ def fail(self, msg, **kwargs):
+ '''
+ Shortcut for calling module.fail()
+
+ :param msg: Error message text.
+ :param kwargs: Any key=value pairs
+ :return: None
+ '''
+ self.module.fail_json(msg=msg, **kwargs)
+
+ def deprecate(self, msg, version=None):
+ self.module.deprecate(msg, version)
+
+ def log(self, msg, pretty_print=False):
+ if pretty_print:
+ self.module.debug(json.dumps(msg, indent=4, sort_keys=True))
+ else:
+ self.module.debug(msg)
+
+ def validate_tags(self, tags):
+ '''
+ Check if tags dictionary contains string:string pairs.
+
+ :param tags: dictionary of string:string pairs
+ :return: None
+ '''
+ if not self.facts_module:
+ if not isinstance(tags, dict):
+ self.fail("Tags must be a dictionary of string:string values.")
+ for key, value in tags.items():
+ if not isinstance(value, str):
+ self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))
+
+ def update_tags(self, tags):
+ '''
+ Call from the module to update metadata tags. Returns a tuple: a bool indicating whether there
+ was a change, and a dict of the new tags to assign to the object.
+
+ :param tags: metadata tags from the object
+ :return: bool, dict
+ '''
+ tags = tags or dict()
+ new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
+ param_tags = self.module.params.get('tags') if isinstance(self.module.params.get('tags'), dict) else dict()
+ append_tags = self.module.params.get('append_tags') if self.module.params.get('append_tags') is not None else True
+ changed = False
+ # check add or update
+ for key, value in param_tags.items():
+ if not new_tags.get(key) or new_tags[key] != value:
+ changed = True
+ new_tags[key] = value
+ # check remove
+ if not append_tags:
+ for key, value in tags.items():
+ if not param_tags.get(key):
+ new_tags.pop(key)
+ changed = True
+ return changed, new_tags
+
+ def has_tags(self, obj_tags, tag_list):
+ '''
+ Used in fact modules to compare object tags to a list of parameter tags. Return True if every tag in
+ the parameter list exists in the object's tags.
+
+ :param obj_tags: dictionary of tags from an Azure object.
+ :param tag_list: list of tag keys or tag key:value pairs
+ :return: bool
+ '''
+
+ if not obj_tags and tag_list:
+ return False
+
+ if not tag_list:
+ return True
+
+ matches = 0
+ result = False
+ for tag in tag_list:
+ tag_key = tag
+ tag_value = None
+ if ':' in tag:
+ tag_key, tag_value = tag.split(':')
+ if tag_value and obj_tags.get(tag_key) == tag_value:
+ matches += 1
+ elif not tag_value and obj_tags.get(tag_key):
+ matches += 1
+ if matches == len(tag_list):
+ result = True
+ return result
+
+ def get_resource_group(self, resource_group):
+ '''
+ Fetch a resource group.
+
+ :param resource_group: name of a resource group
+ :return: resource group object
+ '''
+ try:
+ return self.rm_client.resource_groups.get(resource_group)
+ except CloudError as cloud_error:
+ self.fail("Error retrieving resource group {0} - {1}".format(resource_group, cloud_error.message))
+ except Exception as exc:
+ self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))
+
+ def parse_resource_to_dict(self, resource):
+ '''
+ Return a dict of the given resource, containing its name and resource group.
+
+ :param resource: Can be a resource name, a resource id, or a dict containing the name and resource group.
+ '''
+ resource_dict = parse_resource_id(resource) if not isinstance(resource, dict) else resource
+ resource_dict['resource_group'] = resource_dict.get('resource_group', self.resource_group)
+ resource_dict['subscription_id'] = resource_dict.get('subscription_id', self.subscription_id)
+ return resource_dict
+
+ def serialize_obj(self, obj, class_name, enum_modules=None):
+ '''
+ Return a JSON representation of an Azure object.
+
+ :param obj: Azure object
+ :param class_name: Name of the object's class
+ :param enum_modules: List of module names to build enum dependencies from.
+ :return: serialized result
+ '''
+ enum_modules = [] if enum_modules is None else enum_modules
+
+ dependencies = dict()
+ if enum_modules:
+ for module_name in enum_modules:
+ mod = importlib.import_module(module_name)
+ for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
+ dependencies[mod_class_name] = mod_class_obj
+ self.log("dependencies: ")
+ self.log(str(dependencies))
+ serializer = Serializer(classes=dependencies)
+ return serializer.body(obj, class_name, keep_readonly=True)
+
+ def get_poller_result(self, poller, wait=5):
+ '''
+ Consistent method of waiting on and retrieving results from Azure's long poller
+
+ :param poller: Azure poller object
+ :param wait: seconds to wait between polling checks
+ :return: object resulting from the original request
+ '''
+ try:
+ delay = wait
+ while not poller.done():
+ self.log("Waiting for {0} sec".format(delay))
+ poller.wait(timeout=delay)
+ return poller.result()
+ except Exception as exc:
+ self.log(str(exc))
+ raise
+
+ def check_provisioning_state(self, azure_object, requested_state='present'):
+ '''
+ Check an Azure object's provisioning state. If something did not complete the provisioning
+ process, then we cannot operate on it.
+
+ :param azure_object: An object such as a subnet or storage account. Must have provisioning_state
+ and name attributes.
+ :param requested_state: The state requested by the module ('present' or 'absent').
+ :return: None
+ '''
+
+ if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \
+ hasattr(azure_object, 'name'):
+ # resource group object fits this model
+ if isinstance(azure_object.properties.provisioning_state, Enum):
+ if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \
+ requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
+ return
+ if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \
+ requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
+ return
+
+ if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):
+ if isinstance(azure_object.provisioning_state, Enum):
+ if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
+ return
+ if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':
+ self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
+ azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
+
+ def get_blob_client(self, resource_group_name, storage_account_name, storage_blob_type='block'):
+ keys = dict()
+ try:
+ # Get keys from the storage account
+ self.log('Getting keys')
+ account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
+ except Exception as exc:
+ self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
+
+ try:
+ self.log('Create blob service')
+ if storage_blob_type == 'page':
+ return PageBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
+ account_name=storage_account_name,
+ account_key=account_keys.keys[0].value)
+ elif storage_blob_type == 'block':
+ return BlockBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
+ account_name=storage_account_name,
+ account_key=account_keys.keys[0].value)
+ else:
+ raise Exception("Invalid storage blob type defined.")
+ except Exception as exc:
+ self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
+ str(exc)))
+
+ def create_default_pip(self, resource_group, location, public_ip_name, allocation_method='Dynamic', sku=None):
+ '''
+ Create a default public IP address <public_ip_name> to associate with a network interface.
+ If a PIP address matching <public_ip_name> exists, return it. Otherwise, create one.
+
+ :param resource_group: name of an existing resource group
+ :param location: a valid azure location
+ :param public_ip_name: base name to assign the public IP address
+ :param allocation_method: one of 'Static' or 'Dynamic'
+ :param sku: sku
+ :return: PIP object
+ '''
+ pip = None
+
+ self.log("Starting create_default_pip {0}".format(public_ip_name))
+ self.log("Check to see if public IP {0} exists".format(public_ip_name))
+ try:
+ pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)
+ except CloudError:
+ pass
+
+ if pip:
+ self.log("Public ip {0} found.".format(public_ip_name))
+ self.check_provisioning_state(pip)
+ return pip
+
+ params = self.network_models.PublicIPAddress(
+ location=location,
+ public_ip_allocation_method=allocation_method,
+ sku=sku
+ )
+ self.log('Creating default public IP {0}'.format(public_ip_name))
+ try:
+ poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
+ except Exception as exc:
+ self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))
+
+ return self.get_poller_result(poller)
+
+ def create_default_securitygroup(self, resource_group, location, security_group_name, os_type, open_ports):
+ '''
+ Create a default security group <security_group_name> to associate with a network interface. If a security group matching
+ <security_group_name> exists, return it. Otherwise, create one.
+
+ :param resource_group: Resource group name
+ :param location: azure location name
+ :param security_group_name: base name to use for the security group
+ :param os_type: one of 'Windows' or 'Linux'. Determines any default rules added to the security group.
+ :param open_ports: list of ports to open instead of the OS-type defaults (SSH for Linux; RDP and
+ WinRM for Windows).
+ :return: security_group object
+ '''
+ group = None
+
+ self.log("Create security group {0}".format(security_group_name))
+ self.log("Check to see if security group {0} exists".format(security_group_name))
+ try:
+ group = self.network_client.network_security_groups.get(resource_group, security_group_name)
+ except CloudError:
+ pass
+
+ if group:
+ self.log("Security group {0} found.".format(security_group_name))
+ self.check_provisioning_state(group)
+ return group
+
+ parameters = self.network_models.NetworkSecurityGroup()
+ parameters.location = location
+
+ if not open_ports:
+ # Open default ports based on OS type
+ if os_type == 'Linux':
+ # add an inbound SSH rule
+ parameters.security_rules = [
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ description='Allow SSH Access',
+ source_port_range='*',
+ destination_port_range='22',
+ priority=100,
+ name='SSH')
+ ]
+ parameters.location = location
+ else:
+ # for windows add inbound RDP and WinRM rules
+ parameters.security_rules = [
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ description='Allow RDP port 3389',
+ source_port_range='*',
+ destination_port_range='3389',
+ priority=100,
+ name='RDP01'),
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ description='Allow WinRM HTTPS port 5986',
+ source_port_range='*',
+ destination_port_range='5986',
+ priority=101,
+ name='WinRM01'),
+ ]
+ else:
+ # Open custom ports
+ parameters.security_rules = []
+ priority = 100
+ for port in open_ports:
+ priority += 1
+ rule_name = "Rule_{0}".format(priority)
+ parameters.security_rules.append(
+ self.network_models.SecurityRule(protocol='Tcp',
+ source_address_prefix='*',
+ destination_address_prefix='*',
+ access='Allow',
+ direction='Inbound',
+ source_port_range='*',
+ destination_port_range=str(port),
+ priority=priority,
+ name=rule_name)
+ )
+
+ self.log('Creating default security group {0}'.format(security_group_name))
+ try:
+ poller = self.network_client.network_security_groups.create_or_update(resource_group,
+ security_group_name,
+ parameters)
+ except Exception as exc:
+ self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))
+
+ return self.get_poller_result(poller)
+
+ @staticmethod
+ def _validation_ignore_callback(session, global_config, local_config, **kwargs):
+ session.verify = False
+
+ def get_api_profile(self, client_type_name, api_profile_name):
+ profile_all_clients = AZURE_API_PROFILES.get(api_profile_name)
+
+ if not profile_all_clients:
+ raise KeyError("unknown Azure API profile: {0}".format(api_profile_name))
+
+ profile_raw = profile_all_clients.get(client_type_name, None)
+
+ if not profile_raw:
+ self.module.warn("Azure API profile {0} does not define an entry for {1}".format(api_profile_name, client_type_name))
+
+ if isinstance(profile_raw, dict):
+ if not profile_raw.get('default_api_version'):
+ raise KeyError("Azure API profile {0} does not define 'default_api_version'".format(api_profile_name))
+ return profile_raw
+
+ # wrap basic strings in a dict that just defines the default
+ return dict(default_api_version=profile_raw)
+
+ def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None):
+ self.log('Getting management service client {0}'.format(client_type.__name__))
+ self.check_client_version(client_type)
+
+ client_argspec = inspect.getargspec(client_type.__init__)
+
+ if not base_url:
+ # most things are resource_manager, don't make everyone specify
+ base_url = self.azure_auth._cloud_environment.endpoints.resource_manager
+
+ client_kwargs = dict(credentials=self.azure_auth.azure_credentials, subscription_id=self.azure_auth.subscription_id, base_url=base_url)
+
+ api_profile_dict = {}
+
+ if self.api_profile:
+ api_profile_dict = self.get_api_profile(client_type.__name__, self.api_profile)
+
+ # unversioned clients won't accept profile; only send it if necessary
+ # clients without a version specified in the profile will use the default
+ if api_profile_dict and 'profile' in client_argspec.args:
+ client_kwargs['profile'] = api_profile_dict
+
+ # If the client doesn't accept api_version, it's unversioned.
+ # If it does, favor explicitly-specified api_version, fall back to api_profile
+ if 'api_version' in client_argspec.args:
+ profile_default_version = api_profile_dict.get('default_api_version', None)
+ if api_version or profile_default_version:
+ client_kwargs['api_version'] = api_version or profile_default_version
+ if 'profile' in client_kwargs:
+ # remove profile; only pass API version if specified
+ client_kwargs.pop('profile')
+
+ client = client_type(**client_kwargs)
+
+ # FUTURE: remove this once everything exposes models directly (eg, containerinstance)
+ try:
+ getattr(client, "models")
+ except AttributeError:
+ def _ansible_get_models(self, *arg, **kwarg):
+ return self._ansible_models
+
+ setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)
+ client.models = types.MethodType(_ansible_get_models, client)
+
+ client.config = self.add_user_agent(client.config)
+
+ if self.azure_auth._cert_validation_mode == 'ignore':
+ client.config.session_configuration_callback = self._validation_ignore_callback
+
+ return client
+
+ def add_user_agent(self, config):
+ # Add user agent for Ansible
+ config.add_user_agent(ANSIBLE_USER_AGENT)
+ # Add user agent when running from Cloud Shell
+ if CLOUDSHELL_USER_AGENT_KEY in os.environ:
+ config.add_user_agent(os.environ[CLOUDSHELL_USER_AGENT_KEY])
+ # Add user agent when running from VSCode extension
+ if VSCODEEXT_USER_AGENT_KEY in os.environ:
+ config.add_user_agent(os.environ[VSCODEEXT_USER_AGENT_KEY])
+ return config
+
+ def generate_sas_token(self, **kwags):
+ base_url = kwags.get('base_url', None)
+ expiry = kwags.get('expiry', time() + 3600)
+ key = kwags.get('key', None)
+ policy = kwags.get('policy', None)
+ url = quote_plus(base_url)
+ ttl = int(expiry)
+ sign_key = '{0}\n{1}'.format(url, ttl)
+ signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest())
+ result = {
+ 'sr': url,
+ 'sig': signature,
+ 'se': str(ttl),
+ }
+ if policy:
+ result['skn'] = policy
+ return 'SharedAccessSignature ' + urlencode(result)
+
+ def get_data_svc_client(self, **kwags):
+ url = kwags.get('base_url', None)
+ config = AzureConfiguration(base_url='https://{0}'.format(url))
+ config.credentials = AzureSASAuthentication(token=self.generate_sas_token(**kwags))
+ config = self.add_user_agent(config)
+ return ServiceClient(creds=config.credentials, config=config)
+
+ # passthru methods to AzureAuth instance for backcompat
+ @property
+ def credentials(self):
+ return self.azure_auth.credentials
+
+ @property
+ def _cloud_environment(self):
+ return self.azure_auth._cloud_environment
+
+ @property
+ def subscription_id(self):
+ return self.azure_auth.subscription_id
+
+ @property
+ def storage_client(self):
+ self.log('Getting storage client...')
+ if not self._storage_client:
+ self._storage_client = self.get_mgmt_svc_client(StorageManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-07-01')
+ return self._storage_client
+
+ @property
+ def storage_models(self):
+ return StorageManagementClient.models("2018-07-01")
+
+ @property
+ def network_client(self):
+ self.log('Getting network client')
+ if not self._network_client:
+ self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2019-06-01')
+ return self._network_client
+
+ @property
+ def network_models(self):
+ self.log("Getting network models...")
+ return NetworkManagementClient.models("2018-08-01")
+
+ @property
+ def rm_client(self):
+ self.log('Getting resource manager client')
+ if not self._resource_client:
+ self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2017-05-10')
+ return self._resource_client
+
+ @property
+ def rm_models(self):
+ self.log("Getting resource manager models")
+ return ResourceManagementClient.models("2017-05-10")
+
+ @property
+ def compute_client(self):
+ self.log('Getting compute client')
+ if not self._compute_client:
+ self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2019-07-01')
+ return self._compute_client
+
+ @property
+ def compute_models(self):
+ self.log("Getting compute models")
+ return ComputeManagementClient.models("2019-07-01")
+
+ @property
+ def dns_client(self):
+ self.log('Getting dns client')
+ if not self._dns_client:
+ self._dns_client = self.get_mgmt_svc_client(DnsManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-05-01')
+ return self._dns_client
+
+ @property
+ def dns_models(self):
+ self.log("Getting dns models...")
+ return DnsManagementClient.models('2018-05-01')
+
+ @property
+ def web_client(self):
+ self.log('Getting web client')
+ if not self._web_client:
+ self._web_client = self.get_mgmt_svc_client(WebSiteManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-02-01')
+ return self._web_client
+
+ @property
+ def containerservice_client(self):
+ self.log('Getting container service client')
+ if not self._containerservice_client:
+ self._containerservice_client = self.get_mgmt_svc_client(ContainerServiceClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2017-07-01')
+ return self._containerservice_client
+
+ @property
+ def managedcluster_models(self):
+ self.log("Getting container service models")
+ return ContainerServiceClient.models('2018-03-31')
+
+ @property
+ def managedcluster_client(self):
+ self.log('Getting container service client')
+ if not self._managedcluster_client:
+ self._managedcluster_client = self.get_mgmt_svc_client(ContainerServiceClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-03-31')
+ return self._managedcluster_client
+
+ @property
+ def sql_client(self):
+ self.log('Getting SQL client')
+ if not self._sql_client:
+ self._sql_client = self.get_mgmt_svc_client(SqlManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._sql_client
+
+ @property
+ def postgresql_client(self):
+ self.log('Getting PostgreSQL client')
+ if not self._postgresql_client:
+ self._postgresql_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._postgresql_client
+
+ @property
+ def mysql_client(self):
+ self.log('Getting MySQL client')
+ if not self._mysql_client:
+ self._mysql_client = self.get_mgmt_svc_client(MySQLManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._mysql_client
+
+ @property
+ def mariadb_client(self):
+ self.log('Getting MariaDB client')
+ if not self._mariadb_client:
+ self._mariadb_client = self.get_mgmt_svc_client(MariaDBManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._mariadb_client
+
+ @property
+ def containerregistry_client(self):
+ self.log('Getting container registry mgmt client')
+ if not self._containerregistry_client:
+ self._containerregistry_client = self.get_mgmt_svc_client(ContainerRegistryManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2017-10-01')
+
+ return self._containerregistry_client
+
+ @property
+ def containerinstance_client(self):
+ self.log('Getting container instance mgmt client')
+ if not self._containerinstance_client:
+ self._containerinstance_client = self.get_mgmt_svc_client(ContainerInstanceManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2018-06-01')
+
+ return self._containerinstance_client
+
+ @property
+ def marketplace_client(self):
+ self.log('Getting marketplace agreement client')
+ if not self._marketplace_client:
+ self._marketplace_client = self.get_mgmt_svc_client(MarketplaceOrderingAgreements,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._marketplace_client
+
+ @property
+ def traffic_manager_management_client(self):
+ self.log('Getting traffic manager client')
+ if not self._traffic_manager_management_client:
+ self._traffic_manager_management_client = self.get_mgmt_svc_client(TrafficManagerManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._traffic_manager_management_client
+
+ @property
+ def monitor_client(self):
+ self.log('Getting monitor client')
+ if not self._monitor_client:
+ self._monitor_client = self.get_mgmt_svc_client(MonitorManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._monitor_client
+
+ @property
+ def log_analytics_client(self):
+ self.log('Getting log analytics client')
+ if not self._log_analytics_client:
+ self._log_analytics_client = self.get_mgmt_svc_client(LogAnalyticsManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._log_analytics_client
+
+ @property
+ def log_analytics_models(self):
+ self.log('Getting log analytics models')
+ return LogAnalyticsModels
+
+ @property
+ def servicebus_client(self):
+ self.log('Getting servicebus client')
+ if not self._servicebus_client:
+ self._servicebus_client = self.get_mgmt_svc_client(ServiceBusManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._servicebus_client
+
+ @property
+ def servicebus_models(self):
+ return ServicebusModel
+
+ @property
+ def automation_client(self):
+ self.log('Getting automation client')
+ if not self._automation_client:
+ self._automation_client = self.get_mgmt_svc_client(AutomationClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._automation_client
+
+ @property
+ def automation_models(self):
+ return AutomationModel
+
+ @property
+ def IoThub_client(self):
+ self.log('Getting iothub client')
+ if not self._IoThub_client:
+ self._IoThub_client = self.get_mgmt_svc_client(IotHubClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+ return self._IoThub_client
+
+ @property
+ def IoThub_models(self):
+ return IoTHubModels
+
+ @property
+ def lock_client(self):
+ self.log('Getting lock client')
+ if not self._lock_client:
+ self._lock_client = self.get_mgmt_svc_client(ManagementLockClient,
+ base_url=self._cloud_environment.endpoints.resource_manager,
+ api_version='2016-09-01')
+ return self._lock_client
+
+ @property
+ def lock_models(self):
+ self.log("Getting lock models")
+ return ManagementLockClient.models('2016-09-01')
+
+
+class AzureSASAuthentication(Authentication):
+ """Simple SAS Authentication.
+ An implementation of Authentication in
+ https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/authentication.py
+
+ :param str token: SAS token
+ """
+ def __init__(self, token):
+ self.token = token
+
+ def signed_session(self):
+ session = super(AzureSASAuthentication, self).signed_session()
+ session.headers['Authorization'] = self.token
+ return session
+
+
+class AzureRMAuthException(Exception):
+ pass
+
+
+class AzureRMAuth(object):
+ def __init__(self, auth_source='auto', profile=None, subscription_id=None, client_id=None, secret=None,
+ tenant=None, ad_user=None, password=None, cloud_environment='AzureCloud', cert_validation_mode='validate',
+ api_profile='latest', adfs_authority_url=None, fail_impl=None, **kwargs):
+
+ if fail_impl:
+ self._fail_impl = fail_impl
+ else:
+ self._fail_impl = self._default_fail_impl
+
+ self._cloud_environment = None
+ self._adfs_authority_url = None
+
+ # authenticate
+ self.credentials = self._get_credentials(
+ dict(auth_source=auth_source, profile=profile, subscription_id=subscription_id, client_id=client_id, secret=secret,
+ tenant=tenant, ad_user=ad_user, password=password, cloud_environment=cloud_environment,
+ cert_validation_mode=cert_validation_mode, api_profile=api_profile, adfs_authority_url=adfs_authority_url))
+
+ if not self.credentials:
+ if HAS_AZURE_CLI_CORE:
+ self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
+ "define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`).")
+ else:
+ self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
+ "define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`).")
+
+ # cert validation mode precedence: module-arg, credential profile, env, "validate"
+ self._cert_validation_mode = cert_validation_mode or self.credentials.get('cert_validation_mode') or \
+ os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'
+
+ if self._cert_validation_mode not in ['validate', 'ignore']:
+ self.fail('invalid cert_validation_mode: {0}'.format(self._cert_validation_mode))
+
+ # if cloud_environment specified, look up/build Cloud object
+ raw_cloud_env = self.credentials.get('cloud_environment')
+ if self.credentials.get('credentials') is not None and raw_cloud_env is not None:
+ self._cloud_environment = raw_cloud_env
+ elif not raw_cloud_env:
+ self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
+ else:
+ # try to look up "well-known" values via the name attribute on azure_cloud members
+ all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
+ matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
+ if len(matched_clouds) == 1:
+ self._cloud_environment = matched_clouds[0]
+ elif len(matched_clouds) > 1:
+ self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
+ else:
+ if not urlparse.urlparse(raw_cloud_env).scheme:
+ self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
+ try:
+ self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
+ except Exception as e:
+ self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message), exception=traceback.format_exc())
+
+ if self.credentials.get('subscription_id', None) is None and self.credentials.get('credentials') is None:
+ self.fail("Credentials did not include a subscription_id value.")
+ self.log("setting subscription_id")
+ self.subscription_id = self.credentials['subscription_id']
+
+ # get authentication authority
+ # for adfs, user could pass in authority or not.
+ # for others, use default authority from cloud environment
+ if self.credentials.get('adfs_authority_url') is None:
+ self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
+ else:
+ self._adfs_authority_url = self.credentials.get('adfs_authority_url')
+
+ # get resource from cloud environment
+ self._resource = self._cloud_environment.endpoints.active_directory_resource_id
+
+ if self.credentials.get('credentials') is not None:
+ # AzureCLI credentials
+ self.azure_credentials = self.credentials['credentials']
+ elif self.credentials.get('client_id') is not None and \
+ self.credentials.get('secret') is not None and \
+ self.credentials.get('tenant') is not None:
+ self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
+ secret=self.credentials['secret'],
+ tenant=self.credentials['tenant'],
+ cloud_environment=self._cloud_environment,
+ verify=self._cert_validation_mode == 'validate')
+
+ elif self.credentials.get('ad_user') is not None and \
+ self.credentials.get('password') is not None and \
+ self.credentials.get('client_id') is not None and \
+ self.credentials.get('tenant') is not None:
+
+ self.azure_credentials = self.acquire_token_with_username_password(
+ self._adfs_authority_url,
+ self._resource,
+ self.credentials['ad_user'],
+ self.credentials['password'],
+ self.credentials['client_id'],
+ self.credentials['tenant'])
+
+ elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
+ tenant = self.credentials.get('tenant')
+ if not tenant:
+ tenant = 'common' # SDK default
+
+ self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
+ self.credentials['password'],
+ tenant=tenant,
+ cloud_environment=self._cloud_environment,
+ verify=self._cert_validation_mode == 'validate')
+ else:
+ self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
+ "Credentials must include client_id, secret and tenant or ad_user and password, or "
+ "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
+ "be logged in using AzureCLI.")
+
+ def fail(self, msg, exception=None, **kwargs):
+ self._fail_impl(msg)
+
+ def _default_fail_impl(self, msg, exception=None, **kwargs):
+ raise AzureRMAuthException(msg)
+
+ def _get_profile(self, profile="default"):
+ path = expanduser("~/.azure/credentials")
+ try:
+ config = configparser.ConfigParser()
+ config.read(path)
+ except Exception as exc:
+ self.fail("Failed to access {0}. Check that the file exists and you have read "
+ "access. {1}".format(path, str(exc)))
+ credentials = dict()
+ for key in AZURE_CREDENTIAL_ENV_MAPPING:
+ try:
+ credentials[key] = config.get(profile, key, raw=True)
+ except Exception:
+ pass
+
+ if credentials.get('subscription_id'):
+ return credentials
+
+ return None
+
+ def _get_msi_credentials(self, subscription_id_param=None, **kwargs):
+ client_id = kwargs.get('client_id', None)
+ credentials = MSIAuthentication(client_id=client_id)
+ subscription_id = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)
+ if not subscription_id:
+ try:
+ # use the first subscription of the MSI
+ subscription_client = SubscriptionClient(credentials)
+ subscription = next(subscription_client.subscriptions.list())
+ subscription_id = str(subscription.subscription_id)
+ except Exception as exc:
+ self.fail("Failed to get MSI token: {0}. "
+ "Please check whether your machine enabled MSI or grant access to any subscription.".format(str(exc)))
+ return {
+ 'credentials': credentials,
+ 'subscription_id': subscription_id
+ }
+
+ def _get_azure_cli_credentials(self):
+ credentials, subscription_id = get_azure_cli_credentials()
+ cloud_environment = get_cli_active_cloud()
+
+ cli_credentials = {
+ 'credentials': credentials,
+ 'subscription_id': subscription_id,
+ 'cloud_environment': cloud_environment
+ }
+ return cli_credentials
+
+ def _get_env_credentials(self):
+ env_credentials = dict()
+ for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
+ env_credentials[attribute] = os.environ.get(env_variable, None)
+
+ if env_credentials['profile']:
+ credentials = self._get_profile(env_credentials['profile'])
+ return credentials
+
+ if env_credentials.get('subscription_id') is not None:
+ return env_credentials
+
+ return None
+
+ # TODO: use explicit kwargs instead of intermediate dict
+ def _get_credentials(self, params):
+ # Get authentication credentials.
+ self.log('Getting credentials')
+
+ arg_credentials = dict()
+ for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
+ arg_credentials[attribute] = params.get(attribute, None)
+
+ auth_source = params.get('auth_source', None)
+ if not auth_source:
+ auth_source = os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', 'auto')
+
+ if auth_source == 'msi':
+ self.log('Retrieving credentials from MSI')
+ return self._get_msi_credentials(arg_credentials['subscription_id'], client_id=params.get('client_id', None))
+
+ if auth_source == 'cli':
+ if not HAS_AZURE_CLI_CORE:
+ self.fail(msg=missing_required_lib('azure-cli', reason='for `cli` auth_source'),
+ exception=HAS_AZURE_CLI_CORE_EXC)
+ try:
+ self.log('Retrieving credentials from Azure CLI profile')
+ cli_credentials = self._get_azure_cli_credentials()
+ return cli_credentials
+ except CLIError as err:
+ self.fail("Azure CLI profile cannot be loaded - {0}".format(err))
+
+ if auth_source == 'env':
+ self.log('Retrieving credentials from environment')
+ env_credentials = self._get_env_credentials()
+ return env_credentials
+
+ if auth_source == 'credential_file':
+ self.log("Retrieving credentials from credential file")
+ profile = params.get('profile') or 'default'
+ default_credentials = self._get_profile(profile)
+ return default_credentials
+
+ # auto, precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials
+ # try module params
+ if arg_credentials['profile'] is not None:
+ self.log('Retrieving credentials with profile parameter.')
+ credentials = self._get_profile(arg_credentials['profile'])
+ return credentials
+
+ if arg_credentials['subscription_id']:
+ self.log('Received credentials from parameters.')
+ return arg_credentials
+
+ # try environment
+ env_credentials = self._get_env_credentials()
+ if env_credentials:
+ self.log('Received credentials from env.')
+ return env_credentials
+
+ # try default profile from ~/.azure/credentials
+ default_credentials = self._get_profile()
+ if default_credentials:
+ self.log('Retrieved default profile credentials from ~/.azure/credentials.')
+ return default_credentials
+
+ try:
+ if HAS_AZURE_CLI_CORE:
+ self.log('Retrieving credentials from AzureCLI profile')
+ cli_credentials = self._get_azure_cli_credentials()
+ return cli_credentials
+ except CLIError as ce:
+ self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
+
+ return None
+
+ def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
+ authority_uri = authority
+
+ if tenant is not None:
+ authority_uri = authority + '/' + tenant
+
+ context = AuthenticationContext(authority_uri)
+ token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
+
+ return AADTokenCredentials(token_response)
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # Use only during module development
+ # if self.debug:
+ # log_file = open('azure_rm.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, indent=4, sort_keys=True))
+ # else:
+ # log_file.write(msg + u'\n')
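For reference, the fallback chain in _get_credentials() above tries module parameters first, then environment variables, then the default profile in ~/.azure/credentials, and finally the Azure CLI. The sketch below only illustrates that precedence; resolve_auth_source() and the AZURE_PROFILE / AZURE_SUBSCRIPTION_ID variable names are assumptions for the example, not part of azure_rm_common.py.

import os


def resolve_auth_source(params):
    """Illustrative only: report which credential source the 'auto' logic would use first."""
    source = params.get('auth_source') or os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', 'auto')
    if source != 'auto':
        return source
    if params.get('profile') or params.get('subscription_id'):
        return 'module parameters'
    # Assumed environment variable names for this sketch.
    if os.environ.get('AZURE_PROFILE') or os.environ.get('AZURE_SUBSCRIPTION_ID'):
        return 'environment'
    return 'credential file, then Azure CLI'


if __name__ == '__main__':
    print(resolve_auth_source({'profile': 'dev'}))      # -> module parameters
    print(resolve_auth_source({'auth_source': 'msi'}))  # -> msi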
diff --git a/test/support/integration/plugins/module_utils/azure_rm_common_rest.py b/test/support/integration/plugins/module_utils/azure_rm_common_rest.py
new file mode 100644
index 0000000000..4fd7eaa3b4
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/azure_rm_common_rest.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_configuration import AzureConfiguration
+ from msrest.service_client import ServiceClient
+ from msrest.pipeline import ClientRawResponse
+ from msrest.polling import LROPoller
+ from msrestazure.polling.arm_polling import ARMPolling
+ import uuid
+ import json
+except ImportError:
+ # This is handled in azure_rm_common
+ AzureConfiguration = object
+
+ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
+
+
+class GenericRestClientConfiguration(AzureConfiguration):
+
+ def __init__(self, credentials, subscription_id, base_url=None):
+
+ if credentials is None:
+ raise ValueError("Parameter 'credentials' must not be None.")
+ if subscription_id is None:
+ raise ValueError("Parameter 'subscription_id' must not be None.")
+ if not base_url:
+ base_url = 'https://management.azure.com'
+
+ super(GenericRestClientConfiguration, self).__init__(base_url)
+
+ self.add_user_agent(ANSIBLE_USER_AGENT)
+
+ self.credentials = credentials
+ self.subscription_id = subscription_id
+
+
+class GenericRestClient(object):
+
+ def __init__(self, credentials, subscription_id, base_url=None):
+ self.config = GenericRestClientConfiguration(credentials, subscription_id, base_url)
+ self._client = ServiceClient(self.config.credentials, self.config)
+ self.models = None
+
+ def query(self, url, method, query_parameters, header_parameters, body, expected_status_codes, polling_timeout, polling_interval):
+ # Construct and send request
+ operation_config = {}
+
+ request = None
+
+ if header_parameters is None:
+ header_parameters = {}
+
+ header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
+
+ if method == 'GET':
+ request = self._client.get(url, query_parameters)
+ elif method == 'PUT':
+ request = self._client.put(url, query_parameters)
+ elif method == 'POST':
+ request = self._client.post(url, query_parameters)
+ elif method == 'HEAD':
+ request = self._client.head(url, query_parameters)
+ elif method == 'PATCH':
+ request = self._client.patch(url, query_parameters)
+ elif method == 'DELETE':
+ request = self._client.delete(url, query_parameters)
+ elif method == 'MERGE':
+ request = self._client.merge(url, query_parameters)
+
+ response = self._client.send(request, header_parameters, body, **operation_config)
+
+ if response.status_code not in expected_status_codes:
+ exp = CloudError(response)
+ exp.request_id = response.headers.get('x-ms-request-id')
+ raise exp
+ elif response.status_code == 202 and polling_timeout > 0:
+ def get_long_running_output(response):
+ return response
+ poller = LROPoller(self._client,
+ ClientRawResponse(None, response),
+ get_long_running_output,
+ ARMPolling(polling_interval, **operation_config))
+ response = self.get_poller_result(poller, polling_timeout)
+
+ return response
+
+ def get_poller_result(self, poller, timeout):
+ try:
+ poller.wait(timeout=timeout)
+ return poller.result()
+ except Exception as exc:
+ raise
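GenericRestClient.query() above is essentially a verb dispatcher: it picks the matching ServiceClient call for the HTTP method, sends the request, checks the status code against expected_status_codes, and polls via LROPoller when a 202 is returned. The standalone sketch below mimics only the dispatch step with a dummy client so it runs without Azure credentials; DummyClient and build_request are hypothetical names, not part of this module.

class DummyClient:
    # Stand-in for msrest's ServiceClient: each verb just returns the prepared request.
    def get(self, url, query):
        return ('GET', url, query)

    def delete(self, url, query):
        return ('DELETE', url, query)


def build_request(client, method, url, query_parameters):
    # Mirrors the if/elif verb dispatch in GenericRestClient.query above.
    dispatch = {'GET': client.get, 'DELETE': client.delete}
    if method not in dispatch:
        raise ValueError('unsupported method: %s' % method)
    return dispatch[method](url, query_parameters)


if __name__ == '__main__':
    print(build_request(DummyClient(), 'GET', '/subscriptions/xxx/resourceGroups', {'api-version': '2019-05-10'}))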
diff --git a/test/support/integration/plugins/module_utils/cloud.py b/test/support/integration/plugins/module_utils/cloud.py
new file mode 100644
index 0000000000..0d29071fe1
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/cloud.py
@@ -0,0 +1,217 @@
+#
+# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+"""
+This module adds shared support for generic cloud modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+from ansible.module_utils.cloud import CloudRetry
+
+The 'cloud' module provides the following common classes:
+
+ * CloudRetry
+ - The base class to be used by other cloud providers, in order to
+ provide a backoff/retry decorator based on status codes.
+
+ - Example using the AWSRetry class which inherits from CloudRetry.
+
+ @AWSRetry.exponential_backoff(retries=10, delay=3)
+ get_ec2_security_group_ids_from_names()
+
+ @AWSRetry.jittered_backoff()
+ get_ec2_security_group_ids_from_names()
+
+"""
+import random
+from functools import wraps
+import syslog
+import time
+
+
+def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
+ """ Customizable exponential backoff strategy.
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Initial (base) delay.
+ backoff (float): base of the exponent to use for exponential
+ backoff.
+ max_delay (int): Optional. If provided each delay generated is capped
+ at this amount. Defaults to 60 seconds.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for an exponential backoff strategy.
+ Usage:
+ >>> backoff = _exponential_backoff()
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ sleep = delay * backoff ** retry
+ yield sleep if max_delay is None else min(sleep, max_delay)
+ return backoff_gen
+
+
+def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
+ """ Implements the "Full Jitter" backoff strategy described here
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Approximate number of seconds to sleep for the first
+ retry.
+ max_delay (int): The maximum number of seconds to sleep for any retry.
+ _random (random.Random or None): Makes this generator testable by
+            allowing developers to explicitly pass in a seeded Random.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for a full jitter backoff strategy.
+ Usage:
+ >>> backoff = _full_jitter_backoff(retries=5)
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [3, 6, 5, 23, 38]
+ >>> list(backoff())
+ [2, 1, 6, 6, 31]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ yield _random.randint(0, min(max_delay, delay * 2 ** retry))
+ return backoff_gen
+
+
+class CloudRetry(object):
+ """ CloudRetry can be used by any cloud provider, in order to implement a
+ backoff algorithm/retry effect based on Status Code from Exceptions.
+ """
+ # This is the base class of the exception.
+ # AWS Example botocore.exceptions.ClientError
+ base_class = None
+
+ @staticmethod
+ def status_code_from_exception(error):
+ """ Return the status code from the exception object
+ Args:
+ error (object): The exception itself.
+ """
+ pass
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ """ Return True if the Response Code to retry on was found.
+ Args:
+ response_code (str): This is the Response Code that is being matched against.
+ """
+ pass
+
+ @classmethod
+ def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
+ """ Retry calling the Cloud decorated function using the provided
+ backoff strategy.
+ Args:
+ backoff_strategy (callable): Callable that returns a generator. The
+ generator should yield sleep times for each retry of the decorated
+ function.
+ """
+ def deco(f):
+ @wraps(f)
+ def retry_func(*args, **kwargs):
+ for delay in backoff_strategy():
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ if isinstance(e, cls.base_class):
+ response_code = cls.status_code_from_exception(e)
+ if cls.found(response_code, catch_extra_error_codes):
+ msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
+ syslog.syslog(syslog.LOG_INFO, msg)
+ time.sleep(delay)
+ else:
+ # Return original exception if exception is not a ClientError
+ raise e
+ else:
+ # Return original exception if exception is not a ClientError
+ raise e
+ return f(*args, **kwargs)
+
+ return retry_func # true decorator
+
+ return deco
+
+ @classmethod
+ def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+                default=2
+ max_delay (int or None): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_exponential_backoff(
+ retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using a jittered backoff
+ strategy. More on this strategy here:
+
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int): Initial delay between retries in seconds
+ default=3
+ max_delay (int): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_full_jitter_backoff(
+ retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Compatibility for the original implementation of CloudRetry.backoff that
+ did not provide configurable backoff strategies. Developers should use
+ CloudRetry.exponential_backoff instead.
+
+ Kwargs:
+ tries (int): Number of times to try (not retry) before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=1.1
+ """
+ return cls.exponential_backoff(
+ retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
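A minimal sketch of how a provider would subclass CloudRetry, assuming this file is importable as ansible.module_utils.cloud (the path its docstring shows). TransientError, DemoRetry and flaky_service are made-up names for illustration; only the 'Throttled' code is treated as retryable here.

from ansible.module_utils.cloud import CloudRetry


class TransientError(Exception):
    def __init__(self, code):
        super(TransientError, self).__init__(code)
        self.code = code


class DemoRetry(CloudRetry):
    base_class = TransientError

    @staticmethod
    def status_code_from_exception(error):
        return error.code

    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        return response_code == 'Throttled'


CALLS = {'count': 0}


@DemoRetry.exponential_backoff(retries=3, delay=0.01, max_delay=0.05)
def flaky_service():
    # Fails twice with a retryable code, then succeeds on the third attempt.
    CALLS['count'] += 1
    if CALLS['count'] < 3:
        raise TransientError('Throttled')
    return 'ok'


if __name__ == '__main__':
    print(flaky_service())  # -> ok (after two retries)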
diff --git a/test/support/integration/plugins/module_utils/cloudstack.py b/test/support/integration/plugins/module_utils/cloudstack.py
new file mode 100644
index 0000000000..85a53b6b6e
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/cloudstack.py
@@ -0,0 +1,664 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015, René Moser <mail@renemoser.net>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import os
+import sys
+import time
+import traceback
+
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.basic import missing_required_lib
+
+CS_IMP_ERR = None
+try:
+ from cs import CloudStack, CloudStackException, read_config
+ HAS_LIB_CS = True
+except ImportError:
+ CS_IMP_ERR = traceback.format_exc()
+ HAS_LIB_CS = False
+
+
+if sys.version_info > (3,):
+ long = int
+
+
+def cs_argument_spec():
+ return dict(
+ api_key=dict(default=os.environ.get('CLOUDSTACK_KEY')),
+ api_secret=dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True),
+ api_url=dict(default=os.environ.get('CLOUDSTACK_ENDPOINT')),
+ api_http_method=dict(choices=['get', 'post'], default=os.environ.get('CLOUDSTACK_METHOD')),
+ api_timeout=dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT')),
+ api_region=dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack'),
+ )
+
+
+def cs_required_together():
+ return [['api_key', 'api_secret']]
+
+
+class AnsibleCloudStack:
+
+ def __init__(self, module):
+ if not HAS_LIB_CS:
+ module.fail_json(msg=missing_required_lib('cs'), exception=CS_IMP_ERR)
+
+ self.result = {
+ 'changed': False,
+ 'diff': {
+ 'before': dict(),
+ 'after': dict()
+ }
+ }
+
+ # Common returns, will be merged with self.returns
+ # search_for_key: replace_with_key
+ self.common_returns = {
+ 'id': 'id',
+ 'name': 'name',
+ 'created': 'created',
+ 'zonename': 'zone',
+ 'state': 'state',
+ 'project': 'project',
+ 'account': 'account',
+ 'domain': 'domain',
+ 'displaytext': 'display_text',
+ 'displayname': 'display_name',
+ 'description': 'description',
+ }
+
+ # Init returns dict for use in subclasses
+ self.returns = {}
+        # these values will be cast to int
+        self.returns_to_int = {}
+        # these keys will be compared case-sensitively in self.has_changed()
+ self.case_sensitive_keys = [
+ 'id',
+ 'displaytext',
+ 'displayname',
+ 'description',
+ ]
+
+ self.module = module
+ self._cs = None
+
+ # Helper for VPCs
+ self._vpc_networks_ids = None
+
+ self.domain = None
+ self.account = None
+ self.project = None
+ self.ip_address = None
+ self.network = None
+ self.physical_network = None
+ self.vpc = None
+ self.zone = None
+ self.vm = None
+ self.vm_default_nic = None
+ self.os_type = None
+ self.hypervisor = None
+ self.capabilities = None
+ self.network_acl = None
+
+ @property
+ def cs(self):
+ if self._cs is None:
+ api_config = self.get_api_config()
+ self._cs = CloudStack(**api_config)
+ return self._cs
+
+ def get_api_config(self):
+ api_region = self.module.params.get('api_region') or os.environ.get('CLOUDSTACK_REGION')
+ try:
+ config = read_config(api_region)
+ except KeyError:
+ config = {}
+
+ api_config = {
+ 'endpoint': self.module.params.get('api_url') or config.get('endpoint'),
+ 'key': self.module.params.get('api_key') or config.get('key'),
+ 'secret': self.module.params.get('api_secret') or config.get('secret'),
+ 'timeout': self.module.params.get('api_timeout') or config.get('timeout') or 10,
+ 'method': self.module.params.get('api_http_method') or config.get('method') or 'get',
+ }
+ self.result.update({
+ 'api_region': api_region,
+ 'api_url': api_config['endpoint'],
+ 'api_key': api_config['key'],
+ 'api_timeout': int(api_config['timeout']),
+ 'api_http_method': api_config['method'],
+ })
+ if not all([api_config['endpoint'], api_config['key'], api_config['secret']]):
+            self.fail_json(msg="Missing API credentials: cannot authenticate")
+ return api_config
+
+ def fail_json(self, **kwargs):
+ self.result.update(kwargs)
+ self.module.fail_json(**self.result)
+
+ def get_or_fallback(self, key=None, fallback_key=None):
+ value = self.module.params.get(key)
+ if not value:
+ value = self.module.params.get(fallback_key)
+ return value
+
+ def has_changed(self, want_dict, current_dict, only_keys=None, skip_diff_for_keys=None):
+ result = False
+ for key, value in want_dict.items():
+
+ # Optionally limit by a list of keys
+ if only_keys and key not in only_keys:
+ continue
+
+ # Skip None values
+ if value is None:
+ continue
+
+ if key in current_dict:
+ if isinstance(value, (int, float, long, complex)):
+
+ # ensure we compare the same type
+ if isinstance(value, int):
+ current_dict[key] = int(current_dict[key])
+ elif isinstance(value, float):
+ current_dict[key] = float(current_dict[key])
+ elif isinstance(value, long):
+ current_dict[key] = long(current_dict[key])
+ elif isinstance(value, complex):
+ current_dict[key] = complex(current_dict[key])
+
+ if value != current_dict[key]:
+ if skip_diff_for_keys and key not in skip_diff_for_keys:
+ self.result['diff']['before'][key] = current_dict[key]
+ self.result['diff']['after'][key] = value
+ result = True
+ else:
+ before_value = to_text(current_dict[key])
+ after_value = to_text(value)
+
+ if self.case_sensitive_keys and key in self.case_sensitive_keys:
+ if before_value != after_value:
+ if skip_diff_for_keys and key not in skip_diff_for_keys:
+ self.result['diff']['before'][key] = before_value
+ self.result['diff']['after'][key] = after_value
+ result = True
+
+ # Test for diff in case insensitive way
+ elif before_value.lower() != after_value.lower():
+ if skip_diff_for_keys and key not in skip_diff_for_keys:
+ self.result['diff']['before'][key] = before_value
+ self.result['diff']['after'][key] = after_value
+ result = True
+ else:
+ if skip_diff_for_keys and key not in skip_diff_for_keys:
+ self.result['diff']['before'][key] = None
+ self.result['diff']['after'][key] = to_text(value)
+ result = True
+ return result
+
+ def _get_by_key(self, key=None, my_dict=None):
+ if my_dict is None:
+ my_dict = {}
+ if key:
+ if key in my_dict:
+ return my_dict[key]
+ self.fail_json(msg="Something went wrong: %s not found" % key)
+ return my_dict
+
+ def query_api(self, command, **args):
+ try:
+ res = getattr(self.cs, command)(**args)
+
+ if 'errortext' in res:
+ self.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ except CloudStackException as e:
+ self.fail_json(msg='CloudStackException: %s' % to_native(e))
+
+ except Exception as e:
+ self.fail_json(msg=to_native(e))
+
+ return res
+
+ def get_network_acl(self, key=None):
+ if self.network_acl is None:
+ args = {
+ 'name': self.module.params.get('network_acl'),
+ 'vpcid': self.get_vpc(key='id'),
+ }
+ network_acls = self.query_api('listNetworkACLLists', **args)
+ if network_acls:
+ self.network_acl = network_acls['networkacllist'][0]
+ self.result['network_acl'] = self.network_acl['name']
+ if self.network_acl:
+ return self._get_by_key(key, self.network_acl)
+ else:
+ self.fail_json(msg="Network ACL %s not found" % self.module.params.get('network_acl'))
+
+ def get_vpc(self, key=None):
+        """Return a VPC dictionary or the value of the given key."""
+ if self.vpc:
+ return self._get_by_key(key, self.vpc)
+
+ vpc = self.module.params.get('vpc')
+ if not vpc:
+ vpc = os.environ.get('CLOUDSTACK_VPC')
+ if not vpc:
+ return None
+
+ args = {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'zoneid': self.get_zone(key='id'),
+ }
+ vpcs = self.query_api('listVPCs', **args)
+ if not vpcs:
+ self.fail_json(msg="No VPCs available.")
+
+ for v in vpcs['vpc']:
+ if vpc in [v['name'], v['displaytext'], v['id']]:
+                # Fail if the identifier matches more than one VPC
+ if self.vpc:
+                    self.fail_json(msg="More than one VPC found with the provided identifier '%s'" % vpc)
+ else:
+ self.vpc = v
+ self.result['vpc'] = v['name']
+ if self.vpc:
+ return self._get_by_key(key, self.vpc)
+ self.fail_json(msg="VPC '%s' not found" % vpc)
+
+ def is_vpc_network(self, network_id):
+ """Returns True if network is in VPC."""
+ # This is an efficient way to query a lot of networks at a time
+ if self._vpc_networks_ids is None:
+ args = {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'zoneid': self.get_zone(key='id'),
+ }
+ vpcs = self.query_api('listVPCs', **args)
+ self._vpc_networks_ids = []
+ if vpcs:
+ for vpc in vpcs['vpc']:
+ for n in vpc.get('network', []):
+ self._vpc_networks_ids.append(n['id'])
+ return network_id in self._vpc_networks_ids
+
+ def get_physical_network(self, key=None):
+ if self.physical_network:
+ return self._get_by_key(key, self.physical_network)
+ physical_network = self.module.params.get('physical_network')
+ args = {
+ 'zoneid': self.get_zone(key='id')
+ }
+ physical_networks = self.query_api('listPhysicalNetworks', **args)
+ if not physical_networks:
+ self.fail_json(msg="No physical networks available.")
+
+ for net in physical_networks['physicalnetwork']:
+ if physical_network in [net['name'], net['id']]:
+ self.physical_network = net
+ self.result['physical_network'] = net['name']
+ return self._get_by_key(key, self.physical_network)
+ self.fail_json(msg="Physical Network '%s' not found" % physical_network)
+
+ def get_network(self, key=None):
+        """Return a network dictionary or the value of the given key."""
+ if self.network:
+ return self._get_by_key(key, self.network)
+
+ network = self.module.params.get('network')
+ if not network:
+ vpc_name = self.get_vpc(key='name')
+ if vpc_name:
+                self.fail_json(msg="Could not find network for VPC '%s' due to missing argument: network" % vpc_name)
+ return None
+
+ args = {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'zoneid': self.get_zone(key='id'),
+ 'vpcid': self.get_vpc(key='id')
+ }
+ networks = self.query_api('listNetworks', **args)
+ if not networks:
+ self.fail_json(msg="No networks available.")
+
+ for n in networks['network']:
+ # ignore any VPC network if vpc param is not given
+ if 'vpcid' in n and not self.get_vpc(key='id'):
+ continue
+ if network in [n['displaytext'], n['name'], n['id']]:
+ self.result['network'] = n['name']
+ self.network = n
+ return self._get_by_key(key, self.network)
+ self.fail_json(msg="Network '%s' not found" % network)
+
+ def get_project(self, key=None):
+ if self.project:
+ return self._get_by_key(key, self.project)
+
+ project = self.module.params.get('project')
+ if not project:
+ project = os.environ.get('CLOUDSTACK_PROJECT')
+ if not project:
+ return None
+ args = {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id')
+ }
+ projects = self.query_api('listProjects', **args)
+ if projects:
+ for p in projects['project']:
+ if project.lower() in [p['name'].lower(), p['id']]:
+ self.result['project'] = p['name']
+ self.project = p
+ return self._get_by_key(key, self.project)
+ self.fail_json(msg="project '%s' not found" % project)
+
+ def get_ip_address(self, key=None):
+ if self.ip_address:
+ return self._get_by_key(key, self.ip_address)
+
+ ip_address = self.module.params.get('ip_address')
+ if not ip_address:
+ self.fail_json(msg="IP address param 'ip_address' is required")
+
+ args = {
+ 'ipaddress': ip_address,
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'vpcid': self.get_vpc(key='id'),
+ }
+
+ ip_addresses = self.query_api('listPublicIpAddresses', **args)
+
+ if not ip_addresses:
+ self.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
+
+ self.ip_address = ip_addresses['publicipaddress'][0]
+ return self._get_by_key(key, self.ip_address)
+
+ def get_vm_guest_ip(self):
+ vm_guest_ip = self.module.params.get('vm_guest_ip')
+ default_nic = self.get_vm_default_nic()
+
+ if not vm_guest_ip:
+ return default_nic['ipaddress']
+
+ for secondary_ip in default_nic['secondaryip']:
+ if vm_guest_ip == secondary_ip['ipaddress']:
+ return vm_guest_ip
+ self.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
+
+ def get_vm_default_nic(self):
+ if self.vm_default_nic:
+ return self.vm_default_nic
+
+ nics = self.query_api('listNics', virtualmachineid=self.get_vm(key='id'))
+ if nics:
+ for n in nics['nic']:
+ if n['isdefault']:
+ self.vm_default_nic = n
+ return self.vm_default_nic
+ self.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
+
+ def get_vm(self, key=None, filter_zone=True):
+ if self.vm:
+ return self._get_by_key(key, self.vm)
+
+ vm = self.module.params.get('vm')
+ if not vm:
+ self.fail_json(msg="Virtual machine param 'vm' is required")
+
+ args = {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'zoneid': self.get_zone(key='id') if filter_zone else None,
+ 'fetch_list': True,
+ }
+ vms = self.query_api('listVirtualMachines', **args)
+ if vms:
+ for v in vms:
+ if vm.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]:
+ self.vm = v
+ return self._get_by_key(key, self.vm)
+ self.fail_json(msg="Virtual machine '%s' not found" % vm)
+
+ def get_disk_offering(self, key=None):
+ disk_offering = self.module.params.get('disk_offering')
+ if not disk_offering:
+ return None
+
+ # Do not add domain filter for disk offering listing.
+ disk_offerings = self.query_api('listDiskOfferings')
+ if disk_offerings:
+ for d in disk_offerings['diskoffering']:
+ if disk_offering in [d['displaytext'], d['name'], d['id']]:
+ return self._get_by_key(key, d)
+ self.fail_json(msg="Disk offering '%s' not found" % disk_offering)
+
+ def get_zone(self, key=None):
+ if self.zone:
+ return self._get_by_key(key, self.zone)
+
+ zone = self.module.params.get('zone')
+ if not zone:
+ zone = os.environ.get('CLOUDSTACK_ZONE')
+ zones = self.query_api('listZones')
+
+ if not zones:
+ self.fail_json(msg="No zones available. Please create a zone first")
+
+ # use the first zone if no zone param given
+ if not zone:
+ self.zone = zones['zone'][0]
+ self.result['zone'] = self.zone['name']
+ return self._get_by_key(key, self.zone)
+
+ if zones:
+ for z in zones['zone']:
+ if zone.lower() in [z['name'].lower(), z['id']]:
+ self.result['zone'] = z['name']
+ self.zone = z
+ return self._get_by_key(key, self.zone)
+ self.fail_json(msg="zone '%s' not found" % zone)
+
+ def get_os_type(self, key=None):
+ if self.os_type:
+            return self._get_by_key(key, self.os_type)
+
+ os_type = self.module.params.get('os_type')
+ if not os_type:
+ return None
+
+ os_types = self.query_api('listOsTypes')
+ if os_types:
+ for o in os_types['ostype']:
+ if os_type in [o['description'], o['id']]:
+ self.os_type = o
+ return self._get_by_key(key, self.os_type)
+ self.fail_json(msg="OS type '%s' not found" % os_type)
+
+ def get_hypervisor(self):
+ if self.hypervisor:
+ return self.hypervisor
+
+ hypervisor = self.module.params.get('hypervisor')
+ hypervisors = self.query_api('listHypervisors')
+
+ # use the first hypervisor if no hypervisor param given
+ if not hypervisor:
+ self.hypervisor = hypervisors['hypervisor'][0]['name']
+ return self.hypervisor
+
+ for h in hypervisors['hypervisor']:
+ if hypervisor.lower() == h['name'].lower():
+ self.hypervisor = h['name']
+ return self.hypervisor
+ self.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
+
+ def get_account(self, key=None):
+ if self.account:
+ return self._get_by_key(key, self.account)
+
+ account = self.module.params.get('account')
+ if not account:
+ account = os.environ.get('CLOUDSTACK_ACCOUNT')
+ if not account:
+ return None
+
+ domain = self.module.params.get('domain')
+ if not domain:
+ self.fail_json(msg="Account must be specified with Domain")
+
+ args = {
+ 'name': account,
+ 'domainid': self.get_domain(key='id'),
+ 'listall': True
+ }
+ accounts = self.query_api('listAccounts', **args)
+ if accounts:
+ self.account = accounts['account'][0]
+ self.result['account'] = self.account['name']
+ return self._get_by_key(key, self.account)
+ self.fail_json(msg="Account '%s' not found" % account)
+
+ def get_domain(self, key=None):
+ if self.domain:
+ return self._get_by_key(key, self.domain)
+
+ domain = self.module.params.get('domain')
+ if not domain:
+ domain = os.environ.get('CLOUDSTACK_DOMAIN')
+ if not domain:
+ return None
+
+ args = {
+ 'listall': True,
+ }
+ domains = self.query_api('listDomains', **args)
+ if domains:
+ for d in domains['domain']:
+ if d['path'].lower() in [domain.lower(), "root/" + domain.lower(), "root" + domain.lower()]:
+ self.domain = d
+ self.result['domain'] = d['path']
+ return self._get_by_key(key, self.domain)
+ self.fail_json(msg="Domain '%s' not found" % domain)
+
+ def query_tags(self, resource, resource_type):
+ args = {
+ 'resourceid': resource['id'],
+ 'resourcetype': resource_type,
+ }
+ tags = self.query_api('listTags', **args)
+ return self.get_tags(resource=tags, key='tag')
+
+ def get_tags(self, resource=None, key='tags'):
+ existing_tags = []
+ for tag in resource.get(key) or []:
+ existing_tags.append({'key': tag['key'], 'value': tag['value']})
+ return existing_tags
+
+ def _process_tags(self, resource, resource_type, tags, operation="create"):
+ if tags:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ args = {
+ 'resourceids': resource['id'],
+ 'resourcetype': resource_type,
+ 'tags': tags,
+ }
+ if operation == "create":
+ response = self.query_api('createTags', **args)
+ else:
+ response = self.query_api('deleteTags', **args)
+ self.poll_job(response)
+
+ def _tags_that_should_exist_or_be_updated(self, resource, tags):
+ existing_tags = self.get_tags(resource)
+ return [tag for tag in tags if tag not in existing_tags]
+
+ def _tags_that_should_not_exist(self, resource, tags):
+ existing_tags = self.get_tags(resource)
+ return [tag for tag in existing_tags if tag not in tags]
+
+ def ensure_tags(self, resource, resource_type=None):
+ if not resource_type or not resource:
+ self.fail_json(msg="Error: Missing resource or resource_type for tags.")
+
+ if 'tags' in resource:
+ tags = self.module.params.get('tags')
+ if tags is not None:
+ self._process_tags(resource, resource_type, self._tags_that_should_not_exist(resource, tags), operation="delete")
+ self._process_tags(resource, resource_type, self._tags_that_should_exist_or_be_updated(resource, tags))
+ resource['tags'] = self.query_tags(resource=resource, resource_type=resource_type)
+ return resource
+
+ def get_capabilities(self, key=None):
+ if self.capabilities:
+ return self._get_by_key(key, self.capabilities)
+ capabilities = self.query_api('listCapabilities')
+ self.capabilities = capabilities['capability']
+ return self._get_by_key(key, self.capabilities)
+
+ def poll_job(self, job=None, key=None):
+ if 'jobid' in job:
+ while True:
+ res = self.query_api('queryAsyncJobResult', jobid=job['jobid'])
+ if res['jobstatus'] != 0 and 'jobresult' in res:
+
+ if 'errortext' in res['jobresult']:
+ self.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
+
+ if key and key in res['jobresult']:
+ job = res['jobresult'][key]
+
+ break
+ time.sleep(2)
+ return job
+
+ def update_result(self, resource, result=None):
+ if result is None:
+ result = dict()
+ if resource:
+ returns = self.common_returns.copy()
+ returns.update(self.returns)
+ for search_key, return_key in returns.items():
+ if search_key in resource:
+ result[return_key] = resource[search_key]
+
+ # Bad bad API does not always return int when it should.
+ for search_key, return_key in self.returns_to_int.items():
+ if search_key in resource:
+ result[return_key] = int(resource[search_key])
+
+ if 'tags' in resource:
+ result['tags'] = resource['tags']
+ return result
+
+ def get_result(self, resource):
+ return self.update_result(resource, self.result)
+
+ def get_result_and_facts(self, facts_name, resource):
+ result = self.get_result(resource)
+
+ ansible_facts = {
+ facts_name: result.copy()
+ }
+ for k in ['diff', 'changed']:
+ if k in ansible_facts[facts_name]:
+ del ansible_facts[facts_name][k]
+
+ result.update(ansible_facts=ansible_facts)
+ return result
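A typical CloudStack module extends the shared argument spec above before handing it to AnsibleModule. This is a hedged sketch: the name/state options are hypothetical, and ansible.module_utils.cloudstack is assumed to be the import path under which this support copy is exposed.

from ansible.module_utils.cloudstack import cs_argument_spec, cs_required_together


def build_argument_spec():
    argument_spec = cs_argument_spec()
    # Hypothetical module-specific options layered on top of the shared api_* ones.
    argument_spec.update(dict(
        name=dict(required=True),
        state=dict(choices=['present', 'absent'], default='present'),
    ))
    return argument_spec, cs_required_together()


if __name__ == '__main__':
    spec, together = build_argument_spec()
    print(sorted(spec))   # api_* options plus name/state
    print(together)       # [['api_key', 'api_secret']]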
diff --git a/test/support/integration/plugins/module_utils/common/network.py b/test/support/integration/plugins/module_utils/common/network.py
new file mode 100644
index 0000000000..cf79db511e
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/common/network.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2016 Red Hat Inc
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+# General networking tools that may be used by all modules
+
+import re
+from struct import pack
+from socket import inet_ntoa
+
+from ansible.module_utils.six.moves import zip
+
+
+VALID_MASKS = [2**8 - 2**i for i in range(0, 9)]
+
+
+def is_netmask(val):
+ parts = str(val).split('.')
+ if not len(parts) == 4:
+ return False
+ for part in parts:
+ try:
+ if int(part) not in VALID_MASKS:
+ raise ValueError
+ except ValueError:
+ return False
+ return True
+
+
+def is_masklen(val):
+ try:
+ return 0 <= int(val) <= 32
+ except ValueError:
+ return False
+
+
+def to_netmask(val):
+ """ converts a masklen to a netmask """
+ if not is_masklen(val):
+ raise ValueError('invalid value for masklen')
+
+ bits = 0
+ for i in range(32 - int(val), 32):
+ bits |= (1 << i)
+
+ return inet_ntoa(pack('>I', bits))
+
+
+def to_masklen(val):
+ """ converts a netmask to a masklen """
+ if not is_netmask(val):
+ raise ValueError('invalid value for netmask: %s' % val)
+
+ bits = list()
+ for x in val.split('.'):
+ octet = bin(int(x)).count('1')
+ bits.append(octet)
+
+ return sum(bits)
+
+
+def to_subnet(addr, mask, dotted_notation=False):
+    """ converts an addr / mask pair to a subnet in cidr notation """
+ try:
+ if not is_masklen(mask):
+ raise ValueError
+ cidr = int(mask)
+ mask = to_netmask(mask)
+ except ValueError:
+ cidr = to_masklen(mask)
+
+ addr = addr.split('.')
+ mask = mask.split('.')
+
+ network = list()
+ for s_addr, s_mask in zip(addr, mask):
+ network.append(str(int(s_addr) & int(s_mask)))
+
+ if dotted_notation:
+ return '%s %s' % ('.'.join(network), to_netmask(cidr))
+ return '%s/%s' % ('.'.join(network), cidr)
+
+
+def to_ipv6_subnet(addr):
+ """ IPv6 addresses are eight groupings. The first four groupings (64 bits) comprise the subnet address. """
+
+ # https://tools.ietf.org/rfc/rfc2374.txt
+
+ # Split by :: to identify omitted zeros
+ ipv6_prefix = addr.split('::')[0]
+
+ # Get the first four groups, or as many as are found + ::
+ found_groups = []
+ for group in ipv6_prefix.split(':'):
+ found_groups.append(group)
+ if len(found_groups) == 4:
+ break
+ if len(found_groups) < 4:
+ found_groups.append('::')
+
+ # Concatenate network address parts
+ network_addr = ''
+ for group in found_groups:
+ if group != '::':
+ network_addr += str(group)
+ network_addr += str(':')
+
+ # Ensure network address ends with ::
+ if not network_addr.endswith('::'):
+ network_addr += str(':')
+ return network_addr
+
+
+def to_ipv6_network(addr):
+ """ IPv6 addresses are eight groupings. The first three groupings (48 bits) comprise the network address. """
+
+ # Split by :: to identify omitted zeros
+ ipv6_prefix = addr.split('::')[0]
+
+ # Get the first three groups, or as many as are found + ::
+ found_groups = []
+ for group in ipv6_prefix.split(':'):
+ found_groups.append(group)
+ if len(found_groups) == 3:
+ break
+ if len(found_groups) < 3:
+ found_groups.append('::')
+
+ # Concatenate network address parts
+ network_addr = ''
+ for group in found_groups:
+ if group != '::':
+ network_addr += str(group)
+ network_addr += str(':')
+
+ # Ensure network address ends with ::
+ if not network_addr.endswith('::'):
+ network_addr += str(':')
+ return network_addr
+
+
+def to_bits(val):
+ """ converts a netmask to bits """
+ bits = ''
+ for octet in val.split('.'):
+ bits += bin(int(octet))[2:].zfill(8)
+    return bits
+
+
+def is_mac(mac_address):
+ """
+ Validate MAC address for given string
+ Args:
+ mac_address: string to validate as MAC address
+
+ Returns: (Boolean) True if string is valid MAC address, otherwise False
+ """
+ mac_addr_regex = re.compile('[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$')
+ return bool(mac_addr_regex.match(mac_address.lower()))
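The helpers above are pure functions, so a short self-check illustrates their behaviour. The import path is assumed to be ansible.module_utils.common.network; adjust it if this support copy is loaded from somewhere else.

from ansible.module_utils.common.network import (
    is_mac, is_netmask, to_masklen, to_netmask, to_subnet,
)

assert is_netmask('255.255.255.0')
assert to_masklen('255.255.255.0') == 24
assert to_netmask(8) == '255.0.0.0'
assert to_subnet('192.0.2.55', 24) == '192.0.2.0/24'
assert is_mac('aa:bb:cc:dd:ee:ff')
print('all network helper checks passed')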
diff --git a/test/support/integration/plugins/module_utils/compat/ipaddress.py b/test/support/integration/plugins/module_utils/compat/ipaddress.py
new file mode 100644
index 0000000000..c46ad72a09
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/compat/ipaddress.py
@@ -0,0 +1,2476 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file, and this file only, is based on
+# Lib/ipaddress.py of cpython
+# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
+# are retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+
+# Copyright 2007 Google Inc.
+# Licensed to PSF under a Contributor Agreement.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
+
+"""
+
+from __future__ import unicode_literals
+
+
+import itertools
+import struct
+
+
+# The following makes it easier for us to script updates of the bundled code and is not part of
+# upstream
+_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"}
+
+__version__ = '1.0.22'
+
+# Compatibility functions
+_compat_int_types = (int,)
+try:
+ _compat_int_types = (int, long)
+except NameError:
+ pass
+try:
+ _compat_str = unicode
+except NameError:
+ _compat_str = str
+ assert bytes != str
+if b'\0'[0] == 0: # Python 3 semantics
+ def _compat_bytes_to_byte_vals(byt):
+ return byt
+else:
+ def _compat_bytes_to_byte_vals(byt):
+ return [struct.unpack(b'!B', b)[0] for b in byt]
+try:
+ _compat_int_from_byte_vals = int.from_bytes
+except AttributeError:
+ def _compat_int_from_byte_vals(bytvals, endianess):
+ assert endianess == 'big'
+ res = 0
+ for bv in bytvals:
+ assert isinstance(bv, _compat_int_types)
+ res = (res << 8) + bv
+ return res
+
+
+def _compat_to_bytes(intval, length, endianess):
+ assert isinstance(intval, _compat_int_types)
+ assert endianess == 'big'
+ if length == 4:
+ if intval < 0 or intval >= 2 ** 32:
+ raise struct.error("integer out of range for 'I' format code")
+ return struct.pack(b'!I', intval)
+ elif length == 16:
+ if intval < 0 or intval >= 2 ** 128:
+ raise struct.error("integer out of range for 'QQ' format code")
+ return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
+ else:
+ raise NotImplementedError()
+
+
+if hasattr(int, 'bit_length'):
+    # Not int.bit_length, since that won't work in 2.7 where long exists
+ def _compat_bit_length(i):
+ return i.bit_length()
+else:
+ def _compat_bit_length(i):
+ for res in itertools.count():
+ if i >> res == 0:
+ return res
+
+
+def _compat_range(start, end, step=1):
+ assert step > 0
+ i = start
+ while i < end:
+ yield i
+ i += step
+
+
+class _TotalOrderingMixin(object):
+ __slots__ = ()
+
+ # Helper that derives the other comparison operations from
+ # __lt__ and __eq__
+ # We avoid functools.total_ordering because it doesn't handle
+ # NotImplemented correctly yet (http://bugs.python.org/issue10042)
+ def __eq__(self, other):
+ raise NotImplementedError
+
+ def __ne__(self, other):
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not equal
+
+ def __lt__(self, other):
+ raise NotImplementedError
+
+ def __le__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented or not less:
+ return self.__eq__(other)
+ return less
+
+ def __gt__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not (less or equal)
+
+ def __ge__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ return not less
+
+
+IPV4LENGTH = 32
+IPV6LENGTH = 128
+
+
+class AddressValueError(ValueError):
+ """A Value Error related to the address."""
+
+
+class NetmaskValueError(ValueError):
+ """A Value Error related to the netmask."""
+
+
+def ip_address(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Address or IPv6Address object.
+
+ Raises:
+ ValueError: if the *address* passed isn't either a v4 or a v6
+ address
+
+ """
+ try:
+ return IPv4Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ '%r does not appear to be an IPv4 or IPv6 address. '
+ 'Did you pass in a bytes (str in Python 2) instead of'
+ ' a unicode object?' % address)
+
+ raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
+ address)
+
+
+def ip_network(address, strict=True):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP network. Either IPv4 or
+ IPv6 networks may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Network or IPv6Network object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address. Or if the network has host bits set.
+
+ """
+ try:
+ return IPv4Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ '%r does not appear to be an IPv4 or IPv6 network. '
+ 'Did you pass in a bytes (str in Python 2) instead of'
+ ' a unicode object?' % address)
+
+ raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
+ address)
+
+
+def ip_interface(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Interface or IPv6Interface object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address.
+
+ Notes:
+ The IPv?Interface classes describe an Address on a particular
+ Network, so they're basically a combination of both the Address
+ and Network classes.
+
+ """
+ try:
+ return IPv4Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
+ address)
+
+
+def v4_int_to_packed(address):
+ """Represent an address as 4 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv4 IP address.
+
+ Returns:
+ The integer address packed as 4 bytes in network (big-endian) order.
+
+ Raises:
+ ValueError: If the integer is negative or too large to be an
+ IPv4 IP address.
+
+ """
+ try:
+ return _compat_to_bytes(address, 4, 'big')
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv4")
+
+
+def v6_int_to_packed(address):
+ """Represent an address as 16 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv6 IP address.
+
+ Returns:
+ The integer address packed as 16 bytes in network (big-endian) order.
+
+ """
+ try:
+ return _compat_to_bytes(address, 16, 'big')
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv6")
+
+
+def _split_optional_netmask(address):
+ """Helper to split the netmask and raise AddressValueError if needed"""
+ addr = _compat_str(address).split('/')
+ if len(addr) > 2:
+ raise AddressValueError("Only one '/' permitted in %r" % address)
+ return addr
+
+
+def _find_address_range(addresses):
+ """Find a sequence of sorted deduplicated IPv#Address.
+
+ Args:
+ addresses: a list of IPv#Address objects.
+
+ Yields:
+ A tuple containing the first and last IP addresses in the sequence.
+
+ """
+ it = iter(addresses)
+ first = last = next(it) # pylint: disable=stop-iteration-return
+ for ip in it:
+ if ip._ip != last._ip + 1:
+ yield first, last
+ first = ip
+ last = ip
+ yield first, last
+
+
+def _count_righthand_zero_bits(number, bits):
+ """Count the number of zero bits on the right hand side.
+
+ Args:
+ number: an integer.
+ bits: maximum number of bits to count.
+
+ Returns:
+ The number of zero bits on the right hand side of the number.
+
+ """
+ if number == 0:
+ return bits
+ return min(bits, _compat_bit_length(~number & (number - 1)))
+
+
+def summarize_address_range(first, last):
+ """Summarize a network range given the first and last IP addresses.
+
+ Example:
+ >>> list(summarize_address_range(IPv4Address('192.0.2.0'),
+ ... IPv4Address('192.0.2.130')))
+ ... #doctest: +NORMALIZE_WHITESPACE
+ [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
+ IPv4Network('192.0.2.130/32')]
+
+ Args:
+ first: the first IPv4Address or IPv6Address in the range.
+ last: the last IPv4Address or IPv6Address in the range.
+
+ Returns:
+ An iterator of the summarized IPv(4|6) network objects.
+
+ Raise:
+ TypeError:
+ If the first and last objects are not IP addresses.
+ If the first and last objects are not the same version.
+ ValueError:
+ If the last object is not greater than the first.
+ If the version of the first address is not 4 or 6.
+
+ """
+ if (not (isinstance(first, _BaseAddress) and
+ isinstance(last, _BaseAddress))):
+ raise TypeError('first and last must be IP addresses, not networks')
+ if first.version != last.version:
+ raise TypeError("%s and %s are not of the same version" % (
+ first, last))
+ if first > last:
+ raise ValueError('last IP address must be greater than first')
+
+ if first.version == 4:
+ ip = IPv4Network
+ elif first.version == 6:
+ ip = IPv6Network
+ else:
+ raise ValueError('unknown IP version')
+
+ ip_bits = first._max_prefixlen
+ first_int = first._ip
+ last_int = last._ip
+ while first_int <= last_int:
+ nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
+ _compat_bit_length(last_int - first_int + 1) - 1)
+ net = ip((first_int, ip_bits - nbits))
+ yield net
+ first_int += 1 << nbits
+ if first_int - 1 == ip._ALL_ONES:
+ break
+
+
+def _collapse_addresses_internal(addresses):
+ """Loops through the addresses, collapsing concurrent netblocks.
+
+ Example:
+
+ ip1 = IPv4Network('192.0.2.0/26')
+ ip2 = IPv4Network('192.0.2.64/26')
+ ip3 = IPv4Network('192.0.2.128/26')
+ ip4 = IPv4Network('192.0.2.192/26')
+
+ _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ This shouldn't be called directly; it is called via
+ collapse_addresses([]).
+
+ Args:
+ addresses: A list of IPv4Network's or IPv6Network's
+
+ Returns:
+ A list of IPv4Network's or IPv6Network's depending on what we were
+ passed.
+
+ """
+ # First merge
+ to_merge = list(addresses)
+ subnets = {}
+ while to_merge:
+ net = to_merge.pop()
+ supernet = net.supernet()
+ existing = subnets.get(supernet)
+ if existing is None:
+ subnets[supernet] = net
+ elif existing != net:
+ # Merge consecutive subnets
+ del subnets[supernet]
+ to_merge.append(supernet)
+ # Then iterate over resulting networks, skipping subsumed subnets
+ last = None
+ for net in sorted(subnets.values()):
+ if last is not None:
+ # Since they are sorted,
+ # last.network_address <= net.network_address is a given.
+ if last.broadcast_address >= net.broadcast_address:
+ continue
+ yield net
+ last = net
+
+
+def collapse_addresses(addresses):
+ """Collapse a list of IP objects.
+
+ Example:
+ collapse_addresses([IPv4Network('192.0.2.0/25'),
+ IPv4Network('192.0.2.128/25')]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ Args:
+ addresses: An iterator of IPv4Network or IPv6Network objects.
+
+ Returns:
+ An iterator of the collapsed IPv(4|6)Network objects.
+
+ Raises:
+ TypeError: If passed a list of mixed version objects.
+
+ """
+ addrs = []
+ ips = []
+ nets = []
+
+ # split IP addresses and networks
+ for ip in addresses:
+ if isinstance(ip, _BaseAddress):
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, ips[-1]))
+ ips.append(ip)
+ elif ip._prefixlen == ip._max_prefixlen:
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, ips[-1]))
+ try:
+ ips.append(ip.ip)
+ except AttributeError:
+ ips.append(ip.network_address)
+ else:
+ if nets and nets[-1]._version != ip._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ ip, nets[-1]))
+ nets.append(ip)
+
+ # sort and dedup
+ ips = sorted(set(ips))
+
+ # find consecutive address ranges in the sorted sequence and summarize them
+ if ips:
+ for first, last in _find_address_range(ips):
+ addrs.extend(summarize_address_range(first, last))
+
+ return _collapse_addresses_internal(addrs + nets)
+
+
+def get_mixed_type_key(obj):
+ """Return a key suitable for sorting between networks and addresses.
+
+ Address and Network objects are not sortable by default; they're
+ fundamentally different so the expression
+
+ IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
+
+ doesn't make any sense. There are some times however, where you may wish
+ to have ipaddress sort these for you anyway. If you need to do this, you
+ can use this function as the key= argument to sorted().
+
+ Args:
+ obj: either a Network or Address object.
+ Returns:
+ appropriate key.
+
+ """
+ if isinstance(obj, _BaseNetwork):
+ return obj._get_networks_key()
+ elif isinstance(obj, _BaseAddress):
+ return obj._get_address_key()
+ return NotImplemented
+
+
+class _IPAddressBase(_TotalOrderingMixin):
+
+ """The mother class."""
+
+ __slots__ = ()
+
+ @property
+ def exploded(self):
+ """Return the longhand version of the IP address as a string."""
+ return self._explode_shorthand_ip_string()
+
+ @property
+ def compressed(self):
+ """Return the shorthand version of the IP address as a string."""
+ return _compat_str(self)
+
+ @property
+ def reverse_pointer(self):
+ """The name of the reverse DNS pointer for the IP address, e.g.:
+ >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
+ '1.0.0.127.in-addr.arpa'
+ >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
+ '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
+
+ """
+ return self._reverse_pointer()
+
+ @property
+ def version(self):
+ msg = '%200s has no version specified' % (type(self),)
+ raise NotImplementedError(msg)
+
+ def _check_int_address(self, address):
+ if address < 0:
+ msg = "%d (< 0) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, self._version))
+ if address > self._ALL_ONES:
+ msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, self._max_prefixlen,
+ self._version))
+
+ def _check_packed_address(self, address, expected_len):
+ address_len = len(address)
+ if address_len != expected_len:
+ msg = (
+ '%r (len %d != %d) is not permitted as an IPv%d address. '
+ 'Did you pass in a bytes (str in Python 2) instead of'
+ ' a unicode object?')
+ raise AddressValueError(msg % (address, address_len,
+ expected_len, self._version))
+
+ @classmethod
+ def _ip_int_from_prefix(cls, prefixlen):
+ """Turn the prefix length into a bitwise netmask
+
+ Args:
+ prefixlen: An integer, the prefix length.
+
+ Returns:
+ An integer.
+
+ """
+ return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
+
+ @classmethod
+ def _prefix_from_ip_int(cls, ip_int):
+ """Return prefix length from the bitwise netmask.
+
+ Args:
+ ip_int: An integer, the netmask in expanded bitwise format
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ ValueError: If the input intermingles zeroes & ones
+ """
+ trailing_zeroes = _count_righthand_zero_bits(ip_int,
+ cls._max_prefixlen)
+ prefixlen = cls._max_prefixlen - trailing_zeroes
+ leading_ones = ip_int >> trailing_zeroes
+ all_ones = (1 << prefixlen) - 1
+ if leading_ones != all_ones:
+ byteslen = cls._max_prefixlen // 8
+ details = _compat_to_bytes(ip_int, byteslen, 'big')
+ msg = 'Netmask pattern %r mixes zeroes & ones'
+ raise ValueError(msg % details)
+ return prefixlen
+
+ @classmethod
+ def _report_invalid_netmask(cls, netmask_str):
+ msg = '%r is not a valid netmask' % netmask_str
+ raise NetmaskValueError(msg)
+
+ @classmethod
+ def _prefix_from_prefix_string(cls, prefixlen_str):
+ """Return prefix length from a numeric string
+
+ Args:
+ prefixlen_str: The string to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask
+ """
+ # int allows a leading +/- as well as surrounding whitespace,
+ # so we ensure that isn't the case
+ if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
+ cls._report_invalid_netmask(prefixlen_str)
+ try:
+ prefixlen = int(prefixlen_str)
+ except ValueError:
+ cls._report_invalid_netmask(prefixlen_str)
+ if not (0 <= prefixlen <= cls._max_prefixlen):
+ cls._report_invalid_netmask(prefixlen_str)
+ return prefixlen
+
+ @classmethod
+ def _prefix_from_ip_string(cls, ip_str):
+ """Turn a netmask/hostmask string into a prefix length
+
+ Args:
+ ip_str: The netmask/hostmask to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask/hostmask
+ """
+ # Parse the netmask/hostmask like an IP address.
+ try:
+ ip_int = cls._ip_int_from_string(ip_str)
+ except AddressValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
+ # Note that the two ambiguous cases (all-ones and all-zeroes) are
+ # treated as netmasks.
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ pass
+
+ # Invert the bits, and try matching a /0+1+/ hostmask instead.
+ ip_int ^= cls._ALL_ONES
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ def __reduce__(self):
+ return self.__class__, (_compat_str(self),)
+
+
+class _BaseAddress(_IPAddressBase):
+
+ """A generic IP object.
+
+ This IP class contains the version independent methods which are
+ used by single IP addresses.
+ """
+
+ __slots__ = ()
+
+ def __int__(self):
+ return self._ip
+
+ def __eq__(self, other):
+ try:
+ return (self._ip == other._ip and
+ self._version == other._version)
+ except AttributeError:
+ return NotImplemented
+
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseAddress):
+ raise TypeError('%s and %s are not of the same type' % (
+ self, other))
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ self, other))
+ if self._ip != other._ip:
+ return self._ip < other._ip
+ return False
+
+ # Shorthand for Integer addition and subtraction. This is not
+ # meant to ever support addition/subtraction of addresses.
+ def __add__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) + other)
+
+ def __sub__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) - other)
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return _compat_str(self._string_from_ip_int(self._ip))
+
+ def __hash__(self):
+ return hash(hex(int(self._ip)))
+
+ def _get_address_key(self):
+ return (self._version, self)
+
+ def __reduce__(self):
+ return self.__class__, (self._ip,)
+
+
+class _BaseNetwork(_IPAddressBase):
+
+ """A generic IP network object.
+
+ This IP class contains the version independent methods which are
+ used by networks.
+
+ """
+ def __init__(self, address):
+ self._cache = {}
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return '%s/%d' % (self.network_address, self.prefixlen)
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the network
+ or broadcast addresses.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast):
+ yield self._address_class(x)
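+ # For example, IPv4Network('192.0.2.0/29').hosts() yields the six usable
+ # addresses 192.0.2.1 through 192.0.2.6, skipping the network address
+ # (192.0.2.0) and the broadcast address (192.0.2.7).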
+
+ def __iter__(self):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network, broadcast + 1):
+ yield self._address_class(x)
+
+ def __getitem__(self, n):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ if n >= 0:
+ if network + n > broadcast:
+ raise IndexError('address out of range')
+ return self._address_class(network + n)
+ else:
+ n += 1
+ if broadcast + n < network:
+ raise IndexError('address out of range')
+ return self._address_class(broadcast + n)
+
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError('%s and %s are not of the same type' % (
+ self, other))
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same version' % (
+ self, other))
+ if self.network_address != other.network_address:
+ return self.network_address < other.network_address
+ if self.netmask != other.netmask:
+ return self.netmask < other.netmask
+ return False
+
+ def __eq__(self, other):
+ try:
+ return (self._version == other._version and
+ self.network_address == other.network_address and
+ int(self.netmask) == int(other.netmask))
+ except AttributeError:
+ return NotImplemented
+
+ def __hash__(self):
+ return hash(int(self.network_address) ^ int(self.netmask))
+
+ def __contains__(self, other):
+ # always false if one is v4 and the other is v6.
+ if self._version != other._version:
+ return False
+ # dealing with another network.
+ if isinstance(other, _BaseNetwork):
+ return False
+ # dealing with another address
+ else:
+ # address
+ return (int(self.network_address) <= int(other._ip) <=
+ int(self.broadcast_address))
+
+ def overlaps(self, other):
+ """Tell if self is partly contained in other."""
+ return self.network_address in other or (
+ self.broadcast_address in other or (
+ other.network_address in self or (
+ other.broadcast_address in self)))
+
+ @property
+ def broadcast_address(self):
+ x = self._cache.get('broadcast_address')
+ if x is None:
+ x = self._address_class(int(self.network_address) |
+ int(self.hostmask))
+ self._cache['broadcast_address'] = x
+ return x
+
+ @property
+ def hostmask(self):
+ x = self._cache.get('hostmask')
+ if x is None:
+ x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
+ self._cache['hostmask'] = x
+ return x
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%d' % (self.network_address, self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self.network_address, self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self.network_address, self.hostmask)
+
+ @property
+ def num_addresses(self):
+ """Number of hosts in the current subnet."""
+ return int(self.broadcast_address) - int(self.network_address) + 1
+
+ @property
+ def _address_class(self):
+ # Returning bare address objects (rather than interfaces) allows for
+ # more consistent behaviour across the network address, broadcast
+ # address and individual host addresses.
+ msg = '%200s has no associated address class' % (type(self),)
+ raise NotImplementedError(msg)
+
+ @property
+ def prefixlen(self):
+ return self._prefixlen
+
+ def address_exclude(self, other):
+ """Remove an address from a larger block.
+
+ For example:
+
+ addr1 = ip_network('192.0.2.0/28')
+ addr2 = ip_network('192.0.2.1/32')
+ list(addr1.address_exclude(addr2)) =
+ [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
+ IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
+
+ or IPv6:
+
+ addr1 = ip_network('2001:db8::1/32')
+ addr2 = ip_network('2001:db8::1/128')
+ list(addr1.address_exclude(addr2)) =
+ [ip_network('2001:db8::1/128'),
+ ip_network('2001:db8::2/127'),
+ ip_network('2001:db8::4/126'),
+ ip_network('2001:db8::8/125'),
+ ...
+ ip_network('2001:db8:8000::/33')]
+
+ Args:
+ other: An IPv4Network or IPv6Network object of the same type.
+
+ Returns:
+ An iterator of the IPv(4|6)Network objects which is self
+ minus other.
+
+ Raises:
+ TypeError: If self and other are of differing address
+ versions, or if other is not a network object.
+ ValueError: If other is not completely contained by self.
+
+ """
+ if not self._version == other._version:
+ raise TypeError("%s and %s are not of the same version" % (
+ self, other))
+
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError("%s is not a network object" % other)
+
+ if not other.subnet_of(self):
+ raise ValueError('%s not contained in %s' % (other, self))
+ if other == self:
+ return
+
+ # Make sure we're comparing the network of other.
+ other = other.__class__('%s/%s' % (other.network_address,
+ other.prefixlen))
+
+ s1, s2 = self.subnets()
+ while s1 != other and s2 != other:
+ if other.subnet_of(s1):
+ yield s2
+ s1, s2 = s1.subnets()
+ elif other.subnet_of(s2):
+ yield s1
+ s1, s2 = s2.subnets()
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError('Error performing exclusion: '
+ 's1: %s s2: %s other: %s' %
+ (s1, s2, other))
+ if s1 == other:
+ yield s2
+ elif s2 == other:
+ yield s1
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError('Error performing exclusion: '
+ 's1: %s s2: %s other: %s' %
+ (s1, s2, other))
+
+ def compare_networks(self, other):
+ """Compare two IP objects.
+
+ This is only concerned about the comparison of the integer
+ representation of the network addresses. This means that the
+ host bits aren't considered at all in this method. If you want
+ to compare host bits, you can easily enough do a
+ 'HostA._ip < HostB._ip'
+
+ Args:
+ other: An IP object.
+
+ Returns:
+ If the IP versions of self and other are the same, returns:
+
+ -1 if self < other:
+ eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
+ IPv6Network('2001:db8::1000/124') <
+ IPv6Network('2001:db8::2000/124')
+ 0 if self == other
+ eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
+ IPv6Network('2001:db8::1000/124') ==
+ IPv6Network('2001:db8::1000/124')
+ 1 if self > other
+ eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
+ IPv6Network('2001:db8::2000/124') >
+ IPv6Network('2001:db8::1000/124')
+
+ Raises:
+ TypeError if the IP versions are different.
+
+ """
+ # does this need to raise a ValueError?
+ if self._version != other._version:
+ raise TypeError('%s and %s are not of the same type' % (
+ self, other))
+ # self._version == other._version below here:
+ if self.network_address < other.network_address:
+ return -1
+ if self.network_address > other.network_address:
+ return 1
+ # self.network_address == other.network_address below here:
+ if self.netmask < other.netmask:
+ return -1
+ if self.netmask > other.netmask:
+ return 1
+ return 0
+
+ def _get_networks_key(self):
+ """Network-only key function.
+
+ Returns an object that identifies this address' network and
+ netmask. This function is a suitable "key" argument for sorted()
+ and list.sort().
+
+ """
+ return (self._version, self.network_address, self.netmask)
+
+ def subnets(self, prefixlen_diff=1, new_prefix=None):
+ """The subnets which join to make the current subnet.
+
+ In the case that self contains only one IP
+ (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
+ for IPv6), yield an iterator with just ourself.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length
+ should be increased by. This should not be set if
+ new_prefix is also set.
+ new_prefix: The desired new prefix length. This must be a
+ larger number (smaller prefix) than the existing prefix.
+ This should not be set if prefixlen_diff is also set.
+
+ Returns:
+ An iterator of IPv(4|6) objects.
+
+ Raises:
+ ValueError: The prefixlen_diff is too small or too large.
+ OR
+ prefixlen_diff and new_prefix are both set or new_prefix
+ is a smaller number than the current prefix (smaller
+ number means a larger network)
+
+ """
+ if self._prefixlen == self._max_prefixlen:
+ yield self
+ return
+
+ if new_prefix is not None:
+ if new_prefix < self._prefixlen:
+ raise ValueError('new prefix must be longer')
+ if prefixlen_diff != 1:
+ raise ValueError('cannot set prefixlen_diff and new_prefix')
+ prefixlen_diff = new_prefix - self._prefixlen
+
+ if prefixlen_diff < 0:
+ raise ValueError('prefix length diff must be > 0')
+ new_prefixlen = self._prefixlen + prefixlen_diff
+
+ if new_prefixlen > self._max_prefixlen:
+ raise ValueError(
+ 'prefix length diff %d is invalid for netblock %s' % (
+ new_prefixlen, self))
+
+ start = int(self.network_address)
+ end = int(self.broadcast_address) + 1
+ step = (int(self.hostmask) + 1) >> prefixlen_diff
+ for new_addr in _compat_range(start, end, step):
+ current = self.__class__((new_addr, new_prefixlen))
+ yield current
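+ # For example, list(IPv4Network('192.0.2.0/24').subnets(prefixlen_diff=2))
+ # yields the four /26 networks 192.0.2.0/26, 192.0.2.64/26, 192.0.2.128/26
+ # and 192.0.2.192/26; passing new_prefix=26 gives the same result.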
+
+ def supernet(self, prefixlen_diff=1, new_prefix=None):
+ """The supernet containing the current network.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length of
+ the network should be decreased by. For example, given a
+ /24 network and a prefixlen_diff of 3, a supernet with a
+ /21 netmask is returned.
+
+ Returns:
+ An IPv4 network object.
+
+ Raises:
+ ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
+ a negative prefix length.
+ OR
+ If prefixlen_diff and new_prefix are both set or new_prefix is a
+ larger number than the current prefix (larger number means a
+ smaller network)
+
+ """
+ if self._prefixlen == 0:
+ return self
+
+ if new_prefix is not None:
+ if new_prefix > self._prefixlen:
+ raise ValueError('new prefix must be shorter')
+ if prefixlen_diff != 1:
+ raise ValueError('cannot set prefixlen_diff and new_prefix')
+ prefixlen_diff = self._prefixlen - new_prefix
+
+ new_prefixlen = self.prefixlen - prefixlen_diff
+ if new_prefixlen < 0:
+ raise ValueError(
+ 'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
+ (self.prefixlen, prefixlen_diff))
+ return self.__class__((
+ int(self.network_address) & (int(self.netmask) << prefixlen_diff),
+ new_prefixlen))
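+ # For example, IPv4Network('192.0.2.0/24').supernet(prefixlen_diff=3)
+ # returns IPv4Network('192.0.0.0/21'), matching the /24 -> /21 example in
+ # the docstring above; new_prefix=21 is equivalent.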
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return (self.network_address.is_multicast and
+ self.broadcast_address.is_multicast)
+
+ @staticmethod
+ def _is_subnet_of(a, b):
+ try:
+ # Always false if one is v4 and the other is v6.
+ if a._version != b._version:
+ raise TypeError("%s and %s are not of the same version" % (a, b))
+ return (b.network_address <= a.network_address and
+ b.broadcast_address >= a.broadcast_address)
+ except AttributeError:
+ raise TypeError("Unable to test subnet containment "
+ "between %s and %s" % (a, b))
+
+ def subnet_of(self, other):
+ """Return True if this network is a subnet of other."""
+ return self._is_subnet_of(self, other)
+
+ def supernet_of(self, other):
+ """Return True if this network is a supernet of other."""
+ return self._is_subnet_of(other, self)
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return (self.network_address.is_reserved and
+ self.broadcast_address.is_reserved)
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return (self.network_address.is_link_local and
+ self.broadcast_address.is_link_local)
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return (self.network_address.is_private and
+ self.broadcast_address.is_private)
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return (self.network_address.is_unspecified and
+ self.broadcast_address.is_unspecified)
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return (self.network_address.is_loopback and
+ self.broadcast_address.is_loopback)
+
+
+class _BaseV4(object):
+
+ """Base IPv4 object.
+
+ The following methods are used by IPv4 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 4
+ # Equivalent to 255.255.255.255 or 32 bits of 1's.
+ _ALL_ONES = (2 ** IPV4LENGTH) - 1
+ _DECIMAL_DIGITS = frozenset('0123456789')
+
+ # the valid octets for host and netmasks. only useful for IPv4.
+ _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
+
+ _max_prefixlen = IPV4LENGTH
+ # There are only a handful of valid v4 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ def _explode_shorthand_ip_string(self):
+ return _compat_str(self)
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "24")
+ - a string representing the prefix netmask (e.g. "255.255.255.0")
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ try:
+ # Check for a netmask in prefix length form
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ except NetmaskValueError:
+ # Check for a netmask or hostmask in dotted-quad form.
+ # This may raise NetmaskValueError.
+ prefixlen = cls._prefix_from_ip_string(arg)
+ netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
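+ # For example, _make_netmask(24), _make_netmask('24') and
+ # _make_netmask('255.255.255.0') all produce the same
+ # (IPv4Address('255.255.255.0'), 24) pair, each cached under its own key.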
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn the given IP string into an integer for comparison.
+
+ Args:
+ ip_str: A string, the IP ip_str.
+
+ Returns:
+ The IP ip_str as an integer.
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv4 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError('Address cannot be empty')
+
+ octets = ip_str.split('.')
+ if len(octets) != 4:
+ raise AddressValueError("Expected 4 octets in %r" % ip_str)
+
+ try:
+ return _compat_int_from_byte_vals(
+ map(cls._parse_octet, octets), 'big')
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+
+ @classmethod
+ def _parse_octet(cls, octet_str):
+ """Convert a decimal octet into an integer.
+
+ Args:
+ octet_str: A string, the number to parse.
+
+ Returns:
+ The octet as an integer.
+
+ Raises:
+ ValueError: if the octet isn't strictly a decimal from [0..255].
+
+ """
+ if not octet_str:
+ raise ValueError("Empty octet not permitted")
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._DECIMAL_DIGITS.issuperset(octet_str):
+ msg = "Only decimal digits permitted in %r"
+ raise ValueError(msg % octet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(octet_str) > 3:
+ msg = "At most 3 characters permitted in %r"
+ raise ValueError(msg % octet_str)
+ # Convert to integer (we know digits are legal)
+ octet_int = int(octet_str, 10)
+ # Any octets that look like they *might* be written in octal,
+ # and which don't look exactly the same in both octal and
+ # decimal are rejected as ambiguous
+ if octet_int > 7 and octet_str[0] == '0':
+ msg = "Ambiguous (octal/decimal) value in %r not permitted"
+ raise ValueError(msg % octet_str)
+ if octet_int > 255:
+ raise ValueError("Octet %d (> 255) not permitted" % octet_int)
+ return octet_int
+
+ @classmethod
+ def _string_from_ip_int(cls, ip_int):
+ """Turns a 32-bit integer into dotted decimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ The IP address as a string in dotted decimal notation.
+
+ """
+ return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
+ if isinstance(b, bytes)
+ else b)
+ for b in _compat_to_bytes(ip_int, 4, 'big'))
+
+ def _is_hostmask(self, ip_str):
+ """Test if the IP string is a hostmask (rather than a netmask).
+
+ Args:
+ ip_str: A string, the potential hostmask.
+
+ Returns:
+ A boolean, True if the IP string is a hostmask.
+
+ """
+ bits = ip_str.split('.')
+ try:
+ parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
+ except ValueError:
+ return False
+ if len(parts) != len(bits):
+ return False
+ if parts[0] < parts[-1]:
+ return True
+ return False
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv4 address.
+
+ This implements the method described in RFC1035 3.5.
+
+ """
+ reverse_octets = _compat_str(self).split('.')[::-1]
+ return '.'.join(reverse_octets) + '.in-addr.arpa'
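+ # For example, IPv4Address('192.0.2.1')._reverse_pointer() returns
+ # '1.2.0.192.in-addr.arpa', the RFC 1035 reverse DNS name.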
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv4Address(_BaseV4, _BaseAddress):
+
+ """Represent and manipulate single IPv4 Addresses."""
+
+ __slots__ = ('_ip', '__weakref__')
+
+ def __init__(self, address):
+
+ """
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv4Address('192.0.2.1') == IPv4Address(3221225985).
+ or, more generally
+ IPv4Address(int(IPv4Address('192.0.2.1'))) ==
+ IPv4Address('192.0.2.1')
+
+ Raises:
+ AddressValueError: If ipaddress isn't a valid IPv4 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 4)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, 'big')
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if '/' in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v4_int_to_packed(self._ip)
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within the
+ reserved IPv4 Network range.
+
+ """
+ return self in self._constants._reserved_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
+ return (
+ self not in self._constants._public_network and
+ not self.is_private)
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is multicast.
+ See RFC 3171 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 5735 3.
+
+ """
+ return self == self._constants._unspecified_address
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback per RFC 3330.
+
+ """
+ return self in self._constants._loopback_network
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is link-local per RFC 3927.
+
+ """
+ return self in self._constants._linklocal_network
+
+
+class IPv4Interface(IPv4Address):
+
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv4Address.__init__(self, address)
+ self.network = IPv4Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+
+ if isinstance(address, tuple):
+ IPv4Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+
+ self.network = IPv4Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv4Address.__init__(self, addr[0])
+
+ self.network = IPv4Network(address, strict=False)
+ self._prefixlen = self.network._prefixlen
+
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return '%s/%d' % (self._string_from_ip_int(self._ip),
+ self.network.prefixlen)
+
+ def __eq__(self, other):
+ address_equal = IPv4Address.__eq__(self, other)
+ if not address_equal or address_equal is NotImplemented:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv4Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (self.network < other.network or
+ self.network == other.network and address_less)
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv4Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+ """This class represents and manipulates 32-bit IPv4 network + addresses..
+
+ Attributes: [examples for IPv4Network('192.0.2.0/27')]
+ .network_address: IPv4Address('192.0.2.0')
+ .hostmask: IPv4Address('0.0.0.31')
+ .broadcast_address: IPv4Address('192.0.2.31')
+ .netmask: IPv4Address('255.255.255.224')
+ .prefixlen: 27
+
+ """
+ # Class to use when creating address objects
+ _address_class = IPv4Address
+
+ def __init__(self, address, strict=True):
+
+ """Instantiate a new IPv4 network object.
+
+ Args:
+ address: A string or integer representing the IP [& network].
+ '192.0.2.0/24'
+ '192.0.2.0/255.255.255.0'
+ '192.0.0.2/0.0.0.255'
+ are all functionally the same in IPv4. Similarly,
+ '192.0.2.1'
+ '192.0.2.1/255.255.255.255'
+ '192.0.2.1/32'
+ are also functionally equivalent. That is to say, failing to
+ provide a subnetmask will create an object with a mask of /32.
+
+ If the mask (portion after the / in the argument) is given in
+ dotted quad form, it is treated as a netmask if it starts with a
+ non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+ starts with a zero field (e.g. 0.255.255.255 == /8), with the
+ single exception of an all-zero mask which is treated as a
+ netmask == /0. If no mask is given, a default of /32 is used.
+
+ Additionally, an integer can be passed, so
+ IPv4Network('192.0.2.1') == IPv4Network(3221225985)
+ or, more generally
+ IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
+ IPv4Interface('192.0.2.1')
+
+ Raises:
+ AddressValueError: If ipaddress isn't a valid IPv4 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv4 address.
+ ValueError: If strict is True and a network address is not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Constructing from a packed address or integer
+ if isinstance(address, (_compat_int_types, bytes)):
+ self.network_address = IPv4Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen)
+ # fixme: address/network test here.
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ # We weren't given an address[1]
+ arg = self._max_prefixlen
+ self.network_address = IPv4Address(address[0])
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError('%s has host bits set' % self)
+ else:
+ self.network_address = IPv4Address(packed &
+ int(self.netmask))
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+ self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
+ self.network_address):
+ raise ValueError('%s has host bits set' % self)
+ self.network_address = IPv4Address(int(self.network_address) &
+ int(self.netmask))
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return (not (self.network_address in IPv4Network('100.64.0.0/10') and
+ self.broadcast_address in IPv4Network('100.64.0.0/10')) and
+ not self.is_private)
+
+
+class _IPv4Constants(object):
+
+ _linklocal_network = IPv4Network('169.254.0.0/16')
+
+ _loopback_network = IPv4Network('127.0.0.0/8')
+
+ _multicast_network = IPv4Network('224.0.0.0/4')
+
+ _public_network = IPv4Network('100.64.0.0/10')
+
+ _private_networks = [
+ IPv4Network('0.0.0.0/8'),
+ IPv4Network('10.0.0.0/8'),
+ IPv4Network('127.0.0.0/8'),
+ IPv4Network('169.254.0.0/16'),
+ IPv4Network('172.16.0.0/12'),
+ IPv4Network('192.0.0.0/29'),
+ IPv4Network('192.0.0.170/31'),
+ IPv4Network('192.0.2.0/24'),
+ IPv4Network('192.168.0.0/16'),
+ IPv4Network('198.18.0.0/15'),
+ IPv4Network('198.51.100.0/24'),
+ IPv4Network('203.0.113.0/24'),
+ IPv4Network('240.0.0.0/4'),
+ IPv4Network('255.255.255.255/32'),
+ ]
+
+ _reserved_network = IPv4Network('240.0.0.0/4')
+
+ _unspecified_address = IPv4Address('0.0.0.0')
+
+
+IPv4Address._constants = _IPv4Constants
+
+
+class _BaseV6(object):
+
+ """Base IPv6 object.
+
+ The following methods are used by IPv6 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 6
+ _ALL_ONES = (2 ** IPV6LENGTH) - 1
+ _HEXTET_COUNT = 8
+ _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
+ _max_prefixlen = IPV6LENGTH
+
+ # There are only a bunch of valid v6 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "24")
+ - a string representing the prefix netmask (e.g. "255.255.255.0")
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn an IPv6 ip_str into an integer.
+
+ Args:
+ ip_str: A string, the IPv6 ip_str.
+
+ Returns:
+ An int, the IPv6 address
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv6 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError('Address cannot be empty')
+
+ parts = ip_str.split(':')
+
+ # An IPv6 address needs at least 2 colons (3 parts).
+ _min_parts = 3
+ if len(parts) < _min_parts:
+ msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
+ raise AddressValueError(msg)
+
+ # If the address has an IPv4-style suffix, convert it to hexadecimal.
+ if '.' in parts[-1]:
+ try:
+ ipv4_int = IPv4Address(parts.pop())._ip
+ except AddressValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+ parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
+ parts.append('%x' % (ipv4_int & 0xFFFF))
+
+ # An IPv6 address can't have more than 8 colons (9 parts).
+ # The extra colon comes from using the "::" notation for a single
+ # leading or trailing zero part.
+ _max_parts = cls._HEXTET_COUNT + 1
+ if len(parts) > _max_parts:
+ msg = "At most %d colons permitted in %r" % (
+ _max_parts - 1, ip_str)
+ raise AddressValueError(msg)
+
+ # Disregarding the endpoints, find '::' with nothing in between.
+ # This indicates that a run of zeroes has been skipped.
+ skip_index = None
+ for i in _compat_range(1, len(parts) - 1):
+ if not parts[i]:
+ if skip_index is not None:
+ # Can't have more than one '::'
+ msg = "At most one '::' permitted in %r" % ip_str
+ raise AddressValueError(msg)
+ skip_index = i
+
+ # parts_hi is the number of parts to copy from above/before the '::'
+ # parts_lo is the number of parts to copy from below/after the '::'
+ if skip_index is not None:
+ # If we found a '::', then check if it also covers the endpoints.
+ parts_hi = skip_index
+ parts_lo = len(parts) - skip_index - 1
+ if not parts[0]:
+ parts_hi -= 1
+ if parts_hi:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ parts_lo -= 1
+ if parts_lo:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
+ if parts_skipped < 1:
+ msg = "Expected at most %d other parts with '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
+ else:
+ # Otherwise, allocate the entire address to parts_hi. The
+ # endpoints could still be empty, but _parse_hextet() will check
+ # for that.
+ if len(parts) != cls._HEXTET_COUNT:
+ msg = "Exactly %d parts expected without '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
+ if not parts[0]:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_hi = len(parts)
+ parts_lo = 0
+ parts_skipped = 0
+
+ try:
+ # Now, parse the hextets into a 128-bit integer.
+ ip_int = 0
+ for i in range(parts_hi):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ ip_int <<= 16 * parts_skipped
+ for i in range(-parts_lo, 0):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ return ip_int
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+
+ @classmethod
+ def _parse_hextet(cls, hextet_str):
+ """Convert an IPv6 hextet string into an integer.
+
+ Args:
+ hextet_str: A string, the number to parse.
+
+ Returns:
+ The hextet as an integer.
+
+ Raises:
+ ValueError: if the input isn't strictly a hex number from
+ [0..FFFF].
+
+ """
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._HEX_DIGITS.issuperset(hextet_str):
+ raise ValueError("Only hex digits permitted in %r" % hextet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(hextet_str) > 4:
+ msg = "At most 4 characters permitted in %r"
+ raise ValueError(msg % hextet_str)
+ # Length check means we can skip checking the integer value
+ return int(hextet_str, 16)
+
+ @classmethod
+ def _compress_hextets(cls, hextets):
+ """Compresses a list of hextets.
+
+ Compresses a list of strings, replacing the longest continuous
+ sequence of "0" in the list with "" and adding empty strings at
+ the beginning or at the end of the string such that subsequently
+ calling ":".join(hextets) will produce the compressed version of
+ the IPv6 address.
+
+ Args:
+ hextets: A list of strings, the hextets to compress.
+
+ Returns:
+ A list of strings.
+
+ """
+ best_doublecolon_start = -1
+ best_doublecolon_len = 0
+ doublecolon_start = -1
+ doublecolon_len = 0
+ for index, hextet in enumerate(hextets):
+ if hextet == '0':
+ doublecolon_len += 1
+ if doublecolon_start == -1:
+ # Start of a sequence of zeros.
+ doublecolon_start = index
+ if doublecolon_len > best_doublecolon_len:
+ # This is the longest sequence of zeros so far.
+ best_doublecolon_len = doublecolon_len
+ best_doublecolon_start = doublecolon_start
+ else:
+ doublecolon_len = 0
+ doublecolon_start = -1
+
+ if best_doublecolon_len > 1:
+ best_doublecolon_end = (best_doublecolon_start +
+ best_doublecolon_len)
+ # For zeros at the end of the address.
+ if best_doublecolon_end == len(hextets):
+ hextets += ['']
+ hextets[best_doublecolon_start:best_doublecolon_end] = ['']
+ # For zeros at the beginning of the address.
+ if best_doublecolon_start == 0:
+ hextets = [''] + hextets
+
+ return hextets
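+ # For example, _compress_hextets(['2001', 'db8', '0', '0', '0', '0', '0', '1'])
+ # returns ['2001', 'db8', '', '1'], so ':'.join(...) produces '2001:db8::1'.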
+
+ @classmethod
+ def _string_from_ip_int(cls, ip_int=None):
+ """Turns a 128-bit integer into hexadecimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ A string, the hexadecimal representation of the address.
+
+ Raises:
+ ValueError: The address is bigger than 128 bits of all ones.
+
+ """
+ if ip_int is None:
+ ip_int = int(cls._ip)
+
+ if ip_int > cls._ALL_ONES:
+ raise ValueError('IPv6 address is too large')
+
+ hex_str = '%032x' % ip_int
+ hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
+
+ hextets = cls._compress_hextets(hextets)
+ return ':'.join(hextets)
+
+ def _explode_shorthand_ip_string(self):
+ """Expand a shortened IPv6 address.
+
+ Args:
+ ip_str: A string, the IPv6 address.
+
+ Returns:
+ A string, the expanded IPv6 address.
+
+ """
+ if isinstance(self, IPv6Network):
+ ip_str = _compat_str(self.network_address)
+ elif isinstance(self, IPv6Interface):
+ ip_str = _compat_str(self.ip)
+ else:
+ ip_str = _compat_str(self)
+
+ ip_int = self._ip_int_from_string(ip_str)
+ hex_str = '%032x' % ip_int
+ parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
+ if isinstance(self, (_BaseNetwork, IPv6Interface)):
+ return '%s/%d' % (':'.join(parts), self._prefixlen)
+ return ':'.join(parts)
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv6 address.
+
+ This implements the method described in RFC3596 2.5.
+
+ """
+ reverse_chars = self.exploded[::-1].replace(':', '')
+ return '.'.join(reverse_chars) + '.ip6.arpa'
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv6Address(_BaseV6, _BaseAddress):
+
+ """Represent and manipulate single IPv6 Addresses."""
+
+ __slots__ = ('_ip', '__weakref__')
+
+ def __init__(self, address):
+ """Instantiate a new IPv6 address object.
+
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv6Address('2001:db8::') ==
+ IPv6Address(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Address(int(IPv6Address('2001:db8::'))) ==
+ IPv6Address('2001:db8::')
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 16)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, 'big')
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if '/' in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v6_int_to_packed(self._ip)
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return any(self in x for x in self._constants._reserved_networks)
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return self in self._constants._linklocal_network
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return self in self._constants._sitelocal_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, true if the address is not reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return self._ip == 0
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return self._ip == 1
+
+ @property
+ def ipv4_mapped(self):
+ """Return the IPv4 mapped address.
+
+ Returns:
+ If the IPv6 address is a v4 mapped address, return the
+ IPv4 mapped address. Return None otherwise.
+
+ """
+ if (self._ip >> 32) != 0xFFFF:
+ return None
+ return IPv4Address(self._ip & 0xFFFFFFFF)
+
+ @property
+ def teredo(self):
+ """Tuple of embedded teredo IPs.
+
+ Returns:
+ Tuple of the (server, client) IPs or None if the address
+ doesn't appear to be a teredo address (doesn't start with
+ 2001::/32)
+
+ """
+ if (self._ip >> 96) != 0x20010000:
+ return None
+ return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
+ IPv4Address(~self._ip & 0xFFFFFFFF))
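+ # For example, IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo
+ # returns (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45')).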
+
+ @property
+ def sixtofour(self):
+ """Return the IPv4 6to4 embedded address.
+
+ Returns:
+ The IPv4 6to4-embedded address if present or None if the
+ address doesn't appear to contain a 6to4 embedded address.
+
+ """
+ if (self._ip >> 112) != 0x2002:
+ return None
+ return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
+
+
+class IPv6Interface(IPv6Address):
+
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv6Address.__init__(self, address)
+ self.network = IPv6Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+ if isinstance(address, tuple):
+ IPv6Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv6Address.__init__(self, addr[0])
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self._prefixlen = self.network._prefixlen
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return '%s/%d' % (self._string_from_ip_int(self._ip),
+ self.network.prefixlen)
+
+ def __eq__(self, other):
+ address_equal = IPv6Address.__eq__(self, other)
+ if not address_equal or address_equal is NotImplemented:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv6Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (self.network < other.network or
+ self.network == other.network and address_less)
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv6Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return '%s/%s' % (self._string_from_ip_int(self._ip),
+ self.hostmask)
+
+ @property
+ def is_unspecified(self):
+ return self._ip == 0 and self.network.is_unspecified
+
+ @property
+ def is_loopback(self):
+ return self._ip == 1 and self.network.is_loopback
+
+
+class IPv6Network(_BaseV6, _BaseNetwork):
+
+ """This class represents and manipulates 128-bit IPv6 networks.
+
+ Attributes: [examples for IPv6Network('2001:db8::1000/124')]
+ .network_address: IPv6Address('2001:db8::1000')
+ .hostmask: IPv6Address('::f')
+ .broadcast_address: IPv6Address('2001:db8::100f')
+ .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
+ .prefixlen: 124
+
+ """
+
+ # Class to use when creating address objects
+ _address_class = IPv6Address
+
+ def __init__(self, address, strict=True):
+ """Instantiate a new IPv6 Network object.
+
+ Args:
+ address: A string or integer representing the IPv6 network or the
+ IP and prefix/netmask.
+ '2001:db8::/128'
+ '2001:db8:0000:0000:0000:0000:0000:0000/128'
+ '2001:db8::'
+ are all functionally the same in IPv6. That is to say,
+ failing to provide a subnetmask will create an object with
+ a mask of /128.
+
+ Additionally, an integer can be passed, so
+ IPv6Network('2001:db8::') ==
+ IPv6Network(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Network(int(IPv6Network('2001:db8::'))) ==
+ IPv6Network('2001:db8::')
+
+ strict: A boolean. If true, ensure that we have been passed
+ a true network address, e.g. 2001:db8::1000/124, and not an
+ IP address on a network, e.g. 2001:db8::1/124.
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv6 address.
+ ValueError: If strict was True and a network address was not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Efficient constructor from integer or packed address
+ if isinstance(address, (bytes, _compat_int_types)):
+ self.network_address = IPv6Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen)
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ self.network_address = IPv6Address(address[0])
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError('%s has host bits set' % self)
+ else:
+ self.network_address = IPv6Address(packed &
+ int(self.netmask))
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+
+ self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
+ self.network_address):
+ raise ValueError('%s has host bits set' % self)
+ self.network_address = IPv6Address(int(self.network_address) &
+ int(self.netmask))
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the
+ Subnet-Router anycast address.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast + 1):
+ yield self._address_class(x)
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return (self.network_address.is_site_local and
+ self.broadcast_address.is_site_local)
+
+
+class _IPv6Constants(object):
+
+ _linklocal_network = IPv6Network('fe80::/10')
+
+ _multicast_network = IPv6Network('ff00::/8')
+
+ _private_networks = [
+ IPv6Network('::1/128'),
+ IPv6Network('::/128'),
+ IPv6Network('::ffff:0:0/96'),
+ IPv6Network('100::/64'),
+ IPv6Network('2001::/23'),
+ IPv6Network('2001:2::/48'),
+ IPv6Network('2001:db8::/32'),
+ IPv6Network('2001:10::/28'),
+ IPv6Network('fc00::/7'),
+ IPv6Network('fe80::/10'),
+ ]
+
+ _reserved_networks = [
+ IPv6Network('::/8'), IPv6Network('100::/8'),
+ IPv6Network('200::/7'), IPv6Network('400::/6'),
+ IPv6Network('800::/5'), IPv6Network('1000::/4'),
+ IPv6Network('4000::/3'), IPv6Network('6000::/3'),
+ IPv6Network('8000::/3'), IPv6Network('A000::/3'),
+ IPv6Network('C000::/3'), IPv6Network('E000::/4'),
+ IPv6Network('F000::/5'), IPv6Network('F800::/6'),
+ IPv6Network('FE00::/9'),
+ ]
+
+ _sitelocal_network = IPv6Network('fec0::/10')
+
+
+IPv6Address._constants = _IPv6Constants
diff --git a/test/support/integration/plugins/module_utils/ec2.py b/test/support/integration/plugins/module_utils/ec2.py
new file mode 100644
index 0000000000..5599ee7ea3
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/ec2.py
@@ -0,0 +1,758 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import sys
+import traceback
+
+from ansible.module_utils.ansible_release import __version__
+from ansible.module_utils.basic import missing_required_lib, env_fallback
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.cloud import CloudRetry
+from ansible.module_utils.six import string_types, binary_type, text_type
+from ansible.module_utils.common.dict_transformations import (
+ camel_dict_to_snake_dict, snake_dict_to_camel_dict,
+ _camel_to_snake, _snake_to_camel,
+)
+
+BOTO_IMP_ERR = None
+try:
+ import boto
+ import boto.ec2 # boto does weird import stuff
+ HAS_BOTO = True
+except ImportError:
+ BOTO_IMP_ERR = traceback.format_exc()
+ HAS_BOTO = False
+
+BOTO3_IMP_ERR = None
+try:
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
+except Exception:
+ BOTO3_IMP_ERR = traceback.format_exc()
+ HAS_BOTO3 = False
+
+try:
+ # Although this is to allow Python 3 the ability to use the custom comparison as a key, Python 2.7 also
+ # uses this (and it works as expected). Python 2.6 will trigger the ImportError.
+ from functools import cmp_to_key
+ PY3_COMPARISON = True
+except ImportError:
+ PY3_COMPARISON = False
+
+
+class AnsibleAWSError(Exception):
+ pass
+
+
+def _botocore_exception_maybe():
+ """
+ Allow for boto3 not being installed when using these utils by wrapping
+ botocore.exceptions instead of assigning from it directly.
+ """
+ if HAS_BOTO3:
+ return botocore.exceptions.ClientError
+ return type(None)
+
+
+class AWSRetry(CloudRetry):
+ base_class = _botocore_exception_maybe()
+
+ @staticmethod
+ def status_code_from_exception(error):
+ return error.response['Error']['Code']
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ # This list of failures is based on this API Reference
+ # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
+ #
+ # TooManyRequestsException comes from inside botocore when it
+ # does retries; unfortunately it does not retry long
+ # enough to allow some services such as API Gateway to
+ # complete configuration. At the moment of writing there is a
+ # botocore/boto3 bug open to fix this.
+ #
+ # https://github.com/boto/boto3/issues/876 (and linked PRs etc)
+ retry_on = [
+ 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
+ 'InternalFailure', 'InternalError', 'TooManyRequestsException',
+ 'Throttling'
+ ]
+ if catch_extra_error_codes:
+ retry_on.extend(catch_extra_error_codes)
+
+ return response_code in retry_on
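+ # Illustrative usage (assuming the jittered_backoff decorator inherited from
+ # CloudRetry): wrap a boto3 call so the throttled/transient error codes listed
+ # in found() above are retried automatically.
+ #
+ # @AWSRetry.jittered_backoff(retries=10)
+ # def describe_instances(client, **kwargs):
+ # return client.describe_instances(**kwargs)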
+
+
+def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
+ try:
+ return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
+ except ValueError as e:
+ module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
+ except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
+ botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
+ module.fail_json(msg=to_native(e))
+ except botocore.exceptions.NoRegionError as e:
+ module.fail_json(msg="The %s module requires a region and none was found in configuration, "
+ "environment variables or module parameters" % module._name)
+
+
+def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
+ profile = params.pop('profile_name', None)
+
+ if conn_type not in ['both', 'resource', 'client']:
+ raise ValueError('There is an issue in the calling code. You '
+ 'must specify either both, resource, or client to '
+ 'the conn_type parameter in the boto3_conn function '
+ 'call')
+
+ config = botocore.config.Config(
+ user_agent_extra='Ansible/{0}'.format(__version__),
+ )
+
+ if params.get('config') is not None:
+ config = config.merge(params.pop('config'))
+ if params.get('aws_config') is not None:
+ config = config.merge(params.pop('aws_config'))
+
+ session = boto3.session.Session(
+ profile_name=profile,
+ )
+
+ if conn_type == 'resource':
+ return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ elif conn_type == 'client':
+ return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
+ else:
+ client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
+ resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
+ return client, resource
+
+
+boto3_inventory_conn = _boto3_conn
+
+
+def boto_exception(err):
+ """
+ Extracts the error message from a boto exception.
+
+ :param err: Exception from boto
+ :return: Error message
+ """
+ if hasattr(err, 'error_message'):
+ error = err.error_message
+ elif hasattr(err, 'message'):
+ error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
+ else:
+ error = '%s: %s' % (Exception, err)
+
+ return error
+
+
+def aws_common_argument_spec():
+ return dict(
+ debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
+ ec2_url=dict(),
+ aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
+ aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
+ validate_certs=dict(default=True, type='bool'),
+ security_token=dict(aliases=['access_token'], no_log=True),
+ profile=dict(),
+ aws_config=dict(type='dict'),
+ )
+
+
+def ec2_argument_spec():
+ spec = aws_common_argument_spec()
+ spec.update(
+ dict(
+ region=dict(aliases=['aws_region', 'ec2_region']),
+ )
+ )
+ return spec
+
+
+def get_aws_region(module, boto3=False):
+ region = module.params.get('region')
+
+ if region:
+ return region
+
+ if 'AWS_REGION' in os.environ:
+ return os.environ['AWS_REGION']
+ if 'AWS_DEFAULT_REGION' in os.environ:
+ return os.environ['AWS_DEFAULT_REGION']
+ if 'EC2_REGION' in os.environ:
+ return os.environ['EC2_REGION']
+
+ if not boto3:
+ if not HAS_BOTO:
+ module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR)
+ # boto.config.get returns None if config not found
+ region = boto.config.get('Boto', 'aws_region')
+ if region:
+ return region
+ return boto.config.get('Boto', 'ec2_region')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
+
+ # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
+ try:
+ profile_name = module.params.get('profile')
+ return botocore.session.Session(profile=profile_name).get_config_variable('region')
+ except botocore.exceptions.ProfileNotFound as e:
+ return None
+
+
+def get_aws_connection_info(module, boto3=False):
+
+ # Check module args for credentials, then check environment vars
+ # access_key
+
+ ec2_url = module.params.get('ec2_url')
+ access_key = module.params.get('aws_access_key')
+ secret_key = module.params.get('aws_secret_key')
+ security_token = module.params.get('security_token')
+ region = get_aws_region(module, boto3)
+ profile_name = module.params.get('profile')
+ validate_certs = module.params.get('validate_certs')
+ config = module.params.get('aws_config')
+
+ if not ec2_url:
+ if 'AWS_URL' in os.environ:
+ ec2_url = os.environ['AWS_URL']
+ elif 'EC2_URL' in os.environ:
+ ec2_url = os.environ['EC2_URL']
+
+ if not access_key:
+ if os.environ.get('AWS_ACCESS_KEY_ID'):
+ access_key = os.environ['AWS_ACCESS_KEY_ID']
+ elif os.environ.get('AWS_ACCESS_KEY'):
+ access_key = os.environ['AWS_ACCESS_KEY']
+ elif os.environ.get('EC2_ACCESS_KEY'):
+ access_key = os.environ['EC2_ACCESS_KEY']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
+ access_key = boto.config.get('Credentials', 'aws_access_key_id')
+ elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
+ access_key = boto.config.get('default', 'aws_access_key_id')
+ else:
+ # in case access_key came in as empty string
+ access_key = None
+
+ if not secret_key:
+ if os.environ.get('AWS_SECRET_ACCESS_KEY'):
+ secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
+ elif os.environ.get('AWS_SECRET_KEY'):
+ secret_key = os.environ['AWS_SECRET_KEY']
+ elif os.environ.get('EC2_SECRET_KEY'):
+ secret_key = os.environ['EC2_SECRET_KEY']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
+ secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
+ elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
+ secret_key = boto.config.get('default', 'aws_secret_access_key')
+ else:
+ # in case secret_key came in as empty string
+ secret_key = None
+
+ if not security_token:
+ if os.environ.get('AWS_SECURITY_TOKEN'):
+ security_token = os.environ['AWS_SECURITY_TOKEN']
+ elif os.environ.get('AWS_SESSION_TOKEN'):
+ security_token = os.environ['AWS_SESSION_TOKEN']
+ elif os.environ.get('EC2_SECURITY_TOKEN'):
+ security_token = os.environ['EC2_SECURITY_TOKEN']
+ elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
+ security_token = boto.config.get('Credentials', 'aws_security_token')
+ elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
+ security_token = boto.config.get('default', 'aws_security_token')
+ else:
+ # in case security_token came in as empty string
+ security_token = None
+
+ if HAS_BOTO3 and boto3:
+ boto_params = dict(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ aws_session_token=security_token)
+ boto_params['verify'] = validate_certs
+
+ if profile_name:
+ boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
+ boto_params['profile_name'] = profile_name
+
+ else:
+ boto_params = dict(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ security_token=security_token)
+
+ # only set profile_name if passed as an argument
+ if profile_name:
+ boto_params['profile_name'] = profile_name
+
+ boto_params['validate_certs'] = validate_certs
+
+ if config is not None:
+ if HAS_BOTO3 and boto3:
+ boto_params['aws_config'] = botocore.config.Config(**config)
+ elif HAS_BOTO and not boto3:
+ if 'user_agent' in config:
+ sys.modules["boto.connection"].UserAgent = config['user_agent']
+
+ for param, value in boto_params.items():
+ if isinstance(value, binary_type):
+ boto_params[param] = text_type(value, 'utf-8', 'strict')
+
+ return region, ec2_url, boto_params
+
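+# Illustrative usage sketch (not part of the original helpers; a typical module built
+# on ec2_argument_spec() would combine them roughly like this):
+#
+#     region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+#     client = boto3_conn(module, conn_type='client', resource='ec2',
+#                         region=region, endpoint=ec2_url, **aws_connect_kwargs)
+#
+# The returned client then follows the credential/region precedence implemented above:
+# module parameters first, then environment variables, then boto/botocore configuration.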
+
+def get_ec2_creds(module):
+ ''' for compatibility mode with old modules that don't/can't yet
+ use ec2_connect method '''
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+ return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
+
+
+def boto_fix_security_token_in_profile(conn, profile_name):
+ ''' monkey patch for boto issue boto/boto#2100 '''
+ profile = 'profile ' + profile_name
+ if boto.config.has_option(profile, 'aws_security_token'):
+ conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
+ return conn
+
+
+def connect_to_aws(aws_module, region, **params):
+ try:
+ conn = aws_module.connect_to_region(region, **params)
+ except(boto.provider.ProfileNotFoundError):
+ raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
+ if not conn:
+ if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
+ raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
+ "boto or extend with endpoints_path" % (region, aws_module.__name__))
+ else:
+ raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
+ if params.get('profile_name'):
+ conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
+ return conn
+
+
+def ec2_connect(module):
+
+ """ Return an ec2 connection"""
+
+ region, ec2_url, boto_params = get_aws_connection_info(module)
+
+ # If we have a region specified, connect to its endpoint.
+ if region:
+ try:
+ ec2 = connect_to_aws(boto.ec2, region, **boto_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
+ module.fail_json(msg=str(e))
+ # Otherwise, no region so we fallback to the old connection method
+ elif ec2_url:
+ try:
+ ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="Either region or ec2_url must be specified")
+
+ return ec2
+
+
+def ansible_dict_to_boto3_filter_list(filters_dict):
+
+ """ Convert an Ansible dict of filters to list of dicts that boto3 can use
+ Args:
+ filters_dict (dict): Dict of AWS filters.
+ Basic Usage:
+ >>> filters = {'some-aws-id': 'i-01234567'}
+ >>> ansible_dict_to_boto3_filter_list(filters)
+ [
+ {'Name': 'some-aws-id', 'Values': ['i-01234567']}
+ ]
+ Returns:
+ List: List of AWS filters and their values
+ [
+ {
+ 'Name': 'some-aws-id',
+ 'Values': [
+ 'i-01234567',
+ ]
+ }
+ ]
+ """
+
+ filters_list = []
+ for k, v in filters_dict.items():
+ filter_dict = {'Name': k}
+ if isinstance(v, string_types):
+ filter_dict['Values'] = [v]
+ else:
+ filter_dict['Values'] = v
+
+ filters_list.append(filter_dict)
+
+ return filters_list
+
+
+def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
+
+ """ Convert a boto3 list of resource tags to a flat dict of key:value pairs
+ Args:
+ tags_list (list): List of dicts representing AWS tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
+ >>> boto3_tag_list_to_ansible_dict(tags_list)
+ {
+ 'MyTagKey': 'MyTagValue'
+ }
+ Returns:
+ Dict: Dict of key:value pairs representing AWS tags
+ {
+ 'MyTagKey': 'MyTagValue',
+ }
+ """
+
+ if tag_name_key_name and tag_value_key_name:
+ tag_candidates = {tag_name_key_name: tag_value_key_name}
+ else:
+ tag_candidates = {'key': 'value', 'Key': 'Value'}
+
+ if not tags_list:
+ return {}
+ for k, v in tag_candidates.items():
+ if k in tags_list[0] and v in tags_list[0]:
+ return dict((tag[k], tag[v]) for tag in tags_list)
+ raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
+
+
+def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
+
+ """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
+ Args:
+ tags_dict (dict): Dict representing AWS resource tags.
+ tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
+ tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
+ Basic Usage:
+ >>> tags_dict = {'MyTagKey': 'MyTagValue'}
+ >>> ansible_dict_to_boto3_tag_list(tags_dict)
+ [
+ {'Key': 'MyTagKey', 'Value': 'MyTagValue'}
+ ]
+ Returns:
+ List: List of dicts containing tag keys and values
+ [
+ {
+ 'Key': 'MyTagKey',
+ 'Value': 'MyTagValue'
+ }
+ ]
+ """
+
+ tags_list = []
+ for k, v in tags_dict.items():
+ tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
+
+ return tags_list
+
+
+def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
+
+ """ Return list of security group IDs from security group names. Note that security group names are not unique
+ across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
+ will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
+ a try block
+ """
+
+ def get_sg_name(sg, boto3):
+
+ if boto3:
+ return sg['GroupName']
+ else:
+ return sg.name
+
+ def get_sg_id(sg, boto3):
+
+ if boto3:
+ return sg['GroupId']
+ else:
+ return sg.id
+
+ sec_group_id_list = []
+
+ if isinstance(sec_group_list, string_types):
+ sec_group_list = [sec_group_list]
+
+ # Get all security groups
+ if boto3:
+ if vpc_id:
+ filters = [
+ {
+ 'Name': 'vpc-id',
+ 'Values': [
+ vpc_id,
+ ]
+ }
+ ]
+ all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
+ else:
+ all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
+ else:
+ if vpc_id:
+ filters = {'vpc-id': vpc_id}
+ all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
+ else:
+ all_sec_groups = ec2_connection.get_all_security_groups()
+
+ unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
+ sec_group_name_list = list(set(sec_group_list) - set(unmatched))
+
+ if len(unmatched) > 0:
+ # If we have unmatched names that look like an ID, assume they are
+ import re
+ sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
+ still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
+ if len(still_unmatched) > 0:
+ raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
+
+ sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
+
+ return sec_group_id_list
+
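+# Illustrative example (not part of the original helper; the names below are
+# placeholders):
+#
+#     ids = get_ec2_security_group_ids_from_names(['default', 'sg-0123abcd'],
+#                                                 ec2_client, vpc_id='vpc-0abc1234')
+#
+# Names are resolved within the given VPC, anything already shaped like an sg- ID is
+# passed through, and names that match nothing raise the ValueError described above.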
+
+def _hashable_policy(policy, policy_list):
+ """
+ Takes a policy and returns a list, the contents of which are all hashable and sorted.
+ Example input policy:
+ {'Version': '2012-10-17',
+ 'Statement': [{'Action': 's3:PutObjectAcl',
+ 'Sid': 'AddCannedAcl2',
+ 'Resource': 'arn:aws:s3:::test_policy/*',
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
+ }]}
+ Returned value:
+ [('Statement', ((('Action', (u's3:PutObjectAcl',)),
+ ('Effect', (u'Allow',)),
+ ('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
+ ('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
+ ('Version', (u'2012-10-17',)))]
+
+ """
+ # Amazon will automatically convert bool and int to strings for us
+ if isinstance(policy, bool):
+ return tuple([str(policy).lower()])
+ elif isinstance(policy, int):
+ return tuple([str(policy)])
+
+ if isinstance(policy, list):
+ for each in policy:
+ tupleified = _hashable_policy(each, [])
+ if isinstance(tupleified, list):
+ tupleified = tuple(tupleified)
+ policy_list.append(tupleified)
+ elif isinstance(policy, string_types) or isinstance(policy, binary_type):
+ policy = to_text(policy)
+ # convert root account ARNs to just account IDs
+ if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
+ policy = policy.split(':')[4]
+ return [policy]
+ elif isinstance(policy, dict):
+ sorted_keys = list(policy.keys())
+ sorted_keys.sort()
+ for key in sorted_keys:
+ tupleified = _hashable_policy(policy[key], [])
+ if isinstance(tupleified, list):
+ tupleified = tuple(tupleified)
+ policy_list.append((key, tupleified))
+
+ # ensure we aren't returning deeply nested structures of length 1
+ if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
+ policy_list = policy_list[0]
+ if isinstance(policy_list, list):
+ if PY3_COMPARISON:
+ policy_list.sort(key=cmp_to_key(py3cmp))
+ else:
+ policy_list.sort()
+ return policy_list
+
+
+def py3cmp(a, b):
+ """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
+ try:
+ if a > b:
+ return 1
+ elif a < b:
+ return -1
+ else:
+ return 0
+ except TypeError as e:
+ # check to see if they're tuple-string
+ # always say strings are less than tuples (to maintain compatibility with python2)
+ str_ind = to_text(e).find('str')
+ tup_ind = to_text(e).find('tuple')
+ if -1 not in (str_ind, tup_ind):
+ if str_ind < tup_ind:
+ return -1
+ elif tup_ind < str_ind:
+ return 1
+ raise
+
+
+def compare_policies(current_policy, new_policy):
+ """ Compares the existing policy and the updated policy
+ Returns True if there is a difference between policies.
+ """
+ return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
+
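+# Illustrative example (not part of the original helper): compare_policies() is
+# order-insensitive, so two policies that differ only in list or key ordering match:
+#
+#     >>> a = {'Version': '2012-10-17',
+#     ...      'Statement': [{'Effect': 'Allow', 'Action': ['s3:GetObject', 's3:PutObject']}]}
+#     >>> b = {'Version': '2012-10-17',
+#     ...      'Statement': [{'Action': ['s3:PutObject', 's3:GetObject'], 'Effect': 'Allow'}]}
+#     >>> compare_policies(a, b)
+#     False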
+
+def sort_json_policy_dict(policy_dict):
+
+ """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
+ different orders will return true
+ Args:
+ policy_dict (dict): Dict representing IAM JSON policy.
+ Basic Usage:
+ >>> my_iam_policy = {'Principle': {'AWS': ["31", "7", "14", "101"]}}
+ >>> sort_json_policy_dict(my_iam_policy)
+ Returns:
+ Dict: Will return a copy of the policy as a Dict but any List will be sorted
+ {
+ 'Principle': {
+ 'AWS': [ '101', '14', '31', '7' ]
+ }
+ }
+ """
+
+ def value_is_list(my_list):
+
+ checked_list = []
+ for item in my_list:
+ if isinstance(item, dict):
+ checked_list.append(sort_json_policy_dict(item))
+ elif isinstance(item, list):
+ checked_list.append(value_is_list(item))
+ else:
+ checked_list.append(item)
+
+ # Sort list. If it's a list of dictionaries, sort by tuple of key-value
+ # pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
+ checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
+ return checked_list
+
+ ordered_policy_dict = {}
+ for key, value in policy_dict.items():
+ if isinstance(value, dict):
+ ordered_policy_dict[key] = sort_json_policy_dict(value)
+ elif isinstance(value, list):
+ ordered_policy_dict[key] = value_is_list(value)
+ else:
+ ordered_policy_dict[key] = value
+
+ return ordered_policy_dict
+
+
+def map_complex_type(complex_type, type_map):
+ """
+ Allows casting elements within a dictionary to a specific type
+ Example of usage:
+
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
+ 'maximum_percent': 'int',
+ 'minimum_healthy_percent': 'int'
+ }
+
+ deployment_configuration = map_complex_type(module.params['deployment_configuration'],
+ DEPLOYMENT_CONFIGURATION_TYPE_MAP)
+
+ This ensures all keys within the root element are cast to valid integers
+ """
+
+ if complex_type is None:
+ return
+ new_type = type(complex_type)()
+ if isinstance(complex_type, dict):
+ for key in complex_type:
+ if key in type_map:
+ if isinstance(type_map[key], list):
+ new_type[key] = map_complex_type(
+ complex_type[key],
+ type_map[key][0])
+ else:
+ new_type[key] = map_complex_type(
+ complex_type[key],
+ type_map[key])
+ else:
+ return complex_type
+ elif isinstance(complex_type, list):
+ for i in range(len(complex_type)):
+ new_type.append(map_complex_type(
+ complex_type[i],
+ type_map))
+ elif type_map:
+ return globals()['__builtins__'][type_map](complex_type)
+ return new_type
+
+
+def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
+ """
+ Compare two dicts of AWS tags. Dicts are expected to have been created using the 'boto3_tag_list_to_ansible_dict' helper function.
+ Two values are returned - the first is a dict of tags to be set, the second is a list of tag keys to remove. Since the AWS APIs differ,
+ these may not be usable out of the box.
+
+ :param current_tags_dict:
+ :param new_tags_dict:
+ :param purge_tags:
+ :return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
+ :return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
+ """
+
+ tag_key_value_pairs_to_set = {}
+ tag_keys_to_unset = []
+
+ for key in current_tags_dict.keys():
+ if key not in new_tags_dict and purge_tags:
+ tag_keys_to_unset.append(key)
+
+ for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
+ if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
+ tag_key_value_pairs_to_set[key] = new_tags_dict[key]
+
+ return tag_key_value_pairs_to_set, tag_keys_to_unset
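+
+# Illustrative example (not part of the original helper): with tags already flattened by
+# boto3_tag_list_to_ansible_dict(), compare_aws_tags() reports what to set and unset:
+#
+#     >>> compare_aws_tags({'Name': 'web01', 'env': 'dev'},
+#     ...                  {'Name': 'web01', 'env': 'prod'}, purge_tags=True)
+#     ({'env': 'prod'}, [])
+#
+# With purge_tags=False, keys present in AWS but missing from the new dict are kept
+# rather than being returned in the unset list.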
diff --git a/test/support/integration/plugins/module_utils/hcloud.py b/test/support/integration/plugins/module_utils/hcloud.py
new file mode 100644
index 0000000000..932b0c5294
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/hcloud.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
+
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible.module_utils.ansible_release import __version__
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+
+try:
+ import hcloud
+
+ HAS_HCLOUD = True
+except ImportError:
+ HAS_HCLOUD = False
+
+
+class Hcloud(object):
+ def __init__(self, module, represent):
+ self.module = module
+ self.represent = represent
+ self.result = {"changed": False, self.represent: None}
+ if not HAS_HCLOUD:
+ module.fail_json(msg=missing_required_lib("hcloud-python"))
+ self._build_client()
+
+ def _build_client(self):
+ self.client = hcloud.Client(
+ token=self.module.params["api_token"],
+ api_endpoint=self.module.params["endpoint"],
+ application_name="ansible-module",
+ application_version=__version__,
+ )
+
+ def _mark_as_changed(self):
+ self.result["changed"] = True
+
+ @staticmethod
+ def base_module_arguments():
+ return {
+ "api_token": {
+ "type": "str",
+ "required": True,
+ "fallback": (env_fallback, ["HCLOUD_TOKEN"]),
+ "no_log": True,
+ },
+ "endpoint": {"type": "str", "default": "https://api.hetzner.cloud/v1"},
+ }
+
+ def _prepare_result(self):
+ """Prepare the result for every module
+
+ :return: dict
+ """
+ return {}
+
+ def get_result(self):
+ if getattr(self, self.represent) is not None:
+ self.result[self.represent] = self._prepare_result()
+ return self.result
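+
+# Illustrative sketch of how a module is expected to build on this base class; the
+# subclass name and resource key below are hypothetical, not part of the original file:
+#
+#     class AnsibleHcloudExample(Hcloud):
+#         def __init__(self, module):
+#             Hcloud.__init__(self, module, "hcloud_example")
+#             self.hcloud_example = None   # populated by the module's own fetch logic
+#
+#         def _prepare_result(self):
+#             return {"id": getattr(self, self.represent).id}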
diff --git a/test/support/integration/plugins/module_utils/k8s/common.py b/test/support/integration/plugins/module_utils/k8s/common.py
new file mode 100644
index 0000000000..d86659f009
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/k8s/common.py
@@ -0,0 +1,290 @@
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+
+import copy
+import json
+import os
+import traceback
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.dict_transformations import recursive_diff
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils._text import to_native
+
+K8S_IMP_ERR = None
+try:
+ import kubernetes
+ import openshift
+ from openshift.dynamic import DynamicClient
+ from openshift.dynamic.exceptions import ResourceNotFoundError, ResourceNotUniqueError
+ HAS_K8S_MODULE_HELPER = True
+ k8s_import_exception = None
+except ImportError as e:
+ HAS_K8S_MODULE_HELPER = False
+ k8s_import_exception = e
+ K8S_IMP_ERR = traceback.format_exc()
+
+YAML_IMP_ERR = None
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ YAML_IMP_ERR = traceback.format_exc()
+ HAS_YAML = False
+
+try:
+ import urllib3
+ urllib3.disable_warnings()
+except ImportError:
+ pass
+
+
+def list_dict_str(value):
+ if isinstance(value, list):
+ return value
+ elif isinstance(value, dict):
+ return value
+ elif isinstance(value, string_types):
+ return value
+ raise TypeError
+
+
+ARG_ATTRIBUTES_BLACKLIST = ('property_path',)
+
+COMMON_ARG_SPEC = {
+ 'state': {
+ 'default': 'present',
+ 'choices': ['present', 'absent'],
+ },
+ 'force': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'resource_definition': {
+ 'type': list_dict_str,
+ 'aliases': ['definition', 'inline']
+ },
+ 'src': {
+ 'type': 'path',
+ },
+ 'kind': {},
+ 'name': {},
+ 'namespace': {},
+ 'api_version': {
+ 'default': 'v1',
+ 'aliases': ['api', 'version'],
+ },
+}
+
+AUTH_ARG_SPEC = {
+ 'kubeconfig': {
+ 'type': 'path',
+ },
+ 'context': {},
+ 'host': {},
+ 'api_key': {
+ 'no_log': True,
+ },
+ 'username': {},
+ 'password': {
+ 'no_log': True,
+ },
+ 'validate_certs': {
+ 'type': 'bool',
+ 'aliases': ['verify_ssl'],
+ },
+ 'ca_cert': {
+ 'type': 'path',
+ 'aliases': ['ssl_ca_cert'],
+ },
+ 'client_cert': {
+ 'type': 'path',
+ 'aliases': ['cert_file'],
+ },
+ 'client_key': {
+ 'type': 'path',
+ 'aliases': ['key_file'],
+ },
+ 'proxy': {},
+ 'persist_config': {
+ 'type': 'bool',
+ },
+}
+
+# Map kubernetes-client parameters to ansible parameters
+AUTH_ARG_MAP = {
+ 'kubeconfig': 'kubeconfig',
+ 'context': 'context',
+ 'host': 'host',
+ 'api_key': 'api_key',
+ 'username': 'username',
+ 'password': 'password',
+ 'verify_ssl': 'validate_certs',
+ 'ssl_ca_cert': 'ca_cert',
+ 'cert_file': 'client_cert',
+ 'key_file': 'client_key',
+ 'proxy': 'proxy',
+ 'persist_config': 'persist_config',
+}
+
+
+class K8sAnsibleMixin(object):
+ _argspec_cache = None
+
+ @property
+ def argspec(self):
+ """
+ Introspect the model properties, and return an Ansible module arg_spec dict.
+ :return: dict
+ """
+ if self._argspec_cache:
+ return self._argspec_cache
+ argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
+ argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
+ self._argspec_cache = argument_spec
+ return self._argspec_cache
+
+ def get_api_client(self, **auth_params):
+ auth_params = auth_params or getattr(self, 'params', {})
+ auth = {}
+
+ # If authorization variables aren't defined, look for them in environment variables
+ for true_name, arg_name in AUTH_ARG_MAP.items():
+ if auth_params.get(arg_name) is None:
+ env_value = os.getenv('K8S_AUTH_{0}'.format(arg_name.upper()), None) or os.getenv('K8S_AUTH_{0}'.format(true_name.upper()), None)
+ if env_value is not None:
+ if AUTH_ARG_SPEC[arg_name].get('type') == 'bool':
+ env_value = env_value.lower() not in ['0', 'false', 'no']
+ auth[true_name] = env_value
+ else:
+ auth[true_name] = auth_params[arg_name]
+
+ def auth_set(*names):
+ return all([auth.get(name) for name in names])
+
+ if auth_set('username', 'password', 'host') or auth_set('api_key', 'host'):
+ # We have enough in the parameters to authenticate, no need to load incluster or kubeconfig
+ pass
+ elif auth_set('kubeconfig') or auth_set('context'):
+ kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config'))
+ else:
+ # First try to do incluster config, then kubeconfig
+ try:
+ kubernetes.config.load_incluster_config()
+ except kubernetes.config.ConfigException:
+ kubernetes.config.load_kube_config(auth.get('kubeconfig'), auth.get('context'), persist_config=auth.get('persist_config'))
+
+ # Override any values in the default configuration with Ansible parameters
+ configuration = kubernetes.client.Configuration()
+ for key, value in iteritems(auth):
+ if key in AUTH_ARG_MAP.keys() and value is not None:
+ if key == 'api_key':
+ setattr(configuration, key, {'authorization': "Bearer {0}".format(value)})
+ else:
+ setattr(configuration, key, value)
+
+ kubernetes.client.Configuration.set_default(configuration)
+ return DynamicClient(kubernetes.client.ApiClient(configuration))
+
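+ # Illustrative note (an assumption about typical use, not part of the original file):
+ # because of the environment-variable fallback above, a module built on this mixin can
+ # authenticate with no explicit parameters when variables such as K8S_AUTH_HOST,
+ # K8S_AUTH_API_KEY and K8S_AUTH_VERIFY_SSL (or K8S_AUTH_VALIDATE_CERTS) are exported;
+ # boolean values are parsed so that '0', 'false' and 'no' count as False.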
+ def find_resource(self, kind, api_version, fail=False):
+ for attribute in ['kind', 'name', 'singular_name']:
+ try:
+ return self.client.resources.get(**{'api_version': api_version, attribute: kind})
+ except (ResourceNotFoundError, ResourceNotUniqueError):
+ pass
+ try:
+ return self.client.resources.get(api_version=api_version, short_names=[kind])
+ except (ResourceNotFoundError, ResourceNotUniqueError):
+ if fail:
+ self.fail(msg='Failed to find exact match for {0}.{1} by [kind, name, singularName, shortNames]'.format(api_version, kind))
+
+ def kubernetes_facts(self, kind, api_version, name=None, namespace=None, label_selectors=None, field_selectors=None):
+ resource = self.find_resource(kind, api_version)
+ if not resource:
+ return dict(resources=[])
+ try:
+ result = resource.get(name=name,
+ namespace=namespace,
+ label_selector=','.join(label_selectors),
+ field_selector=','.join(field_selectors)).to_dict()
+ except openshift.dynamic.exceptions.NotFoundError:
+ return dict(resources=[])
+
+ if 'items' in result:
+ return dict(resources=result['items'])
+ else:
+ return dict(resources=[result])
+
+ def remove_aliases(self):
+ """
+ The helper doesn't know what to do with aliased keys
+ """
+ for k, v in iteritems(self.argspec):
+ if 'aliases' in v:
+ for alias in v['aliases']:
+ if alias in self.params:
+ self.params.pop(alias)
+
+ def load_resource_definitions(self, src):
+ """ Load the requested src path """
+ result = None
+ path = os.path.normpath(src)
+ if not os.path.exists(path):
+ self.fail(msg="Error accessing {0}. Does the file exist?".format(path))
+ try:
+ with open(path, 'r') as f:
+ result = list(yaml.safe_load_all(f))
+ except (IOError, yaml.YAMLError) as exc:
+ self.fail(msg="Error loading resource_definition: {0}".format(exc))
+ return result
+
+ @staticmethod
+ def diff_objects(existing, new):
+ result = dict()
+ diff = recursive_diff(existing, new)
+ if diff:
+ result['before'] = diff[0]
+ result['after'] = diff[1]
+ return not diff, result
+
+
+class KubernetesAnsibleModule(AnsibleModule, K8sAnsibleMixin):
+ resource_definition = None
+ api_version = None
+ kind = None
+
+ def __init__(self, *args, **kwargs):
+
+ kwargs['argument_spec'] = self.argspec
+ AnsibleModule.__init__(self, *args, **kwargs)
+
+ if not HAS_K8S_MODULE_HELPER:
+ self.fail_json(msg=missing_required_lib('openshift'), exception=K8S_IMP_ERR,
+ error=to_native(k8s_import_exception))
+ self.openshift_version = openshift.__version__
+
+ if not HAS_YAML:
+ self.fail_json(msg=missing_required_lib("PyYAML"), exception=YAML_IMP_ERR)
+
+ def execute_module(self):
+ raise NotImplementedError()
+
+ def fail(self, msg=None):
+ self.fail_json(msg=msg)
diff --git a/test/support/integration/plugins/module_utils/k8s/raw.py b/test/support/integration/plugins/module_utils/k8s/raw.py
new file mode 100644
index 0000000000..06272b8158
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/k8s/raw.py
@@ -0,0 +1,519 @@
+#
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+
+import copy
+from datetime import datetime
+from distutils.version import LooseVersion
+import time
+import sys
+import traceback
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.k8s.common import AUTH_ARG_SPEC, COMMON_ARG_SPEC
+from ansible.module_utils.six import string_types
+from ansible.module_utils.k8s.common import KubernetesAnsibleModule
+from ansible.module_utils.common.dict_transformations import dict_merge
+
+
+try:
+ import yaml
+ from openshift.dynamic.exceptions import DynamicApiError, NotFoundError, ConflictError, ForbiddenError, KubernetesValidateMissing
+ import urllib3
+except ImportError:
+ # Exceptions handled in common
+ pass
+
+try:
+ import kubernetes_validate
+ HAS_KUBERNETES_VALIDATE = True
+except ImportError:
+ HAS_KUBERNETES_VALIDATE = False
+
+K8S_CONFIG_HASH_IMP_ERR = None
+try:
+ from openshift.helper.hashes import generate_hash
+ HAS_K8S_CONFIG_HASH = True
+except ImportError:
+ K8S_CONFIG_HASH_IMP_ERR = traceback.format_exc()
+ HAS_K8S_CONFIG_HASH = False
+
+HAS_K8S_APPLY = None
+try:
+ from openshift.dynamic.apply import apply_object
+ HAS_K8S_APPLY = True
+except ImportError:
+ HAS_K8S_APPLY = False
+
+
+class KubernetesRawModule(KubernetesAnsibleModule):
+
+ @property
+ def validate_spec(self):
+ return dict(
+ fail_on_error=dict(type='bool'),
+ version=dict(),
+ strict=dict(type='bool', default=True)
+ )
+
+ @property
+ def condition_spec(self):
+ return dict(
+ type=dict(),
+ status=dict(default=True, choices=[True, False, "Unknown"]),
+ reason=dict()
+ )
+
+ @property
+ def argspec(self):
+ argument_spec = copy.deepcopy(COMMON_ARG_SPEC)
+ argument_spec.update(copy.deepcopy(AUTH_ARG_SPEC))
+ argument_spec['merge_type'] = dict(type='list', choices=['json', 'merge', 'strategic-merge'])
+ argument_spec['wait'] = dict(type='bool', default=False)
+ argument_spec['wait_sleep'] = dict(type='int', default=5)
+ argument_spec['wait_timeout'] = dict(type='int', default=120)
+ argument_spec['wait_condition'] = dict(type='dict', default=None, options=self.condition_spec)
+ argument_spec['validate'] = dict(type='dict', default=None, options=self.validate_spec)
+ argument_spec['append_hash'] = dict(type='bool', default=False)
+ argument_spec['apply'] = dict(type='bool', default=False)
+ return argument_spec
+
+ def __init__(self, k8s_kind=None, *args, **kwargs):
+ self.client = None
+ self.warnings = []
+
+ mutually_exclusive = [
+ ('resource_definition', 'src'),
+ ('merge_type', 'apply'),
+ ]
+
+ KubernetesAnsibleModule.__init__(self, *args,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ **kwargs)
+ self.kind = k8s_kind or self.params.get('kind')
+ self.api_version = self.params.get('api_version')
+ self.name = self.params.get('name')
+ self.namespace = self.params.get('namespace')
+ resource_definition = self.params.get('resource_definition')
+ validate = self.params.get('validate')
+ if validate:
+ if LooseVersion(self.openshift_version) < LooseVersion("0.8.0"):
+ self.fail_json(msg="openshift >= 0.8.0 is required for validate")
+ self.append_hash = self.params.get('append_hash')
+ if self.append_hash:
+ if not HAS_K8S_CONFIG_HASH:
+ self.fail_json(msg=missing_required_lib("openshift >= 0.7.2", reason="for append_hash"),
+ exception=K8S_CONFIG_HASH_IMP_ERR)
+ if self.params['merge_type']:
+ if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
+ self.fail_json(msg=missing_required_lib("openshift >= 0.6.2", reason="for merge_type"))
+ self.apply = self.params.get('apply', False)
+ if self.apply:
+ if not HAS_K8S_APPLY:
+ self.fail_json(msg=missing_required_lib("openshift >= 0.9.2", reason="for apply"))
+
+ if resource_definition:
+ if isinstance(resource_definition, string_types):
+ try:
+ self.resource_definitions = yaml.safe_load_all(resource_definition)
+ except (IOError, yaml.YAMLError) as exc:
+ self.fail(msg="Error loading resource_definition: {0}".format(exc))
+ elif isinstance(resource_definition, list):
+ self.resource_definitions = resource_definition
+ else:
+ self.resource_definitions = [resource_definition]
+ src = self.params.get('src')
+ if src:
+ self.resource_definitions = self.load_resource_definitions(src)
+ try:
+ self.resource_definitions = [item for item in self.resource_definitions if item]
+ except AttributeError:
+ pass
+
+ if not resource_definition and not src:
+ implicit_definition = dict(
+ kind=self.kind,
+ apiVersion=self.api_version,
+ metadata=dict(name=self.name)
+ )
+ if self.namespace:
+ implicit_definition['metadata']['namespace'] = self.namespace
+ self.resource_definitions = [implicit_definition]
+
+ def flatten_list_kind(self, list_resource, definitions):
+ flattened = []
+ parent_api_version = list_resource.group_version if list_resource else None
+ parent_kind = list_resource.kind[:-4] if list_resource else None
+ for definition in definitions.get('items', []):
+ resource = self.find_resource(definition.get('kind', parent_kind), definition.get('apiVersion', parent_api_version), fail=True)
+ flattened.append((resource, self.set_defaults(resource, definition)))
+ return flattened
+
+ def execute_module(self):
+ changed = False
+ results = []
+ try:
+ self.client = self.get_api_client()
+ # Hopefully the kubernetes client will provide its own exception class one day
+ except (urllib3.exceptions.RequestError) as e:
+ self.fail_json(msg="Couldn't connect to Kubernetes: %s" % str(e))
+
+ flattened_definitions = []
+ for definition in self.resource_definitions:
+ kind = definition.get('kind', self.kind)
+ api_version = definition.get('apiVersion', self.api_version)
+ if kind.endswith('List'):
+ resource = self.find_resource(kind, api_version, fail=False)
+ flattened_definitions.extend(self.flatten_list_kind(resource, definition))
+ else:
+ resource = self.find_resource(kind, api_version, fail=True)
+ flattened_definitions.append((resource, definition))
+
+ for (resource, definition) in flattened_definitions:
+ kind = definition.get('kind', self.kind)
+ api_version = definition.get('apiVersion', self.api_version)
+ definition = self.set_defaults(resource, definition)
+ self.warnings = []
+ if self.params['validate'] is not None:
+ self.warnings = self.validate(definition)
+ result = self.perform_action(resource, definition)
+ result['warnings'] = self.warnings
+ changed = changed or result['changed']
+ results.append(result)
+
+ if len(results) == 1:
+ self.exit_json(**results[0])
+
+ self.exit_json(**{
+ 'changed': changed,
+ 'result': {
+ 'results': results
+ }
+ })
+
+ def validate(self, resource):
+ def _prepend_resource_info(resource, msg):
+ return "%s %s: %s" % (resource['kind'], resource['metadata']['name'], msg)
+
+ try:
+ warnings, errors = self.client.validate(resource, self.params['validate'].get('version'), self.params['validate'].get('strict'))
+ except KubernetesValidateMissing:
+ self.fail_json(msg="kubernetes-validate python library is required to validate resources")
+
+ if errors and self.params['validate']['fail_on_error']:
+ self.fail_json(msg="\n".join([_prepend_resource_info(resource, error) for error in errors]))
+ else:
+ return [_prepend_resource_info(resource, msg) for msg in warnings + errors]
+
+ def set_defaults(self, resource, definition):
+ definition['kind'] = resource.kind
+ definition['apiVersion'] = resource.group_version
+ metadata = definition.get('metadata', {})
+ if self.name and not metadata.get('name'):
+ metadata['name'] = self.name
+ if resource.namespaced and self.namespace and not metadata.get('namespace'):
+ metadata['namespace'] = self.namespace
+ definition['metadata'] = metadata
+ return definition
+
+ def perform_action(self, resource, definition):
+ result = {'changed': False, 'result': {}}
+ state = self.params.get('state', None)
+ force = self.params.get('force', False)
+ name = definition['metadata'].get('name')
+ namespace = definition['metadata'].get('namespace')
+ existing = None
+ wait = self.params.get('wait')
+ wait_sleep = self.params.get('wait_sleep')
+ wait_timeout = self.params.get('wait_timeout')
+ wait_condition = None
+ if self.params.get('wait_condition') and self.params['wait_condition'].get('type'):
+ wait_condition = self.params['wait_condition']
+
+ self.remove_aliases()
+
+ try:
+ # ignore append_hash for resources other than ConfigMap and Secret
+ if self.append_hash and definition['kind'] in ['ConfigMap', 'Secret']:
+ name = '%s-%s' % (name, generate_hash(definition))
+ definition['metadata']['name'] = name
+ params = dict(name=name)
+ if namespace:
+ params['namespace'] = namespace
+ existing = resource.get(**params)
+ except NotFoundError:
+ # Remove traceback so that it doesn't show up in later failures
+ try:
+ sys.exc_clear()
+ except AttributeError:
+ # no sys.exc_clear on python3
+ pass
+ except ForbiddenError as exc:
+ if definition['kind'] in ['Project', 'ProjectRequest'] and state != 'absent':
+ return self.create_project_request(definition)
+ self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+ except DynamicApiError as exc:
+ self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+
+ if state == 'absent':
+ result['method'] = "delete"
+ if not existing:
+ # The object already does not exist
+ return result
+ else:
+ # Delete the object
+ result['changed'] = True
+ if not self.check_mode:
+ try:
+ k8s_obj = resource.delete(**params)
+ result['result'] = k8s_obj.to_dict()
+ except DynamicApiError as exc:
+ self.fail_json(msg="Failed to delete object: {0}".format(exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+ if wait:
+ success, resource, duration = self.wait(resource, definition, wait_sleep, wait_timeout, 'absent')
+ result['duration'] = duration
+ if not success:
+ self.fail_json(msg="Resource deletion timed out", **result)
+ return result
+ else:
+ if self.apply:
+ if self.check_mode:
+ ignored, k8s_obj = apply_object(resource, definition)
+ else:
+ try:
+ k8s_obj = resource.apply(definition, namespace=namespace).to_dict()
+ except DynamicApiError as exc:
+ msg = "Failed to apply object: {0}".format(exc.body)
+ if self.warnings:
+ msg += "\n" + "\n ".join(self.warnings)
+ self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
+ success = True
+ result['result'] = k8s_obj
+ if wait:
+ success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
+ if existing:
+ existing = existing.to_dict()
+ else:
+ existing = {}
+ match, diffs = self.diff_objects(existing, result['result'])
+ result['changed'] = not match
+ result['diff'] = diffs
+ result['method'] = 'apply'
+ if not success:
+ self.fail_json(msg="Resource apply timed out", **result)
+ return result
+
+ if not existing:
+ if self.check_mode:
+ k8s_obj = definition
+ else:
+ try:
+ k8s_obj = resource.create(definition, namespace=namespace).to_dict()
+ except ConflictError:
+ # Some resources, like ProjectRequests, can't be created multiple times,
+ # because the resources that they create don't match their kind
+ # In this case we'll mark it as unchanged and warn the user
+ self.warn("{0} was not found, but creating it returned a 409 Conflict error. This can happen \
+ if the resource you are creating does not directly create a resource of the same kind.".format(name))
+ return result
+ except DynamicApiError as exc:
+ msg = "Failed to create object: {0}".format(exc.body)
+ if self.warnings:
+ msg += "\n" + "\n ".join(self.warnings)
+ self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
+ success = True
+ result['result'] = k8s_obj
+ if wait and not self.check_mode:
+ success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
+ result['changed'] = True
+ result['method'] = 'create'
+ if not success:
+ self.fail_json(msg="Resource creation timed out", **result)
+ return result
+
+ match = False
+ diffs = []
+
+ if existing and force:
+ if self.check_mode:
+ k8s_obj = definition
+ else:
+ try:
+ k8s_obj = resource.replace(definition, name=name, namespace=namespace, append_hash=self.append_hash).to_dict()
+ except DynamicApiError as exc:
+ msg = "Failed to replace object: {0}".format(exc.body)
+ if self.warnings:
+ msg += "\n" + "\n ".join(self.warnings)
+ self.fail_json(msg=msg, error=exc.status, status=exc.status, reason=exc.reason)
+ match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
+ success = True
+ result['result'] = k8s_obj
+ if wait:
+ success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
+ match, diffs = self.diff_objects(existing.to_dict(), result['result'])
+ result['changed'] = not match
+ result['method'] = 'replace'
+ result['diff'] = diffs
+ if not success:
+ self.fail_json(msg="Resource replacement timed out", **result)
+ return result
+
+ # Differences exist between the existing obj and requested params
+ if self.check_mode:
+ k8s_obj = dict_merge(existing.to_dict(), definition)
+ else:
+ if LooseVersion(self.openshift_version) < LooseVersion("0.6.2"):
+ k8s_obj, error = self.patch_resource(resource, definition, existing, name,
+ namespace)
+ else:
+ for merge_type in self.params['merge_type'] or ['strategic-merge', 'merge']:
+ k8s_obj, error = self.patch_resource(resource, definition, existing, name,
+ namespace, merge_type=merge_type)
+ if not error:
+ break
+ if error:
+ self.fail_json(**error)
+
+ success = True
+ result['result'] = k8s_obj
+ if wait:
+ success, result['result'], result['duration'] = self.wait(resource, definition, wait_sleep, wait_timeout, condition=wait_condition)
+ match, diffs = self.diff_objects(existing.to_dict(), result['result'])
+ result['changed'] = not match
+ result['method'] = 'patch'
+ result['diff'] = diffs
+
+ if not success:
+ self.fail_json(msg="Resource update timed out", **result)
+ return result
+
+ def patch_resource(self, resource, definition, existing, name, namespace, merge_type=None):
+ try:
+ params = dict(name=name, namespace=namespace)
+ if merge_type:
+ params['content_type'] = 'application/{0}-patch+json'.format(merge_type)
+ k8s_obj = resource.patch(definition, **params).to_dict()
+ match, diffs = self.diff_objects(existing.to_dict(), k8s_obj)
+ error = {}
+ return k8s_obj, {}
+ except DynamicApiError as exc:
+ msg = "Failed to patch object: {0}".format(exc.body)
+ if self.warnings:
+ msg += "\n" + "\n ".join(self.warnings)
+ error = dict(msg=msg, error=exc.status, status=exc.status, reason=exc.reason, warnings=self.warnings)
+ return None, error
+
+ def create_project_request(self, definition):
+ definition['kind'] = 'ProjectRequest'
+ result = {'changed': False, 'result': {}}
+ resource = self.find_resource('ProjectRequest', definition['apiVersion'], fail=True)
+ if not self.check_mode:
+ try:
+ k8s_obj = resource.create(definition)
+ result['result'] = k8s_obj.to_dict()
+ except DynamicApiError as exc:
+ self.fail_json(msg="Failed to create object: {0}".format(exc.body),
+ error=exc.status, status=exc.status, reason=exc.reason)
+ result['changed'] = True
+ result['method'] = 'create'
+ return result
+
+ def _wait_for(self, resource, name, namespace, predicate, sleep, timeout, state):
+ start = datetime.now()
+
+ def _wait_for_elapsed():
+ return (datetime.now() - start).seconds
+
+ response = None
+ while _wait_for_elapsed() < timeout:
+ try:
+ response = resource.get(name=name, namespace=namespace)
+ if predicate(response):
+ if response:
+ return True, response.to_dict(), _wait_for_elapsed()
+ else:
+ return True, {}, _wait_for_elapsed()
+ time.sleep(sleep)
+ except NotFoundError:
+ if state == 'absent':
+ return True, {}, _wait_for_elapsed()
+ if response:
+ response = response.to_dict()
+ return False, response, _wait_for_elapsed()
+
+ def wait(self, resource, definition, sleep, timeout, state='present', condition=None):
+
+ def _deployment_ready(deployment):
+ # FIXME: frustratingly bool(deployment.status) is True even if status is empty
+ # Furthermore deployment.status.availableReplicas == deployment.status.replicas == None if status is empty
+ return (deployment.status and deployment.status.replicas is not None and
+ deployment.status.availableReplicas == deployment.status.replicas and
+ deployment.status.observedGeneration == deployment.metadata.generation)
+
+ def _pod_ready(pod):
+ return (pod.status and pod.status.containerStatuses is not None and
+ all([container.ready for container in pod.status.containerStatuses]))
+
+ def _daemonset_ready(daemonset):
+ return (daemonset.status and daemonset.status.desiredNumberScheduled is not None and
+ daemonset.status.numberReady == daemonset.status.desiredNumberScheduled and
+ daemonset.status.observedGeneration == daemonset.metadata.generation)
+
+ def _custom_condition(resource):
+ if not resource.status or not resource.status.conditions:
+ return False
+ match = [x for x in resource.status.conditions if x.type == condition['type']]
+ if not match:
+ return False
+ # There should never be more than one condition of a specific type
+ match = match[0]
+ if match.status == 'Unknown':
+ if match.status == condition['status']:
+ if 'reason' not in condition:
+ return True
+ if condition['reason']:
+ return match.reason == condition['reason']
+ return False
+ status = True if match.status == 'True' else False
+ if status == condition['status']:
+ if condition.get('reason'):
+ return match.reason == condition['reason']
+ return True
+ return False
+
+ def _resource_absent(resource):
+ return not resource
+
+ waiter = dict(
+ Deployment=_deployment_ready,
+ DaemonSet=_daemonset_ready,
+ Pod=_pod_ready
+ )
+ kind = definition['kind']
+ if state == 'present' and not condition:
+ predicate = waiter.get(kind, lambda x: x)
+ elif state == 'present' and condition:
+ predicate = _custom_condition
+ else:
+ predicate = _resource_absent
+ return self._wait_for(resource, definition['metadata']['name'], definition['metadata'].get('namespace'), predicate, sleep, timeout, state)
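+
+# Illustrative example (not part of the original module): a wait_condition such as
+#
+#     {'type': 'Ready', 'status': True, 'reason': None}
+#
+# is evaluated by _custom_condition() above: the module waits until the resource reports
+# a condition of type Ready whose status is "True"; when 'reason' is set it must match
+# the condition's reason as well.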
diff --git a/test/support/integration/plugins/module_utils/net_tools/nios/api.py b/test/support/integration/plugins/module_utils/net_tools/nios/api.py
new file mode 100644
index 0000000000..2a759033e2
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/net_tools/nios/api.py
@@ -0,0 +1,601 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2018 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+import os
+from functools import partial
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import env_fallback
+
+try:
+ from infoblox_client.connector import Connector
+ from infoblox_client.exceptions import InfobloxException
+ HAS_INFOBLOX_CLIENT = True
+except ImportError:
+ HAS_INFOBLOX_CLIENT = False
+
+# defining nios constants
+NIOS_DNS_VIEW = 'view'
+NIOS_NETWORK_VIEW = 'networkview'
+NIOS_HOST_RECORD = 'record:host'
+NIOS_IPV4_NETWORK = 'network'
+NIOS_IPV6_NETWORK = 'ipv6network'
+NIOS_ZONE = 'zone_auth'
+NIOS_PTR_RECORD = 'record:ptr'
+NIOS_A_RECORD = 'record:a'
+NIOS_AAAA_RECORD = 'record:aaaa'
+NIOS_CNAME_RECORD = 'record:cname'
+NIOS_MX_RECORD = 'record:mx'
+NIOS_SRV_RECORD = 'record:srv'
+NIOS_NAPTR_RECORD = 'record:naptr'
+NIOS_TXT_RECORD = 'record:txt'
+NIOS_NSGROUP = 'nsgroup'
+NIOS_IPV4_FIXED_ADDRESS = 'fixedaddress'
+NIOS_IPV6_FIXED_ADDRESS = 'ipv6fixedaddress'
+NIOS_NEXT_AVAILABLE_IP = 'func:nextavailableip'
+NIOS_IPV4_NETWORK_CONTAINER = 'networkcontainer'
+NIOS_IPV6_NETWORK_CONTAINER = 'ipv6networkcontainer'
+NIOS_MEMBER = 'member'
+
+NIOS_PROVIDER_SPEC = {
+ 'host': dict(fallback=(env_fallback, ['INFOBLOX_HOST'])),
+ 'username': dict(fallback=(env_fallback, ['INFOBLOX_USERNAME'])),
+ 'password': dict(fallback=(env_fallback, ['INFOBLOX_PASSWORD']), no_log=True),
+ 'validate_certs': dict(type='bool', default=False, fallback=(env_fallback, ['INFOBLOX_SSL_VERIFY']), aliases=['ssl_verify']),
+ 'silent_ssl_warnings': dict(type='bool', default=True),
+ 'http_request_timeout': dict(type='int', default=10, fallback=(env_fallback, ['INFOBLOX_HTTP_REQUEST_TIMEOUT'])),
+ 'http_pool_connections': dict(type='int', default=10),
+ 'http_pool_maxsize': dict(type='int', default=10),
+ 'max_retries': dict(type='int', default=3, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES'])),
+ 'wapi_version': dict(default='2.1', fallback=(env_fallback, ['INFOBLOX_WAP_VERSION'])),
+ 'max_results': dict(type='int', default=1000, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES']))
+}
+
+
+def get_connector(*args, **kwargs):
+ ''' Returns an instance of infoblox_client.connector.Connector
+ :params args: positional arguments are silently ignored
+ :params kwargs: dict that is passed to Connector init
+ :returns: Connector
+ '''
+ if not HAS_INFOBLOX_CLIENT:
+ raise Exception('infoblox-client is required but does not appear '
+ 'to be installed. It can be installed using the '
+ 'command `pip install infoblox-client`')
+
+ if not set(kwargs.keys()).issubset(list(NIOS_PROVIDER_SPEC.keys()) + ['ssl_verify']):
+ raise Exception('invalid or unsupported keyword argument for connector')
+ for key, value in iteritems(NIOS_PROVIDER_SPEC):
+ if key not in kwargs:
+ # apply default values from NIOS_PROVIDER_SPEC since we cannot just
+ # assume the provider values are coming from AnsibleModule
+ if 'default' in value:
+ kwargs[key] = value['default']
+
+ # override any values with env variables unless they were
+ # explicitly set
+ env = ('INFOBLOX_%s' % key).upper()
+ if env in os.environ:
+ kwargs[key] = os.environ.get(env)
+
+ if 'validate_certs' in kwargs.keys():
+ kwargs['ssl_verify'] = kwargs['validate_certs']
+ kwargs.pop('validate_certs', None)
+
+ return Connector(kwargs)
+
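+# Illustrative usage (values below are placeholders, not part of the original file):
+#
+#     connector = get_connector(host='nios.example.com', username='admin',
+#                               password='infoblox', wapi_version='2.9')
+#
+# Keys omitted from the call are filled from the NIOS_PROVIDER_SPEC defaults and then
+# overridden by matching INFOBLOX_* environment variables, and validate_certs is
+# translated into the ssl_verify flag expected by infoblox-client.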
+
+def normalize_extattrs(value):
+ ''' Normalize extattrs field to expected format
+ The module accepts extattrs as key/value pairs. This method will
+ transform the key/value pairs into a structure suitable for
+ sending across WAPI in the format of:
+ extattrs: {
+ key: {
+ value: <value>
+ }
+ }
+ '''
+ return dict([(k, {'value': v}) for k, v in iteritems(value)])
+
+
+def flatten_extattrs(value):
+ ''' Flatten the key/value struct for extattrs
+ WAPI returns extattrs field as a dict in form of:
+ extattrs: {
+ key: {
+ value: <value>
+ }
+ }
+ This method will flatten the structure to:
+ extattrs: {
+ key: value
+ }
+ '''
+ return dict([(k, v['value']) for k, v in iteritems(value)])
+
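+# Illustrative example (not part of the original file): the two helpers are inverses:
+#
+#     >>> normalize_extattrs({'Site': 'lab'})
+#     {'Site': {'value': 'lab'}}
+#     >>> flatten_extattrs({'Site': {'value': 'lab'}})
+#     {'Site': 'lab'}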
+
+def member_normalize(member_spec):
+ ''' Transforms the member module arguments into a valid WAPI struct
+ This function will transform the arguments into a structure that
+ is a valid WAPI structure in the format of:
+ {
+ key: <value>,
+ }
+ It will remove any arguments that are set to None since WAPI will error on
+ that condition.
+ The remainder of the value validation is performed by WAPI
+ Some parameters in ib_spec are passed as a list in order to pass the validation for elements.
+ In this function, they are converted to dictionary.
+ '''
+ member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting',
+ 'pre_provisioning', 'network_setting', 'v6_network_setting',
+ 'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting',
+ 'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting']
+ for key in member_spec.keys():
+ if key in member_elements and member_spec[key] is not None:
+ member_spec[key] = member_spec[key][0]
+ if isinstance(member_spec[key], dict):
+ member_spec[key] = member_normalize(member_spec[key])
+ elif isinstance(member_spec[key], list):
+ for x in member_spec[key]:
+ if isinstance(x, dict):
+ x = member_normalize(x)
+ elif member_spec[key] is None:
+ del member_spec[key]
+ return member_spec
+
+
+class WapiBase(object):
+ ''' Base class for implementing Infoblox WAPI API '''
+ provider_spec = {'provider': dict(type='dict', options=NIOS_PROVIDER_SPEC)}
+
+ def __init__(self, provider):
+ self.connector = get_connector(**provider)
+
+ def __getattr__(self, name):
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ if name.startswith('_'):
+ raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
+ return partial(self._invoke_method, name)
+
+ def _invoke_method(self, name, *args, **kwargs):
+ try:
+ method = getattr(self.connector, name)
+ return method(*args, **kwargs)
+ except InfobloxException as exc:
+ if hasattr(self, 'handle_exception'):
+ self.handle_exception(name, exc)
+ else:
+ raise
+
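+# Illustrative note (the method and arguments below are examples, not part of the
+# original file): __getattr__ above turns unknown attribute access into a deferred
+# Connector call, so for instance
+#
+#     wapi.get_object('record:host', {'name': 'host.ansible.com'})
+#
+# is routed through _invoke_method() to connector.get_object(), and any
+# InfobloxException is passed to handle_exception() when the subclass defines one.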
+
+class WapiLookup(WapiBase):
+ ''' Implements WapiBase for lookup plugins '''
+ def handle_exception(self, method_name, exc):
+ if ('text' in exc.response):
+ raise Exception(exc.response['text'])
+ else:
+ raise Exception(exc)
+
+
+class WapiInventory(WapiBase):
+ ''' Implements WapiBase for dynamic inventory script '''
+ pass
+
+
+class WapiModule(WapiBase):
+ ''' Implements WapiBase for executing a NIOS module '''
+ def __init__(self, module):
+ self.module = module
+ provider = module.params['provider']
+ try:
+ super(WapiModule, self).__init__(provider)
+ except Exception as exc:
+ self.module.fail_json(msg=to_text(exc))
+
+ def handle_exception(self, method_name, exc):
+ ''' Handles any exceptions raised
+ This method will be called if an InfobloxException is raised for
+ any call to the instance of Connector, and also in the case of a
+ generic exception. It will then gracefully fail the module.
+ :args exc: instance of InfobloxException
+ '''
+ if ('text' in exc.response):
+ self.module.fail_json(
+ msg=exc.response['text'],
+ type=exc.response['Error'].split(':')[0],
+ code=exc.response.get('code'),
+ operation=method_name
+ )
+ else:
+ self.module.fail_json(msg=to_native(exc))
+
+ def run(self, ib_obj_type, ib_spec):
+ ''' Runs the module and performs configuration tasks
+ :args ib_obj_type: the WAPI object type to operate against
+ :args ib_spec: the specification for the WAPI object as a dict
+ :returns: a results dict
+ '''
+
+ update = new_name = None
+ state = self.module.params['state']
+ if state not in ('present', 'absent'):
+ self.module.fail_json(msg='state must be one of `present`, `absent`, got `%s`' % state)
+
+ result = {'changed': False}
+
+ obj_filter = dict([(k, self.module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+
+ # get object reference
+ ib_obj_ref, update, new_name = self.get_object_ref(self.module, ib_obj_type, obj_filter, ib_spec)
+ proposed_object = {}
+ for key, value in iteritems(ib_spec):
+ if self.module.params[key] is not None:
+ if 'transform' in value:
+ proposed_object[key] = value['transform'](self.module)
+ else:
+ proposed_object[key] = self.module.params[key]
+
+ # If configure_for_dns is set to False, delete the default DNS view from the proposed object, otherwise fail if a non-default view was requested
+ if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\
+ and ib_obj_type == NIOS_HOST_RECORD:
+ del proposed_object['view']
+ elif not proposed_object.get('configure_for_dns') and proposed_object.get('view') != 'default'\
+ and ib_obj_type == NIOS_HOST_RECORD:
+ self.module.fail_json(msg='DNS Bypass is not allowed if DNS view is set other than \'default\'')
+
+ if ib_obj_ref:
+ if len(ib_obj_ref) > 1:
+ for each in ib_obj_ref:
+ # Check for an existing A record whose name matches the input A record, comparing by IP
+ if each.get('ipv4addr') and each.get('ipv4addr') == proposed_object.get('ipv4addr'):
+ current_object = each
+ # Check for an existing host record whose name matches the input host record, comparing by IP
+ elif each.get('ipv4addrs')[0].get('ipv4addr') and each.get('ipv4addrs')[0].get('ipv4addr')\
+ == proposed_object.get('ipv4addrs')[0].get('ipv4addr'):
+ current_object = each
+ # Otherwise fall back to the input filter as the current object
+ else:
+ current_object = obj_filter
+ ref = None
+ else:
+ current_object = ib_obj_ref[0]
+ if 'extattrs' in current_object:
+ current_object['extattrs'] = flatten_extattrs(current_object['extattrs'])
+ if current_object.get('_ref'):
+ ref = current_object.pop('_ref')
+ else:
+ current_object = obj_filter
+ ref = None
+ # checks if the object type is member to normalize the attributes being passed
+ if (ib_obj_type == NIOS_MEMBER):
+ proposed_object = member_normalize(proposed_object)
+
+ # check whether the name field has been updated
+ if update and new_name:
+ proposed_object['name'] = new_name
+
+ check_remove = []
+ if (ib_obj_type == NIOS_HOST_RECORD):
+ # this check is for idempotency: if the same IP address is passed again,
+ # the 'add' param is removed, and the same holds for the 'remove' case.
+ if 'ipv4addrs' in current_object and 'ipv4addrs' in proposed_object:
+ for each in current_object['ipv4addrs']:
+ if each['ipv4addr'] == proposed_object['ipv4addrs'][0]['ipv4addr']:
+ if 'add' in proposed_object['ipv4addrs'][0]:
+ del proposed_object['ipv4addrs'][0]['add']
+ break
+ check_remove += each.values()
+ if proposed_object['ipv4addrs'][0]['ipv4addr'] not in check_remove:
+ if 'remove' in proposed_object['ipv4addrs'][0]:
+ del proposed_object['ipv4addrs'][0]['remove']
+
+ res = None
+ modified = not self.compare_objects(current_object, proposed_object)
+ if 'extattrs' in proposed_object:
+ proposed_object['extattrs'] = normalize_extattrs(proposed_object['extattrs'])
+
+ # Checks if nios_next_ip param is passed in ipv4addrs/ipv4addr args
+ proposed_object = self.check_if_nios_next_ip_exists(proposed_object)
+
+ if state == 'present':
+ if ref is None:
+ if not self.module.check_mode:
+ self.create_object(ib_obj_type, proposed_object)
+ result['changed'] = True
+ # Check if NIOS_MEMBER and the flag to call function create_token is set
+ elif (ib_obj_type == NIOS_MEMBER) and (proposed_object['create_token']):
+ proposed_object = None
+ # the function creates a token that can be used by a pre-provisioned member to join the grid
+ result['api_results'] = self.call_func('create_token', ref, proposed_object)
+ result['changed'] = True
+ elif modified:
+ if 'ipv4addrs' in proposed_object:
+ if ('add' not in proposed_object['ipv4addrs'][0]) and ('remove' not in proposed_object['ipv4addrs'][0]):
+ self.check_if_recordname_exists(obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object)
+
+ if (ib_obj_type in (NIOS_HOST_RECORD, NIOS_NETWORK_VIEW, NIOS_DNS_VIEW)):
+ run_update = True
+ proposed_object = self.on_update(proposed_object, ib_spec)
+ if 'ipv4addrs' in proposed_object:
+ if 'add' in proposed_object['ipv4addrs'][0] or 'remove' in proposed_object['ipv4addrs'][0]:
+ run_update, proposed_object = self.check_if_add_remove_ip_arg_exists(proposed_object)
+ if run_update:
+ res = self.update_object(ref, proposed_object)
+ result['changed'] = True
+ else:
+ res = ref
+ if (ib_obj_type in (NIOS_A_RECORD, NIOS_AAAA_RECORD, NIOS_PTR_RECORD, NIOS_SRV_RECORD)):
+ # popping 'view' key as update of 'view' is not supported with respect to a:record/aaaa:record/srv:record/ptr:record
+ proposed_object = self.on_update(proposed_object, ib_spec)
+ del proposed_object['view']
+ if not self.module.check_mode:
+ res = self.update_object(ref, proposed_object)
+ result['changed'] = True
+ elif 'network_view' in proposed_object:
+ proposed_object.pop('network_view')
+ result['changed'] = True
+ if not self.module.check_mode and res is None:
+ proposed_object = self.on_update(proposed_object, ib_spec)
+ self.update_object(ref, proposed_object)
+ result['changed'] = True
+
+ elif state == 'absent':
+ if ref is not None:
+ if 'ipv4addrs' in proposed_object:
+ if 'remove' in proposed_object['ipv4addrs'][0]:
+ self.check_if_add_remove_ip_arg_exists(proposed_object)
+ self.update_object(ref, proposed_object)
+ result['changed'] = True
+ elif not self.module.check_mode:
+ self.delete_object(ref)
+ result['changed'] = True
+
+ return result
+
+ def check_if_recordname_exists(self, obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object):
+ ''' Send a POST request if the host record input name and the retrieved ref name
+ are the same, but the input IP and the retrieved IP differ'''
+
+ if 'name' in (obj_filter and ib_obj_ref[0]) and ib_obj_type == NIOS_HOST_RECORD:
+ obj_host_name = obj_filter['name']
+ ref_host_name = ib_obj_ref[0]['name']
+ if 'ipv4addrs' in current_object and 'ipv4addrs' in proposed_object:
+ current_ip_addr = current_object['ipv4addrs'][0]['ipv4addr']
+ proposed_ip_addr = proposed_object['ipv4addrs'][0]['ipv4addr']
+ elif 'ipv6addrs' in current_object and 'ipv6addrs' in proposed_object:
+ current_ip_addr = current_object['ipv6addrs'][0]['ipv6addr']
+ proposed_ip_addr = proposed_object['ipv6addrs'][0]['ipv6addr']
+
+ if obj_host_name == ref_host_name and current_ip_addr != proposed_ip_addr:
+ self.create_object(ib_obj_type, proposed_object)
+
+ def check_if_nios_next_ip_exists(self, proposed_object):
+ ''' Check whether the nios_next_ip argument is passed in the ip address while
+ creating a host record; if so, rewrite the proposed object's ipv4addrs to pass
+ func:nextavailableip together with the ip address range, so the host record is
+ created with the next available IP in one call, avoiding any race condition '''
+
+ if 'ipv4addrs' in proposed_object:
+ if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
+ ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
+ proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
+ elif 'ipv4addr' in proposed_object:
+ if 'nios_next_ip' in proposed_object['ipv4addr']:
+ ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
+ proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
+
+ return proposed_object
+
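+ # Rough example: an ipv4addr given as "{'nios_next_ip': '192.168.10.0/24'}" would be
+ # rewritten to NIOS_NEXT_AVAILABLE_IP + ':192.168.10.0/24' (NIOS_NEXT_AVAILABLE_IP is
+ # assumed to be the WAPI func:nextavailableip selector defined earlier in this file),
+ # so the record is created with the next free IP in that range in a single WAPI call.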
+ def check_if_add_remove_ip_arg_exists(self, proposed_object):
+ '''
+ Check whether the add/remove param is set to true and passed in the args; if so,
+ the proposed dictionary is updated to add/remove the IP to/from the existing
+ host_record. If the user passes the argument with a false value, nothing is done.
+ :returns: True if the params were changed based on add/remove, along with the
+ changed proposed_object.
+ '''
+ update = False
+ if 'add' in proposed_object['ipv4addrs'][0]:
+ if proposed_object['ipv4addrs'][0]['add']:
+ proposed_object['ipv4addrs+'] = proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs+'][0]['add']
+ update = True
+ else:
+ del proposed_object['ipv4addrs'][0]['add']
+ elif 'remove' in proposed_object['ipv4addrs'][0]:
+ if proposed_object['ipv4addrs'][0]['remove']:
+ proposed_object['ipv4addrs-'] = proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs-'][0]['remove']
+ update = True
+ else:
+ del proposed_object['ipv4addrs'][0]['remove']
+ return update, proposed_object
+
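+ # Sketch of the transformation: with ipv4addrs=[{'ipv4addr': '10.0.0.5', 'add': True}]
+ # the key is renamed to 'ipv4addrs+' (and the 'add' flag dropped) so WAPI appends the
+ # address to the existing host record; 'remove': True produces 'ipv4addrs-' instead.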
+ def issubset(self, item, objects):
+ ''' Checks if item is a subset of objects
+ :args item: the subset item to validate
+ :args objects: superset list of objects to validate against
+ :returns: True if item is a subset of one entry in objects otherwise
+ this method will return None
+ '''
+ for obj in objects:
+ if isinstance(item, dict):
+ if all(entry in obj.items() for entry in item.items()):
+ return True
+ else:
+ if item in obj:
+ return True
+
+ def compare_objects(self, current_object, proposed_object):
+ for key, proposed_item in iteritems(proposed_object):
+ current_item = current_object.get(key)
+
+ # if proposed has a key that current doesn't then the objects are
+ # not equal and False will be immediately returned
+ if current_item is None:
+ return False
+
+ elif isinstance(proposed_item, list):
+ for subitem in proposed_item:
+ if not self.issubset(subitem, current_item):
+ return False
+
+ elif isinstance(proposed_item, dict):
+ return self.compare_objects(current_item, proposed_item)
+
+ else:
+ if current_item != proposed_item:
+ return False
+
+ return True
+
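+ # Note that the comparison is one-directional: only keys present in proposed_object
+ # are inspected, so extra keys on the current object do not mark it as modified, and
+ # each proposed list item only needs to be a subset of some item in the current list.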
+ def get_object_ref(self, module, ib_obj_type, obj_filter, ib_spec):
+ ''' this function gets the reference object of pre-existing nios objects '''
+
+ update = False
+ old_name = new_name = None
+ if ('name' in obj_filter):
+ # gets and returns the current object based on name/old_name passed
+ try:
+ name_obj = self.module._check_type_dict(obj_filter['name'])
+ old_name = name_obj['old_name']
+ new_name = name_obj['new_name']
+ except TypeError:
+ name = obj_filter['name']
+
+ if old_name and new_name:
+ if (ib_obj_type == NIOS_HOST_RECORD):
+ test_obj_filter = dict([('name', old_name), ('view', obj_filter['view'])])
+ elif (ib_obj_type in (NIOS_AAAA_RECORD, NIOS_A_RECORD)):
+ test_obj_filter = obj_filter
+ else:
+ test_obj_filter = dict([('name', old_name)])
+ # get the object reference
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
+ if ib_obj:
+ obj_filter['name'] = new_name
+ else:
+ test_obj_filter['name'] = new_name
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
+ update = True
+ return ib_obj, update, new_name
+ if (ib_obj_type == NIOS_HOST_RECORD):
+ # to check only by name if dns bypassing is set
+ if not obj_filter['configure_for_dns']:
+ test_obj_filter = dict([('name', name)])
+ else:
+ test_obj_filter = dict([('name', name), ('view', obj_filter['view'])])
+ elif (ib_obj_type == NIOS_IPV4_FIXED_ADDRESS or ib_obj_type == NIOS_IPV6_FIXED_ADDRESS and 'mac' in obj_filter):
+ test_obj_filter = dict([['mac', obj_filter['mac']]])
+ elif (ib_obj_type == NIOS_A_RECORD):
+ # resolves issue where a_record with uppercase name was returning null and was failing
+ test_obj_filter = obj_filter
+ test_obj_filter['name'] = test_obj_filter['name'].lower()
+ # resolves issue where multiple a_records with same name and different IP address
+ try:
+ ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
+ ipaddr = ipaddr_obj['old_ipv4addr']
+ except TypeError:
+ ipaddr = obj_filter['ipv4addr']
+ test_obj_filter['ipv4addr'] = ipaddr
+ elif (ib_obj_type == NIOS_TXT_RECORD):
+ # resolves issue where multiple txt_records with same name and different text
+ test_obj_filter = obj_filter
+ try:
+ text_obj = self.module._check_type_dict(obj_filter['text'])
+ txt = text_obj['old_text']
+ except TypeError:
+ txt = obj_filter['text']
+ test_obj_filter['text'] = txt
+ # if no type-specific filter was built above, use the passed obj_filter as-is
+ else:
+ test_obj_filter = obj_filter
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+ elif (ib_obj_type == NIOS_A_RECORD):
+ # resolves issue where multiple a_records with same name and different IP address
+ test_obj_filter = obj_filter
+ try:
+ ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
+ ipaddr = ipaddr_obj['old_ipv4addr']
+ except TypeError:
+ ipaddr = obj_filter['ipv4addr']
+ test_obj_filter['ipv4addr'] = ipaddr
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+ elif (ib_obj_type == NIOS_TXT_RECORD):
+ # resolves issue where multiple txt_records with same name and different text
+ test_obj_filter = obj_filter
+ try:
+ text_obj = self.module._check_type_dict(obj_filter['text'])
+ txt = text_obj['old_text']
+ except TypeError:
+ txt = obj_filter['text']
+ test_obj_filter['text'] = txt
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+ elif (ib_obj_type == NIOS_ZONE):
+ # del key 'restart_if_needed' as nios_zone get_object fails with the key present
+ temp = ib_spec['restart_if_needed']
+ del ib_spec['restart_if_needed']
+ ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+ # reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref
+ if not ib_obj:
+ ib_spec['restart_if_needed'] = temp
+ elif (ib_obj_type == NIOS_MEMBER):
+ # del key 'create_token' as nios_member get_object fails with the key present
+ temp = ib_spec['create_token']
+ del ib_spec['create_token']
+ ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+ if temp:
+ # reinstate 'create_token' key
+ ib_spec['create_token'] = temp
+ else:
+ ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+ return ib_obj, update, new_name
+
+ def on_update(self, proposed_object, ib_spec):
+ ''' Event called before the update is sent to the API endpoint
+ This method will allow the final proposed object to be changed
+ and/or keys filtered before it is sent to the API endpoint to
+ be processed.
+ :args proposed_object: A dict item that will be encoded and sent
+ to the API endpoint with the updated data structure
+ :returns: updated object to be sent to API endpoint
+ '''
+ keys = set()
+ for key, value in iteritems(proposed_object):
+ update = ib_spec[key].get('update', True)
+ if not update:
+ keys.add(key)
+ return dict([(k, v) for k, v in iteritems(proposed_object) if k not in keys])
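+
+ # For example (illustrative spec): with ib_spec={'name': {}, 'ttl': {'update': False}}
+ # and proposed_object={'name': 'a.test', 'ttl': 300}, on_update() would drop 'ttl'
+ # and only {'name': 'a.test'} would be sent in the update request.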
diff --git a/test/support/integration/plugins/module_utils/network/common/utils.py b/test/support/integration/plugins/module_utils/network/common/utils.py
new file mode 100644
index 0000000000..8031738781
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/network/common/utils.py
@@ -0,0 +1,643 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2016 Red Hat Inc.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Networking tools for network modules only
+
+import re
+import ast
+import operator
+import socket
+import json
+
+from itertools import chain
+
+from ansible.module_utils._text import to_text, to_bytes
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils import basic
+from ansible.module_utils.parsing.convert_bool import boolean
+
+# Backwards compatibility for 3rd party modules
+# TODO(pabelanger): With move to ansible.netcommon, we should clean this code
+ # up and have modules import it directly themselves.
+from ansible.module_utils.common.network import ( # noqa: F401
+ to_bits, is_netmask, is_masklen, to_netmask, to_masklen, to_subnet, to_ipv6_network, VALID_MASKS
+)
+
+try:
+ from jinja2 import Environment, StrictUndefined
+ from jinja2.exceptions import UndefinedError
+ HAS_JINJA2 = True
+except ImportError:
+ HAS_JINJA2 = False
+
+
+OPERATORS = frozenset(['ge', 'gt', 'eq', 'neq', 'lt', 'le'])
+ALIASES = frozenset([('min', 'ge'), ('max', 'le'), ('exactly', 'eq'), ('neq', 'ne')])
+
+
+def to_list(val):
+ if isinstance(val, (list, tuple, set)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+
+def to_lines(stdout):
+ for item in stdout:
+ if isinstance(item, string_types):
+ item = to_text(item).split('\n')
+ yield item
+
+
+def transform_commands(module):
+ transform = ComplexList(dict(
+ command=dict(key=True),
+ output=dict(),
+ prompt=dict(type='list'),
+ answer=dict(type='list'),
+ newline=dict(type='bool', default=True),
+ sendonly=dict(type='bool', default=False),
+ check_all=dict(type='bool', default=False),
+ ), module)
+
+ return transform(module.params['commands'])
+
+
+def sort_list(val):
+ if isinstance(val, list):
+ return sorted(val)
+ return val
+
+
+class Entity(object):
+ """Transforms a dict to with an argument spec
+
+ This class will take a dict and apply an Ansible argument spec to the
+ values. The resulting dict will contain all of the keys in the param
+ with appropriate values set.
+
+ Example::
+
+ argument_spec = dict(
+ command=dict(key=True),
+ display=dict(default='text', choices=['text', 'json']),
+ validate=dict(type='bool')
+ )
+ transform = Entity(module, argument_spec)
+ value = dict(command='foo')
+ result = transform(value)
+ print(result)
+ {'command': 'foo', 'display': 'text', 'validate': None}
+
+ Supported argument spec:
+ * key - specifies how to map a single value to a dict
+ * read_from - read and apply the argument_spec from the module
+ * required - a value is required
+ * type - type of value (uses AnsibleModule type checker)
+ * fallback - implements fallback function
+ * choices - set of valid options
+ * default - default value
+ """
+
+ def __init__(self, module, attrs=None, args=None, keys=None, from_argspec=False):
+ args = [] if args is None else args
+
+ self._attributes = attrs or {}
+ self._module = module
+
+ for arg in args:
+ self._attributes[arg] = dict()
+ if from_argspec:
+ self._attributes[arg]['read_from'] = arg
+ if keys and arg in keys:
+ self._attributes[arg]['key'] = True
+
+ self.attr_names = frozenset(self._attributes.keys())
+
+ _has_key = False
+
+ for name, attr in iteritems(self._attributes):
+ if attr.get('read_from'):
+ if attr['read_from'] not in self._module.argument_spec:
+ module.fail_json(msg='argument %s does not exist' % attr['read_from'])
+ spec = self._module.argument_spec.get(attr['read_from'])
+ for key, value in iteritems(spec):
+ if key not in attr:
+ attr[key] = value
+
+ if attr.get('key'):
+ if _has_key:
+ module.fail_json(msg='only one key value can be specified')
+ _has_key = True
+ attr['required'] = True
+
+ def serialize(self):
+ return self._attributes
+
+ def to_dict(self, value):
+ obj = {}
+ for name, attr in iteritems(self._attributes):
+ if attr.get('key'):
+ obj[name] = value
+ else:
+ obj[name] = attr.get('default')
+ return obj
+
+ def __call__(self, value, strict=True):
+ if not isinstance(value, dict):
+ value = self.to_dict(value)
+
+ if strict:
+ unknown = set(value).difference(self.attr_names)
+ if unknown:
+ self._module.fail_json(msg='invalid keys: %s' % ','.join(unknown))
+
+ for name, attr in iteritems(self._attributes):
+ if value.get(name) is None:
+ value[name] = attr.get('default')
+
+ if attr.get('fallback') and not value.get(name):
+ fallback = attr.get('fallback', (None,))
+ fallback_strategy = fallback[0]
+ fallback_args = []
+ fallback_kwargs = {}
+ if fallback_strategy is not None:
+ for item in fallback[1:]:
+ if isinstance(item, dict):
+ fallback_kwargs = item
+ else:
+ fallback_args = item
+ try:
+ value[name] = fallback_strategy(*fallback_args, **fallback_kwargs)
+ except basic.AnsibleFallbackNotFound:
+ continue
+
+ if attr.get('required') and value.get(name) is None:
+ self._module.fail_json(msg='missing required attribute %s' % name)
+
+ if 'choices' in attr:
+ if value[name] not in attr['choices']:
+ self._module.fail_json(msg='%s must be one of %s, got %s' % (name, ', '.join(attr['choices']), value[name]))
+
+ if value[name] is not None:
+ value_type = attr.get('type', 'str')
+ type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
+ type_checker(value[name])
+ elif value.get(name):
+ value[name] = self._module.params[name]
+
+ return value
+
+
+class EntityCollection(Entity):
+ """Extends ```Entity``` to handle a list of dicts """
+
+ def __call__(self, iterable, strict=True):
+ if iterable is None:
+ iterable = [super(EntityCollection, self).__call__(self._module.params, strict)]
+
+ if not isinstance(iterable, (list, tuple)):
+ self._module.fail_json(msg='value must be an iterable')
+
+ return [(super(EntityCollection, self).__call__(i, strict)) for i in iterable]
+
+
+# these two are for backwards compatibility and can be removed once all of the
+# modules that use them are updated
+class ComplexDict(Entity):
+ def __init__(self, attrs, module, *args, **kwargs):
+ super(ComplexDict, self).__init__(module, attrs, *args, **kwargs)
+
+
+class ComplexList(EntityCollection):
+ def __init__(self, attrs, module, *args, **kwargs):
+ super(ComplexList, self).__init__(module, attrs, *args, **kwargs)
+
+
+def dict_diff(base, comparable):
+ """ Generate a dict object of differences
+
+ This function will compare two dict objects and return the difference
+ between them as a dict object. For scalar values, the key will reflect
+ the updated value. If the key does not exist in `comparable`, then no
+ key will be returned. For lists, the value in comparable will wholly replace
+ the value in base for the key. For dicts, the returned value will only
+ return keys that are different.
+
+ :param base: dict object to base the diff on
+ :param comparable: dict object to compare against base
+
+ :returns: new dict object with differences
+ """
+ if not isinstance(base, dict):
+ raise AssertionError("`base` must be of type <dict>")
+ if not isinstance(comparable, dict):
+ if comparable is None:
+ comparable = dict()
+ else:
+ raise AssertionError("`comparable` must be of type <dict>")
+
+ updates = dict()
+
+ for key, value in iteritems(base):
+ if isinstance(value, dict):
+ item = comparable.get(key)
+ if item is not None:
+ sub_diff = dict_diff(value, comparable[key])
+ if sub_diff:
+ updates[key] = sub_diff
+ else:
+ comparable_value = comparable.get(key)
+ if comparable_value is not None:
+ if sort_list(base[key]) != sort_list(comparable_value):
+ updates[key] = comparable_value
+
+ for key in set(comparable.keys()).difference(base.keys()):
+ updates[key] = comparable.get(key)
+
+ return updates
+
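+ # Worked example: dict_diff({'a': 1, 'b': {'c': 2}}, {'a': 1, 'b': {'c': 3}, 'd': 4})
+ # returns {'b': {'c': 3}, 'd': 4} -- unchanged scalars are omitted, nested dicts are
+ # diffed recursively, and keys present only in `comparable` are carried over.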
+
+def dict_merge(base, other):
+ """ Return a new dict object that combines base and other
+
+ This will create a new dict object that is a combination of the key/value
+ pairs from base and other. When both keys exist, the value will be
+ selected from other. If the value is a list object, the two lists will
+ be combined and duplicate entries removed.
+
+ :param base: dict object to serve as base
+ :param other: dict object to combine with base
+
+ :returns: new combined dict object
+ """
+ if not isinstance(base, dict):
+ raise AssertionError("`base` must be of type <dict>")
+ if not isinstance(other, dict):
+ raise AssertionError("`other` must be of type <dict>")
+
+ combined = dict()
+
+ for key, value in iteritems(base):
+ if isinstance(value, dict):
+ if key in other:
+ item = other.get(key)
+ if item is not None:
+ if isinstance(other[key], Mapping):
+ combined[key] = dict_merge(value, other[key])
+ else:
+ combined[key] = other[key]
+ else:
+ combined[key] = item
+ else:
+ combined[key] = value
+ elif isinstance(value, list):
+ if key in other:
+ item = other.get(key)
+ if item is not None:
+ try:
+ combined[key] = list(set(chain(value, item)))
+ except TypeError:
+ value.extend([i for i in item if i not in value])
+ combined[key] = value
+ else:
+ combined[key] = item
+ else:
+ combined[key] = value
+ else:
+ if key in other:
+ other_value = other.get(key)
+ if other_value is not None:
+ if sort_list(base[key]) != sort_list(other_value):
+ combined[key] = other_value
+ else:
+ combined[key] = value
+ else:
+ combined[key] = other_value
+ else:
+ combined[key] = value
+
+ for key in set(other.keys()).difference(base.keys()):
+ combined[key] = other.get(key)
+
+ return combined
+
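+ # Worked example: dict_merge({'a': [1, 2], 'b': 1}, {'a': [2, 3], 'c': 5}) returns
+ # {'a': [1, 2, 3], 'b': 1, 'c': 5} (list order may vary because a set is used to
+ # deduplicate the combined list entries).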
+
+def param_list_to_dict(param_list, unique_key="name", remove_key=True):
+ """Rotates a list of dictionaries to be a dictionary of dictionaries.
+
+ :param param_list: The aforementioned list of dictionaries
+ :param unique_key: The name of a key which is present and unique in all of param_list's dictionaries. The value
+ behind this key will be the key each dictionary can be found at in the new root dictionary
+ :param remove_key: If True, remove unique_key from the individual dictionaries before returning.
+ """
+ param_dict = {}
+ for params in param_list:
+ params = params.copy()
+ if remove_key:
+ name = params.pop(unique_key)
+ else:
+ name = params.get(unique_key)
+ param_dict[name] = params
+
+ return param_dict
+
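+ # Worked example: param_list_to_dict([{'name': 'eth0', 'mtu': 1500},
+ # {'name': 'eth1', 'mtu': 9000}]) returns {'eth0': {'mtu': 1500}, 'eth1': {'mtu': 9000}};
+ # with remove_key=False the 'name' key would be kept inside each inner dict.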
+
+def conditional(expr, val, cast=None):
+ match = re.match(r'^(.+)\((.+)\)$', str(expr), re.I)
+ if match:
+ op, arg = match.groups()
+ else:
+ op = 'eq'
+ if ' ' in str(expr):
+ raise AssertionError('invalid expression: cannot contain spaces')
+ arg = expr
+
+ if cast is None and val is not None:
+ arg = type(val)(arg)
+ elif callable(cast):
+ arg = cast(arg)
+ val = cast(val)
+
+ op = next((oper for alias, oper in ALIASES if op == alias), op)
+
+ if not hasattr(operator, op) and op not in OPERATORS:
+ raise ValueError('unknown operator: %s' % op)
+
+ func = getattr(operator, op)
+ return func(val, arg)
+
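+ # For example, conditional('ge(2)', 3) is True and conditional('5', 5) is True (a
+ # bare value defaults to the 'eq' operator); the argument is cast to the type of
+ # `val` unless an explicit `cast` callable is supplied.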
+
+def ternary(value, true_val, false_val):
+ ''' value ? true_val : false_val '''
+ if value:
+ return true_val
+ else:
+ return false_val
+
+
+def remove_default_spec(spec):
+ for item in spec:
+ if 'default' in spec[item]:
+ del spec[item]['default']
+
+
+def validate_ip_address(address):
+ try:
+ socket.inet_aton(address)
+ except socket.error:
+ return False
+ return address.count('.') == 3
+
+
+def validate_ip_v6_address(address):
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ except socket.error:
+ return False
+ return True
+
+
+def validate_prefix(prefix):
+ if prefix and not 0 <= int(prefix) <= 32:
+ return False
+ return True
+
+
+def load_provider(spec, args):
+ provider = args.get('provider') or {}
+ for key, value in iteritems(spec):
+ if key not in provider:
+ if 'fallback' in value:
+ provider[key] = _fallback(value['fallback'])
+ elif 'default' in value:
+ provider[key] = value['default']
+ else:
+ provider[key] = None
+ if 'authorize' in provider:
+ # Coerce authorize to a boolean if a string has somehow snuck in.
+ provider['authorize'] = boolean(provider['authorize'] or False)
+ args['provider'] = provider
+ return provider
+
+
+def _fallback(fallback):
+ strategy = fallback[0]
+ args = []
+ kwargs = {}
+
+ for item in fallback[1:]:
+ if isinstance(item, dict):
+ kwargs = item
+ else:
+ args = item
+ try:
+ return strategy(*args, **kwargs)
+ except basic.AnsibleFallbackNotFound:
+ pass
+
+
+def generate_dict(spec):
+ """
+ Generate dictionary which is in sync with argspec
+
+ :param spec: A dictionary that is the argspec of the module
+ :rtype: A dictionary
+ :returns: A dictionary in sync with argspec with default value
+ """
+ obj = {}
+ if not spec:
+ return obj
+
+ for key, val in iteritems(spec):
+ if 'default' in val:
+ dct = {key: val['default']}
+ elif 'type' in val and val['type'] == 'dict':
+ dct = {key: generate_dict(val['options'])}
+ else:
+ dct = {key: None}
+ obj.update(dct)
+ return obj
+
+
+def parse_conf_arg(cfg, arg):
+ """
+ Parse config based on argument
+
+ :param cfg: A text string which is a line of configuration.
+ :param arg: A text string which is to be matched.
+ :rtype: A text string
+ :returns: A text string if match is found
+ """
+ match = re.search(r'%s (.+)(\n|$)' % arg, cfg, re.M)
+ if match:
+ result = match.group(1).strip()
+ else:
+ result = None
+ return result
+
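+ # For example, parse_conf_arg('interface Eth1\n description uplink\n mtu 9000\n', 'mtu')
+ # returns '9000', while a config with no matching line returns None.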
+
+def parse_conf_cmd_arg(cfg, cmd, res1, res2=None, delete_str='no'):
+ """
+ Parse config based on command
+
+ :param cfg: A text string which is a line of configuration.
+ :param cmd: A text string which is the command to be matched
+ :param res1: A text string to be returned if the command is present
+ :param res2: A text string to be returned if the negate command
+ is present
+ :param delete_str: A text string to identify the start of the
+ negate command
+ :rtype: A text string
+ :returns: A text string if match is found
+ """
+ match = re.search(r'\n\s+%s(\n|$)' % cmd, cfg)
+ if match:
+ return res1
+ if res2 is not None:
+ match = re.search(r'\n\s+%s %s(\n|$)' % (delete_str, cmd), cfg)
+ if match:
+ return res2
+ return None
+
+
+def get_xml_conf_arg(cfg, path, data='text'):
+ """
+ :param cfg: The top level configuration lxml Element tree object
+ :param path: The relative xpath w.r.t to top level element (cfg)
+ to be searched in the xml hierarchy
+ :param data: The type of data to be returned for the matched xml node.
+ Valid values are text, tag, attrib, with default as text.
+ :return: Returns the required type for the matched xml node or else None
+ """
+ match = cfg.xpath(path)
+ if len(match):
+ if data == 'tag':
+ result = getattr(match[0], 'tag')
+ elif data == 'attrib':
+ result = getattr(match[0], 'attrib')
+ else:
+ result = getattr(match[0], 'text')
+ else:
+ result = None
+ return result
+
+
+def remove_empties(cfg_dict):
+ """
+ Generate final config dictionary
+
+ :param cfg_dict: A dictionary parsed in the facts system
+ :rtype: A dictionary
+ :returns: A dictionary by eliminating keys that have null values
+ """
+ final_cfg = {}
+ if not cfg_dict:
+ return final_cfg
+
+ for key, val in iteritems(cfg_dict):
+ dct = None
+ if isinstance(val, dict):
+ child_val = remove_empties(val)
+ if child_val:
+ dct = {key: child_val}
+ elif (isinstance(val, list) and val
+ and all([isinstance(x, dict) for x in val])):
+ child_val = [remove_empties(x) for x in val]
+ if child_val:
+ dct = {key: child_val}
+ elif val not in [None, [], {}, (), '']:
+ dct = {key: val}
+ if dct:
+ final_cfg.update(dct)
+ return final_cfg
+
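+ # For example, remove_empties({'a': 1, 'b': None, 'c': {'d': None}, 'e': []}) returns
+ # {'a': 1}: None values, empty containers and empty nested dicts are all stripped.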
+
+def validate_config(spec, data):
+ """
+ Validate the input data against the AnsibleModule spec format
+ :param spec: Ansible argument spec
+ :param data: Data to be validated
+ :return:
+ """
+ params = basic._ANSIBLE_ARGS
+ basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': data}))
+ validated_data = basic.AnsibleModule(spec).params
+ basic._ANSIBLE_ARGS = params
+ return validated_data
+
+
+def search_obj_in_list(name, lst, key='name'):
+ if not lst:
+ return None
+ else:
+ for item in lst:
+ if item.get(key) == name:
+ return item
+
+
+class Template:
+
+ def __init__(self):
+ if not HAS_JINJA2:
+ raise ImportError("jinja2 is required but does not appear to be installed. "
+ "It can be installed using `pip install jinja2`")
+
+ self.env = Environment(undefined=StrictUndefined)
+ self.env.filters.update({'ternary': ternary})
+
+ def __call__(self, value, variables=None, fail_on_undefined=True):
+ variables = variables or {}
+
+ if not self.contains_vars(value):
+ return value
+
+ try:
+ value = self.env.from_string(value).render(variables)
+ except UndefinedError:
+ if not fail_on_undefined:
+ return None
+ raise
+
+ if value:
+ try:
+ return ast.literal_eval(value)
+ except Exception:
+ return str(value)
+ else:
+ return None
+
+ def contains_vars(self, data):
+ if isinstance(data, string_types):
+ for marker in (self.env.block_start_string, self.env.variable_start_string, self.env.comment_start_string):
+ if marker in data:
+ return True
+ return False
diff --git a/test/support/integration/plugins/module_utils/vmware.py b/test/support/integration/plugins/module_utils/vmware.py
new file mode 100644
index 0000000000..765e8c18f2
--- /dev/null
+++ b/test/support/integration/plugins/module_utils/vmware.py
@@ -0,0 +1,1630 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
+# Copyright: (c) 2018, Ansible Project
+# Copyright: (c) 2018, James E. King III (@jeking3) <jking@apache.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import atexit
+import ansible.module_utils.common._collections_compat as collections_compat
+import json
+import os
+import re
+import ssl
+import time
+import traceback
+from random import randint
+from distutils.version import StrictVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ # requests is required for exception handling of the ConnectionError
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+PYVMOMI_IMP_ERR = None
+try:
+ from pyVim import connect
+ from pyVmomi import vim, vmodl, VmomiSupport
+ HAS_PYVMOMI = True
+ HAS_PYVMOMIJSON = hasattr(VmomiSupport, 'VmomiJSONEncoder')
+except ImportError:
+ PYVMOMI_IMP_ERR = traceback.format_exc()
+ HAS_PYVMOMI = False
+ HAS_PYVMOMIJSON = False
+
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.six import integer_types, iteritems, string_types, raise_from
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+
+
+class TaskError(Exception):
+ def __init__(self, *args, **kwargs):
+ super(TaskError, self).__init__(*args, **kwargs)
+
+
+def wait_for_task(task, max_backoff=64, timeout=3600):
+ """Wait for given task using exponential back-off algorithm.
+
+ Args:
+ task: VMware task object
+ max_backoff: Maximum amount of sleep time in seconds
+ timeout: Timeout for the given task in seconds
+
+ Returns: Tuple with True and result for successful task
+ Raises: TaskError on failure
+ """
+ failure_counter = 0
+ start_time = time.time()
+
+ while True:
+ if time.time() - start_time >= timeout:
+ raise TaskError("Timeout")
+ if task.info.state == vim.TaskInfo.State.success:
+ return True, task.info.result
+ if task.info.state == vim.TaskInfo.State.error:
+ error_msg = task.info.error
+ host_thumbprint = None
+ try:
+ error_msg = error_msg.msg
+ if hasattr(task.info.error, 'thumbprint'):
+ host_thumbprint = task.info.error.thumbprint
+ except AttributeError:
+ pass
+ finally:
+ raise_from(TaskError(error_msg, host_thumbprint), task.info.error)
+ if task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:
+ sleep_time = min(2 ** failure_counter + randint(1, 1000) / 1000, max_backoff)
+ time.sleep(sleep_time)
+ failure_counter += 1
+
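+ # Typical call pattern (a sketch): `changed, task_result = wait_for_task(task)`; on
+ # success it returns (True, task.info.result), while a failed or timed-out task
+ # raises TaskError for the caller to handle.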
+
+def wait_for_vm_ip(content, vm, timeout=300):
+ facts = dict()
+ interval = 15
+ while timeout > 0:
+ _facts = gather_vm_facts(content, vm)
+ if _facts['ipv4'] or _facts['ipv6']:
+ facts = _facts
+ break
+ time.sleep(interval)
+ timeout -= interval
+
+ return facts
+
+
+def find_obj(content, vimtype, name, first=True, folder=None):
+ container = content.viewManager.CreateContainerView(folder or content.rootFolder, recursive=True, type=vimtype)
+ # Get all objects matching type (and name if given)
+ obj_list = [obj for obj in container.view if not name or to_text(obj.name) == to_text(name)]
+ container.Destroy()
+
+ # Return first match or None
+ if first:
+ if obj_list:
+ return obj_list[0]
+ return None
+
+ # Return all matching objects or empty list
+ return obj_list
+
+
+def find_dvspg_by_name(dv_switch, portgroup_name):
+ portgroup_name = quote_obj_name(portgroup_name)
+ portgroups = dv_switch.portgroup
+
+ for pg in portgroups:
+ if pg.name == portgroup_name:
+ return pg
+
+ return None
+
+
+def find_object_by_name(content, name, obj_type, folder=None, recurse=True):
+ if not isinstance(obj_type, list):
+ obj_type = [obj_type]
+
+ objects = get_all_objs(content, obj_type, folder=folder, recurse=recurse)
+ for obj in objects:
+ if obj.name == name:
+ return obj
+
+ return None
+
+
+def find_cluster_by_name(content, cluster_name, datacenter=None):
+
+ if datacenter and hasattr(datacenter, 'hostFolder'):
+ folder = datacenter.hostFolder
+ else:
+ folder = content.rootFolder
+
+ return find_object_by_name(content, cluster_name, [vim.ClusterComputeResource], folder=folder)
+
+
+def find_datacenter_by_name(content, datacenter_name):
+ return find_object_by_name(content, datacenter_name, [vim.Datacenter])
+
+
+def get_parent_datacenter(obj):
+ """ Walk the parent tree to find the objects datacenter """
+ if isinstance(obj, vim.Datacenter):
+ return obj
+ datacenter = None
+ while True:
+ if not hasattr(obj, 'parent'):
+ break
+ obj = obj.parent
+ if isinstance(obj, vim.Datacenter):
+ datacenter = obj
+ break
+ return datacenter
+
+
+def find_datastore_by_name(content, datastore_name, datacenter_name=None):
+ return find_object_by_name(content, datastore_name, [vim.Datastore], datacenter_name)
+
+
+def find_folder_by_name(content, folder_name):
+ return find_object_by_name(content, folder_name, [vim.Folder])
+
+
+def find_dvs_by_name(content, switch_name, folder=None):
+ return find_object_by_name(content, switch_name, [vim.DistributedVirtualSwitch], folder=folder)
+
+
+def find_hostsystem_by_name(content, hostname):
+ return find_object_by_name(content, hostname, [vim.HostSystem])
+
+
+def find_resource_pool_by_name(content, resource_pool_name):
+ return find_object_by_name(content, resource_pool_name, [vim.ResourcePool])
+
+
+def find_resource_pool_by_cluster(content, resource_pool_name='Resources', cluster=None):
+ return find_object_by_name(content, resource_pool_name, [vim.ResourcePool], folder=cluster)
+
+
+def find_network_by_name(content, network_name):
+ return find_object_by_name(content, quote_obj_name(network_name), [vim.Network])
+
+
+def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None,
+ cluster=None, folder=None, match_first=False):
+ """ UUID is unique to a VM, every other id returns the first match. """
+ si = content.searchIndex
+ vm = None
+
+ if vm_id_type == 'dns_name':
+ vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
+ elif vm_id_type == 'uuid':
+ # Search By BIOS UUID rather than instance UUID
+ vm = si.FindByUuid(datacenter=datacenter, instanceUuid=False, uuid=vm_id, vmSearch=True)
+ elif vm_id_type == 'instance_uuid':
+ vm = si.FindByUuid(datacenter=datacenter, instanceUuid=True, uuid=vm_id, vmSearch=True)
+ elif vm_id_type == 'ip':
+ vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True)
+ elif vm_id_type == 'vm_name':
+ folder = None
+ if cluster:
+ folder = cluster
+ elif datacenter:
+ folder = datacenter.hostFolder
+ vm = find_vm_by_name(content, vm_id, folder)
+ elif vm_id_type == 'inventory_path':
+ searchpath = folder
+ # get all objects for this path
+ f_obj = si.FindByInventoryPath(searchpath)
+ if f_obj:
+ if isinstance(f_obj, vim.Datacenter):
+ f_obj = f_obj.vmFolder
+ for c_obj in f_obj.childEntity:
+ if not isinstance(c_obj, vim.VirtualMachine):
+ continue
+ if c_obj.name == vm_id:
+ vm = c_obj
+ if match_first:
+ break
+ return vm
+
+
+def find_vm_by_name(content, vm_name, folder=None, recurse=True):
+ return find_object_by_name(content, vm_name, [vim.VirtualMachine], folder=folder, recurse=recurse)
+
+
+def find_host_portgroup_by_name(host, portgroup_name):
+
+ for portgroup in host.config.network.portgroup:
+ if portgroup.spec.name == portgroup_name:
+ return portgroup
+ return None
+
+
+def compile_folder_path_for_object(vobj):
+ """ make a /vm/foo/bar/baz like folder path for an object """
+
+ paths = []
+ if isinstance(vobj, vim.Folder):
+ paths.append(vobj.name)
+
+ thisobj = vobj
+ while hasattr(thisobj, 'parent'):
+ thisobj = thisobj.parent
+ try:
+ moid = thisobj._moId
+ except AttributeError:
+ moid = None
+ if moid in ['group-d1', 'ha-folder-root']:
+ break
+ if isinstance(thisobj, vim.Folder):
+ paths.append(thisobj.name)
+ paths.reverse()
+ return '/' + '/'.join(paths)
+
+
+def _get_vm_prop(vm, attributes):
+ """Safely get a property or return None"""
+ result = vm
+ for attribute in attributes:
+ try:
+ result = getattr(result, attribute)
+ except (AttributeError, IndexError):
+ return None
+ return result
+
+
+def gather_vm_facts(content, vm):
+ """ Gather facts from vim.VirtualMachine object. """
+ facts = {
+ 'module_hw': True,
+ 'hw_name': vm.config.name,
+ 'hw_power_status': vm.summary.runtime.powerState,
+ 'hw_guest_full_name': vm.summary.guest.guestFullName,
+ 'hw_guest_id': vm.summary.guest.guestId,
+ 'hw_product_uuid': vm.config.uuid,
+ 'hw_processor_count': vm.config.hardware.numCPU,
+ 'hw_cores_per_socket': vm.config.hardware.numCoresPerSocket,
+ 'hw_memtotal_mb': vm.config.hardware.memoryMB,
+ 'hw_interfaces': [],
+ 'hw_datastores': [],
+ 'hw_files': [],
+ 'hw_esxi_host': None,
+ 'hw_guest_ha_state': None,
+ 'hw_is_template': vm.config.template,
+ 'hw_folder': None,
+ 'hw_version': vm.config.version,
+ 'instance_uuid': vm.config.instanceUuid,
+ 'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),
+ 'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),
+ 'guest_question': vm.summary.runtime.question,
+ 'guest_consolidation_needed': vm.summary.runtime.consolidationNeeded,
+ 'ipv4': None,
+ 'ipv6': None,
+ 'annotation': vm.config.annotation,
+ 'customvalues': {},
+ 'snapshots': [],
+ 'current_snapshot': None,
+ 'vnc': {},
+ 'moid': vm._moId,
+ 'vimref': "vim.VirtualMachine:%s" % vm._moId,
+ }
+
+ # facts that may or may not exist
+ if vm.summary.runtime.host:
+ try:
+ host = vm.summary.runtime.host
+ facts['hw_esxi_host'] = host.summary.config.name
+ facts['hw_cluster'] = host.parent.name if host.parent and isinstance(host.parent, vim.ClusterComputeResource) else None
+
+ except vim.fault.NoPermission:
+ # User does not have read permission for the host system,
+ # proceed without this value. This value does not contribute or hamper
+ # provisioning or power management operations.
+ pass
+ if vm.summary.runtime.dasVmProtection:
+ facts['hw_guest_ha_state'] = vm.summary.runtime.dasVmProtection.dasProtected
+
+ datastores = vm.datastore
+ for ds in datastores:
+ facts['hw_datastores'].append(ds.info.name)
+
+ try:
+ files = vm.config.files
+ layout = vm.layout
+ if files:
+ facts['hw_files'] = [files.vmPathName]
+ for item in layout.snapshot:
+ for snap in item.snapshotFile:
+ if 'vmsn' in snap:
+ facts['hw_files'].append(snap)
+ for item in layout.configFile:
+ facts['hw_files'].append(os.path.join(os.path.dirname(files.vmPathName), item))
+ for item in vm.layout.logFile:
+ facts['hw_files'].append(os.path.join(files.logDirectory, item))
+ for item in vm.layout.disk:
+ for disk in item.diskFile:
+ facts['hw_files'].append(disk)
+ except Exception:
+ pass
+
+ facts['hw_folder'] = PyVmomi.get_vm_path(content, vm)
+
+ cfm = content.customFieldsManager
+ # Resolve custom values
+ for value_obj in vm.summary.customValue:
+ kn = value_obj.key
+ if cfm is not None and cfm.field:
+ for f in cfm.field:
+ if f.key == value_obj.key:
+ kn = f.name
+ # Exit the loop immediately, we found it
+ break
+
+ facts['customvalues'][kn] = value_obj.value
+
+ net_dict = {}
+ vmnet = _get_vm_prop(vm, ('guest', 'net'))
+ if vmnet:
+ for device in vmnet:
+ if device.deviceConfigId > 0:
+ net_dict[device.macAddress] = list(device.ipAddress)
+
+ if vm.guest.ipAddress:
+ if ':' in vm.guest.ipAddress:
+ facts['ipv6'] = vm.guest.ipAddress
+ else:
+ facts['ipv4'] = vm.guest.ipAddress
+
+ ethernet_idx = 0
+ for entry in vm.config.hardware.device:
+ if not hasattr(entry, 'macAddress'):
+ continue
+
+ if entry.macAddress:
+ mac_addr = entry.macAddress
+ mac_addr_dash = mac_addr.replace(':', '-')
+ else:
+ mac_addr = mac_addr_dash = None
+
+ if (hasattr(entry, 'backing') and hasattr(entry.backing, 'port') and
+ hasattr(entry.backing.port, 'portKey') and hasattr(entry.backing.port, 'portgroupKey')):
+ port_group_key = entry.backing.port.portgroupKey
+ port_key = entry.backing.port.portKey
+ else:
+ port_group_key = None
+ port_key = None
+
+ factname = 'hw_eth' + str(ethernet_idx)
+ facts[factname] = {
+ 'addresstype': entry.addressType,
+ 'label': entry.deviceInfo.label,
+ 'macaddress': mac_addr,
+ 'ipaddresses': net_dict.get(entry.macAddress, None),
+ 'macaddress_dash': mac_addr_dash,
+ 'summary': entry.deviceInfo.summary,
+ 'portgroup_portkey': port_key,
+ 'portgroup_key': port_group_key,
+ }
+ facts['hw_interfaces'].append('eth' + str(ethernet_idx))
+ ethernet_idx += 1
+
+ snapshot_facts = list_snapshots(vm)
+ if 'snapshots' in snapshot_facts:
+ facts['snapshots'] = snapshot_facts['snapshots']
+ facts['current_snapshot'] = snapshot_facts['current_snapshot']
+
+ facts['vnc'] = get_vnc_extraconfig(vm)
+ return facts
+
+
+def deserialize_snapshot_obj(obj):
+ return {'id': obj.id,
+ 'name': obj.name,
+ 'description': obj.description,
+ 'creation_time': obj.createTime,
+ 'state': obj.state}
+
+
+def list_snapshots_recursively(snapshots):
+ snapshot_data = []
+ for snapshot in snapshots:
+ snapshot_data.append(deserialize_snapshot_obj(snapshot))
+ snapshot_data = snapshot_data + list_snapshots_recursively(snapshot.childSnapshotList)
+ return snapshot_data
+
+
+def get_current_snap_obj(snapshots, snapob):
+ snap_obj = []
+ for snapshot in snapshots:
+ if snapshot.snapshot == snapob:
+ snap_obj.append(snapshot)
+ snap_obj = snap_obj + get_current_snap_obj(snapshot.childSnapshotList, snapob)
+ return snap_obj
+
+
+def list_snapshots(vm):
+ result = {}
+ snapshot = _get_vm_prop(vm, ('snapshot',))
+ if not snapshot:
+ return result
+ if vm.snapshot is None:
+ return result
+
+ result['snapshots'] = list_snapshots_recursively(vm.snapshot.rootSnapshotList)
+ current_snapref = vm.snapshot.currentSnapshot
+ current_snap_obj = get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref)
+ if current_snap_obj:
+ result['current_snapshot'] = deserialize_snapshot_obj(current_snap_obj[0])
+ else:
+ result['current_snapshot'] = dict()
+ return result
+
+
+def get_vnc_extraconfig(vm):
+ result = {}
+ for opts in vm.config.extraConfig:
+ for optkeyname in ['enabled', 'ip', 'port', 'password']:
+ if opts.key.lower() == "remotedisplay.vnc." + optkeyname:
+ result[optkeyname] = opts.value
+ return result
+
+
+def vmware_argument_spec():
+ return dict(
+ hostname=dict(type='str',
+ required=False,
+ fallback=(env_fallback, ['VMWARE_HOST']),
+ ),
+ username=dict(type='str',
+ aliases=['user', 'admin'],
+ required=False,
+ fallback=(env_fallback, ['VMWARE_USER'])),
+ password=dict(type='str',
+ aliases=['pass', 'pwd'],
+ required=False,
+ no_log=True,
+ fallback=(env_fallback, ['VMWARE_PASSWORD'])),
+ port=dict(type='int',
+ default=443,
+ fallback=(env_fallback, ['VMWARE_PORT'])),
+ validate_certs=dict(type='bool',
+ required=False,
+ default=True,
+ fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS'])
+ ),
+ proxy_host=dict(type='str',
+ required=False,
+ default=None,
+ fallback=(env_fallback, ['VMWARE_PROXY_HOST'])),
+ proxy_port=dict(type='int',
+ required=False,
+ default=None,
+ fallback=(env_fallback, ['VMWARE_PROXY_PORT'])),
+ )
+
+
+def connect_to_api(module, disconnect_atexit=True, return_si=False, hostname=None, username=None, password=None, port=None, validate_certs=None):
+ hostname = hostname if hostname else module.params['hostname']
+ username = username if username else module.params['username']
+ password = password if password else module.params['password']
+ port = port if port else module.params.get('port', 443)
+ validate_certs = validate_certs if validate_certs else module.params['validate_certs']
+
+ if not hostname:
+ module.fail_json(msg="Hostname parameter is missing."
+ " Please specify this parameter in task or"
+ " export environment variable like 'export VMWARE_HOST=ESXI_HOSTNAME'")
+
+ if not username:
+ module.fail_json(msg="Username parameter is missing."
+ " Please specify this parameter in task or"
+ " export environment variable like 'export VMWARE_USER=ESXI_USERNAME'")
+
+ if not password:
+ module.fail_json(msg="Password parameter is missing."
+ " Please specify this parameter in task or"
+ " export environment variable like 'export VMWARE_PASSWORD=ESXI_PASSWORD'")
+
+ if validate_certs and not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update '
+ 'python or use validate_certs=false.')
+ elif validate_certs:
+ ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ ssl_context.verify_mode = ssl.CERT_REQUIRED
+ ssl_context.check_hostname = True
+ ssl_context.load_default_certs()
+ elif hasattr(ssl, 'SSLContext'):
+ ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ ssl_context.verify_mode = ssl.CERT_NONE
+ ssl_context.check_hostname = False
+ else: # Python < 2.7.9 or RHEL/Centos < 7.4
+ ssl_context = None
+
+ service_instance = None
+ proxy_host = module.params.get('proxy_host')
+ proxy_port = module.params.get('proxy_port')
+
+ connect_args = dict(
+ host=hostname,
+ port=port,
+ )
+ if ssl_context:
+ connect_args.update(sslContext=ssl_context)
+
+ msg_suffix = ''
+ try:
+ if proxy_host:
+ msg_suffix = " [proxy: %s:%d]" % (proxy_host, proxy_port)
+ connect_args.update(httpProxyHost=proxy_host, httpProxyPort=proxy_port)
+ smart_stub = connect.SmartStubAdapter(**connect_args)
+ session_stub = connect.VimSessionOrientedStub(smart_stub, connect.VimSessionOrientedStub.makeUserLoginMethod(username, password))
+ service_instance = vim.ServiceInstance('ServiceInstance', session_stub)
+ else:
+ connect_args.update(user=username, pwd=password)
+ service_instance = connect.SmartConnect(**connect_args)
+ except vim.fault.InvalidLogin as invalid_login:
+ msg = "Unable to log on to vCenter or ESXi API at %s:%s " % (hostname, port)
+ module.fail_json(msg="%s as %s: %s" % (msg, username, invalid_login.msg) + msg_suffix)
+ except vim.fault.NoPermission as no_permission:
+ module.fail_json(msg="User %s does not have required permission"
+ " to log on to vCenter or ESXi API at %s:%s : %s" % (username, hostname, port, no_permission.msg))
+ except (requests.ConnectionError, ssl.SSLError) as generic_req_exc:
+ module.fail_json(msg="Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" % (hostname, port, generic_req_exc))
+ except vmodl.fault.InvalidRequest as invalid_request:
+ # Request is malformed
+ msg = "Failed to get a response from server %s:%s " % (hostname, port)
+ module.fail_json(msg="%s as request is malformed: %s" % (msg, invalid_request.msg) + msg_suffix)
+ except Exception as generic_exc:
+ msg = "Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port) + msg_suffix
+ module.fail_json(msg="%s : %s" % (msg, generic_exc))
+
+ if service_instance is None:
+ msg = "Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port)
+ module.fail_json(msg=msg + msg_suffix)
+
+ # Disabling atexit should be used in special cases only.
+ # Such as IP change of the ESXi host which removes the connection anyway.
+ # Also removal significantly speeds up the return of the module
+ if disconnect_atexit:
+ atexit.register(connect.Disconnect, service_instance)
+ if return_si:
+ return service_instance, service_instance.RetrieveContent()
+ return service_instance.RetrieveContent()
+
+
+def get_all_objs(content, vimtype, folder=None, recurse=True):
+ if not folder:
+ folder = content.rootFolder
+
+ obj = {}
+ container = content.viewManager.CreateContainerView(folder, vimtype, recurse)
+ for managed_object_ref in container.view:
+ obj.update({managed_object_ref: managed_object_ref.name})
+ return obj
+
+
+def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env):
+
+ result = {'failed': False}
+
+ tools_status = vm.guest.toolsStatus
+ if (tools_status == 'toolsNotInstalled' or
+ tools_status == 'toolsNotRunning'):
+ result['failed'] = True
+ result['msg'] = "VMwareTools is not installed or is not running in the guest"
+ return result
+
+ # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
+ creds = vim.vm.guest.NamePasswordAuthentication(
+ username=username, password=password
+ )
+
+ try:
+ # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
+ pm = content.guestOperationsManager.processManager
+ # https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
+ ps = vim.vm.guest.ProcessManager.ProgramSpec(
+ # programPath=program,
+ # arguments=args
+ programPath=program_path,
+ arguments=program_args,
+ workingDirectory=program_cwd,
+ )
+
+ res = pm.StartProgramInGuest(vm, creds, ps)
+ result['pid'] = res
+ pdata = pm.ListProcessesInGuest(vm, creds, [res])
+
+ # wait for pid to finish
+ while not pdata[0].endTime:
+ time.sleep(1)
+ pdata = pm.ListProcessesInGuest(vm, creds, [res])
+
+ result['owner'] = pdata[0].owner
+ result['startTime'] = pdata[0].startTime.isoformat()
+ result['endTime'] = pdata[0].endTime.isoformat()
+ result['exitCode'] = pdata[0].exitCode
+ if result['exitCode'] != 0:
+ result['failed'] = True
+ result['msg'] = "program exited non-zero"
+ else:
+ result['msg'] = "program completed successfully"
+
+ except Exception as e:
+ result['msg'] = str(e)
+ result['failed'] = True
+
+ return result
+
+
+def serialize_spec(clonespec):
+ """Serialize a clonespec or a relocation spec"""
+ data = {}
+ attrs = dir(clonespec)
+ attrs = [x for x in attrs if not x.startswith('_')]
+ for x in attrs:
+ xo = getattr(clonespec, x)
+ if callable(xo):
+ continue
+ xt = type(xo)
+ if xo is None:
+ data[x] = None
+ elif isinstance(xo, vim.vm.ConfigSpec):
+ data[x] = serialize_spec(xo)
+ elif isinstance(xo, vim.vm.RelocateSpec):
+ data[x] = serialize_spec(xo)
+ elif isinstance(xo, vim.vm.device.VirtualDisk):
+ data[x] = serialize_spec(xo)
+ elif isinstance(xo, vim.vm.device.VirtualDeviceSpec.FileOperation):
+ data[x] = to_text(xo)
+ elif isinstance(xo, vim.Description):
+ data[x] = {
+ 'dynamicProperty': serialize_spec(xo.dynamicProperty),
+ 'dynamicType': serialize_spec(xo.dynamicType),
+ 'label': serialize_spec(xo.label),
+ 'summary': serialize_spec(xo.summary),
+ }
+ elif hasattr(xo, 'name'):
+ data[x] = to_text(xo) + ':' + to_text(xo.name)
+ elif isinstance(xo, vim.vm.ProfileSpec):
+ pass
+ elif issubclass(xt, list):
+ data[x] = []
+ for xe in xo:
+ data[x].append(serialize_spec(xe))
+ elif issubclass(xt, string_types + integer_types + (float, bool)):
+ if issubclass(xt, integer_types):
+ data[x] = int(xo)
+ else:
+ data[x] = to_text(xo)
+ elif issubclass(xt, bool):
+ data[x] = xo
+ elif issubclass(xt, dict):
+ data[to_text(x)] = {}
+ for k, v in xo.items():
+ k = to_text(k)
+ data[x][k] = serialize_spec(v)
+ else:
+ data[x] = str(xt)
+
+ return data
+
+
+def find_host_by_cluster_datacenter(module, content, datacenter_name, cluster_name, host_name):
+ dc = find_datacenter_by_name(content, datacenter_name)
+ if dc is None:
+ module.fail_json(msg="Unable to find datacenter with name %s" % datacenter_name)
+ cluster = find_cluster_by_name(content, cluster_name, datacenter=dc)
+ if cluster is None:
+ module.fail_json(msg="Unable to find cluster with name %s" % cluster_name)
+
+ for host in cluster.host:
+ if host.name == host_name:
+ return host, cluster
+
+ return None, cluster
+
+
+def set_vm_power_state(content, vm, state, force, timeout=0):
+ """
+ Set the power status for a VM determined by the current and
+ requested states. If force is true, the state change is attempted
+ even when the VM is in an intermediate power state.
+ """
+ facts = gather_vm_facts(content, vm)
+ expected_state = state.replace('_', '').replace('-', '').lower()
+ current_state = facts['hw_power_status'].lower()
+ result = dict(
+ changed=False,
+ failed=False,
+ )
+
+ # Need Force
+ if not force and current_state not in ['poweredon', 'poweredoff']:
+ result['failed'] = True
+ result['msg'] = "Virtual Machine is in %s power state. Force is required!" % current_state
+ result['instance'] = gather_vm_facts(content, vm)
+ return result
+
+ # State is not already true
+ if current_state != expected_state:
+ task = None
+ try:
+ if expected_state == 'poweredoff':
+ task = vm.PowerOff()
+
+ elif expected_state == 'poweredon':
+ task = vm.PowerOn()
+
+ elif expected_state == 'restarted':
+ if current_state in ('poweredon', 'poweringon', 'resetting', 'poweredoff'):
+ task = vm.Reset()
+ else:
+ result['failed'] = True
+ result['msg'] = "Cannot restart virtual machine in the current state %s" % current_state
+
+ elif expected_state == 'suspended':
+ if current_state in ('poweredon', 'poweringon'):
+ task = vm.Suspend()
+ else:
+ result['failed'] = True
+ result['msg'] = 'Cannot suspend virtual machine in the current state %s' % current_state
+
+ elif expected_state in ['shutdownguest', 'rebootguest']:
+ if current_state == 'poweredon':
+ if vm.guest.toolsRunningStatus == 'guestToolsRunning':
+ if expected_state == 'shutdownguest':
+ task = vm.ShutdownGuest()
+ if timeout > 0:
+ result.update(wait_for_poweroff(vm, timeout))
+ else:
+ task = vm.RebootGuest()
+ # Set result['changed'] immediately because
+ # shutdown and reboot return None.
+ result['changed'] = True
+ else:
+ result['failed'] = True
+ result['msg'] = "VMware tools should be installed for guest shutdown/reboot"
+ else:
+ result['failed'] = True
+ result['msg'] = "Virtual machine %s must be in poweredon state for guest shutdown/reboot" % vm.name
+
+ else:
+ result['failed'] = True
+ result['msg'] = "Unsupported expected state provided: %s" % expected_state
+
+ except Exception as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+
+ if task:
+ wait_for_task(task)
+ if task.info.state == 'error':
+ result['failed'] = True
+ result['msg'] = task.info.error.msg
+ else:
+ result['changed'] = True
+
+ # need to get new metadata if changed
+ result['instance'] = gather_vm_facts(content, vm)
+
+ return result
+
+
+def wait_for_poweroff(vm, timeout=300):
+ result = dict()
+ interval = 15
+ while timeout > 0:
+ if vm.runtime.powerState.lower() == 'poweredoff':
+ break
+ time.sleep(interval)
+ timeout -= interval
+ else:
+ result['failed'] = True
+ result['msg'] = 'Timeout while waiting for VM power off.'
+ return result
+
+
+def is_integer(value, type_of='int'):
+ try:
+ VmomiSupport.vmodlTypes[type_of](value)
+ return True
+ except (TypeError, ValueError):
+ return False
+
+
+def is_boolean(value):
+ if str(value).lower() in ['true', 'on', 'yes', 'false', 'off', 'no']:
+ return True
+ return False
+
+
+def is_truthy(value):
+ if str(value).lower() in ['true', 'on', 'yes']:
+ return True
+ return False
+
+
+# options is the dict as defined in the module parameters, current_options is
+# the list of the currently set options as returned by the vSphere API.
+def option_diff(options, current_options):
+ current_options_dict = {}
+ for option in current_options:
+ current_options_dict[option.key] = option.value
+
+ change_option_list = []
+ for option_key, option_value in options.items():
+ if is_boolean(option_value):
+ option_value = VmomiSupport.vmodlTypes['bool'](is_truthy(option_value))
+ elif isinstance(option_value, int):
+ option_value = VmomiSupport.vmodlTypes['int'](option_value)
+ elif isinstance(option_value, float):
+ option_value = VmomiSupport.vmodlTypes['float'](option_value)
+ elif isinstance(option_value, str):
+ option_value = VmomiSupport.vmodlTypes['string'](option_value)
+
+ if option_key not in current_options_dict or current_options_dict[option_key] != option_value:
+ change_option_list.append(vim.option.OptionValue(key=option_key, value=option_value))
+
+ return change_option_list
+
+
+def quote_obj_name(object_name=None):
+ """
+ Replace special characters in object name
+ with urllib quote equivalent
+
+ """
+ if not object_name:
+ return None
+
+ from collections import OrderedDict
+ SPECIAL_CHARS = OrderedDict({
+ '%': '%25',
+ '/': '%2f',
+ '\\': '%5c'
+ })
+ for key in SPECIAL_CHARS.keys():
+ if key in object_name:
+ object_name = object_name.replace(key, SPECIAL_CHARS[key])
+
+ return object_name
+
+
+class PyVmomi(object):
+ def __init__(self, module):
+ """
+ Constructor
+ """
+ if not HAS_REQUESTS:
+ module.fail_json(msg=missing_required_lib('requests'),
+ exception=REQUESTS_IMP_ERR)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg=missing_required_lib('PyVmomi'),
+ exception=PYVMOMI_IMP_ERR)
+
+ self.module = module
+ self.params = module.params
+ self.current_vm_obj = None
+ self.si, self.content = connect_to_api(self.module, return_si=True)
+ self.custom_field_mgr = []
+ if self.content.customFieldsManager: # not an ESXi
+ self.custom_field_mgr = self.content.customFieldsManager.field
+
+ def is_vcenter(self):
+ """
+ Check if given hostname is vCenter or ESXi host
+ Returns: True if given connection is with vCenter server
+ False if given connection is with ESXi server
+
+ """
+ api_type = None
+ try:
+ api_type = self.content.about.apiType
+ except (vmodl.RuntimeFault, vim.fault.VimFault) as exc:
+ self.module.fail_json(msg="Failed to get status of vCenter server : %s" % exc.msg)
+
+ if api_type == 'VirtualCenter':
+ return True
+ elif api_type == 'HostAgent':
+ return False
+
+ def get_managed_objects_properties(self, vim_type, properties=None):
+ """
+ Look up a Managed Object Reference in vCenter / ESXi Environment
+ :param vim_type: Type of vim object e.g, for datacenter - vim.Datacenter
+ :param properties: List of properties related to vim object e.g. Name
+ :return: local content object
+ """
+ # Get Root Folder
+ root_folder = self.content.rootFolder
+
+ if properties is None:
+ properties = ['name']
+
+ # Create Container View with default root folder
+ mor = self.content.viewManager.CreateContainerView(root_folder, [vim_type], True)
+
+ # Create Traversal spec
+ traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
+ name="traversal_spec",
+ path='view',
+ skip=False,
+ type=vim.view.ContainerView
+ )
+
+ # Create Property Spec
+ property_spec = vmodl.query.PropertyCollector.PropertySpec(
+ type=vim_type, # Type of object to be retrieved
+ all=False,
+ pathSet=properties
+ )
+
+ # Create Object Spec
+ object_spec = vmodl.query.PropertyCollector.ObjectSpec(
+ obj=mor,
+ skip=True,
+ selectSet=[traversal_spec]
+ )
+
+ # Create Filter Spec
+ filter_spec = vmodl.query.PropertyCollector.FilterSpec(
+ objectSet=[object_spec],
+ propSet=[property_spec],
+ reportMissingObjectsInResults=False
+ )
+
+ return self.content.propertyCollector.RetrieveContents([filter_spec])
+
+ # Virtual Machine related functions
+ def get_vm(self):
+ """
+ Find unique virtual machine either by UUID, MoID or Name.
+ Returns: virtual machine object if found, else None.
+
+ """
+ vm_obj = None
+ user_desired_path = None
+ use_instance_uuid = self.params.get('use_instance_uuid') or False
+ if 'uuid' in self.params and self.params['uuid']:
+ if not use_instance_uuid:
+ vm_obj = find_vm_by_id(self.content, vm_id=self.params['uuid'], vm_id_type="uuid")
+ elif use_instance_uuid:
+ vm_obj = find_vm_by_id(self.content,
+ vm_id=self.params['uuid'],
+ vm_id_type="instance_uuid")
+ elif 'name' in self.params and self.params['name']:
+ objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
+ vms = []
+
+ for temp_vm_object in objects:
+ if (
+ len(temp_vm_object.propSet) == 1 and
+ temp_vm_object.propSet[0].val == self.params['name']):
+ vms.append(temp_vm_object.obj)
+
+ # get_managed_objects_properties may return multiple virtual machines;
+ # the following code tries to find the user-desired one depending upon the folder specified.
+ if len(vms) > 1:
+ # We have found multiple virtual machines, decide depending upon folder value
+ if self.params['folder'] is None:
+ self.module.fail_json(msg="Multiple virtual machines with same name [%s] found, "
+ "Folder value is a required parameter to find uniqueness "
+ "of the virtual machine" % self.params['name'],
+ details="Please see documentation of the vmware_guest module "
+ "for folder parameter.")
+
+ # Get folder path where virtual machine is located
+ # User provided folder where user thinks virtual machine is present
+ user_folder = self.params['folder']
+ # User defined datacenter
+ user_defined_dc = self.params['datacenter']
+ # User defined datacenter's object
+ datacenter_obj = find_datacenter_by_name(self.content, self.params['datacenter'])
+ # Get Path for Datacenter
+ dcpath = compile_folder_path_for_object(vobj=datacenter_obj)
+
+ # Nested folder does not return trailing /
+ if not dcpath.endswith('/'):
+ dcpath += '/'
+
+ if user_folder in [None, '', '/']:
+ # User provided blank value or
+ # User provided only root value, we fail
+ self.module.fail_json(msg="vmware_guest found multiple virtual machines with same "
+ "name [%s], please specify folder path other than blank "
+ "or '/'" % self.params['name'])
+ elif user_folder.startswith('/vm/'):
+ # User provided nested folder under VMware default vm folder i.e. folder = /vm/india/finance
+ user_desired_path = "%s%s%s" % (dcpath, user_defined_dc, user_folder)
+ else:
+ # User defined datacenter is not nested i.e. dcpath = '/' , or
+ # User defined datacenter is nested i.e. dcpath = '/F0/DC0' or
+ # User provided folder starts with / and datacenter i.e. folder = /ha-datacenter/ or
+ # User defined folder starts with datacenter without '/' i.e.
+ # folder = DC0/vm/india/finance or
+ # folder = DC0/vm
+ user_desired_path = user_folder
+
+ for vm in vms:
+ # Check if user has provided same path as virtual machine
+ actual_vm_folder_path = self.get_vm_path(content=self.content, vm_name=vm)
+ if not actual_vm_folder_path.startswith("%s%s" % (dcpath, user_defined_dc)):
+ continue
+ if user_desired_path in actual_vm_folder_path:
+ vm_obj = vm
+ break
+ elif vms:
+ # Unique virtual machine found.
+ actual_vm_folder_path = self.get_vm_path(content=self.content, vm_name=vms[0])
+ if self.params.get('folder') is None:
+ vm_obj = vms[0]
+ elif self.params['folder'] in actual_vm_folder_path:
+ vm_obj = vms[0]
+ elif 'moid' in self.params and self.params['moid']:
+ vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.params['moid'], self.si._stub)
+
+ if vm_obj:
+ self.current_vm_obj = vm_obj
+
+ return vm_obj
+
+ def gather_facts(self, vm):
+ """
+ Gather facts of virtual machine.
+ Args:
+ vm: Virtual machine managed object.
+
+ Returns: Facts dictionary of the given virtual machine.
+
+ """
+ return gather_vm_facts(self.content, vm)
+
+ @staticmethod
+ def get_vm_path(content, vm_name):
+ """
+ Find the path of virtual machine.
+ Args:
+ content: VMware content object
+ vm_name: virtual machine managed object
+
+ Returns: Folder of virtual machine if exists, else None
+
+ """
+ folder_name = None
+ folder = vm_name.parent
+ if folder:
+ folder_name = folder.name
+ fp = folder.parent
+ # climb back up the tree to find our path, stop before the root folder
+ while fp is not None and fp.name is not None and fp != content.rootFolder:
+ folder_name = fp.name + '/' + folder_name
+ try:
+ fp = fp.parent
+ except Exception:
+ break
+ folder_name = '/' + folder_name
+ return folder_name
+
+ def get_vm_or_template(self, template_name=None):
+ """
+ Find the virtual machine or virtual machine template using name
+ used for cloning purpose.
+ Args:
+ template_name: Name of virtual machine or virtual machine template
+
+ Returns: virtual machine or virtual machine template object
+
+ """
+ template_obj = None
+ if not template_name:
+ return template_obj
+
+ if "/" in template_name:
+ vm_obj_path = os.path.dirname(template_name)
+ vm_obj_name = os.path.basename(template_name)
+ template_obj = find_vm_by_id(self.content, vm_obj_name, vm_id_type="inventory_path", folder=vm_obj_path)
+ if template_obj:
+ return template_obj
+ else:
+ template_obj = find_vm_by_id(self.content, vm_id=template_name, vm_id_type="uuid")
+ if template_obj:
+ return template_obj
+
+ objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name'])
+ templates = []
+
+ for temp_vm_object in objects:
+ if len(temp_vm_object.propSet) != 1:
+ continue
+ for temp_vm_object_property in temp_vm_object.propSet:
+ if temp_vm_object_property.val == template_name:
+ templates.append(temp_vm_object.obj)
+ break
+
+ if len(templates) > 1:
+ # We have found multiple virtual machine templates
+ self.module.fail_json(msg="Multiple virtual machines or templates with same name [%s] found." % template_name)
+ elif templates:
+ template_obj = templates[0]
+
+ return template_obj
+
+ # Cluster related functions
+ def find_cluster_by_name(self, cluster_name, datacenter_name=None):
+ """
+ Find Cluster by name in given datacenter
+ Args:
+ cluster_name: Name of the cluster to find
+ datacenter_name: (optional) Name of datacenter
+
+ Returns: Cluster managed object if found, else None
+
+ """
+ return find_cluster_by_name(self.content, cluster_name, datacenter=datacenter_name)
+
+ def get_all_hosts_by_cluster(self, cluster_name):
+ """
+ Get all hosts from cluster by cluster name
+ Args:
+ cluster_name: Name of cluster
+
+ Returns: List of hosts
+
+ """
+ cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
+ if cluster_obj:
+ return [host for host in cluster_obj.host]
+ else:
+ return []
+
+ # Hosts related functions
+ def find_hostsystem_by_name(self, host_name):
+ """
+ Find Host by name
+ Args:
+ host_name: Name of ESXi host
+
+ Returns: Host system managed object if found, else None
+
+ """
+ return find_hostsystem_by_name(self.content, hostname=host_name)
+
+ def get_all_host_objs(self, cluster_name=None, esxi_host_name=None):
+ """
+ Get all host system managed object
+
+ Args:
+ cluster_name: Name of Cluster
+ esxi_host_name: Name of ESXi server
+
+ Returns: A list of all host system managed objects, else empty list
+
+ """
+ host_obj_list = []
+ if not self.is_vcenter():
+ hosts = get_all_objs(self.content, [vim.HostSystem]).keys()
+ if hosts:
+ host_obj_list.append(list(hosts)[0])
+ else:
+ if cluster_name:
+ cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name)
+ if cluster_obj:
+ host_obj_list = [host for host in cluster_obj.host]
+ else:
+ self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name)
+ elif esxi_host_name:
+ if isinstance(esxi_host_name, str):
+ esxi_host_name = [esxi_host_name]
+
+ for host in esxi_host_name:
+ esxi_host_obj = self.find_hostsystem_by_name(host_name=host)
+ if esxi_host_obj:
+ host_obj_list.append(esxi_host_obj)
+ else:
+ self.module.fail_json(changed=False, msg="ESXi '%s' not found" % host)
+
+ return host_obj_list
+
+ def host_version_at_least(self, version=None, vm_obj=None, host_name=None):
+ """
+ Check that the ESXi Host is at least a specific version number
+ Args:
+ vm_obj: virtual machine object; one of vm_obj or host_name is required
+ host_name (string): ESXi host name
+ version (tuple): a version tuple, for example (6, 7, 0)
+ Returns: bool
+ """
+ if vm_obj:
+ host_system = vm_obj.summary.runtime.host
+ elif host_name:
+ host_system = self.find_hostsystem_by_name(host_name=host_name)
+ else:
+ self.module.fail_json(msg='One of vm_obj or host_name must be set.')
+ if host_system and version:
+ host_version = host_system.summary.config.product.version
+ return StrictVersion(host_version) >= StrictVersion('.'.join(map(str, version)))
+ else:
+ self.module.fail_json(msg='Unable to get the ESXi host from vm: %s, or hostname %s, '
+ 'or the passed ESXi version: %s is None.' % (vm_obj, host_name, version))
+
+ # Network related functions
+ @staticmethod
+ def find_host_portgroup_by_name(host, portgroup_name):
+ """
+ Find Portgroup on given host
+ Args:
+ host: Host config object
+ portgroup_name: Name of portgroup
+
+ Returns: Portgroup object if found, else False
+
+ """
+ for portgroup in host.config.network.portgroup:
+ if portgroup.spec.name == portgroup_name:
+ return portgroup
+ return False
+
+ def get_all_port_groups_by_host(self, host_system):
+ """
+ Get all Port Group by host
+ Args:
+ host_system: Host system managed object
+
+ Returns: List of Port Group Spec
+ """
+ pgs_list = []
+ for pg in host_system.config.network.portgroup:
+ pgs_list.append(pg)
+ return pgs_list
+
+ def find_network_by_name(self, network_name=None):
+ """
+ Get network specified by name
+ Args:
+ network_name: Name of network
+
+ Returns: List of network managed objects
+ """
+ networks = []
+
+ if not network_name:
+ return networks
+
+ objects = self.get_managed_objects_properties(vim_type=vim.Network, properties=['name'])
+
+ for temp_vm_object in objects:
+ if len(temp_vm_object.propSet) != 1:
+ continue
+ for temp_vm_object_property in temp_vm_object.propSet:
+ if temp_vm_object_property.val == network_name:
+ networks.append(temp_vm_object.obj)
+ break
+ return networks
+
+ def network_exists_by_name(self, network_name=None):
+ """
+ Check if network with a specified name exists or not
+ Args:
+ network_name: Name of network
+
+ Returns: True if network exists else False
+ """
+ ret = False
+ if not network_name:
+ return ret
+ ret = True if self.find_network_by_name(network_name=network_name) else False
+ return ret
+
+ # Datacenter
+ def find_datacenter_by_name(self, datacenter_name):
+ """
+ Get datacenter managed object by name
+
+ Args:
+ datacenter_name: Name of datacenter
+
+ Returns: datacenter managed object if found else None
+
+ """
+ return find_datacenter_by_name(self.content, datacenter_name=datacenter_name)
+
+ def is_datastore_valid(self, datastore_obj=None):
+ """
+ Check if datastore selected is valid or not
+ Args:
+ datastore_obj: datastore managed object
+
+ Returns: True if datastore is valid, False if not
+ """
+ if not datastore_obj \
+ or datastore_obj.summary.maintenanceMode != 'normal' \
+ or not datastore_obj.summary.accessible:
+ return False
+ return True
+
+ def find_datastore_by_name(self, datastore_name, datacenter_name=None):
+ """
+ Get datastore managed object by name
+ Args:
+ datastore_name: Name of datastore
+ datacenter_name: Name of datacenter where the datastore resides. This is needed because Datastores can be
+ shared across Datacenters, so we need to specify the datacenter to ensure we get the correct Managed Object Reference
+
+ Returns: datastore managed object if found else None
+
+ """
+ return find_datastore_by_name(self.content, datastore_name=datastore_name, datacenter_name=datacenter_name)
+
+ def find_folder_by_name(self, folder_name):
+ """
+ Get vm folder managed object by name
+ Args:
+ folder_name: Name of the vm folder
+
+ Returns: vm folder managed object if found else None
+
+ """
+ return find_folder_by_name(self.content, folder_name=folder_name)
+
+ # Datastore cluster
+ def find_datastore_cluster_by_name(self, datastore_cluster_name):
+ """
+ Get datastore cluster managed object by name
+ Args:
+ datastore_cluster_name: Name of datastore cluster
+
+ Returns: Datastore cluster managed object if found else None
+
+ """
+ data_store_clusters = get_all_objs(self.content, [vim.StoragePod])
+ for dsc in data_store_clusters:
+ if dsc.name == datastore_cluster_name:
+ return dsc
+ return None
+
+ # Resource pool
+ def find_resource_pool_by_name(self, resource_pool_name, folder=None):
+ """
+ Get resource pool managed object by name
+ Args:
+ resource_pool_name: Name of resource pool
+
+ Returns: Resource pool managed object if found else None
+
+ """
+ if not folder:
+ folder = self.content.rootFolder
+
+ resource_pools = get_all_objs(self.content, [vim.ResourcePool], folder=folder)
+ for rp in resource_pools:
+ if rp.name == resource_pool_name:
+ return rp
+ return None
+
+ def find_resource_pool_by_cluster(self, resource_pool_name='Resources', cluster=None):
+ """
+ Get resource pool managed object by cluster object
+ Args:
+ resource_pool_name: Name of resource pool
+ cluster: Managed object of cluster
+
+ Returns: Resource pool managed object if found else None
+
+ """
+ desired_rp = None
+ if not cluster:
+ return desired_rp
+
+ if resource_pool_name != 'Resources':
+ # Resource pool name is different than default 'Resources'
+ resource_pools = cluster.resourcePool.resourcePool
+ if resource_pools:
+ for rp in resource_pools:
+ if rp.name == resource_pool_name:
+ desired_rp = rp
+ break
+ else:
+ desired_rp = cluster.resourcePool
+
+ return desired_rp
+
+ # VMDK stuff
+ def vmdk_disk_path_split(self, vmdk_path):
+ """
+ Takes a string in the format
+
+ [datastore_name] path/to/vm_name.vmdk
+
+ Returns a tuple with multiple strings:
+
+ 1. datastore_name: The name of the datastore (without brackets)
+ 2. vmdk_fullpath: The "path/to/vm_name.vmdk" portion
+ 3. vmdk_filename: The "vm_name.vmdk" portion of the string (os.path.basename equivalent)
+ 4. vmdk_folder: The "path/to/" portion of the string (os.path.dirname equivalent)
+ """
+ try:
+ datastore_name = re.match(r'^\[(.*?)\]', vmdk_path, re.DOTALL).groups()[0]
+ vmdk_fullpath = re.match(r'\[.*?\] (.*)$', vmdk_path).groups()[0]
+ vmdk_filename = os.path.basename(vmdk_fullpath)
+ vmdk_folder = os.path.dirname(vmdk_fullpath)
+ return datastore_name, vmdk_fullpath, vmdk_filename, vmdk_folder
+ except (IndexError, AttributeError) as e:
+ self.module.fail_json(msg="Bad path '%s' for filename disk vmdk image: %s" % (vmdk_path, to_native(e)))
+
+ def find_vmdk_file(self, datastore_obj, vmdk_fullpath, vmdk_filename, vmdk_folder):
+ """
+ Return vSphere file object or fail_json
+ Args:
+ datastore_obj: Managed object of datastore
+ vmdk_fullpath: Path of VMDK file e.g., path/to/vm/vmdk_filename.vmdk
+ vmdk_filename: Name of vmdk e.g., VM0001_1.vmdk
+ vmdk_folder: Base dir of VMDK e.g., path/to/vm
+
+ """
+
+ browser = datastore_obj.browser
+ datastore_name = datastore_obj.name
+ datastore_name_sq = "[" + datastore_name + "]"
+ if browser is None:
+ self.module.fail_json(msg="Unable to access browser for datastore %s" % datastore_name)
+
+ detail_query = vim.host.DatastoreBrowser.FileInfo.Details(
+ fileOwner=True,
+ fileSize=True,
+ fileType=True,
+ modification=True
+ )
+ search_spec = vim.host.DatastoreBrowser.SearchSpec(
+ details=detail_query,
+ matchPattern=[vmdk_filename],
+ searchCaseInsensitive=True,
+ )
+ search_res = browser.SearchSubFolders(
+ datastorePath=datastore_name_sq,
+ searchSpec=search_spec
+ )
+
+ changed = False
+ vmdk_path = datastore_name_sq + " " + vmdk_fullpath
+ try:
+ changed, result = wait_for_task(search_res)
+ except TaskError as task_e:
+ self.module.fail_json(msg=to_native(task_e))
+
+ if not changed:
+ self.module.fail_json(msg="No valid disk vmdk image found for path %s" % vmdk_path)
+
+ target_folder_paths = [
+ datastore_name_sq + " " + vmdk_folder + '/',
+ datastore_name_sq + " " + vmdk_folder,
+ ]
+
+ for file_result in search_res.info.result:
+ for f in getattr(file_result, 'file'):
+ if f.path == vmdk_filename and file_result.folderPath in target_folder_paths:
+ return f
+
+ self.module.fail_json(msg="No vmdk file found for path specified [%s]" % vmdk_path)
+
+ #
+ # Conversion to JSON
+ #
+
+ def _deepmerge(self, d, u):
+ """
+ Deep merges u into d.
+
+ Credit:
+ https://bit.ly/2EDOs1B (stackoverflow question 3232943)
+ License:
+ cc-by-sa 3.0 (https://creativecommons.org/licenses/by-sa/3.0/)
+ Changes:
+ using collections_compat for compatibility
+
+ Args:
+ - d (dict): dict to merge into
+ - u (dict): dict to merge into d
+
+ Returns:
+ dict, with u merged into d
+ """
+ for k, v in iteritems(u):
+ if isinstance(v, collections_compat.Mapping):
+ d[k] = self._deepmerge(d.get(k, {}), v)
+ else:
+ d[k] = v
+ return d
+
+ def _extract(self, data, remainder):
+ """
+ This is used to break down dotted properties for extraction.
+
+ Args:
+ - data (dict): result of _jsonify on a property
+ - remainder: the remainder of the dotted property to select
+
+ Return:
+ dict
+ """
+ result = dict()
+ if '.' not in remainder:
+ result[remainder] = data[remainder]
+ return result
+ key, remainder = remainder.split('.', 1)
+ result[key] = self._extract(data[key], remainder)
+ return result
+
+ def _jsonify(self, obj):
+ """
+ Convert an object from pyVmomi into JSON.
+
+ Args:
+ - obj (object): vim object
+
+ Return:
+ dict
+ """
+ return json.loads(json.dumps(obj, cls=VmomiSupport.VmomiJSONEncoder,
+ sort_keys=True, strip_dynamic=True))
+
+ def to_json(self, obj, properties=None):
+ """
+ Convert a vSphere (pyVmomi) Object into JSON. This is a deep
+ transformation. The list of properties is optional - if not
+ provided then all properties are deeply converted. The resulting
+ JSON is sorted to improve human readability.
+
+ Requires upstream support from pyVmomi > 6.7.1
+ (https://github.com/vmware/pyvmomi/pull/732)
+
+ Args:
+ - obj (object): vim object
+ - properties (list, optional): list of properties following
+ the property collector specification, for example:
+ ["config.hardware.memoryMB", "name", "overallStatus"]
+ default is a complete object dump, which can be large
+
+ Return:
+ dict
+ """
+ if not HAS_PYVMOMIJSON:
+ self.module.fail_json(msg='The installed version of pyvmomi lacks JSON output support; need pyvmomi>6.7.1')
+
+ result = dict()
+ if properties:
+ for prop in properties:
+ try:
+ if '.' in prop:
+ key, remainder = prop.split('.', 1)
+ tmp = dict()
+ tmp[key] = self._extract(self._jsonify(getattr(obj, key)), remainder)
+ self._deepmerge(result, tmp)
+ else:
+ result[prop] = self._jsonify(getattr(obj, prop))
+ # To match gather_vm_facts output
+ prop_name = prop
+ if prop.lower() == '_moid':
+ prop_name = 'moid'
+ elif prop.lower() == '_vimref':
+ prop_name = 'vimref'
+ result[prop_name] = result[prop]
+ except (AttributeError, KeyError):
+ self.module.fail_json(msg="Property '{0}' not found.".format(prop))
+ else:
+ result = self._jsonify(obj)
+ return result
+
+ def get_folder_path(self, cur):
+ full_path = '/' + cur.name
+ while hasattr(cur, 'parent') and cur.parent:
+ if cur.parent == self.content.rootFolder:
+ break
+ cur = cur.parent
+ full_path = '/' + cur.name + full_path
+ return full_path
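The helper above is consumed by the copied vmware_guest-style modules through the PyVmomi class and the standalone power-state functions. Below is a minimal sketch of such a consumer, not part of this commit: it assumes the file is importable as ansible.module_utils.vmware and that it also defines the usual vmware_argument_spec() helper (declared earlier in the file, outside the excerpt shown); the module name and options are illustrative only.

#!/usr/bin/python
# Hypothetical consumer of the PyVmomi helper above; names and options are
# illustrative, not part of this commit.
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, set_vm_power_state, vmware_argument_spec


def main():
    # vmware_argument_spec() is assumed to provide hostname/username/password/validate_certs
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str', required=True),
        folder=dict(type='str'),
        datacenter=dict(type='str'),
        state=dict(type='str', default='powered-on',
                   choices=['powered-on', 'powered-off', 'shutdown-guest', 'reboot-guest']),
        force=dict(type='bool', default=False),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    pyv = PyVmomi(module)      # connects via connect_to_api() and caches the service content
    vm = pyv.get_vm()          # resolves the VM by name, using folder/datacenter on collisions
    if vm is None:
        module.fail_json(msg="Unable to find virtual machine %s" % module.params['name'])

    # 'powered-on' is normalised to 'poweredon' inside set_vm_power_state()
    result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'])
    if result.get('failed'):
        module.fail_json(**result)
    module.exit_json(**result)


if __name__ == '__main__':
    main()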
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py
new file mode 120000
index 0000000000..f9993bfba7
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbconfiguration_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbconfiguration_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py
new file mode 120000
index 0000000000..b8293e64df
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbdatabase_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbdatabase_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py
new file mode 120000
index 0000000000..4311a0c1cc
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbfirewallrule_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbfirewallrule_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py b/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py
new file mode 120000
index 0000000000..5f76e0e932
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_mariadbserver_facts.py
@@ -0,0 +1 @@
+azure_rm_mariadbserver_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_resource_facts.py b/test/support/integration/plugins/modules/_azure_rm_resource_facts.py
new file mode 120000
index 0000000000..710fda1074
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_resource_facts.py
@@ -0,0 +1 @@
+azure_rm_resource_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py b/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py
new file mode 120000
index 0000000000..ead87c850b
--- /dev/null
+++ b/test/support/integration/plugins/modules/_azure_rm_webapp_facts.py
@@ -0,0 +1 @@
+azure_rm_webapp_info.py \ No newline at end of file
diff --git a/test/support/integration/plugins/modules/aws_az_info.py b/test/support/integration/plugins/modules/aws_az_info.py
new file mode 100644
index 0000000000..eccbf4d7d4
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_az_info.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'supported_by': 'community',
+ 'status': ['preview']
+}
+
+DOCUMENTATION = '''
+module: aws_az_info
+short_description: Gather information about availability zones in AWS.
+description:
+ - Gather information about availability zones in AWS.
+ - This module was called C(aws_az_facts) before Ansible 2.9. The usage did not change.
+version_added: '2.5'
+author: 'Henrique Rodrigues (@Sodki)'
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for
+ possible filters. Filter names and values are case sensitive. You can also use underscores
+ instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
+ required: false
+ default: {}
+ type: dict
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements: [botocore, boto3]
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all availability zones
+- aws_az_info:
+
+# Gather information about a single availability zone
+- aws_az_info:
+ filters:
+ zone-name: eu-west-1a
+'''
+
+RETURN = '''
+availability_zones:
+ returned: on success
+ description: >
+ Availability zones that match the provided filters. Each element consists of a dict with all the information
+ related to that availability zone.
+ type: list
+ sample: "[
+ {
+ 'messages': [],
+ 'region_name': 'us-west-1',
+ 'state': 'available',
+ 'zone_name': 'us-west-1b'
+ },
+ {
+ 'messages': [],
+ 'region_name': 'us-west-1',
+ 'state': 'available',
+ 'zone_name': 'us-west-1c'
+ }
+ ]"
+'''
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def main():
+ argument_spec = dict(
+ filters=dict(default={}, type='dict')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ if module._name == 'aws_az_facts':
+ module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'", version='2.14')
+
+ connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+
+ # Replace filter key underscores with dashes, for compatibility
+ sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items())
+
+ try:
+ availability_zones = connection.describe_availability_zones(
+ Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to describe availability zones.")
+
+ # Turn the boto3 result into ansible_friendly_snaked_names
+ snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]
+
+ module.exit_json(availability_zones=snaked_availability_zones)
+
+
+if __name__ == '__main__':
+ main()
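Two small transformations carry most of the weight in the module above: playbook-style filter keys with underscores are converted to the dash-separated names the EC2 API expects, and the CamelCase boto3 response is snake-cased for Ansible consumers. A standalone illustration follows, not part of this commit; the sample data is made up, and camel_dict_to_snake_dict is the same helper the module imports from ansible.module_utils.ec2.

from ansible.module_utils.ec2 import camel_dict_to_snake_dict

# What a playbook might pass as 'filters'
filters = {'zone_name': 'eu-west-1a'}

# Underscores become dashes before the describe_availability_zones() call
sanitized = dict((k.replace('_', '-'), v) for k, v in filters.items())
assert sanitized == {'zone-name': 'eu-west-1a'}

# A made-up single entry as boto3 would return it, snake-cased for the result
fake_az = {'RegionName': 'eu-west-1', 'ZoneName': 'eu-west-1a', 'State': 'available'}
assert camel_dict_to_snake_dict(fake_az) == {
    'region_name': 'eu-west-1',
    'zone_name': 'eu-west-1a',
    'state': 'available',
}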
diff --git a/test/support/integration/plugins/modules/aws_codebuild.py b/test/support/integration/plugins/modules/aws_codebuild.py
new file mode 100644
index 0000000000..837e22e005
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_codebuild.py
@@ -0,0 +1,408 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_codebuild
+short_description: Create or delete an AWS CodeBuild project
+notes:
+ - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html).
+description:
+ - Create or delete a CodeBuild projects on AWS, used for building code artifacts from source code.
+version_added: "2.9"
+author:
+ - Stefan Horning (@stefanhorning) <horning@mediapeers.com>
+requirements: [ botocore, boto3 ]
+options:
+ name:
+ description:
+ - Name of the CodeBuild project.
+ required: true
+ type: str
+ description:
+ description:
+ - Descriptive text of the CodeBuild project.
+ type: str
+ source:
+ description:
+ - Configure service and location for the build input source.
+ required: true
+ suboptions:
+ type:
+ description:
+ - "The type of the source. Allows one of these: C(CODECOMMIT), C(CODEPIPELINE), C(GITHUB), C(S3), C(BITBUCKET), C(GITHUB_ENTERPRISE)."
+ required: true
+ type: str
+ location:
+ description:
+ - Information about the location of the source code to be built. For type CODEPIPELINE location should not be specified.
+ type: str
+ git_clone_depth:
+ description:
+ - When using git you can specify the clone depth as an integer here.
+ type: int
+ buildspec:
+ description:
+ - The build spec declaration to use for the builds in this build project. Leave empty if part of the code project.
+ type: str
+ insecure_ssl:
+ description:
+ - Enable this flag to ignore SSL warnings while connecting to the project source code.
+ type: bool
+ type: dict
+ artifacts:
+ description:
+ - Information about the build output artifacts for the build project.
+ required: true
+ suboptions:
+ type:
+ description:
+ - "The type of build output for artifacts. Can be one of the following: C(CODEPIPELINE), C(NO_ARTIFACTS), C(S3)."
+ required: true
+ location:
+ description:
+ - Information about the build output artifact location. When choosing type S3, set the bucket name here.
+ path:
+ description:
+ - Along with namespace_type and name, the pattern that AWS CodeBuild will use to name and store the output artifacts.
+ - Used for path in S3 bucket when type is C(S3).
+ namespace_type:
+ description:
+ - Along with path and name, the pattern that AWS CodeBuild will use to determine the name and location to store the output artifacts.
+ - Accepts C(BUILD_ID) and C(NONE).
+ - "See docs here: U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project)."
+ name:
+ description:
+ - Along with path and namespace_type, the pattern that AWS CodeBuild will use to name and store the output artifact.
+ packaging:
+ description:
+ - The type of build output artifact to create on S3, can be NONE for creating a folder or ZIP for a ZIP file.
+ type: dict
+ cache:
+ description:
+ - Caching params to speed up following builds.
+ suboptions:
+ type:
+ description:
+ - Cache type. Can be C(NO_CACHE) or C(S3).
+ required: true
+ location:
+ description:
+ - Caching location on S3.
+ required: true
+ type: dict
+ environment:
+ description:
+ - Information about the build environment for the build project.
+ suboptions:
+ type:
+ description:
+ - The type of build environment to use for the project. Usually C(LINUX_CONTAINER).
+ required: true
+ image:
+ description:
+ - The ID of the Docker image to use for this build project.
+ required: true
+ compute_type:
+ description:
+ - Information about the compute resources the build project will use.
+ - "Available values include: C(BUILD_GENERAL1_SMALL), C(BUILD_GENERAL1_MEDIUM), C(BUILD_GENERAL1_LARGE)."
+ required: true
+ environment_variables:
+ description:
+ - A set of environment variables to make available to builds for the build project. List of dictionaries with name and value fields.
+ - "Example: { name: 'MY_ENV_VARIABLE', value: 'test' }"
+ privileged_mode:
+ description:
+ - Enables running the Docker daemon inside a Docker container. Set to true only if the build project is to be used to build Docker images.
+ type: dict
+ service_role:
+ description:
+ - The ARN of the AWS IAM role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
+ type: str
+ timeout_in_minutes:
+ description:
+ - How long CodeBuild should wait until timing out any build that has not been marked as completed.
+ default: 60
+ type: int
+ encryption_key:
+ description:
+ - The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
+ type: str
+ tags:
+ description:
+ - A set of tags for the build project.
+ type: list
+ elements: dict
+ suboptions:
+ key:
+ description: The name of the Tag.
+ type: str
+ value:
+ description: The value of the Tag.
+ type: str
+ vpc_config:
+ description:
+ - The VPC config enables AWS CodeBuild to access resources in an Amazon VPC.
+ type: dict
+ state:
+ description:
+ - Create or remove code build project.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- aws_codebuild:
+ name: my_project
+ description: My nice little project
+ service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role"
+ source:
+ # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3
+ type: CODEPIPELINE
+ buildspec: ''
+ artifacts:
+ namespaceType: NONE
+ packaging: NONE
+ type: CODEPIPELINE
+ name: my_project
+ environment:
+ computeType: BUILD_GENERAL1_SMALL
+ privilegedMode: "true"
+ image: "aws/codebuild/docker:17.09.0"
+ type: LINUX_CONTAINER
+ environmentVariables:
+ - { name: 'PROFILE', value: 'staging' }
+ encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3"
+ region: us-east-1
+ state: present
+'''
+
+RETURN = '''
+project:
+ description: Returns the dictionary describing the code project configuration.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: Name of the CodeBuild project
+ returned: always
+ type: str
+ sample: my_project
+ arn:
+ description: ARN of the CodeBuild project
+ returned: always
+ type: str
+ sample: arn:aws:codebuild:us-east-1:123123123:project/vod-api-app-builder
+ description:
+ description: A description of the build project
+ returned: always
+ type: str
+ sample: My nice little project
+ source:
+ description: Information about the build input source code.
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of the repository
+ returned: always
+ type: str
+ sample: CODEPIPELINE
+ location:
+ description: Location identifier, depending on the source type.
+ returned: when configured
+ type: str
+ git_clone_depth:
+ description: The git clone depth
+ returned: when configured
+ type: int
+ build_spec:
+ description: The build spec declaration to use for the builds in this build project.
+ returned: always
+ type: str
+ auth:
+ description: Information about the authorization settings for AWS CodeBuild to access the source code to be built.
+ returned: when configured
+ type: complex
+ insecure_ssl:
+ description: True if set to ignore SSL warnings.
+ returned: when configured
+ type: bool
+ artifacts:
+ description: Information about the output of build artifacts
+ returned: always
+ type: complex
+ contains:
+ type:
+ description: The type of build artifact.
+ returned: always
+ type: str
+ sample: CODEPIPELINE
+ location:
+ description: Output location for build artifacts
+ returned: when configured
+ type: str
+ # and more... see http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project
+ cache:
+ description: Cache settings for the build project.
+ returned: when configured
+ type: dict
+ environment:
+ description: Environment settings for the build
+ returned: always
+ type: dict
+ service_role:
+ description: IAM role to be used during build to access other AWS services.
+ returned: always
+ type: str
+ sample: arn:aws:iam::123123123:role/codebuild-service-role
+ timeout_in_minutes:
+ description: The timeout of a build in minutes
+ returned: always
+ type: int
+ sample: 60
+ tags:
+ description: Tags added to the project
+ returned: when configured
+ type: list
+ created:
+ description: Timestamp of the create time of the project
+ returned: always
+ type: str
+ sample: "2018-04-17T16:56:03.245000+02:00"
+'''
+
+from ansible.module_utils.aws.core import AnsibleAWSModule, get_boto3_client_method_parameters
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+
+def create_or_update_project(client, params, module):
+ resp = {}
+ name = params['name']
+ # clean up params
+ formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None))
+ permitted_create_params = get_boto3_client_method_parameters(client, 'create_project')
+ permitted_update_params = get_boto3_client_method_parameters(client, 'update_project')
+
+ formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params)
+ formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params)
+
+ # Check if project with that name already exists and if so update existing:
+ found = describe_project(client=client, name=name, module=module)
+ changed = False
+
+ if 'name' in found:
+ found_project = found
+ resp = update_project(client=client, params=formatted_update_params, module=module)
+ updated_project = resp['project']
+
+ # Prep both dicts for sensible change comparison:
+ found_project.pop('lastModified')
+ updated_project.pop('lastModified')
+ if 'tags' not in updated_project:
+ updated_project['tags'] = []
+
+ if updated_project != found_project:
+ changed = True
+ return resp, changed
+ # Or create new project:
+ try:
+ resp = client.create_project(**formatted_create_params)
+ changed = True
+ return resp, changed
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to create CodeBuild project")
+
+
+def update_project(client, params, module):
+ name = params['name']
+
+ try:
+ resp = client.update_project(**params)
+ return resp
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update CodeBuild project")
+
+
+def delete_project(client, name, module):
+ found = describe_project(client=client, name=name, module=module)
+ changed = False
+ if 'name' in found:
+ # Mark as changed when a project with that name existed before calling delete
+ changed = True
+ try:
+ resp = client.delete_project(name=name)
+ return resp, changed
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to delete CodeBuild project")
+
+
+def describe_project(client, name, module):
+ project = {}
+ try:
+ projects = client.batch_get_projects(names=[name])['projects']
+ if len(projects) > 0:
+ project = projects[0]
+ return project
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to describe CodeBuild projects")
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(),
+ source=dict(required=True, type='dict'),
+ artifacts=dict(required=True, type='dict'),
+ cache=dict(type='dict'),
+ environment=dict(type='dict'),
+ service_role=dict(),
+ timeout_in_minutes=dict(type='int', default=60),
+ encryption_key=dict(),
+ tags=dict(type='list'),
+ vpc_config=dict(type='dict'),
+ state=dict(choices=['present', 'absent'], default='present')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+ client_conn = module.client('codebuild')
+
+ state = module.params.get('state')
+ changed = False
+
+ if state == 'present':
+ project_result, changed = create_or_update_project(
+ client=client_conn,
+ params=module.params,
+ module=module)
+ elif state == 'absent':
+ project_result, changed = delete_project(client=client_conn, name=module.params['name'], module=module)
+
+ module.exit_json(changed=changed, **camel_dict_to_snake_dict(project_result))
+
+
+if __name__ == '__main__':
+ main()
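The changed flag reported by create_or_update_project() comes from a plain dictionary comparison: the existing project description and the post-update response are both stripped of the volatile lastModified field (and tags is defaulted to an empty list) before being compared. A self-contained sketch of that comparison, not part of this commit and using made-up data:

def project_changed(found_project, updated_project):
    # Sketch of the comparison in create_or_update_project(): drop volatile
    # fields, default missing tags, then compare what remains.
    found = dict(found_project)
    updated = dict(updated_project)
    found.pop('lastModified', None)
    updated.pop('lastModified', None)
    updated.setdefault('tags', [])
    return updated != found


existing = {'name': 'demo', 'timeoutInMinutes': 60, 'tags': [], 'lastModified': '2020-01-01T00:00:00'}
after_update = {'name': 'demo', 'timeoutInMinutes': 90, 'lastModified': '2020-02-01T00:00:00'}
assert project_changed(existing, after_update) is True

no_op = {'name': 'demo', 'timeoutInMinutes': 60, 'lastModified': '2020-02-01T00:00:00'}
assert project_changed(existing, no_op) is False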
diff --git a/test/support/integration/plugins/modules/aws_s3.py b/test/support/integration/plugins/modules/aws_s3.py
new file mode 100644
index 0000000000..54874f05ce
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_s3.py
@@ -0,0 +1,925 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: aws_s3
+short_description: manage objects in S3.
+description:
+ - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and
+ deleting both objects and buckets, retrieving objects as files or strings and generating download links.
+ This module has a dependency on boto3 and botocore.
+notes:
+ - In 2.4, this module has been renamed from C(s3) into M(aws_s3).
+version_added: "1.1"
+options:
+ bucket:
+ description:
+ - Bucket name.
+ required: true
+ type: str
+ dest:
+ description:
+ - The destination file path when downloading an object/key with a GET operation.
+ version_added: "1.3"
+ type: path
+ encrypt:
+ description:
+ - When set for PUT mode, asks for server-side encryption.
+ default: true
+ version_added: "2.0"
+ type: bool
+ encryption_mode:
+ description:
+ - What encryption mode to use if I(encrypt=true).
+ default: AES256
+ choices:
+ - AES256
+ - aws:kms
+ version_added: "2.7"
+ type: str
+ expiry:
+ description:
+ - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation.
+ default: 600
+ aliases: ['expiration']
+ type: int
+ headers:
+ description:
+ - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
+ version_added: "2.0"
+ type: dict
+ marker:
+ description:
+ - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order.
+ version_added: "2.0"
+ type: str
+ max_keys:
+ description:
+ - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
+ default: 1000
+ version_added: "2.0"
+ type: int
+ metadata:
+ description:
+ - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
+ version_added: "1.6"
+ type: dict
+ mode:
+ description:
+ - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+),
+ getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket),
+ and delobj (delete object, Ansible 2.0+).
+ required: true
+ choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
+ type: str
+ object:
+ description:
+ - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
+ type: str
+ permission:
+ description:
+ - This option lets the user set the canned permissions on the object/bucket that are created.
+ The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or
+ C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read),
+ C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list.
+ default: ['private']
+ version_added: "2.0"
+ type: list
+ elements: str
+ prefix:
+ description:
+ - Limits the response to keys that begin with the specified prefix for list mode.
+ default: ""
+ version_added: "2.0"
+ type: str
+ version:
+ description:
+ - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
+ version_added: "2.0"
+ type: str
+ overwrite:
+ description:
+ - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0.
+ When this is set to 'different', the md5 sum of the local file is compared with the 'ETag' of the object/key in S3.
+ The ETag may or may not be an MD5 digest of the object data. See the ETag response header here
+ U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html)
+ default: 'always'
+ aliases: ['force']
+ version_added: "1.2"
+ type: str
+ retries:
+ description:
+ - On recoverable failure, how many times to retry before actually failing.
+ default: 0
+ version_added: "2.0"
+ type: int
+ aliases: ['retry']
+ s3_url:
+ description:
+ - S3 URL endpoint for usage with Ceph, Eucalyptus and fakes3 etc. Otherwise assumes AWS.
+ aliases: [ S3_URL ]
+ type: str
+ dualstack:
+ description:
+ - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
+ - Requires at least botocore version 1.4.45.
+ type: bool
+ default: false
+ version_added: "2.7"
+ rgw:
+ description:
+ - Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url).
+ default: false
+ version_added: "2.2"
+ type: bool
+ src:
+ description:
+ - The source file path when performing a PUT operation.
+ version_added: "1.3"
+ type: str
+ ignore_nonexistent_bucket:
+ description:
+ - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the
+ GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying
+ I(ignore_nonexistent_bucket=true)."
+ version_added: "2.3"
+ type: bool
+ encryption_kms_key_id:
+ description:
+ - KMS key id to use when encrypting objects using I(encryption_mode=aws:kms). Ignored if I(encryption_mode) is not C(aws:kms).
+ version_added: "2.7"
+ type: str
+requirements: [ "boto3", "botocore" ]
+author:
+ - "Lester Wade (@lwade)"
+ - "Sloane Hertel (@s-hertel)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+- name: Simple PUT operation
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+
+- name: Simple PUT operation in Ceph RGW S3
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ rgw: true
+ s3_url: "http://localhost:8000"
+
+- name: Simple GET operation
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Get a specific version of an object.
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ version: 48c9ee5131af7a716edc22df9772aa6f
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: PUT/upload with metadata
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
+
+- name: PUT/upload with custom headers
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'
+
+- name: List keys simple
+ aws_s3:
+ bucket: mybucket
+ mode: list
+
+- name: List keys all options
+ aws_s3:
+ bucket: mybucket
+ mode: list
+ prefix: /my/desired/
+ marker: /my/desired/0023.txt
+ max_keys: 472
+
+- name: Create an empty bucket
+ aws_s3:
+ bucket: mybucket
+ mode: create
+ permission: public-read
+
+- name: Create a bucket with key as directory, in the EU region
+ aws_s3:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+ region: eu-west-1
+
+- name: Delete a bucket and all contents
+ aws_s3:
+ bucket: mybucket
+ mode: delete
+
+- name: GET an object but don't download if the file checksums match. New in 2.0
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+ overwrite: different
+
+- name: Delete an object from a bucket
+ aws_s3:
+ bucket: mybucket
+ object: /my/desired/key.txt
+ mode: delobj
+'''
+
+RETURN = '''
+msg:
+ description: Message indicating the status of the operation.
+ returned: always
+ type: str
+ sample: PUT operation complete
+url:
+ description: URL of the object.
+ returned: (for put and geturl operations)
+ type: str
+ sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature>
+expiry:
+ description: Number of seconds the presigned url is valid for.
+ returned: (for geturl operation)
+ type: int
+ sample: 600
+contents:
+ description: Contents of the object as string.
+ returned: (for getstr operation)
+ type: str
+ sample: "Hello, world!"
+s3_keys:
+ description: List of object keys.
+ returned: (for list operation)
+ type: list
+ elements: str
+ sample:
+ - prefix1/
+ - prefix1/key1
+ - prefix1/key2
+'''
+
+import mimetypes
+import os
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ssl import SSLError
+from ansible.module_utils.basic import to_text, to_native
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.aws.s3 import calculate_etag, HAS_MD5
+from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
+
+try:
+ import botocore
+except ImportError:
+ pass # will be detected by imported AnsibleAWSModule
+
+IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented']
+
+
+class Sigv4Required(Exception):
+ pass
+
+
+def key_check(module, s3, bucket, obj, version=None, validate=True):
+ exists = True
+ try:
+ if version:
+ s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ s3.head_object(Bucket=bucket, Key=obj)
+ except botocore.exceptions.ClientError as e:
+ # if a client error is thrown, check if it's a 404 error
+ # if it's a 404 error, then the object does not exist
+ error_code = int(e.response['Error']['Code'])
+ if error_code == 404:
+ exists = False
+ elif error_code == 403 and validate is False:
+ pass
+ else:
+ module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
+ return exists
+
+
+def etag_compare(module, local_file, s3, bucket, obj, version=None):
+ s3_etag = get_etag(s3, bucket, obj, version=version)
+ local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)
+
+ return s3_etag == local_etag
+
+
+def get_etag(s3, bucket, obj, version=None):
+ if version:
+ key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ key_check = s3.head_object(Bucket=bucket, Key=obj)
+ if not key_check:
+ return None
+ return key_check['ETag']
+
+
+def bucket_check(module, s3, bucket, validate=True):
+ exists = True
+ try:
+ s3.head_bucket(Bucket=bucket)
+ except botocore.exceptions.ClientError as e:
+ # If a client error is thrown, then check that it was a 404 error.
+ # If it was a 404 error, then the bucket does not exist.
+ error_code = int(e.response['Error']['Code'])
+ if error_code == 404:
+ exists = False
+ elif error_code == 403 and validate is False:
+ pass
+ else:
+ module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
+ except botocore.exceptions.EndpointConnectionError as e:
+ module.fail_json_aws(e, msg="Invalid endpoint provided")
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
+ return exists
+
+
+def create_bucket(module, s3, bucket, location=None):
+ if module.check_mode:
+ module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True)
+ configuration = {}
+ if location not in ('us-east-1', None):
+ configuration['LocationConstraint'] = location
+ try:
+ if len(configuration) > 0:
+ s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration)
+ else:
+ s3.create_bucket(Bucket=bucket)
+ if module.params.get('permission'):
+ # Wait for the bucket to exist before setting ACLs
+ s3.get_waiter('bucket_exists').wait(Bucket=bucket)
+ for acl in module.params.get('permission'):
+ s3.put_bucket_acl(ACL=acl, Bucket=bucket)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
+ module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+ else:
+ module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
+
+ if bucket:
+ return True
+
+
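+# Yield the object keys from each page returned by the list_objects_v2 paginator.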
+def paginated_list(s3, **pagination_params):
+ pg = s3.get_paginator('list_objects_v2')
+ for page in pg.paginate(**pagination_params):
+ yield [data['Key'] for data in page.get('Contents', [])]
+
+
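+# Yield pages of {Key, VersionId} entries (current versions plus delete markers) for a
+# versioned bucket delete; if the endpoint does not implement ListObjectVersions or access
+# is denied, fall back to the plain (unversioned) key listing.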
+def paginated_versioned_list_with_fallback(s3, **pagination_params):
+ try:
+ versioned_pg = s3.get_paginator('list_object_versions')
+ for page in versioned_pg.paginate(**pagination_params):
+ delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])]
+ current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])]
+ yield delete_markers + current_objects
+ except botocore.exceptions.ClientError as e:
+ if to_text(e.response['Error']['Code']) in IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']:
+ for page in paginated_list(s3, **pagination_params):
+ yield [{'Key': data['Key']} for data in page]
+
+
+def list_keys(module, s3, bucket, prefix, marker, max_keys):
+ pagination_params = {'Bucket': bucket}
+ for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)):
+ pagination_params[param_name] = param_value
+ try:
+ keys = sum(paginated_list(s3, **pagination_params), [])
+ module.exit_json(msg="LIST operation complete", s3_keys=keys)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket))
+
+
+def delete_bucket(module, s3, bucket):
+ if module.check_mode:
+ module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
+ try:
+ exists = bucket_check(module, s3, bucket)
+ if exists is False:
+ return False
+ # if there are contents then we need to delete them before we can delete the bucket
+ for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket):
+ if keys:
+ s3.delete_objects(Bucket=bucket, Delete={'Objects': keys})
+ s3.delete_bucket(Bucket=bucket)
+ return True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket)
+
+
+def delete_key(module, s3, bucket, obj):
+ if module.check_mode:
+ module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
+ try:
+ s3.delete_object(Bucket=bucket, Key=obj)
+ module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj)
+
+
+def create_dirkey(module, s3, bucket, obj, encrypt):
+ if module.check_mode:
+ module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+ try:
+ params = {'Bucket': bucket, 'Key': obj, 'Body': b''}
+ if encrypt:
+ params['ServerSideEncryption'] = module.params['encryption_mode']
+ if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+ params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+
+ s3.put_object(**params)
+ for acl in module.params.get('permission'):
+ s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
+ module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning")
+ else:
+ module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
+ module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True)
+
+
+def path_check(path):
+ if os.path.exists(path):
+ return True
+ else:
+ return False
+
+
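+# Normalise a metadata/header key (case- and dash-insensitive) and return the matching
+# boto3 ExtraArgs name, or None when the key should be stored as plain object metadata.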
+def option_in_extra_args(option):
+ temp_option = option.replace('-', '').lower()
+
+ allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition',
+ 'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage',
+ 'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl',
+ 'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP',
+ 'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption',
+ 'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey',
+ 'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'}
+
+ if temp_option in allowed_extra_args:
+ return allowed_extra_args[temp_option]
+
+
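+# Upload the source file with any requested server-side encryption and metadata/ExtraArgs,
+# apply the canned object ACLs, then exit with a presigned URL for the uploaded object.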
+def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
+ if module.check_mode:
+ module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
+ try:
+ extra = {}
+ if encrypt:
+ extra['ServerSideEncryption'] = module.params['encryption_mode']
+ if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
+ extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
+ if metadata:
+ extra['Metadata'] = {}
+
+ # determine object metadata and extra arguments
+ for option in metadata:
+ extra_args_option = option_in_extra_args(option)
+ if extra_args_option is not None:
+ extra[extra_args_option] = metadata[option]
+ else:
+ extra['Metadata'][option] = metadata[option]
+
+ if 'ContentType' not in extra:
+ content_type = mimetypes.guess_type(src)[0]
+ if content_type is None:
+ # s3 default content type
+ content_type = 'binary/octet-stream'
+ extra['ContentType'] = content_type
+
+ s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to complete PUT operation.")
+ try:
+ for acl in module.params.get('permission'):
+ s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
+ module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
+ else:
+ module.fail_json_aws(e, msg="Unable to set object ACL")
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Unable to set object ACL")
+ try:
+ url = s3.generate_presigned_url(ClientMethod='put_object',
+ Params={'Bucket': bucket, 'Key': obj},
+ ExpiresIn=expiry)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to generate presigned URL")
+ module.exit_json(msg="PUT operation complete", url=url, changed=True)
+
+
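+# Issue an initial get_object to surface missing keys and SigV4-only buckets (raising
+# Sigv4Required), then attempt s3.download_file up to retries + 1 times, exiting on the
+# first success and failing only on the final attempt.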
+def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
+ if module.check_mode:
+ module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+ # retries is the number of loops; range/xrange needs to be one
+ # more to get that count of loops.
+ try:
+ if version:
+ key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
+ else:
+ key = s3.get_object(Bucket=bucket, Key=obj)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
+ raise Sigv4Required()
+ elif e.response['Error']['Code'] not in ("403", "404"):
+ # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
+ # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
+ module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Could not find the key %s." % obj)
+
+ optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
+ for x in range(0, retries + 1):
+ try:
+ s3.download_file(bucket, obj, dest, **optional_kwargs)
+ module.exit_json(msg="GET operation complete", changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ # actually fail on last pass through the loop.
+ if x >= retries:
+ module.fail_json_aws(e, msg="Failed while downloading %s." % obj)
+ # otherwise, try again, this may be a transient timeout.
+ except SSLError as e: # will ClientError catch SSLError?
+ # actually fail on last pass through the loop.
+ if x >= retries:
+ module.fail_json_aws(e, msg="s3 download failed")
+ # otherwise, try again, this may be a transient timeout.
+
+
+def download_s3str(module, s3, bucket, obj, version=None, validate=True):
+ if module.check_mode:
+ module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
+ try:
+ if version:
+ contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read())
+ else:
+ contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read())
+ module.exit_json(msg="GET operation complete", contents=contents, changed=True)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
+ raise Sigv4Required()
+ else:
+ module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
+
+
+def get_download_url(module, s3, bucket, obj, expiry, changed=True):
+ try:
+ url = s3.generate_presigned_url(ClientMethod='get_object',
+ Params={'Bucket': bucket, 'Key': obj},
+ ExpiresIn=expiry)
+ module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed while getting download url.")
+
+
+def is_fakes3(s3_url):
+ """ Return True if s3_url has scheme fakes3:// """
+ if s3_url is not None:
+ return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+ else:
+ return False
+
+
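+# Build the boto3 S3 client: honour custom endpoints for Ceph RGW and fakes3 URLs, switch
+# to signature v4 for aws:kms uploads or when sig_4 is requested, and merge in the
+# dualstack endpoint configuration when enabled.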
+def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False):
+ if s3_url and rgw: # TODO - test this
+ rgw = urlparse(s3_url)
+ params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ elif is_fakes3(s3_url):
+ fakes3 = urlparse(s3_url)
+ port = fakes3.port
+ if fakes3.scheme == 'fakes3s':
+ protocol = "https"
+ if port is None:
+ port = 443
+ else:
+ protocol = "http"
+ if port is None:
+ port = 80
+ params = dict(module=module, conn_type='client', resource='s3', region=location,
+ endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+ use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+ else:
+ params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
+ params['config'] = botocore.client.Config(signature_version='s3v4')
+ elif module.params['mode'] in ('get', 'getstr') and sig_4:
+ params['config'] = botocore.client.Config(signature_version='s3v4')
+ if module.params['dualstack']:
+ dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
+ if 'config' in params:
+ params['config'] = params['config'].merge(dualconf)
+ else:
+ params['config'] = dualconf
+ return boto3_conn(**params)
+
+
+def main():
+ argument_spec = dict(
+ bucket=dict(required=True),
+ dest=dict(default=None, type='path'),
+ encrypt=dict(default=True, type='bool'),
+ encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
+ expiry=dict(default=600, type='int', aliases=['expiration']),
+ headers=dict(type='dict'),
+ marker=dict(default=""),
+ max_keys=dict(default=1000, type='int'),
+ metadata=dict(type='dict'),
+ mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
+ object=dict(),
+ permission=dict(type='list', default=['private']),
+ version=dict(default=None),
+ overwrite=dict(aliases=['force'], default='always'),
+ prefix=dict(default=""),
+ retries=dict(aliases=['retry'], type='int', default=0),
+ s3_url=dict(aliases=['S3_URL']),
+ dualstack=dict(default='no', type='bool'),
+ rgw=dict(default='no', type='bool'),
+ src=dict(),
+ ignore_nonexistent_bucket=dict(default=False, type='bool'),
+ encryption_kms_key_id=dict()
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[['mode', 'put', ['src', 'object']],
+ ['mode', 'get', ['dest', 'object']],
+ ['mode', 'getstr', ['object']],
+ ['mode', 'geturl', ['object']]],
+ )
+
+ bucket = module.params.get('bucket')
+ encrypt = module.params.get('encrypt')
+ expiry = module.params.get('expiry')
+ dest = module.params.get('dest', '')
+ headers = module.params.get('headers')
+ marker = module.params.get('marker')
+ max_keys = module.params.get('max_keys')
+ metadata = module.params.get('metadata')
+ mode = module.params.get('mode')
+ obj = module.params.get('object')
+ version = module.params.get('version')
+ overwrite = module.params.get('overwrite')
+ prefix = module.params.get('prefix')
+ retries = module.params.get('retries')
+ s3_url = module.params.get('s3_url')
+ dualstack = module.params.get('dualstack')
+ rgw = module.params.get('rgw')
+ src = module.params.get('src')
+ ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')
+
+ object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
+ bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]
+
+ if overwrite not in ['always', 'never', 'different']:
+ if module.boolean(overwrite):
+ overwrite = 'always'
+ else:
+ overwrite = 'never'
+
+ if overwrite == 'different' and not HAS_MD5:
+ module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+
+ if region in ('us-east-1', '', None):
+ # default to US Standard region
+ location = 'us-east-1'
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+
+ if module.params.get('object'):
+ obj = module.params['object']
+ # If there is a top level object, do nothing - if the object starts with /
+ # remove the leading character to maintain compatibility with Ansible versions < 2.4
+ if obj.startswith('/'):
+ obj = obj[1:]
+
+ # Bucket deletion does not require obj. Prevents ambiguity with delobj.
+ if obj and mode == "delete":
+ module.fail_json(msg='Parameter obj cannot be used with mode=delete')
+
+ # allow eucarc environment variables to be used if ansible vars aren't set
+ if not s3_url and 'S3_URL' in os.environ:
+ s3_url = os.environ['S3_URL']
+
+ if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
+ module.fail_json(msg='dualstack only applies to AWS S3')
+
+ if dualstack and not module.botocore_at_least('1.4.45'):
+ module.fail_json(msg='dualstack requires botocore >= 1.4.45')
+
+ # rgw requires an explicit url
+ if rgw and not s3_url:
+ module.fail_json(msg='rgw flavour requires s3_url')
+
+ # Look at s3_url and tweak connection settings
+ # if connecting to RGW, Walrus or fakes3
+ if s3_url:
+ for key in ['validate_certs', 'security_token', 'profile_name']:
+ aws_connect_kwargs.pop(key, None)
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)
+
+ validate = not ignore_nonexistent_bucket
+
+ # separate types of ACLs
+ bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
+ object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
+ error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
+ if error_acl:
+ module.fail_json(msg='Unknown permission specified: %s' % error_acl)
+
+ # First, we check to see if the bucket exists, we get "bucket" returned.
+ bucketrtn = bucket_check(module, s3, bucket, validate=validate)
+
+ if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
+ module.fail_json(msg="Source bucket cannot be found.")
+
+ if mode == 'get':
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn is False:
+ if version:
+ module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ if path_check(dest) and overwrite != 'always':
+ if overwrite == 'never':
+ module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
+ if etag_compare(module, dest, s3, bucket, obj, version=version):
+ module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
+
+ try:
+ download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+ except Sigv4Required:
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
+ download_s3file(module, s3, bucket, obj, dest, retries, version=version)
+
+ if mode == 'put':
+
+ # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
+ # these were separated into the variables bucket_acl and object_acl above
+
+ if not path_check(src):
+ module.fail_json(msg="Local object for PUT does not exist")
+
+ if bucketrtn:
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ else:
+ # If the bucket doesn't exist we should create it.
+ # only use valid bucket acls for create_bucket function
+ module.params['permission'] = bucket_acl
+ create_bucket(module, s3, bucket, location)
+ # a freshly created bucket cannot already contain the target key
+ keyrtn = False
+
+ if keyrtn and overwrite != 'always':
+ if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj):
+ # Return the download URL for the existing object
+ get_download_url(module, s3, bucket, obj, expiry, changed=False)
+
+ # only use valid object acls for the upload_s3file function
+ module.params['permission'] = object_acl
+ upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
+
+ # Delete an object from a bucket, not the entire bucket
+ if mode == 'delobj':
+ if obj is None:
+ module.fail_json(msg="object parameter is required")
+ if bucket:
+ deletertn = delete_key(module, s3, bucket, obj)
+ if deletertn is True:
+ module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
+ else:
+ module.fail_json(msg="Bucket parameter is required.")
+
+ # Delete an entire bucket, including all objects in the bucket
+ if mode == 'delete':
+ if bucket:
+ deletertn = delete_bucket(module, s3, bucket)
+ if deletertn is True:
+ module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
+ else:
+ module.fail_json(msg="Bucket parameter is required.")
+
+ # Support for listing a set of keys
+ if mode == 'list':
+ exists = bucket_check(module, s3, bucket)
+
+ # If the bucket does not exist then bail out
+ if not exists:
+ module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)
+
+ list_keys(module, s3, bucket, prefix, marker, max_keys)
+
+ # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
+ # We should enable some way of creating an empty key to create "directory" structure; the AWS console does this.
+ if mode == 'create':
+
+ # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
+ # these were separated above into the variables bucket_acl and object_acl
+
+ if bucket and not obj:
+ if bucketrtn:
+ module.exit_json(msg="Bucket already exists.", changed=False)
+ else:
+ # only use valid bucket acls when creating the bucket
+ module.params['permission'] = bucket_acl
+ module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
+ if bucket and obj:
+ if obj.endswith('/'):
+ dirobj = obj
+ else:
+ dirobj = obj + "/"
+ if bucketrtn:
+ if key_check(module, s3, bucket, dirobj):
+ module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
+ else:
+ # setting valid object acls for the create_dirkey function
+ module.params['permission'] = object_acl
+ create_dirkey(module, s3, bucket, dirobj, encrypt)
+ else:
+ # only use valid bucket acls for the create_bucket function
+ module.params['permission'] = bucket_acl
+ created = create_bucket(module, s3, bucket, location)
+ # only use valid object acls for the create_dirkey function
+ module.params['permission'] = object_acl
+ create_dirkey(module, s3, bucket, dirobj, encrypt)
+
+ # Support for grabbing the time-expired URL for an object in S3/Walrus.
+ if mode == 'geturl':
+ if not bucket and not obj:
+ module.fail_json(msg="Bucket and Object parameters must be set")
+
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn:
+ get_download_url(module, s3, bucket, obj, expiry)
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ if mode == 'getstr':
+ if bucket and obj:
+ keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
+ if keyrtn:
+ try:
+ download_s3str(module, s3, bucket, obj, version=version)
+ except Sigv4Required:
+ s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
+ download_s3str(module, s3, bucket, obj, version=version)
+ elif version is not None:
+ module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
+ else:
+ module.fail_json(msg="Key %s does not exist." % obj)
+
+ module.exit_json(failed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/aws_step_functions_state_machine.py b/test/support/integration/plugins/modules/aws_step_functions_state_machine.py
new file mode 100644
index 0000000000..329ee4283d
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_step_functions_state_machine.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# Copyright (c) 2019, Tom De Keyser (@tdekeyser)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: aws_step_functions_state_machine
+
+short_description: Manage AWS Step Functions state machines
+
+version_added: "2.10"
+
+description:
+ - Create, update and delete state machines in AWS Step Functions.
+ - Calling the module in C(state=present) for an existing AWS Step Functions state machine
+ will attempt to update the state machine definition, IAM Role, or tags with the provided data.
+
+options:
+ name:
+ description:
+ - Name of the state machine.
+ required: true
+ type: str
+ definition:
+ description:
+ - The Amazon States Language definition of the state machine. See
+ U(https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) for more
+ information on the Amazon States Language.
+ - "This parameter is required when C(state=present)."
+ type: json
+ role_arn:
+ description:
+ - The ARN of the IAM Role that will be used by the state machine for its executions.
+ - "This parameter is required when C(state=present)."
+ type: str
+ state:
+ description:
+ - Desired state for the state machine.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ tags:
+ description:
+ - A hash/dictionary of tags to add to the new state machine or to add/remove from an existing one.
+ type: dict
+ purge_tags:
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter.
+ If the I(tags) parameter is not set then tags will not be modified.
+ default: yes
+ type: bool
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+
+author:
+ - Tom De Keyser (@tdekeyser)
+'''
+
+EXAMPLES = '''
+# Create a new AWS Step Functions state machine
+- name: Setup HelloWorld state machine
+ aws_step_functions_state_machine:
+ name: "HelloWorldStateMachine"
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: arn:aws:iam::987654321012:role/service-role/invokeLambdaStepFunctionsRole
+ tags:
+ project: helloWorld
+
+# Update an existing state machine
+- name: Change IAM Role and tags of HelloWorld state machine
+ aws_step_functions_state_machine:
+ name: HelloWorldStateMachine
+ definition: "{{ lookup('file','state_machine.json') }}"
+ role_arn: arn:aws:iam::987654321012:role/service-role/anotherStepFunctionsRole
+ tags:
+ otherTag: aDifferentTag
+
+# Remove the AWS Step Functions state machine
+- name: Delete HelloWorld state machine
+ aws_step_functions_state_machine:
+ name: HelloWorldStateMachine
+ state: absent
+'''
+
+RETURN = '''
+state_machine_arn:
+ description: ARN of the AWS Step Functions state machine
+ type: str
+ returned: always
+'''
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, compare_aws_tags, boto3_tag_list_to_ansible_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
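+# Dispatch on the desired state; create(), update() and remove() each exit the module when
+# they make a change, so falling through here means the state machine is already up to date.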
+def manage_state_machine(state, sfn_client, module):
+ state_machine_arn = get_state_machine_arn(sfn_client, module)
+
+ if state == 'present':
+ if state_machine_arn is None:
+ create(sfn_client, module)
+ else:
+ update(state_machine_arn, sfn_client, module)
+ elif state == 'absent':
+ if state_machine_arn is not None:
+ remove(state_machine_arn, sfn_client, module)
+
+ check_mode(module, msg='State is up-to-date.')
+ module.exit_json(changed=False)
+
+
+def create(sfn_client, module):
+ check_mode(module, msg='State machine would be created.', changed=True)
+
+ tags = module.params.get('tags')
+ sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') if tags else []
+
+ state_machine = sfn_client.create_state_machine(
+ name=module.params.get('name'),
+ definition=module.params.get('definition'),
+ roleArn=module.params.get('role_arn'),
+ tags=sfn_tags
+ )
+ module.exit_json(changed=True, state_machine_arn=state_machine.get('stateMachineArn'))
+
+
+def remove(state_machine_arn, sfn_client, module):
+ check_mode(module, msg='State machine would be deleted: {0}'.format(state_machine_arn), changed=True)
+
+ sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
+ module.exit_json(changed=True, state_machine_arn=state_machine_arn)
+
+
+def update(state_machine_arn, sfn_client, module):
+ tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module)
+
+ if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove:
+ check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True)
+
+ sfn_client.update_state_machine(
+ stateMachineArn=state_machine_arn,
+ definition=module.params.get('definition'),
+ roleArn=module.params.get('role_arn')
+ )
+ sfn_client.untag_resource(
+ resourceArn=state_machine_arn,
+ tagKeys=tags_to_remove
+ )
+ sfn_client.tag_resource(
+ resourceArn=state_machine_arn,
+ tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value')
+ )
+
+ module.exit_json(changed=True, state_machine_arn=state_machine_arn)
+
+
+def compare_tags(state_machine_arn, sfn_client, module):
+ new_tags = module.params.get('tags')
+ current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags')
+ return compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get('purge_tags'))
+
+
+def params_changed(state_machine_arn, sfn_client, module):
+ """
+ Check whether the state machine definition or IAM Role ARN is different
+ from the existing state machine parameters.
+ """
+ current = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn)
+ return current.get('definition') != module.params.get('definition') or current.get('roleArn') != module.params.get('role_arn')
+
+
+def get_state_machine_arn(sfn_client, module):
+ """
+ Finds the state machine ARN based on the name parameter. Returns None if
+ there is no state machine with this name.
+ """
+ target_name = module.params.get('name')
+ all_state_machines = sfn_client.list_state_machines(aws_retry=True).get('stateMachines')
+
+ for state_machine in all_state_machines:
+ if state_machine.get('name') == target_name:
+ return state_machine.get('stateMachineArn')
+
+
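+# In check mode, exit immediately with the supplied message and changed flag; otherwise do
+# nothing and let the real operation proceed.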
+def check_mode(module, msg='', changed=False):
+ if module.check_mode:
+ module.exit_json(changed=changed, output=msg)
+
+
+def main():
+ module_args = dict(
+ name=dict(type='str', required=True),
+ definition=dict(type='json'),
+ role_arn=dict(type='str'),
+ state=dict(choices=['present', 'absent'], default='present'),
+ tags=dict(default=None, type='dict'),
+ purge_tags=dict(default=True, type='bool'),
+ )
+ module = AnsibleAWSModule(
+ argument_spec=module_args,
+ required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])],
+ supports_check_mode=True
+ )
+
+ sfn_client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5))
+ state = module.params.get('state')
+
+ try:
+ manage_state_machine(state, sfn_client, module)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg='Failed to manage state machine')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/aws_step_functions_state_machine_execution.py b/test/support/integration/plugins/modules/aws_step_functions_state_machine_execution.py
new file mode 100644
index 0000000000..a6e0d7182d
--- /dev/null
+++ b/test/support/integration/plugins/modules/aws_step_functions_state_machine_execution.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# Copyright (c) 2019, Prasad Katti (@prasadkatti)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+---
+module: aws_step_functions_state_machine_execution
+
+short_description: Start or stop execution of an AWS Step Functions state machine.
+
+version_added: "2.10"
+
+description:
+ - Start or stop execution of a state machine in AWS Step Functions.
+
+options:
+ action:
+ description: Desired action (start or stop) for a state machine execution.
+ default: start
+ choices: [ start, stop ]
+ type: str
+ name:
+ description: Name of the execution.
+ type: str
+ execution_input:
+ description: The JSON input data for the execution.
+ type: json
+ default: {}
+ state_machine_arn:
+ description: The ARN of the state machine that will be executed.
+ type: str
+ execution_arn:
+ description: The ARN of the execution you wish to stop.
+ type: str
+ cause:
+ description: A detailed explanation of the cause for stopping the execution.
+ type: str
+ default: ''
+ error:
+ description: The error code of the failure to pass in when stopping the execution.
+ type: str
+ default: ''
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+
+author:
+ - Prasad Katti (@prasadkatti)
+'''
+
+EXAMPLES = '''
+- name: Start an execution of a state machine
+ aws_step_functions_state_machine_execution:
+ name: an_execution_name
+ execution_input: '{ "IsHelloWorldExample": true }'
+ state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine"
+
+- name: Stop an execution of a state machine
+ aws_step_functions_state_machine_execution:
+ action: stop
+ execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
+ cause: "cause of task failure"
+ error: "error code of the failure"
+'''
+
+RETURN = '''
+execution_arn:
+ description: ARN of the AWS Step Functions state machine execution.
+ type: str
+ returned: if action == start and changed == True
+ sample: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
+start_date:
+ description: The date the execution is started.
+ type: str
+ returned: if action == start and changed == True
+ sample: "2019-11-02T22:39:49.071000-07:00"
+stop_date:
+ description: The date the execution is stopped.
+ type: str
+ returned: if action == stop
+ sample: "2019-11-02T22:39:49.071000-07:00"
+'''
+
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def start_execution(module, sfn_client):
+ '''
+ start_execution uses execution name to determine if a previous execution already exists.
+ If an execution with the provided name already exists, client.start_execution is not called.
+ '''
+
+ state_machine_arn = module.params.get('state_machine_arn')
+ name = module.params.get('name')
+ execution_input = module.params.get('execution_input')
+
+ try:
+ # list_executions is eventually consistent
+ page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn)
+
+ for execution in page_iterators.build_full_result()['executions']:
+ if name == execution['name']:
+ check_mode(module, msg='State machine execution already exists.', changed=False)
+ module.exit_json(changed=False)
+
+ check_mode(module, msg='State machine execution would be started.', changed=True)
+ res_execution = sfn_client.start_execution(
+ stateMachineArn=state_machine_arn,
+ name=name,
+ input=execution_input
+ )
+ except (ClientError, BotoCoreError) as e:
+ if isinstance(e, ClientError) and e.response['Error']['Code'] == 'ExecutionAlreadyExists':
+ # the existing-execution check above makes this path effectively unreachable
+ module.exit_json(changed=False)
+ module.fail_json_aws(e, msg="Failed to start execution.")
+
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution))
+
+
+def stop_execution(module, sfn_client):
+
+ cause = module.params.get('cause')
+ error = module.params.get('error')
+ execution_arn = module.params.get('execution_arn')
+
+ try:
+ # describe_execution is eventually consistent
+ execution_status = sfn_client.describe_execution(executionArn=execution_arn)['status']
+ if execution_status != 'RUNNING':
+ check_mode(module, msg='State machine execution is not running.', changed=False)
+ module.exit_json(changed=False)
+
+ check_mode(module, msg='State machine execution would be stopped.', changed=True)
+ res = sfn_client.stop_execution(
+ executionArn=execution_arn,
+ cause=cause,
+ error=error
+ )
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to stop execution.")
+
+ module.exit_json(changed=True, **camel_dict_to_snake_dict(res))
+
+
+def check_mode(module, msg='', changed=False):
+ if module.check_mode:
+ module.exit_json(changed=changed, output=msg)
+
+
+def main():
+ module_args = dict(
+ action=dict(choices=['start', 'stop'], default='start'),
+ name=dict(type='str'),
+ execution_input=dict(type='json', default={}),
+ state_machine_arn=dict(type='str'),
+ cause=dict(type='str', default=''),
+ error=dict(type='str', default=''),
+ execution_arn=dict(type='str')
+ )
+ module = AnsibleAWSModule(
+ argument_spec=module_args,
+ required_if=[('action', 'start', ['name', 'state_machine_arn']),
+ ('action', 'stop', ['execution_arn']),
+ ],
+ supports_check_mode=True
+ )
+
+ sfn_client = module.client('stepfunctions')
+
+ action = module.params.get('action')
+ if action == "start":
+ start_execution(module, sfn_client)
+ else:
+ stop_execution(module, sfn_client)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_appserviceplan.py b/test/support/integration/plugins/modules/azure_rm_appserviceplan.py
new file mode 100644
index 0000000000..ee871c352b
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_appserviceplan.py
@@ -0,0 +1,379 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_appserviceplan
+version_added: "2.7"
+short_description: Manage App Service Plan
+description:
+ - Create, update and delete instance of App Service Plan.
+
+options:
+ resource_group:
+ description:
+ - Name of the resource group to which the resource belongs.
+ required: True
+
+ name:
+ description:
+ - Unique name of the app service plan to create or update.
+ required: True
+
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+
+ sku:
+ description:
+ - The pricing tiers, e.g., C(F1), C(D1), C(B1), C(B2), C(B3), C(S1), C(P1), C(P1V2) etc.
+ - Please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/plans/) for more detail.
+ - For a Linux app service plan, please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/) for more detail.
+
+ is_linux:
+ description:
+ - Whether to host the web app on a Linux worker.
+ type: bool
+ default: false
+
+ number_of_workers:
+ description:
+ - Number of workers to be allocated.
+
+ state:
+ description:
+ - Assert the state of the app service plan.
+ - Use C(present) to create or update an app service plan and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Yunge Zhu (@yungezz)
+
+'''
+
+EXAMPLES = '''
+ - name: Create a windows app service plan
+ azure_rm_appserviceplan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ location: eastus
+ sku: S1
+
+ - name: Create a linux app service plan
+ azure_rm_appserviceplan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ location: eastus
+ sku: S1
+ is_linux: true
+ number_of_workers: 1
+
+ - name: update sku of existing windows app service plan
+ azure_rm_appserviceplan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ location: eastus
+ sku: S2
+'''
+
+RETURN = '''
+azure_appserviceplan:
+ description: Facts about the current state of the app service plan.
+ returned: always
+ type: dict
+ sample: {
+ "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppPlan"
+ }
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrestazure.azure_operation import AzureOperationPoller
+ from msrest.serialization import Model
+ from azure.mgmt.web.models import (
+ app_service_plan, AppServicePlan, SkuDescription
+ )
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
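+# Map the friendly FREE/SHARED names to their F1/D1 SKU codes so comparisons against the
+# plan's sku.size use the same form.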
+def _normalize_sku(sku):
+ if sku is None:
+ return sku
+
+ sku = sku.upper()
+ if sku == 'FREE':
+ return 'F1'
+ elif sku == 'SHARED':
+ return 'D1'
+ return sku
+
+
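+# Map a SKU size (e.g. S1, P1V2) to its pricing tier name (STANDARD, PREMIUMV2, ...);
+# returns None for unrecognised values.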
+def get_sku_name(tier):
+ tier = tier.upper()
+ if tier == 'F1' or tier == "FREE":
+ return 'FREE'
+ elif tier == 'D1' or tier == "SHARED":
+ return 'SHARED'
+ elif tier in ['B1', 'B2', 'B3', 'BASIC']:
+ return 'BASIC'
+ elif tier in ['S1', 'S2', 'S3']:
+ return 'STANDARD'
+ elif tier in ['P1', 'P2', 'P3']:
+ return 'PREMIUM'
+ elif tier in ['P1V2', 'P2V2', 'P3V2']:
+ return 'PREMIUMV2'
+ else:
+ return None
+
+
+def appserviceplan_to_dict(plan):
+ return dict(
+ id=plan.id,
+ name=plan.name,
+ kind=plan.kind,
+ location=plan.location,
+ reserved=plan.reserved,
+ is_linux=plan.reserved,
+ provisioning_state=plan.provisioning_state,
+ status=plan.status,
+ target_worker_count=plan.target_worker_count,
+ sku=dict(
+ name=plan.sku.name,
+ size=plan.sku.size,
+ tier=plan.sku.tier,
+ family=plan.sku.family,
+ capacity=plan.sku.capacity
+ ),
+ resource_group=plan.resource_group,
+ number_of_sites=plan.number_of_sites,
+ tags=plan.tags if plan.tags else None
+ )
+
+
+class AzureRMAppServicePlans(AzureRMModuleBase):
+ """Configuration class for an Azure RM App Service Plan resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ location=dict(
+ type='str'
+ ),
+ sku=dict(
+ type='str'
+ ),
+ is_linux=dict(
+ type='bool',
+ default=False
+ ),
+ number_of_workers=dict(
+ type='str'
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ self.resource_group = None
+ self.name = None
+ self.location = None
+
+ self.sku = None
+ self.is_linux = None
+ self.number_of_workers = 1
+
+ self.tags = None
+
+ self.results = dict(
+ changed=False,
+ ansible_facts=dict(azure_appserviceplan=None)
+ )
+ self.state = None
+
+ super(AzureRMAppServicePlans, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=True)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ if kwargs[key]:
+ setattr(self, key, kwargs[key])
+
+ old_response = None
+ response = None
+ to_be_updated = False
+
+ # set location
+ resource_group = self.get_resource_group(self.resource_group)
+ if not self.location:
+ self.location = resource_group.location
+
+ # get app service plan
+ old_response = self.get_plan()
+
+ # if not existing
+ if not old_response:
+ self.log("App Service plan doesn't exist")
+
+ if self.state == "present":
+ to_be_updated = True
+
+ if not self.sku:
+ self.fail('Please specify sku when creating the app service plan')
+
+ else:
+ # existing app service plan, do update
+ self.log("App Service Plan already exists")
+
+ if self.state == 'present':
+ self.log('Result: {0}'.format(old_response))
+
+ update_tags, newtags = self.update_tags(old_response.get('tags', dict()))
+
+ if update_tags:
+ to_be_updated = True
+ self.tags = newtags
+
+ # check if sku changed
+ if self.sku and _normalize_sku(self.sku) != old_response['sku']['size']:
+ to_be_updated = True
+
+ # check if number_of_workers changed
+ if self.number_of_workers and int(self.number_of_workers) != old_response['sku']['capacity']:
+ to_be_updated = True
+
+ if self.is_linux and self.is_linux != old_response['reserved']:
+ self.fail("Operation not allowed: cannot update reserved of app service plan.")
+
+ if old_response:
+ self.results['id'] = old_response['id']
+
+ if to_be_updated:
+ self.log('Need to Create/Update app service plan')
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ response = self.create_or_update_plan()
+ self.results['id'] = response['id']
+
+ if self.state == 'absent' and old_response:
+ self.log("Delete app service plan")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_plan()
+
+ self.log('App service plan instance deleted')
+
+ return self.results
+
+ def get_plan(self):
+ '''
+ Gets app service plan
+ :return: deserialized app service plan dictionary
+ '''
+ self.log("Get App Service Plan {0}".format(self.name))
+
+ try:
+ response = self.web_client.app_service_plans.get(self.resource_group, self.name)
+ if response:
+ self.log("Response : {0}".format(response))
+ self.log("App Service Plan : {0} found".format(response.name))
+
+ return appserviceplan_to_dict(response)
+ except CloudError as ex:
+ self.log("Didn't find app service plan {0} in resource group {1}".format(self.name, self.resource_group))
+
+ return False
+
+ def create_or_update_plan(self):
+ '''
+ Creates app service plan
+ :return: deserialized app service plan dictionary
+ '''
+ self.log("Create App Service Plan {0}".format(self.name))
+
+ try:
+ # normalize sku
+ sku = _normalize_sku(self.sku)
+
+ sku_def = SkuDescription(tier=get_sku_name(
+ sku), name=sku, capacity=self.number_of_workers)
+ plan_def = AppServicePlan(
+ location=self.location, app_service_plan_name=self.name, sku=sku_def, reserved=self.is_linux, tags=self.tags if self.tags else None)
+
+ response = self.web_client.app_service_plans.create_or_update(self.resource_group, self.name, plan_def)
+
+ if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
+ response = self.get_poller_result(response)
+
+ self.log("Response : {0}".format(response))
+
+ return appserviceplan_to_dict(response)
+ except CloudError as ex:
+ self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))
+
+ def delete_plan(self):
+ '''
+ Deletes specified App service plan in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the App service plan {0}".format(self.name))
+ try:
+ response = self.web_client.app_service_plans.delete(resource_group_name=self.resource_group,
+ name=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete App service plan.')
+ self.fail(
+ "Error deleting the App service plan : {0}".format(str(e)))
+
+ return True
+
+
+def main():
+ """Main execution"""
+ AzureRMAppServicePlans()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_functionapp.py b/test/support/integration/plugins/modules/azure_rm_functionapp.py
new file mode 100644
index 0000000000..0c372a88de
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_functionapp.py
@@ -0,0 +1,421 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Thomas Stringer <tomstr@microsoft.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_functionapp
+version_added: "2.4"
+short_description: Manage Azure Function Apps
+description:
+ - Create, update or delete an Azure Function App.
+options:
+ resource_group:
+ description:
+ - Name of resource group.
+ required: true
+ aliases:
+ - resource_group_name
+ name:
+ description:
+ - Name of the Azure Function App.
+ required: true
+ location:
+ description:
+ - Valid Azure location. Defaults to location of the resource group.
+ plan:
+ description:
+ - App service plan.
+ - It can be name of existing app service plan in same resource group as function app.
+ - It can be resource id of existing app service plan.
+ - Resource id. For example /subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>.
+ - It can be a dict which contains C(name), C(resource_group).
+ - C(name). Name of app service plan.
+ - C(resource_group). Resource group name of app service plan.
+ version_added: "2.8"
+ container_settings:
+ description: Web app container settings.
+ suboptions:
+ name:
+ description:
+ - Name of container. For example "imagename:tag".
+ registry_server_url:
+ description:
+ - Container registry server url. For example C(mydockerregistry.io).
+ registry_server_user:
+ description:
+ - The container registry server user name.
+ registry_server_password:
+ description:
+ - The container registry server password.
+ version_added: "2.8"
+ storage_account:
+ description:
+ - Name of the storage account to use.
+ required: true
+ aliases:
+ - storage
+ - storage_account_name
+ app_settings:
+ description:
+ - Dictionary containing application settings.
+ state:
+ description:
+ - Assert the state of the Function App. Use C(present) to create or update a Function App and C(absent) to delete.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Thomas Stringer (@trstringer)
+'''
+
+EXAMPLES = '''
+- name: Create a function app
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ storage_account: myStorageAccount
+
+- name: Create a function app with app settings
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ storage_account: myStorageAccount
+ app_settings:
+ setting1: value1
+ setting2: value2
+
+- name: Create container based function app
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ storage_account: myStorageAccount
+ plan:
+ resource_group: myResourceGroup
+ name: myAppPlan
+ container_settings:
+ name: httpd
+ registry_server_url: index.docker.io
+
+- name: Delete a function app
+ azure_rm_functionapp:
+ resource_group: myResourceGroup
+ name: myFunctionApp
+ state: absent
+'''
+
+RETURN = '''
+state:
+ description:
+ - Current state of the Azure Function App.
+ returned: success
+ type: dict
+ example:
+ id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myFunctionApp
+ name: myfunctionapp
+ kind: functionapp
+ location: East US
+ type: Microsoft.Web/sites
+ state: Running
+ host_names:
+ - myfunctionapp.azurewebsites.net
+ repository_site_name: myfunctionapp
+ usage_state: Normal
+ enabled: true
+ enabled_host_names:
+ - myfunctionapp.azurewebsites.net
+ - myfunctionapp.scm.azurewebsites.net
+ availability_state: Normal
+ host_name_ssl_states:
+ - name: myfunctionapp.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Standard
+ - name: myfunctionapp.scm.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Repository
+ server_farm_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/EastUSPlan
+ reserved: false
+ last_modified_time_utc: 2017-08-22T18:54:01.190Z
+ scm_site_also_stopped: false
+ client_affinity_enabled: true
+ client_cert_enabled: false
+ host_names_disabled: false
+ outbound_ip_addresses: ............
+ container_size: 1536
+ daily_memory_time_quota: 0
+ resource_group: myResourceGroup
+ default_host_name: myfunctionapp.azurewebsites.net
+''' # NOQA
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.mgmt.web.models import (
+ site_config, app_service_plan, Site, SiteConfig, NameValuePair, SiteSourceControl,
+ AppServicePlan, SkuDescription
+ )
+ from azure.mgmt.resource.resources import ResourceManagementClient
+ from msrest.polling import LROPoller
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+container_settings_spec = dict(
+ name=dict(type='str', required=True),
+ registry_server_url=dict(type='str'),
+ registry_server_user=dict(type='str'),
+ registry_server_password=dict(type='str', no_log=True)
+)
+
+
+class AzureRMFunctionApp(AzureRMModuleBase):
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ resource_group=dict(type='str', required=True, aliases=['resource_group_name']),
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ location=dict(type='str'),
+ storage_account=dict(
+ type='str',
+ aliases=['storage', 'storage_account_name']
+ ),
+ app_settings=dict(type='dict'),
+ plan=dict(
+ type='raw'
+ ),
+ container_settings=dict(
+ type='dict',
+ options=container_settings_spec
+ )
+ )
+
+ self.results = dict(
+ changed=False,
+ state=dict()
+ )
+
+ self.resource_group = None
+ self.name = None
+ self.state = None
+ self.location = None
+ self.storage_account = None
+ self.app_settings = None
+ self.plan = None
+ self.container_settings = None
+
+ required_if = [('state', 'present', ['storage_account'])]
+
+ super(AzureRMFunctionApp, self).__init__(
+ self.module_arg_spec,
+ supports_check_mode=True,
+ required_if=required_if
+ )
+
+ def exec_module(self, **kwargs):
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+ if self.app_settings is None:
+ self.app_settings = dict()
+
+ try:
+ resource_group = self.rm_client.resource_groups.get(self.resource_group)
+ except CloudError:
+ self.fail('Unable to retrieve resource group')
+
+ self.location = self.location or resource_group.location
+
+ try:
+ function_app = self.web_client.web_apps.get(
+ resource_group_name=self.resource_group,
+ name=self.name
+ )
+ # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
+ exists = function_app is not None
+ except CloudError as exc:
+ function_app = None
+ exists = False
+
+ if self.state == 'absent':
+ if exists:
+ if self.check_mode:
+ self.results['changed'] = True
+ return self.results
+ try:
+ self.web_client.web_apps.delete(
+ resource_group_name=self.resource_group,
+ name=self.name
+ )
+ self.results['changed'] = True
+ except CloudError as exc:
+ self.fail('Failure while deleting web app: {0}'.format(exc))
+ else:
+ self.results['changed'] = False
+ else:
+ kind = 'functionapp'
+ linux_fx_version = None
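+ # Container-based function apps use a different site kind and a linux_fx_version of the
+ # form 'DOCKER|<registry_server_url>/<name>'; registry credentials are passed via app settings.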
+ if self.container_settings and self.container_settings.get('name'):
+ kind = 'functionapp,linux,container'
+ linux_fx_version = 'DOCKER|'
+ if self.container_settings.get('registry_server_url'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
+ linux_fx_version += self.container_settings['registry_server_url'] + '/'
+ linux_fx_version += self.container_settings['name']
+ if self.container_settings.get('registry_server_user'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings.get('registry_server_user')
+
+ if self.container_settings.get('registry_server_password'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings.get('registry_server_password')
+
+ if not self.plan and function_app:
+ self.plan = function_app.server_farm_id
+
+ if not exists:
+ function_app = Site(
+ location=self.location,
+ kind=kind,
+ site_config=SiteConfig(
+ app_settings=self.aggregated_app_settings(),
+ scm_type='LocalGit'
+ )
+ )
+ self.results['changed'] = True
+ else:
+ self.results['changed'], function_app = self.update(function_app)
+
+ # get app service plan
+ if self.plan:
+ if isinstance(self.plan, dict):
+ self.plan = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Web/serverfarms/{2}".format(
+ self.subscription_id,
+ self.plan.get('resource_group', self.resource_group),
+ self.plan.get('name')
+ )
+ function_app.server_farm_id = self.plan
+
+ # set linux fx version
+ if linux_fx_version:
+ function_app.site_config.linux_fx_version = linux_fx_version
+
+ if self.check_mode:
+ self.results['state'] = function_app.as_dict()
+ elif self.results['changed']:
+ try:
+ new_function_app = self.web_client.web_apps.create_or_update(
+ resource_group_name=self.resource_group,
+ name=self.name,
+ site_envelope=function_app
+ ).result()
+ self.results['state'] = new_function_app.as_dict()
+ except CloudError as exc:
+ self.fail('Error creating or updating web app: {0}'.format(exc))
+
+ return self.results
+
+ def update(self, source_function_app):
+ """Update the Site object if there are any changes"""
+
+ source_app_settings = self.web_client.web_apps.list_application_settings(
+ resource_group_name=self.resource_group,
+ name=self.name
+ )
+
+ changed, target_app_settings = self.update_app_settings(source_app_settings.properties)
+
+ source_function_app.site_config = SiteConfig(
+ app_settings=target_app_settings,
+ scm_type='LocalGit'
+ )
+
+ return changed, source_function_app
+
+ def update_app_settings(self, source_app_settings):
+ """Update app settings"""
+
+ target_app_settings = self.aggregated_app_settings()
+ target_app_settings_dict = dict([(i.name, i.value) for i in target_app_settings])
+ return target_app_settings_dict != source_app_settings, target_app_settings
+
+ def necessary_functionapp_settings(self):
+ """Construct the necessary app settings required for an Azure Function App"""
+
+ function_app_settings = []
+
+ if self.container_settings is None:
+ for key in ['AzureWebJobsStorage', 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING', 'AzureWebJobsDashboard']:
+ function_app_settings.append(NameValuePair(name=key, value=self.storage_connection_string))
+ function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~1'))
+ function_app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='6.5.0'))
+ function_app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=self.name))
+ else:
+ function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
+ function_app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE', value=False))
+ function_app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=self.storage_connection_string))
+
+ return function_app_settings
+
+ def aggregated_app_settings(self):
+ """Combine both system and user app settings"""
+
+ function_app_settings = self.necessary_functionapp_settings()
+ for app_setting_key in self.app_settings:
+ found_setting = None
+ for s in function_app_settings:
+ if s.name == app_setting_key:
+ found_setting = s
+ break
+ if found_setting:
+ found_setting.value = self.app_settings[app_setting_key]
+ else:
+ function_app_settings.append(NameValuePair(
+ name=app_setting_key,
+ value=self.app_settings[app_setting_key]
+ ))
+ return function_app_settings
+
+ @property
+ def storage_connection_string(self):
+ """Construct the storage account connection string"""
+
+ return 'DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}'.format(
+ self.storage_account,
+ self.storage_key
+ )
+
+ @property
+ def storage_key(self):
+ """Retrieve the storage account key"""
+
+ return self.storage_client.storage_accounts.list_keys(
+ resource_group_name=self.resource_group,
+ account_name=self.storage_account
+ ).keys[0].value
+
+
+def main():
+ """Main function execution"""
+
+ AzureRMFunctionApp()
+
+
+if __name__ == '__main__':
+ main()
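The container branch in exec_module() above composes the SiteConfig linux_fx_version string from the registry URL and image name, and pushes the registry credentials into the app settings. A minimal standalone sketch of that composition, assuming illustrative registry/image values that are not part of the module:

    def compose_linux_fx_version(container_settings, app_settings):
        # Mirrors the 'functionapp,linux,container' branch of exec_module() above.
        linux_fx_version = 'DOCKER|'
        if container_settings.get('registry_server_url'):
            app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + container_settings['registry_server_url']
            linux_fx_version += container_settings['registry_server_url'] + '/'
        linux_fx_version += container_settings['name']
        return linux_fx_version

    settings = {}
    print(compose_linux_fx_version(
        {'name': 'httpd:latest', 'registry_server_url': 'myregistry.azurecr.io'}, settings))
    # DOCKER|myregistry.azurecr.io/httpd:latest
    print(settings['DOCKER_REGISTRY_SERVER_URL'])
    # https://myregistry.azurecr.io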
diff --git a/test/support/integration/plugins/modules/azure_rm_functionapp_info.py b/test/support/integration/plugins/modules/azure_rm_functionapp_info.py
new file mode 100644
index 0000000000..0cd5b6f60b
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_functionapp_info.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Thomas Stringer, <tomstr@microsoft.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_functionapp_info
+version_added: "2.9"
+short_description: Get Azure Function App facts
+description:
+ - Get facts for one Azure Function App or all Function Apps within a resource group.
+options:
+ name:
+ description:
+ - Only show results for a specific Function App.
+ resource_group:
+ description:
+ - Limit results to a resource group. Required when filtering by name.
+ aliases:
+ - resource_group_name
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Thomas Stringer (@trstringer)
+'''
+
+EXAMPLES = '''
+ - name: Get facts for one Function App
+ azure_rm_functionapp_info:
+ resource_group: myResourceGroup
+ name: myfunctionapp
+
+ - name: Get facts for all Function Apps in a resource group
+ azure_rm_functionapp_info:
+ resource_group: myResourceGroup
+
+ - name: Get facts for all Function Apps by tags
+ azure_rm_functionapp_info:
+ tags:
+ - testing
+'''
+
+RETURN = '''
+azure_functionapps:
+ description:
+ - List of Azure Function Apps dicts.
+ returned: always
+ type: list
+ example:
+ id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/sites/myfunctionapp
+ name: myfunctionapp
+ kind: functionapp
+ location: East US
+ type: Microsoft.Web/sites
+ state: Running
+ host_names:
+ - myfunctionapp.azurewebsites.net
+ repository_site_name: myfunctionapp
+ usage_state: Normal
+ enabled: true
+ enabled_host_names:
+ - myfunctionapp.azurewebsites.net
+ - myfunctionapp.scm.azurewebsites.net
+ availability_state: Normal
+ host_name_ssl_states:
+ - name: myfunctionapp.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Standard
+ - name: myfunctionapp.scm.azurewebsites.net
+ ssl_state: Disabled
+ host_type: Repository
+ server_farm_id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/serverfarms/EastUSPlan
+ reserved: false
+ last_modified_time_utc: 2017-08-22T18:54:01.190Z
+ scm_site_also_stopped: false
+ client_affinity_enabled: true
+ client_cert_enabled: false
+ host_names_disabled: false
+ outbound_ip_addresses: ............
+ container_size: 1536
+ daily_memory_time_quota: 0
+ resource_group: myResourceGroup
+ default_host_name: myfunctionapp.azurewebsites.net
+'''
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+except Exception:
+ # This is handled in azure_rm_common
+ pass
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+
+class AzureRMFunctionAppInfo(AzureRMModuleBase):
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ name=dict(type='str'),
+ resource_group=dict(type='str', aliases=['resource_group_name']),
+ tags=dict(type='list'),
+ )
+
+ self.results = dict(
+ changed=False,
+ ansible_info=dict(azure_functionapps=[])
+ )
+
+ self.name = None
+ self.resource_group = None
+ self.tags = None
+
+ super(AzureRMFunctionAppInfo, self).__init__(
+ self.module_arg_spec,
+ supports_tags=False,
+ facts_module=True
+ )
+
+ def exec_module(self, **kwargs):
+
+ is_old_facts = self.module._name == 'azure_rm_functionapp_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_functionapp_facts' module has been renamed to 'azure_rm_functionapp_info'", version='2.13')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ if self.name and not self.resource_group:
+ self.fail("Parameter error: resource group required when filtering by name.")
+
+ if self.name:
+ self.results['ansible_info']['azure_functionapps'] = self.get_functionapp()
+ elif self.resource_group:
+ self.results['ansible_info']['azure_functionapps'] = self.list_resource_group()
+ else:
+ self.results['ansible_info']['azure_functionapps'] = self.list_all()
+
+ return self.results
+
+ def get_functionapp(self):
+ self.log('Get properties for Function App {0}'.format(self.name))
+ function_app = None
+ result = []
+
+ try:
+ function_app = self.web_client.web_apps.get(
+ self.resource_group,
+ self.name
+ )
+ except CloudError:
+ pass
+
+ if function_app and self.has_tags(function_app.tags, self.tags):
+ result = function_app.as_dict()
+
+ return [result]
+
+ def list_resource_group(self):
+ self.log('List items')
+ try:
+ response = self.web_client.web_apps.list_by_resource_group(self.resource_group)
+ except Exception as exc:
+ self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))
+
+ results = []
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ results.append(item.as_dict())
+ return results
+
+ def list_all(self):
+ self.log('List all items')
+ try:
+ response = self.web_client.web_apps.list()
+ except Exception as exc:
+ self.fail("Error listing all items - {0}".format(str(exc)))
+
+ results = []
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ results.append(item.as_dict())
+ return results
+
+
+def main():
+ AzureRMFunctionAppInfo()
+
+
+if __name__ == '__main__':
+ main()
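Every retrieved app is filtered through has_tags() from azure_rm_common before it is returned. A rough standalone approximation of the 'key' or 'key:value' filtering that the tags option documents; the real helper lives in azure_rm_common and may differ in detail:

    def matches_tags(resource_tags, wanted):
        # wanted is a list of 'key' or 'key:value' strings; an empty filter matches everything.
        resource_tags = resource_tags or {}
        for entry in wanted or []:
            key, _, value = entry.partition(':')
            if key not in resource_tags:
                return False
            if value and resource_tags[key] != value:
                return False
        return True

    print(matches_tags({'testing': 'yes'}, ['testing']))      # True
    print(matches_tags({'testing': 'yes'}, ['testing:no']))   # False
    print(matches_tags({}, None))                             # True (no filter)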
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py
new file mode 100644
index 0000000000..212cf7959d
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbconfiguration
+version_added: "2.8"
+short_description: Manage Configuration instance
+description:
+ - Create, update and delete instance of Configuration.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource.
+ required: True
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ name:
+ description:
+ - The name of the server configuration.
+ required: True
+ value:
+ description:
+ - Value of the configuration.
+ state:
+ description:
+ - Assert the state of the MariaDB configuration. Use C(present) to update a setting, or C(absent) to reset it to its default value.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+'''
+
+EXAMPLES = '''
+ - name: Update MariaDB Server setting
+ azure_rm_mariadbconfiguration:
+ resource_group: myResourceGroup
+ server_name: myServer
+ name: event_scheduler
+ value: "ON"
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myServer/confi
+ gurations/event_scheduler"
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class Actions:
+ NoAction, Create, Update, Delete = range(4)
+
+
+class AzureRMMariaDbConfiguration(AzureRMModuleBase):
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ value=dict(
+ type='str'
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ self.value = None
+
+ self.results = dict(changed=False)
+ self.state = None
+ self.to_do = Actions.NoAction
+
+ super(AzureRMMariaDbConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=False)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec.keys()):
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+
+ old_response = None
+ response = None
+
+ old_response = self.get_configuration()
+
+ if not old_response:
+ self.log("Configuration instance doesn't exist")
+ if self.state == 'absent':
+ self.log("Old instance didn't exist")
+ else:
+ self.to_do = Actions.Create
+ else:
+ self.log("Configuration instance already exists")
+ if self.state == 'absent' and old_response['source'] == 'user-override':
+ self.to_do = Actions.Delete
+ elif self.state == 'present':
+ self.log("Need to check if Configuration instance has to be deleted or may be updated")
+ if self.value != old_response.get('value'):
+ self.to_do = Actions.Update
+
+ if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
+ self.log("Need to Create / Update the Configuration instance")
+
+ if self.check_mode:
+ self.results['changed'] = True
+ return self.results
+
+ response = self.create_update_configuration()
+
+ self.results['changed'] = True
+ self.log("Creation / Update done")
+ elif self.to_do == Actions.Delete:
+ self.log("Configuration instance deleted")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_configuration()
+ else:
+ self.log("Configuration instance unchanged")
+ self.results['changed'] = False
+ response = old_response
+
+ if response:
+ self.results["id"] = response["id"]
+
+ return self.results
+
+ def create_update_configuration(self):
+ self.log("Creating / Updating the Configuration instance {0}".format(self.name))
+
+ try:
+ response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ configuration_name=self.name,
+ value=self.value,
+ source='user-override')
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the Configuration instance.')
+ self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
+ return response.as_dict()
+
+ def delete_configuration(self):
+ self.log("Deleting the Configuration instance {0}".format(self.name))
+ try:
+ response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ configuration_name=self.name,
+ source='system-default')
+ except CloudError as e:
+ self.log('Error attempting to delete the Configuration instance.')
+ self.fail("Error deleting the Configuration instance: {0}".format(str(e)))
+
+ return True
+
+ def get_configuration(self):
+ self.log("Checking if the Configuration instance {0} is present".format(self.name))
+ found = False
+ try:
+ response = self.mariadb_client.configurations.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ configuration_name=self.name)
+ found = True
+ self.log("Response : {0}".format(response))
+ self.log("Configuration instance : {0} found".format(response.name))
+ except CloudError as e:
+ self.log('Did not find the Configuration instance.')
+ if found is True:
+ return response.as_dict()
+
+ return False
+
+
+def main():
+ """Main execution"""
+ AzureRMMariaDbConfiguration()
+
+
+if __name__ == '__main__':
+ main()
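exec_module() above reduces to a small decision: create when the configuration does not exist, delete (reset to system-default) when state is absent and the value was user-overridden, update when the desired value differs, otherwise do nothing. A condensed sketch of that decision using plain dicts in place of the SDK objects; the function name is illustrative:

    NoAction, Create, Update, Delete = range(4)

    def decide(state, existing, value):
        # existing is the dict returned by get_configuration(), or False/None when absent.
        if not existing:
            return Create if state == 'present' else NoAction
        if state == 'absent' and existing.get('source') == 'user-override':
            return Delete
        if state == 'present' and value != existing.get('value'):
            return Update
        return NoAction

    print(decide('present', None, 'ON'))                                          # 1 (Create)
    print(decide('present', {'value': 'OFF', 'source': 'user-override'}, 'ON'))   # 2 (Update)
    print(decide('absent', {'value': 'ON', 'source': 'user-override'}, None))     # 3 (Delete)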
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py
new file mode 100644
index 0000000000..ad38f1255f
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbconfiguration_info.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbconfiguration_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Configuration facts
+description:
+ - Get facts of Azure MariaDB Configuration.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ type: str
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ type: str
+ name:
+ description:
+ - Setting name.
+ type: str
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get specific setting of MariaDB Server
+ azure_rm_mariadbconfiguration_info:
+ resource_group: myResourceGroup
+ server_name: testserver
+ name: deadlock_timeout
+
+ - name: Get all settings of MariaDB Server
+ azure_rm_mariadbconfiguration_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+'''
+
+RETURN = '''
+settings:
+ description:
+ - A list of dictionaries containing MariaDB Server settings.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Setting resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver
+ /configurations/deadlock_timeout"
+ name:
+ description:
+ - Setting name.
+ returned: always
+ type: str
+ sample: deadlock_timeout
+ value:
+ description:
+ - Setting value.
+ returned: always
+ type: raw
+ sample: 1000
+ description:
+ description:
+ - Description of the configuration.
+ returned: always
+ type: str
+ sample: Deadlock timeout.
+ source:
+ description:
+ - Source of the configuration.
+ returned: always
+ type: str
+ sample: system-default
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_operation import AzureOperationPoller
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMMariaDbConfigurationInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str'
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(changed=False)
+ self.mgmt_client = None
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ super(AzureRMMariaDbConfigurationInfo, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_mariadbconfiguration_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_mariadbconfiguration_facts' module has been renamed to 'azure_rm_mariadbconfiguration_info'", version='2.13')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+ self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+
+ if self.name is not None:
+ self.results['settings'] = self.get()
+ else:
+ self.results['settings'] = self.list_by_server()
+ return self.results
+
+ def get(self):
+ '''
+ Gets facts of the specified MariaDB Configuration.
+
+ :return: deserialized MariaDB Configuration instance state dictionary
+ '''
+ response = None
+ results = []
+ try:
+ response = self.mgmt_client.configurations.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ configuration_name=self.name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for Configurations.')
+
+ if response is not None:
+ results.append(self.format_item(response))
+
+ return results
+
+ def list_by_server(self):
+ '''
+ Lists all configurations of the specified MariaDB server.
+
+ :return: list of deserialized MariaDB Configuration instance state dictionaries
+ '''
+ response = None
+ results = []
+ try:
+ response = self.mgmt_client.configurations.list_by_server(resource_group_name=self.resource_group,
+ server_name=self.server_name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for Configurations.')
+
+ if response is not None:
+ for item in response:
+ results.append(self.format_item(item))
+
+ return results
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'server_name': self.server_name,
+ 'id': d['id'],
+ 'name': d['name'],
+ 'value': d['value'],
+ 'description': d['description'],
+ 'source': d['source']
+ }
+ return d
+
+
+def main():
+ AzureRMMariaDbConfigurationInfo()
+
+
+if __name__ == '__main__':
+ main()
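format_item() above flattens the SDK model into the handful of keys the facts module returns. A self-contained illustration of that flattening; FakeConfig is invented for the sketch and only imitates as_dict():

    class FakeConfig:
        # Stand-in for the SDK Configuration model; only as_dict() is needed here.
        def as_dict(self):
            return {'id': '/subscriptions/xxx/resourceGroups/myResourceGroup/providers/'
                          'Microsoft.DBforMariaDB/servers/testserver/configurations/deadlock_timeout',
                    'name': 'deadlock_timeout', 'value': '1000',
                    'description': 'Deadlock timeout.', 'source': 'system-default',
                    'type': 'Microsoft.DBforMariaDB/servers/configurations'}

    def format_item(item, resource_group, server_name):
        d = item.as_dict()
        return {'resource_group': resource_group, 'server_name': server_name,
                'id': d['id'], 'name': d['name'], 'value': d['value'],
                'description': d['description'], 'source': d['source']}

    print(format_item(FakeConfig(), 'myResourceGroup', 'testserver'))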
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py
new file mode 100644
index 0000000000..8492b96854
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbdatabase
+version_added: "2.8"
+short_description: Manage MariaDB Database instance
+description:
+ - Create, update and delete instance of MariaDB Database.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ name:
+ description:
+ - The name of the database.
+ required: True
+ charset:
+ description:
+ - The charset of the database. Check MariaDB documentation for possible values.
+ - This is only set on creation, use I(force_update) to recreate a database if the values don't match.
+ collation:
+ description:
+ - The collation of the database. Check MariaDB documentation for possible values.
+ - This is only set on creation, use I(force_update) to recreate a database if the values don't match.
+ force_update:
+ description:
+ - When set to C(true), will delete and recreate the existing MariaDB database if any of the properties don't match what is set.
+ - When set to C(false), no change will occur to the database even if any of the properties do not match.
+ type: bool
+ default: 'no'
+ state:
+ description:
+ - Assert the state of the MariaDB Database. Use C(present) to create or update a database and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Create (or update) MariaDB Database
+ azure_rm_mariadbdatabase:
+ resource_group: myResourceGroup
+ server_name: testserver
+ name: db1
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/databases/db1
+name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: db1
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class Actions:
+ NoAction, Create, Update, Delete = range(4)
+
+
+class AzureRMMariaDbDatabase(AzureRMModuleBase):
+ """Configuration class for an Azure RM MariaDB Database resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ charset=dict(
+ type='str'
+ ),
+ collation=dict(
+ type='str'
+ ),
+ force_update=dict(
+ type='bool',
+ default=False
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ self.force_update = None
+ self.parameters = dict()
+
+ self.results = dict(changed=False)
+ self.mgmt_client = None
+ self.state = None
+ self.to_do = Actions.NoAction
+
+ super(AzureRMMariaDbDatabase, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()):
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+ elif kwargs[key] is not None:
+ if key == "charset":
+ self.parameters["charset"] = kwargs[key]
+ elif key == "collation":
+ self.parameters["collation"] = kwargs[key]
+
+ old_response = None
+ response = None
+
+ self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+
+ resource_group = self.get_resource_group(self.resource_group)
+
+ old_response = self.get_mariadbdatabase()
+
+ if not old_response:
+ self.log("MariaDB Database instance doesn't exist")
+ if self.state == 'absent':
+ self.log("Old instance didn't exist")
+ else:
+ self.to_do = Actions.Create
+ else:
+ self.log("MariaDB Database instance already exists")
+ if self.state == 'absent':
+ self.to_do = Actions.Delete
+ elif self.state == 'present':
+ self.log("Need to check if MariaDB Database instance has to be deleted or may be updated")
+ if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']):
+ self.to_do = Actions.Update
+ if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']):
+ self.to_do = Actions.Update
+ if self.to_do == Actions.Update:
+ if self.force_update:
+ if not self.check_mode:
+ self.delete_mariadbdatabase()
+ else:
+ self.fail("Database properties cannot be updated without setting 'force_update' option")
+ self.to_do = Actions.NoAction
+
+ if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
+ self.log("Need to Create / Update the MariaDB Database instance")
+
+ if self.check_mode:
+ self.results['changed'] = True
+ return self.results
+
+ response = self.create_update_mariadbdatabase()
+ self.results['changed'] = True
+ self.log("Creation / Update done")
+ elif self.to_do == Actions.Delete:
+ self.log("MariaDB Database instance deleted")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_mariadbdatabase()
+ # make sure instance is actually deleted, for some Azure resources, instance is hanging around
+ # for some time after deletion -- this should be really fixed in Azure
+ while self.get_mariadbdatabase():
+ time.sleep(20)
+ else:
+ self.log("MariaDB Database instance unchanged")
+ self.results['changed'] = False
+ response = old_response
+
+ if response:
+ self.results["id"] = response["id"]
+ self.results["name"] = response["name"]
+
+ return self.results
+
+ def create_update_mariadbdatabase(self):
+ '''
+ Creates or updates MariaDB Database with the specified configuration.
+
+ :return: deserialized MariaDB Database instance state dictionary
+ '''
+ self.log("Creating / Updating the MariaDB Database instance {0}".format(self.name))
+
+ try:
+ response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ database_name=self.name,
+ parameters=self.parameters)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the MariaDB Database instance.')
+ self.fail("Error creating the MariaDB Database instance: {0}".format(str(exc)))
+ return response.as_dict()
+
+ def delete_mariadbdatabase(self):
+ '''
+ Deletes specified MariaDB Database instance in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the MariaDB Database instance {0}".format(self.name))
+ try:
+ response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ database_name=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete the MariaDB Database instance.')
+ self.fail("Error deleting the MariaDB Database instance: {0}".format(str(e)))
+
+ return True
+
+ def get_mariadbdatabase(self):
+ '''
+ Gets the properties of the specified MariaDB Database.
+
+ :return: deserialized MariaDB Database instance state dictionary
+ '''
+ self.log("Checking if the MariaDB Database instance {0} is present".format(self.name))
+ found = False
+ try:
+ response = self.mgmt_client.databases.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ database_name=self.name)
+ found = True
+ self.log("Response : {0}".format(response))
+ self.log("MariaDB Database instance : {0} found".format(response.name))
+ except CloudError as e:
+ self.log('Did not find the MariaDB Database instance.')
+ if found is True:
+ return response.as_dict()
+
+ return False
+
+
+def main():
+ """Main execution"""
+ AzureRMMariaDbDatabase()
+
+
+if __name__ == '__main__':
+ main()
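After deleting the database, the module polls get_mariadbdatabase() every 20 seconds until Azure stops returning the resource, because deleted instances can linger for a while. A generic sketch of that wait loop; the timeout guard is an addition for the sketch and is not in the module:

    import time

    def wait_until_gone(get_resource, interval=20, timeout=600):
        # Poll until the resource is no longer returned; bail out after the timeout.
        deadline = time.time() + timeout
        while get_resource():
            if time.time() > deadline:
                raise TimeoutError('resource still present after {0} seconds'.format(timeout))
            time.sleep(interval)

    # Stub that pretends the resource disappears after two polls:
    state = {'polls': 0}
    def fake_get():
        state['polls'] += 1
        return state['polls'] < 3

    wait_until_gone(fake_get, interval=0)
    print('gone after', state['polls'], 'polls')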
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py
new file mode 100644
index 0000000000..61e33015b1
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbdatabase_info.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbdatabase_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Database facts
+description:
+ - Get facts of MariaDB Database.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ type: str
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ type: str
+ name:
+ description:
+ - The name of the database.
+ type: str
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get instance of MariaDB Database
+ azure_rm_mariadbdatabase_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+ name: database_name
+
+ - name: List instances of MariaDB Database
+ azure_rm_mariadbdatabase_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+'''
+
+RETURN = '''
+databases:
+ description:
+ - A list of dictionaries containing facts for MariaDB Databases.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser
+ ver/databases/db1"
+ resource_group:
+ description:
+ - Resource group name.
+ returned: always
+ type: str
+ sample: testrg
+ server_name:
+ description:
+ - Server name.
+ returned: always
+ type: str
+ sample: testserver
+ name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: db1
+ charset:
+ description:
+ - The charset of the database.
+ returned: always
+ type: str
+ sample: UTF8
+ collation:
+ description:
+ - The collation of the database.
+ returned: always
+ type: str
+ sample: English_United States.1252
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str'
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False
+ )
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", version='2.13')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ if (self.resource_group is not None and
+ self.server_name is not None and
+ self.name is not None):
+ self.results['databases'] = self.get()
+ elif (self.resource_group is not None and
+ self.server_name is not None):
+ self.results['databases'] = self.list_by_server()
+ return self.results
+
+ def get(self):
+ response = None
+ results = []
+ try:
+ response = self.mariadb_client.databases.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ database_name=self.name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for Databases.')
+
+ if response is not None:
+ results.append(self.format_item(response))
+
+ return results
+
+ def list_by_server(self):
+ response = None
+ results = []
+ try:
+ response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group,
+ server_name=self.server_name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
+
+ if response is not None:
+ for item in response:
+ results.append(self.format_item(item))
+
+ return results
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'server_name': self.server_name,
+ 'name': d['name'],
+ 'charset': d['charset'],
+ 'collation': d['collation']
+ }
+ return d
+
+
+def main():
+ AzureRMMariaDbDatabaseInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py
new file mode 100644
index 0000000000..1fc8c5e79e
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbfirewallrule
+version_added: "2.8"
+short_description: Manage MariaDB firewall rule instance
+description:
+ - Create, update and delete instance of MariaDB firewall rule.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ name:
+ description:
+ - The name of the MariaDB firewall rule.
+ required: True
+ start_ip_address:
+ description:
+ - The start IP address of the MariaDB firewall rule. Must be IPv4 format.
+ end_ip_address:
+ description:
+ - The end IP address of the MariaDB firewall rule. Must be IPv4 format.
+ state:
+ description:
+ - Assert the state of the MariaDB firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Create (or update) MariaDB firewall rule
+ azure_rm_mariadbfirewallrule:
+ resource_group: myResourceGroup
+ server_name: testserver
+ name: rule1
+ start_ip_address: 10.0.0.17
+ end_ip_address: 10.0.0.20
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire
+ wallRules/rule1"
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class Actions:
+ NoAction, Create, Update, Delete = range(4)
+
+
+class AzureRMMariaDbFirewallRule(AzureRMModuleBase):
+ """Configuration class for an Azure RM MariaDB firewall rule resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ start_ip_address=dict(
+ type='str'
+ ),
+ end_ip_address=dict(
+ type='str'
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ self.start_ip_address = None
+ self.end_ip_address = None
+
+ self.results = dict(changed=False)
+ self.state = None
+ self.to_do = Actions.NoAction
+
+ super(AzureRMMariaDbFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()):
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+
+ old_response = None
+ response = None
+
+ resource_group = self.get_resource_group(self.resource_group)
+
+ old_response = self.get_firewallrule()
+
+ if not old_response:
+ self.log("MariaDB firewall rule instance doesn't exist")
+ if self.state == 'absent':
+ self.log("Old instance didn't exist")
+ else:
+ self.to_do = Actions.Create
+ else:
+ self.log("MariaDB firewall rule instance already exists")
+ if self.state == 'absent':
+ self.to_do = Actions.Delete
+ elif self.state == 'present':
+ self.log("Need to check if MariaDB firewall rule instance has to be deleted or may be updated")
+ if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
+ self.to_do = Actions.Update
+ if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
+ self.to_do = Actions.Update
+
+ if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
+ self.log("Need to Create / Update the MariaDB firewall rule instance")
+
+ if self.check_mode:
+ self.results['changed'] = True
+ return self.results
+
+ response = self.create_update_firewallrule()
+
+ if not old_response:
+ self.results['changed'] = True
+ else:
+ self.results['changed'] = (old_response != response)
+ self.log("Creation / Update done")
+ elif self.to_do == Actions.Delete:
+ self.log("MariaDB firewall rule instance deleted")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_firewallrule()
+ # make sure instance is actually deleted, for some Azure resources, instance is hanging around
+ # for some time after deletion -- this should be really fixed in Azure
+ while self.get_firewallrule():
+ time.sleep(20)
+ else:
+ self.log("MariaDB firewall rule instance unchanged")
+ self.results['changed'] = False
+ response = old_response
+
+ if response:
+ self.results["id"] = response["id"]
+
+ return self.results
+
+ def create_update_firewallrule(self):
+ '''
+ Creates or updates MariaDB firewall rule with the specified configuration.
+
+ :return: deserialized MariaDB firewall rule instance state dictionary
+ '''
+ self.log("Creating / Updating the MariaDB firewall rule instance {0}".format(self.name))
+
+ try:
+ response = self.mariadb_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ firewall_rule_name=self.name,
+ start_ip_address=self.start_ip_address,
+ end_ip_address=self.end_ip_address)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the MariaDB firewall rule instance.')
+ self.fail("Error creating the MariaDB firewall rule instance: {0}".format(str(exc)))
+ return response.as_dict()
+
+ def delete_firewallrule(self):
+ '''
+ Deletes specified MariaDB firewall rule instance in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the MariaDB firewall rule instance {0}".format(self.name))
+ try:
+ response = self.mariadb_client.firewall_rules.delete(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ firewall_rule_name=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete the MariaDB firewall rule instance.')
+ self.fail("Error deleting the MariaDB firewall rule instance: {0}".format(str(e)))
+
+ return True
+
+ def get_firewallrule(self):
+ '''
+ Gets the properties of the specified MariaDB firewall rule.
+
+ :return: deserialized MariaDB firewall rule instance state dictionary
+ '''
+ self.log("Checking if the MariaDB firewall rule instance {0} is present".format(self.name))
+ found = False
+ try:
+ response = self.mariadb_client.firewall_rules.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ firewall_rule_name=self.name)
+ found = True
+ self.log("Response : {0}".format(response))
+ self.log("MariaDB firewall rule instance : {0} found".format(response.name))
+ except CloudError as e:
+ self.log('Did not find the MariaDB firewall rule instance.')
+ if found is True:
+ return response.as_dict()
+
+ return False
+
+
+def main():
+ """Main execution"""
+ AzureRMMariaDbFirewallRule()
+
+
+if __name__ == '__main__':
+ main()
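Change detection for the firewall rule compares only the addresses that were supplied against the existing rule, so omitting a parameter never forces an update. A condensed, dict-based sketch of that check; the function name and sample addresses are illustrative:

    def needs_update(existing, start_ip_address=None, end_ip_address=None):
        # existing is the dict returned by get_firewallrule().
        if start_ip_address is not None and start_ip_address != existing['start_ip_address']:
            return True
        if end_ip_address is not None and end_ip_address != existing['end_ip_address']:
            return True
        return False

    rule = {'start_ip_address': '10.0.0.17', 'end_ip_address': '10.0.0.20'}
    print(needs_update(rule, start_ip_address='10.0.0.17'))  # False - already correct
    print(needs_update(rule, end_ip_address='10.0.0.25'))    # True  - range changed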
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py
new file mode 100644
index 0000000000..45557b5113
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbfirewallrule_info.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbfirewallrule_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Firewall Rule facts
+description:
+ - Get facts of Azure MariaDB Firewall Rule.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group.
+ required: True
+ type: str
+ server_name:
+ description:
+ - The name of the server.
+ required: True
+ type: str
+ name:
+ description:
+ - The name of the server firewall rule.
+ type: str
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get instance of MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+ name: firewall_rule_name
+
+ - name: List instances of MariaDB Firewall Rule
+ azure_rm_mariadbfirewallrule_info:
+ resource_group: myResourceGroup
+ server_name: server_name
+'''
+
+RETURN = '''
+rules:
+ description:
+ - A list of dictionaries containing facts for MariaDB Firewall Rule.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire
+ wallRules/rule1"
+ server_name:
+ description:
+ - The name of the server.
+ returned: always
+ type: str
+ sample: testserver
+ name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: rule1
+ start_ip_address:
+ description:
+ - The start IP address of the MariaDB firewall rule.
+ returned: always
+ type: str
+ sample: 10.0.0.16
+ end_ip_address:
+ description:
+ - The end IP address of the MariaDB firewall rule.
+ returned: always
+ type: str
+ sample: 10.0.0.18
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_operation import AzureOperationPoller
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMMariaDbFirewallRuleInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ server_name=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str'
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False
+ )
+ self.mgmt_client = None
+ self.resource_group = None
+ self.server_name = None
+ self.name = None
+ super(AzureRMMariaDbFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_mariadbfirewallrule_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_mariadbfirewallrule_facts' module has been renamed to 'azure_rm_mariadbfirewallrule_info'", version='2.13')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+ self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+
+ if self.name is not None:
+ self.results['rules'] = self.get()
+ else:
+ self.results['rules'] = self.list_by_server()
+ return self.results
+
+ def get(self):
+ response = None
+ results = []
+ try:
+ response = self.mgmt_client.firewall_rules.get(resource_group_name=self.resource_group,
+ server_name=self.server_name,
+ firewall_rule_name=self.name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for FirewallRules.')
+
+ if response is not None:
+ results.append(self.format_item(response))
+
+ return results
+
+ def list_by_server(self):
+ response = None
+ results = []
+ try:
+ response = self.mgmt_client.firewall_rules.list_by_server(resource_group_name=self.resource_group,
+ server_name=self.server_name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for FirewallRules.')
+
+ if response is not None:
+ for item in response:
+ results.append(self.format_item(item))
+
+ return results
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'resource_group': self.resource_group,
+ 'id': d['id'],
+ 'server_name': self.server_name,
+ 'name': d['name'],
+ 'start_ip_address': d['start_ip_address'],
+ 'end_ip_address': d['end_ip_address']
+ }
+ return d
+
+
+def main():
+ AzureRMMariaDbFirewallRuleInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbserver.py b/test/support/integration/plugins/modules/azure_rm_mariadbserver.py
new file mode 100644
index 0000000000..30a2998844
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbserver.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbserver
+version_added: "2.8"
+short_description: Manage MariaDB Server instance
+description:
+ - Create, update and delete instance of MariaDB Server.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ name:
+ description:
+ - The name of the server.
+ required: True
+ sku:
+ description:
+ - The SKU (pricing tier) of the server.
+ suboptions:
+ name:
+ description:
+ - The name of the SKU, typically, tier + family + cores, for example C(B_Gen4_1), C(GP_Gen5_8).
+ tier:
+ description:
+ - The tier of the particular SKU, for example C(Basic).
+ choices:
+ - basic
+ - standard
+ capacity:
+ description:
+ - The scale up/out capacity, representing server's compute units.
+ type: int
+ size:
+ description:
+ - The size code, to be interpreted by resource as appropriate.
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+ storage_mb:
+ description:
+ - The maximum storage allowed for a server.
+ type: int
+ version:
+ description:
+ - Server version.
+ choices:
+ - 10.2
+ enforce_ssl:
+ description:
+ - Enable SSL enforcement.
+ type: bool
+ default: False
+ admin_username:
+ description:
+ - The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation).
+ admin_password:
+ description:
+ - The password of the administrator login.
+ create_mode:
+ description:
+ - Create mode of the MariaDB Server.
+ default: Default
+ state:
+ description:
+ - Assert the state of the MariaDB Server. Use C(present) to create or update a server and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Create (or update) MariaDB Server
+ azure_rm_mariadbserver:
+ resource_group: myResourceGroup
+ name: testserver
+ sku:
+ name: B_Gen5_1
+ tier: Basic
+ location: eastus
+ storage_mb: 1024
+ enforce_ssl: True
+ version: 10.2
+ admin_username: cloudsa
+ admin_password: password
+'''
+
+RETURN = '''
+id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/mariadbsrv1b6dd89593
+version:
+ description:
+ - Server version. Possible values include C(10.2).
+ returned: always
+ type: str
+ sample: 10.2
+state:
+ description:
+ - A state of a server that is visible to user. Possible values include C(Ready), C(Dropping), C(Disabled).
+ returned: always
+ type: str
+ sample: Ready
+fully_qualified_domain_name:
+ description:
+ - The fully qualified domain name of a server.
+ returned: always
+ type: str
+ sample: mariadbsrv1b6dd89593.mariadb.database.azure.com
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class Actions:
+ NoAction, Create, Update, Delete = range(4)
+
+
+class AzureRMMariaDbServers(AzureRMModuleBase):
+ """Configuration class for an Azure RM MariaDB Server resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ sku=dict(
+ type='dict'
+ ),
+ location=dict(
+ type='str'
+ ),
+ storage_mb=dict(
+ type='int'
+ ),
+ version=dict(
+ type='str',
+ choices=['10.2']
+ ),
+ enforce_ssl=dict(
+ type='bool',
+ default=False
+ ),
+ create_mode=dict(
+ type='str',
+ default='Default'
+ ),
+ admin_username=dict(
+ type='str'
+ ),
+ admin_password=dict(
+ type='str',
+ no_log=True
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ self.resource_group = None
+ self.name = None
+ self.parameters = dict()
+ self.tags = None
+
+ self.results = dict(changed=False)
+ self.state = None
+ self.to_do = Actions.NoAction
+
+ super(AzureRMMariaDbServers, self).__init__(derived_arg_spec=self.module_arg_spec,
+ supports_check_mode=True,
+ supports_tags=True)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+ elif kwargs[key] is not None:
+ if key == "sku":
+ ev = kwargs[key]
+ if 'tier' in ev:
+ if ev['tier'] == 'basic':
+ ev['tier'] = 'Basic'
+ elif ev['tier'] == 'standard':
+ ev['tier'] = 'Standard'
+ self.parameters["sku"] = ev
+ elif key == "location":
+ self.parameters["location"] = kwargs[key]
+ elif key == "storage_mb":
+ self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})["storage_mb"] = kwargs[key]
+ elif key == "version":
+ self.parameters.setdefault("properties", {})["version"] = kwargs[key]
+ elif key == "enforce_ssl":
+ self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled'
+ elif key == "create_mode":
+ self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key]
+ elif key == "admin_username":
+ self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key]
+ elif key == "admin_password":
+ self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key]
+
+ old_response = None
+ response = None
+
+ resource_group = self.get_resource_group(self.resource_group)
+
+ if "location" not in self.parameters:
+ self.parameters["location"] = resource_group.location
+
+ old_response = self.get_mariadbserver()
+
+ if not old_response:
+ self.log("MariaDB Server instance doesn't exist")
+ if self.state == 'absent':
+ self.log("Old instance didn't exist")
+ else:
+ self.to_do = Actions.Create
+ else:
+ self.log("MariaDB Server instance already exists")
+ if self.state == 'absent':
+ self.to_do = Actions.Delete
+ elif self.state == 'present':
+ self.log("Need to check if MariaDB Server instance has to be deleted or may be updated")
+ update_tags, newtags = self.update_tags(old_response.get('tags', {}))
+ if update_tags:
+ self.tags = newtags
+ self.to_do = Actions.Update
+
+ if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
+ self.log("Need to Create / Update the MariaDB Server instance")
+
+ if self.check_mode:
+ self.results['changed'] = True
+ return self.results
+
+ response = self.create_update_mariadbserver()
+
+ if not old_response:
+ self.results['changed'] = True
+ else:
+ self.results['changed'] = (old_response != response)
+ self.log("Creation / Update done")
+ elif self.to_do == Actions.Delete:
+ self.log("MariaDB Server instance deleted")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_mariadbserver()
+ # make sure instance is actually deleted, for some Azure resources, instance is hanging around
+ # for some time after deletion -- this should be really fixed in Azure
+ while self.get_mariadbserver():
+ time.sleep(20)
+ else:
+ self.log("MariaDB Server instance unchanged")
+ self.results['changed'] = False
+ response = old_response
+
+ if response:
+ self.results["id"] = response["id"]
+ self.results["version"] = response["version"]
+ self.results["state"] = response["user_visible_state"]
+ self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"]
+
+ return self.results
+
+ def create_update_mariadbserver(self):
+ '''
+ Creates or updates MariaDB Server with the specified configuration.
+
+ :return: deserialized MariaDB Server instance state dictionary
+ '''
+ self.log("Creating / Updating the MariaDB Server instance {0}".format(self.name))
+
+ try:
+ self.parameters['tags'] = self.tags
+ if self.to_do == Actions.Create:
+ response = self.mariadb_client.servers.create(resource_group_name=self.resource_group,
+ server_name=self.name,
+ parameters=self.parameters)
+ else:
+ # the update call expects the keys from "properties" at the top level, so flatten them
+ self.parameters.update(self.parameters.pop("properties", {}))
+ response = self.mariadb_client.servers.update(resource_group_name=self.resource_group,
+ server_name=self.name,
+ parameters=self.parameters)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the MariaDB Server instance.')
+ self.fail("Error creating the MariaDB Server instance: {0}".format(str(exc)))
+ return response.as_dict()
+
+ def delete_mariadbserver(self):
+ '''
+ Deletes specified MariaDB Server instance in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the MariaDB Server instance {0}".format(self.name))
+ try:
+ response = self.mariadb_client.servers.delete(resource_group_name=self.resource_group,
+ server_name=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete the MariaDB Server instance.')
+ self.fail("Error deleting the MariaDB Server instance: {0}".format(str(e)))
+
+ return True
+
+ def get_mariadbserver(self):
+ '''
+ Gets the properties of the specified MariaDB Server.
+
+ :return: deserialized MariaDB Server instance state dictionary
+ '''
+ self.log("Checking if the MariaDB Server instance {0} is present".format(self.name))
+ found = False
+ try:
+ response = self.mariadb_client.servers.get(resource_group_name=self.resource_group,
+ server_name=self.name)
+ found = True
+ self.log("Response : {0}".format(response))
+ self.log("MariaDB Server instance : {0} found".format(response.name))
+ except CloudError as e:
+ self.log('Did not find the MariaDB Server instance.')
+ if found is True:
+ return response.as_dict()
+
+ return False
+
+
+def main():
+ """Main execution"""
+ AzureRMMariaDbServers()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py b/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py
new file mode 100644
index 0000000000..ffe52c5d37
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_mariadbserver_info.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
+# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_mariadbserver_info
+version_added: "2.9"
+short_description: Get Azure MariaDB Server facts
+description:
+ - Get facts of MariaDB Server.
+
+options:
+ resource_group:
+ description:
+ - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
+ required: True
+ type: str
+ name:
+ description:
+ - The name of the server.
+ type: str
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+ type: list
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+ - Matti Ranta (@techknowlogick)
+
+'''
+
+EXAMPLES = '''
+ - name: Get instance of MariaDB Server
+ azure_rm_mariadbserver_info:
+ resource_group: myResourceGroup
+ name: server_name
+
+ - name: List instances of MariaDB Server
+ azure_rm_mariadbserver_info:
+ resource_group: myResourceGroup
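+
+ # Illustrative only: the tags option documented above can narrow the listing;
+ # the tag shown here is a placeholder value.
+ - name: List instances of MariaDB Server filtered by tags
+ azure_rm_mariadbserver_info:
+ resource_group: myResourceGroup
+ tags:
+ - environment:test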
+'''
+
+RETURN = '''
+servers:
+ description:
+ - A list of dictionaries containing facts for MariaDB servers.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myabdud1223
+ resource_group:
+ description:
+ - Resource group name.
+ returned: always
+ type: str
+ sample: myResourceGroup
+ name:
+ description:
+ - Resource name.
+ returned: always
+ type: str
+ sample: myabdud1223
+ location:
+ description:
+ - The location the resource resides in.
+ returned: always
+ type: str
+ sample: eastus
+ sku:
+ description:
+ - The SKU of the server.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description:
+ - The name of the SKU.
+ returned: always
+ type: str
+ sample: GP_Gen4_2
+ tier:
+ description:
+ - The tier of the particular SKU.
+ returned: always
+ type: str
+ sample: GeneralPurpose
+ capacity:
+ description:
+ - The scale capacity.
+ returned: always
+ type: int
+ sample: 2
+ storage_mb:
+ description:
+ - The maximum storage allowed for a server.
+ returned: always
+ type: int
+ sample: 128000
+ enforce_ssl:
+ description:
+ - Enable SSL enforcement.
+ returned: always
+ type: bool
+ sample: False
+ admin_username:
+ description:
+ - The administrator's login name of a server.
+ returned: always
+ type: str
+ sample: serveradmin
+ version:
+ description:
+ - Server version.
+ returned: always
+ type: str
+ sample: "9.6"
+ user_visible_state:
+ description:
+ - The state of the server as visible to the user.
+ returned: always
+ type: str
+ sample: Ready
+ fully_qualified_domain_name:
+ description:
+ - The fully qualified domain name of a server.
+ returned: always
+ type: str
+ sample: myabdud1223.mys.database.azure.com
+ tags:
+ description:
+ - Tags assigned to the resource. Dictionary of string:string pairs.
+ type: dict
+ sample: { tag1: abc }
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
+ from msrest.serialization import Model
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMMariaDbServerInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str'
+ ),
+ tags=dict(
+ type='list'
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False
+ )
+ self.resource_group = None
+ self.name = None
+ self.tags = None
+ super(AzureRMMariaDbServerInfo, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_mariadbserver_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_mariadbserver_facts' module has been renamed to 'azure_rm_mariadbserver_info'", version='2.13')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
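+ # With a name, fetch just that server; otherwise list every server in the resource group.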
+ if (self.resource_group is not None and
+ self.name is not None):
+ self.results['servers'] = self.get()
+ elif (self.resource_group is not None):
+ self.results['servers'] = self.list_by_resource_group()
+ return self.results
+
+ def get(self):
+ response = None
+ results = []
+ try:
+ response = self.mariadb_client.servers.get(resource_group_name=self.resource_group,
+ server_name=self.name)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for MariaDB Server.')
+
+ if response and self.has_tags(response.tags, self.tags):
+ results.append(self.format_item(response))
+
+ return results
+
+ def list_by_resource_group(self):
+ response = None
+ results = []
+ try:
+ response = self.mariadb_client.servers.list_by_resource_group(resource_group_name=self.resource_group)
+ self.log("Response : {0}".format(response))
+ except CloudError as e:
+ self.log('Could not get facts for MariaDB Servers.')
+
+ if response is not None:
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ results.append(self.format_item(item))
+
+ return results
+
+ def format_item(self, item):
+ d = item.as_dict()
+ d = {
+ 'id': d['id'],
+ 'resource_group': self.resource_group,
+ 'name': d['name'],
+ 'sku': d['sku'],
+ 'location': d['location'],
+ 'storage_mb': d['storage_profile']['storage_mb'],
+ 'version': d['version'],
+ 'enforce_ssl': (d['ssl_enforcement'] == 'Enabled'),
+ 'admin_username': d['administrator_login'],
+ 'user_visible_state': d['user_visible_state'],
+ 'fully_qualified_domain_name': d['fully_qualified_domain_name'],
+ 'tags': d.get('tags')
+ }
+
+ return d
+
+
+def main():
+ AzureRMMariaDbServerInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_resource.py b/test/support/integration/plugins/modules/azure_rm_resource.py
new file mode 100644
index 0000000000..6ea3e3bb9b
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_resource.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_resource
+version_added: "2.6"
+short_description: Create any Azure resource
+description:
+ - Create, update or delete any Azure resource using Azure REST API.
+ - This module gives access to resources that are not supported via Ansible modules.
+ - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API.
+
+options:
+ url:
+ description:
+ - Azure RM Resource URL.
+ api_version:
+ description:
+ - Specific API version to be used.
+ provider:
+ description:
+ - Provider type.
+ - Required if URL is not specified.
+ resource_group:
+ description:
+ - Resource group to be used.
+ - Required if URL is not specified.
+ resource_type:
+ description:
+ - Resource type.
+ - Required if URL is not specified.
+ resource_name:
+ description:
+ - Resource name.
+ - Required if URL is not specified.
+ subresource:
+ description:
+ - List of subresources.
+ suboptions:
+ namespace:
+ description:
+ - Subresource namespace.
+ type:
+ description:
+ - Subresource type.
+ name:
+ description:
+ - Subresource name.
+ body:
+ description:
+ - The body of the HTTP request/response to the web service.
+ method:
+ description:
+ - The HTTP method of the request or response. It must be uppercase.
+ choices:
+ - GET
+ - PUT
+ - POST
+ - HEAD
+ - PATCH
+ - DELETE
+ - MERGE
+ default: "PUT"
+ status_code:
+ description:
+ - A valid, numeric, HTTP status code that signifies success of the request. Can also be a comma-separated list of status codes.
+ type: list
+ default: [ 200, 201, 202 ]
+ idempotency:
+ description:
+ - If enabled, idempotency check will be done by using I(method=GET) first and then comparing with I(body).
+ default: no
+ type: bool
+ polling_timeout:
+ description:
+ - How long, in seconds, to poll for the result of a long-running operation before giving up.
+ default: 0
+ type: int
+ version_added: "2.8"
+ polling_interval:
+ description:
+ - Interval, in seconds, between polls when waiting for a long-running operation to complete.
+ default: 60
+ type: int
+ version_added: "2.8"
+ state:
+ description:
+ - Assert the state of the resource. Use C(present) to create or update resource or C(absent) to delete resource.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+
+'''
+
+EXAMPLES = '''
+ - name: Update scaleset info using azure_rm_resource
+ azure_rm_resource:
+ resource_group: myResourceGroup
+ provider: compute
+ resource_type: virtualmachinescalesets
+ resource_name: myVmss
+ api_version: "2017-12-01"
+ body: { body }
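+
+ # Illustrative sketch only: the same resource removed with state=absent
+ # (resource names are placeholders, not values used by this test suite).
+ - name: Delete scaleset using azure_rm_resource
+ azure_rm_resource:
+ resource_group: myResourceGroup
+ provider: compute
+ resource_type: virtualmachinescalesets
+ resource_name: myVmss
+ api_version: "2017-12-01"
+ state: absent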
+'''
+
+RETURN = '''
+response:
+ description:
+ - Response specific to resource type.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Resource ID.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Storage/storageAccounts/staccb57dc95183"
+ kind:
+ description:
+ - The kind of storage.
+ type: str
+ returned: always
+ sample: Storage
+ location:
+ description:
+ - The resource location, defaults to location of the resource group.
+ type: str
+ returned: always
+ sample: eastus
+ name:
+ description:
+ - The storage account name.
+ type: str
+ returned: always
+ sample: staccb57dc95183
+ properties:
+ description:
+ - The storage account's related properties.
+ type: dict
+ returned: always
+ sample: {
+ "creationTime": "2019-06-13T06:34:33.0996676Z",
+ "encryption": {
+ "keySource": "Microsoft.Storage",
+ "services": {
+ "blob": {
+ "enabled": true,
+ "lastEnabledTime": "2019-06-13T06:34:33.1934074Z"
+ },
+ "file": {
+ "enabled": true,
+ "lastEnabledTime": "2019-06-13T06:34:33.1934074Z"
+ }
+ }
+ },
+ "networkAcls": {
+ "bypass": "AzureServices",
+ "defaultAction": "Allow",
+ "ipRules": [],
+ "virtualNetworkRules": []
+ },
+ "primaryEndpoints": {
+ "blob": "https://staccb57dc95183.blob.core.windows.net/",
+ "file": "https://staccb57dc95183.file.core.windows.net/",
+ "queue": "https://staccb57dc95183.queue.core.windows.net/",
+ "table": "https://staccb57dc95183.table.core.windows.net/"
+ },
+ "primaryLocation": "eastus",
+ "provisioningState": "Succeeded",
+ "secondaryLocation": "westus",
+ "statusOfPrimary": "available",
+ "statusOfSecondary": "available",
+ "supportsHttpsTrafficOnly": false
+ }
+ sku:
+ description:
+ - The storage account SKU.
+ type: dict
+ returned: always
+ sample: {
+ "name": "Standard_GRS",
+ "tier": "Standard"
+ }
+ tags:
+ description:
+ - Resource tags.
+ type: dict
+ returned: always
+ sample: { 'key1': 'value1' }
+ type:
+ description:
+ - The resource type.
+ type: str
+ returned: always
+ sample: "Microsoft.Storage/storageAccounts"
+
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible.module_utils.azure_rm_common_rest import GenericRestClient
+from ansible.module_utils.common.dict_transformations import dict_merge
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.service_client import ServiceClient
+ from msrestazure.tools import resource_id, is_valid_resource_id
+ import json
+
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMResource(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ url=dict(
+ type='str'
+ ),
+ provider=dict(
+ type='str',
+ ),
+ resource_group=dict(
+ type='str',
+ ),
+ resource_type=dict(
+ type='str',
+ ),
+ resource_name=dict(
+ type='str',
+ ),
+ subresource=dict(
+ type='list',
+ default=[]
+ ),
+ api_version=dict(
+ type='str'
+ ),
+ method=dict(
+ type='str',
+ default='PUT',
+ choices=["GET", "PUT", "POST", "HEAD", "PATCH", "DELETE", "MERGE"]
+ ),
+ body=dict(
+ type='raw'
+ ),
+ status_code=dict(
+ type='list',
+ default=[200, 201, 202]
+ ),
+ idempotency=dict(
+ type='bool',
+ default=False
+ ),
+ polling_timeout=dict(
+ type='int',
+ default=0
+ ),
+ polling_interval=dict(
+ type='int',
+ default=60
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(
+ changed=False,
+ response=None
+ )
+ self.mgmt_client = None
+ self.url = None
+ self.api_version = None
+ self.provider = None
+ self.resource_group = None
+ self.resource_type = None
+ self.resource_name = None
+ self.subresource_type = None
+ self.subresource_name = None
+ self.subresource = []
+ self.method = None
+ self.status_code = []
+ self.idempotency = False
+ self.polling_timeout = None
+ self.polling_interval = None
+ self.state = None
+ self.body = None
+ super(AzureRMResource, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+ self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+
+ if self.state == 'absent':
+ self.method = 'DELETE'
+ self.status_code.append(204)
+
+ if self.url is None:
+ orphan = None
+ rargs = dict()
+ rargs['subscription'] = self.subscription_id
+ rargs['resource_group'] = self.resource_group
+ if not (self.provider is None or self.provider.lower().startswith('microsoft.')):
+ rargs['namespace'] = "Microsoft." + self.provider
+ else:
+ rargs['namespace'] = self.provider
+
+ if self.resource_type is not None and self.resource_name is not None:
+ rargs['type'] = self.resource_type
+ rargs['name'] = self.resource_name
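+ # Each subresource entry becomes child_namespace_N / child_type_N / child_name_N
+ # arguments to msrestazure's resource_id(), so nested resources (for example a
+ # subnet under a virtual network) resolve to a full resource URL.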
+ for i in range(len(self.subresource)):
+ resource_ns = self.subresource[i].get('namespace', None)
+ resource_type = self.subresource[i].get('type', None)
+ resource_name = self.subresource[i].get('name', None)
+ if resource_type is not None and resource_name is not None:
+ rargs['child_namespace_' + str(i + 1)] = resource_ns
+ rargs['child_type_' + str(i + 1)] = resource_type
+ rargs['child_name_' + str(i + 1)] = resource_name
+ else:
+ orphan = resource_type
+ else:
+ orphan = self.resource_type
+
+ self.url = resource_id(**rargs)
+
+ if orphan is not None:
+ self.url += '/' + orphan
+
+ # if api_version was not specified, get latest one
+ if not self.api_version:
+ try:
+ # extract provider and resource type
+ if "/providers/" in self.url:
+ provider = self.url.split("/providers/")[1].split("/")[0]
+ resourceType = self.url.split(provider + "/")[1].split("/")[0]
+ url = "/subscriptions/" + self.subscription_id + "/providers/" + provider
+ api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text)
+ for rt in api_versions['resourceTypes']:
+ if rt['resourceType'].lower() == resourceType.lower():
+ self.api_version = rt['apiVersions'][0]
+ break
+ else:
+ # if there's no provider in API version, assume Microsoft.Resources
+ self.api_version = '2018-05-01'
+ if not self.api_version:
+ self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType))
+ except Exception as exc:
+ self.fail("Failed to obtain API version: {0}".format(str(exc)))
+
+ query_parameters = {}
+ query_parameters['api-version'] = self.api_version
+
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+
+ needs_update = True
+ response = None
+
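+ # Optional idempotency check: read the current resource first and only issue the
+ # write when merging the requested body into it would actually change something.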
+ if self.idempotency:
+ original = self.mgmt_client.query(self.url, "GET", query_parameters, None, None, [200, 404], 0, 0)
+
+ if original.status_code == 404:
+ if self.state == 'absent':
+ needs_update = False
+ else:
+ try:
+ response = json.loads(original.text)
+ needs_update = (dict_merge(response, self.body) != response)
+ except Exception:
+ pass
+
+ if needs_update:
+ response = self.mgmt_client.query(self.url,
+ self.method,
+ query_parameters,
+ header_parameters,
+ self.body,
+ self.status_code,
+ self.polling_timeout,
+ self.polling_interval)
+ if self.state == 'present':
+ try:
+ response = json.loads(response.text)
+ except Exception:
+ response = response.text
+ else:
+ response = None
+
+ self.results['response'] = response
+ self.results['changed'] = needs_update
+
+ return self.results
+
+
+def main():
+ AzureRMResource()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_resource_info.py b/test/support/integration/plugins/modules/azure_rm_resource_info.py
new file mode 100644
index 0000000000..354cd79578
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_resource_info.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_resource_info
+version_added: "2.9"
+short_description: Generic facts of Azure resources
+description:
+ - Obtain facts of any resource using Azure REST API.
+ - This module gives access to resources that are not supported via Ansible modules.
+ - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API.
+
+options:
+ url:
+ description:
+ - Azure RM Resource URL.
+ api_version:
+ description:
+ - Specific API version to be used.
+ provider:
+ description:
+ - Provider type, should be specified if no URL is given.
+ resource_group:
+ description:
+ - Resource group to be used.
+ - Required if URL is not specified.
+ resource_type:
+ description:
+ - Resource type.
+ resource_name:
+ description:
+ - Resource name.
+ subresource:
+ description:
+ - List of subresources.
+ suboptions:
+ namespace:
+ description:
+ - Subresource namespace.
+ type:
+ description:
+ - Subresource type.
+ name:
+ description:
+ - Subresource name.
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Zim Kalinowski (@zikalino)
+
+'''
+
+EXAMPLES = '''
+ - name: Get scaleset info
+ azure_rm_resource_info:
+ resource_group: myResourceGroup
+ provider: compute
+ resource_type: virtualmachinescalesets
+ resource_name: myVmss
+ api_version: "2017-12-01"
+
+ - name: Query all the resources in the resource group
+ azure_rm_resource_info:
+ resource_group: "{{ resource_group }}"
+ resource_type: resources
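+
+ # Illustrative only: reading a nested resource through the subresource option
+ # documented above (all names below are placeholders).
+ - name: Get subnet info of a virtual network
+ azure_rm_resource_info:
+ resource_group: myResourceGroup
+ provider: network
+ resource_type: virtualnetworks
+ resource_name: myVnet
+ subresource:
+ - type: subnets
+ name: mySubnet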
+'''
+
+RETURN = '''
+response:
+ description:
+ - Response specific to resource type.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - Id of the Azure resource.
+ type: str
+ returned: always
+ sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/virtualMachines/myVM"
+ location:
+ description:
+ - Resource location.
+ type: str
+ returned: always
+ sample: eastus
+ name:
+ description:
+ - Resource name.
+ type: str
+ returned: always
+ sample: myVM
+ properties:
+ description:
+ - The virtual machine's properties.
+ type: complex
+ returned: always
+ contains:
+ diagnosticsProfile:
+ description:
+ - Specifies the boot diagnostic settings state.
+ type: complex
+ returned: always
+ contains:
+ bootDiagnostics:
+ description:
+ - A debugging feature that lets you view console output and a screenshot to diagnose VM status.
+ type: dict
+ returned: always
+ sample: {
+ "enabled": true,
+ "storageUri": "https://vxisurgdiag.blob.core.windows.net/"
+ }
+ hardwareProfile:
+ description:
+ - Specifies the hardware settings for the virtual machine.
+ type: dict
+ returned: always
+ sample: {
+ "vmSize": "Standard_D2s_v3"
+ }
+ networkProfile:
+ description:
+ - Specifies the network interfaces of the virtual machine.
+ type: complex
+ returned: always
+ contains:
+ networkInterfaces:
+ description:
+ - Describes a network interface reference.
+ type: list
+ returned: always
+ sample:
+ - {
+ "id": "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/networkInterfaces/myvm441"
+ }
+ osProfile:
+ description:
+ - Specifies the operating system settings for the virtual machine.
+ type: complex
+ returned: always
+ contains:
+ adminUsername:
+ description:
+ - Specifies the name of the administrator account.
+ type: str
+ returned: always
+ sample: azureuser
+ allowExtensionOperations:
+ description:
+ - Specifies whether extension operations should be allowed on the virtual machine.
+ - This may only be set to False when no extensions are present on the virtual machine.
+ type: bool
+ returned: always
+ sample: true
+ computerName:
+ description:
+ - Specifies the host OS name of the virtual machine.
+ type: str
+ returned: always
+ sample: myVM
+ requireGuestProvisionSignal:
+ description:
+ - Specifies whether the host requires a guest provision signal.
+ type: bool
+ returned: always
+ sample: true
+ secrets:
+ description:
+ - Specifies set of certificates that should be installed onto the virtual machine.
+ type: list
+ returned: always
+ sample: []
+ linuxConfiguration:
+ description:
+ - Specifies the Linux operating system settings on the virtual machine.
+ type: dict
+ returned: when OS type is Linux
+ sample: {
+ "disablePasswordAuthentication": false,
+ "provisionVMAgent": true
+ }
+ provisioningState:
+ description:
+ - The provisioning state.
+ type: str
+ returned: always
+ sample: Succeeded
+ vmID:
+ description:
+ - Specifies the VM unique ID, a 128-bit identifier that is encoded and stored in the SMBIOS of every Azure IaaS VM.
+ - It can be read using platform BIOS commands.
+ type: str
+ returned: always
+ sample: "eb86d9bb-6725-4787-a487-2e497d5b340c"
+ storageProfile:
+ description:
+ - Specifies the storage account type for the managed disk.
+ type: complex
+ returned: always
+ contains:
+ dataDisks:
+ description:
+ - Specifies the parameters that are used to add a data disk to virtual machine.
+ type: list
+ returned: always
+ sample:
+ - {
+ "caching": "None",
+ "createOption": "Attach",
+ "diskSizeGB": 1023,
+ "lun": 2,
+ "managedDisk": {
+ "id": "/subscriptions/xxxx....xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk2",
+ "storageAccountType": "StandardSSD_LRS"
+ },
+ "name": "testdisk2"
+ }
+ - {
+ "caching": "None",
+ "createOption": "Attach",
+ "diskSizeGB": 1023,
+ "lun": 1,
+ "managedDisk": {
+ "id": "/subscriptions/xxxx...xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk3",
+ "storageAccountType": "StandardSSD_LRS"
+ },
+ "name": "testdisk3"
+ }
+
+ imageReference:
+ description:
+ - Specifies information about the image to use.
+ type: dict
+ returned: always
+ sample: {
+ "offer": "UbuntuServer",
+ "publisher": "Canonical",
+ "sku": "18.04-LTS",
+ "version": "latest"
+ }
+ osDisk:
+ description:
+ - Specifies information about the operating system disk used by the virtual machine.
+ type: dict
+ returned: always
+ sample: {
+ "caching": "ReadWrite",
+ "createOption": "FromImage",
+ "diskSizeGB": 30,
+ "managedDisk": {
+ "id": "/subscriptions/xxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/disks/myVM_disk1_xxx",
+ "storageAccountType": "Premium_LRS"
+ },
+ "name": "myVM_disk1_xxx",
+ "osType": "Linux"
+ }
+ type:
+ description:
+ - The resource type of the virtual machine.
+ type: str
+ returned: always
+ sample: "Microsoft.Compute/virtualMachines"
+'''
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+from ansible.module_utils.azure_rm_common_rest import GenericRestClient
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.service_client import ServiceClient
+ from msrestazure.tools import resource_id, is_valid_resource_id
+ import json
+
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
+class AzureRMResourceInfo(AzureRMModuleBase):
+ def __init__(self):
+ # define user inputs into argument
+ self.module_arg_spec = dict(
+ url=dict(
+ type='str'
+ ),
+ provider=dict(
+ type='str'
+ ),
+ resource_group=dict(
+ type='str'
+ ),
+ resource_type=dict(
+ type='str'
+ ),
+ resource_name=dict(
+ type='str'
+ ),
+ subresource=dict(
+ type='list',
+ default=[]
+ ),
+ api_version=dict(
+ type='str'
+ )
+ )
+ # store the results of the module operation
+ self.results = dict(
+ response=[]
+ )
+ self.mgmt_client = None
+ self.url = None
+ self.api_version = None
+ self.provider = None
+ self.resource_group = None
+ self.resource_type = None
+ self.resource_name = None
+ self.subresource = []
+ super(AzureRMResourceInfo, self).__init__(self.module_arg_spec, supports_tags=False)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_resource_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_resource_facts' module has been renamed to 'azure_rm_resource_info'", version='2.13')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+ self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
+ base_url=self._cloud_environment.endpoints.resource_manager)
+
+ if self.url is None:
+ orphan = None
+ rargs = dict()
+ rargs['subscription'] = self.subscription_id
+ rargs['resource_group'] = self.resource_group
+ if not (self.provider is None or self.provider.lower().startswith('microsoft.')):
+ rargs['namespace'] = "Microsoft." + self.provider
+ else:
+ rargs['namespace'] = self.provider
+
+ if self.resource_type is not None and self.resource_name is not None:
+ rargs['type'] = self.resource_type
+ rargs['name'] = self.resource_name
+ for i in range(len(self.subresource)):
+ resource_ns = self.subresource[i].get('namespace', None)
+ resource_type = self.subresource[i].get('type', None)
+ resource_name = self.subresource[i].get('name', None)
+ if resource_type is not None and resource_name is not None:
+ rargs['child_namespace_' + str(i + 1)] = resource_ns
+ rargs['child_type_' + str(i + 1)] = resource_type
+ rargs['child_name_' + str(i + 1)] = resource_name
+ else:
+ orphan = resource_type
+ else:
+ orphan = self.resource_type
+
+ self.url = resource_id(**rargs)
+
+ if orphan is not None:
+ self.url += '/' + orphan
+
+ # if api_version was not specified, get latest one
+ if not self.api_version:
+ try:
+ # extract provider and resource type
+ if "/providers/" in self.url:
+ provider = self.url.split("/providers/")[1].split("/")[0]
+ resourceType = self.url.split(provider + "/")[1].split("/")[0]
+ url = "/subscriptions/" + self.subscription_id + "/providers/" + provider
+ api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text)
+ for rt in api_versions['resourceTypes']:
+ if rt['resourceType'].lower() == resourceType.lower():
+ self.api_version = rt['apiVersions'][0]
+ break
+ else:
+ # if there's no provider in API version, assume Microsoft.Resources
+ self.api_version = '2018-05-01'
+ if not self.api_version:
+ self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType))
+ except Exception as exc:
+ self.fail("Failed to obtain API version: {0}".format(str(exc)))
+
+ self.results['url'] = self.url
+
+ query_parameters = {}
+ query_parameters['api-version'] = self.api_version
+
+ header_parameters = {}
+ header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+ skiptoken = None
+
+ while True:
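+ # Page through results: the nextLink value from each response is fed back as the
+ # skiptoken query parameter until the service stops returning one.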
+ if skiptoken:
+ query_parameters['skiptoken'] = skiptoken
+ response = self.mgmt_client.query(self.url, "GET", query_parameters, header_parameters, None, [200, 404], 0, 0)
+ try:
+ response = json.loads(response.text)
+ if isinstance(response, dict):
+ if response.get('value'):
+ self.results['response'] = self.results['response'] + response['value']
+ skiptoken = response.get('nextLink')
+ else:
+ self.results['response'] = self.results['response'] + [response]
+ except Exception as e:
+ self.fail('Failed to parse response: ' + str(e))
+ if not skiptoken:
+ break
+ return self.results
+
+
+def main():
+ AzureRMResourceInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_storageaccount.py b/test/support/integration/plugins/modules/azure_rm_storageaccount.py
new file mode 100644
index 0000000000..d4158bbda8
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_storageaccount.py
@@ -0,0 +1,684 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
+# Chris Houseknecht, <house@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_storageaccount
+version_added: "2.1"
+short_description: Manage Azure storage accounts
+description:
+ - Create, update or delete a storage account.
+options:
+ resource_group:
+ description:
+ - Name of the resource group to use.
+ required: true
+ aliases:
+ - resource_group_name
+ name:
+ description:
+ - Name of the storage account to update or create.
+ state:
+ description:
+ - State of the storage account. Use C(present) to create or update a storage account and use C(absent) to delete an account.
+ default: present
+ choices:
+ - absent
+ - present
+ location:
+ description:
+ - Valid Azure location. Defaults to location of the resource group.
+ account_type:
+ description:
+ - Type of storage account. Required when creating a storage account.
+ - C(Standard_ZRS) and C(Premium_LRS) accounts cannot be changed to other account types.
+ - Other account types cannot be changed to C(Standard_ZRS) or C(Premium_LRS).
+ choices:
+ - Premium_LRS
+ - Standard_GRS
+ - Standard_LRS
+ - StandardSSD_LRS
+ - Standard_RAGRS
+ - Standard_ZRS
+ - Premium_ZRS
+ aliases:
+ - type
+ custom_domain:
+ description:
+ - User domain assigned to the storage account.
+ - Must be a dictionary with I(name) and I(use_sub_domain) keys where I(name) is the CNAME source.
+ - Only one custom domain is supported per storage account at this time.
+ - To clear the existing custom domain, use an empty string for the custom domain name property.
+ - Can be added to an existing storage account. Will be ignored during storage account creation.
+ aliases:
+ - custom_dns_domain_suffix
+ kind:
+ description:
+ - The kind of storage.
+ default: 'Storage'
+ choices:
+ - Storage
+ - StorageV2
+ - BlobStorage
+ version_added: "2.2"
+ access_tier:
+ description:
+ - The access tier for this storage account. Required when I(kind=BlobStorage).
+ choices:
+ - Hot
+ - Cool
+ version_added: "2.4"
+ force_delete_nonempty:
+ description:
+ - Attempt deletion if resource already exists and cannot be updated.
+ type: bool
+ aliases:
+ - force
+ https_only:
+ description:
+ - Allows https traffic only to storage service when set to C(true).
+ type: bool
+ version_added: "2.8"
+ blob_cors:
+ description:
+ - Specifies CORS rules for the Blob service.
+ - You can include up to five CorsRule elements in the request.
+ - If no blob_cors elements are included in the argument list, nothing about CORS will be changed.
+ - If you want to delete all CORS rules and disable CORS for the Blob service, explicitly set I(blob_cors=[]).
+ type: list
+ version_added: "2.8"
+ suboptions:
+ allowed_origins:
+ description:
+ - A list of origin domains that will be allowed via CORS, or "*" to allow all domains.
+ type: list
+ required: true
+ allowed_methods:
+ description:
+ - A list of HTTP methods that are allowed to be executed by the origin.
+ type: list
+ required: true
+ max_age_in_seconds:
+ description:
+ - The number of seconds that the client/browser should cache a preflight response.
+ type: int
+ required: true
+ exposed_headers:
+ description:
+ - A list of response headers to expose to CORS clients.
+ type: list
+ required: true
+ allowed_headers:
+ description:
+ - A list of headers allowed to be part of the cross-origin request.
+ type: list
+ required: true
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - Matt Davis (@nitzmahone)
+'''
+
+EXAMPLES = '''
+ - name: remove account, if it exists
+ azure_rm_storageaccount:
+ resource_group: myResourceGroup
+ name: clh0002
+ state: absent
+
+ - name: create an account
+ azure_rm_storageaccount:
+ resource_group: myResourceGroup
+ name: clh0002
+ type: Standard_RAGRS
+ tags:
+ testing: testing
+ delete: on-exit
+
+ - name: create an account with blob CORS
+ azure_rm_storageaccount:
+ resource_group: myResourceGroup
+ name: clh002
+ type: Standard_RAGRS
+ blob_cors:
+ - allowed_origins:
+ - http://www.example.com/
+ allowed_methods:
+ - GET
+ - POST
+ allowed_headers:
+ - x-ms-meta-data*
+ - x-ms-meta-target*
+ - x-ms-meta-abc
+ exposed_headers:
+ - x-ms-meta-*
+ max_age_in_seconds: 200
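+
+ # Illustrative only: a cool-tier BlobStorage account with HTTPS enforced, using
+ # the kind, access_tier and https_only options documented above.
+ - name: create a cool-tier blob storage account over https only
+ azure_rm_storageaccount:
+ resource_group: myResourceGroup
+ name: clh0004
+ type: Standard_LRS
+ kind: BlobStorage
+ access_tier: Cool
+ https_only: true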
+'''
+
+
+RETURN = '''
+state:
+ description:
+ - Current state of the storage account.
+ returned: always
+ type: complex
+ contains:
+ account_type:
+ description:
+ - Type of storage account.
+ returned: always
+ type: str
+ sample: Standard_RAGRS
+ custom_domain:
+ description:
+ - User domain assigned to the storage account.
+ returned: always
+ type: complex
+ contains:
+ name:
+ description:
+ - CNAME source.
+ returned: always
+ type: str
+ sample: testaccount
+ use_sub_domain:
+ description:
+ - Whether to use sub domain.
+ returned: always
+ type: bool
+ sample: true
+ id:
+ description:
+ - Resource ID.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/clh0003"
+ location:
+ description:
+ - Valid Azure location. Defaults to location of the resource group.
+ returned: always
+ type: str
+ sample: eastus2
+ name:
+ description:
+ - Name of the storage account to update or create.
+ returned: always
+ type: str
+ sample: clh0003
+ primary_endpoints:
+ description:
+ - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the primary location.
+ returned: always
+ type: dict
+ sample: {
+ "blob": "https://clh0003.blob.core.windows.net/",
+ "queue": "https://clh0003.queue.core.windows.net/",
+ "table": "https://clh0003.table.core.windows.net/"
+ }
+ primary_location:
+ description:
+ - The location of the primary data center for the storage account.
+ returned: always
+ type: str
+ sample: eastus2
+ provisioning_state:
+ description:
+ - The status of the storage account.
+ - Possible values include C(Creating), C(ResolvingDNS), C(Succeeded).
+ returned: always
+ type: str
+ sample: Succeeded
+ resource_group:
+ description:
+ - The resource group's name.
+ returned: always
+ type: str
+ sample: Testing
+ secondary_endpoints:
+ description:
+ - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the secondary location.
+ returned: always
+ type: dict
+ sample: {
+ "blob": "https://clh0003-secondary.blob.core.windows.net/",
+ "queue": "https://clh0003-secondary.queue.core.windows.net/",
+ "table": "https://clh0003-secondary.table.core.windows.net/"
+ }
+ secondary_location:
+ description:
+ - The location of the geo-replicated secondary for the storage account.
+ returned: always
+ type: str
+ sample: centralus
+ status_of_primary:
+ description:
+ - The status of the primary location of the storage account; either C(available) or C(unavailable).
+ returned: always
+ type: str
+ sample: available
+ status_of_secondary:
+ description:
+ - The status of the secondary location of the storage account; either C(available) or C(unavailable).
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description:
+ - Resource tags.
+ returned: always
+ type: dict
+ sample: { 'tags1': 'value1' }
+ type:
+ description:
+ - The storage account type.
+ returned: always
+ type: str
+ sample: "Microsoft.Storage/storageAccounts"
+'''
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from azure.storage.cloudstorageaccount import CloudStorageAccount
+ from azure.common import AzureMissingResourceHttpError
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+import copy
+from ansible.module_utils.azure_rm_common import AZURE_SUCCESS_STATE, AzureRMModuleBase
+from ansible.module_utils._text import to_native
+
+cors_rule_spec = dict(
+ allowed_origins=dict(type='list', elements='str', required=True),
+ allowed_methods=dict(type='list', elements='str', required=True),
+ max_age_in_seconds=dict(type='int', required=True),
+ exposed_headers=dict(type='list', elements='str', required=True),
+ allowed_headers=dict(type='list', elements='str', required=True),
+)
+
+
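+# Order-insensitive comparison of two CORS rule lists: every rule in cors1 must match
+# a distinct rule in cors2 on all five fields, with the list-valued fields compared as sets.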
+def compare_cors(cors1, cors2):
+ if len(cors1) != len(cors2):
+ return False
+ copy2 = copy.copy(cors2)
+ for rule1 in cors1:
+ matched = False
+ for rule2 in copy2:
+ if (rule1['max_age_in_seconds'] == rule2['max_age_in_seconds']
+ and set(rule1['allowed_methods']) == set(rule2['allowed_methods'])
+ and set(rule1['allowed_origins']) == set(rule2['allowed_origins'])
+ and set(rule1['allowed_headers']) == set(rule2['allowed_headers'])
+ and set(rule1['exposed_headers']) == set(rule2['exposed_headers'])):
+ matched = True
+ copy2.remove(rule2)
+ if not matched:
+ return False
+ return True
+
+
+class AzureRMStorageAccount(AzureRMModuleBase):
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ account_type=dict(type='str',
+ choices=['Premium_LRS', 'Standard_GRS', 'Standard_LRS', 'StandardSSD_LRS', 'Standard_RAGRS', 'Standard_ZRS', 'Premium_ZRS'],
+ aliases=['type']),
+ custom_domain=dict(type='dict', aliases=['custom_dns_domain_suffix']),
+ location=dict(type='str'),
+ name=dict(type='str', required=True),
+ resource_group=dict(required=True, type='str', aliases=['resource_group_name']),
+ state=dict(default='present', choices=['present', 'absent']),
+ force_delete_nonempty=dict(type='bool', default=False, aliases=['force']),
+ tags=dict(type='dict'),
+ kind=dict(type='str', default='Storage', choices=['Storage', 'StorageV2', 'BlobStorage']),
+ access_tier=dict(type='str', choices=['Hot', 'Cool']),
+ https_only=dict(type='bool', default=False),
+ blob_cors=dict(type='list', options=cors_rule_spec, elements='dict')
+ )
+
+ self.results = dict(
+ changed=False,
+ state=dict()
+ )
+
+ self.account_dict = None
+ self.resource_group = None
+ self.name = None
+ self.state = None
+ self.location = None
+ self.account_type = None
+ self.custom_domain = None
+ self.tags = None
+ self.force_delete_nonempty = None
+ self.kind = None
+ self.access_tier = None
+ self.https_only = None
+ self.blob_cors = None
+
+ super(AzureRMStorageAccount, self).__init__(self.module_arg_spec,
+ supports_check_mode=True)
+
+ def exec_module(self, **kwargs):
+
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ setattr(self, key, kwargs[key])
+
+ resource_group = self.get_resource_group(self.resource_group)
+ if not self.location:
+ # Set default location
+ self.location = resource_group.location
+
+ if len(self.name) < 3 or len(self.name) > 24:
+ self.fail("Parameter error: name length must be between 3 and 24 characters.")
+
+ if self.custom_domain:
+ if self.custom_domain.get('name', None) is None:
+ self.fail("Parameter error: expecting custom_domain to have a name attribute of type string.")
+ if self.custom_domain.get('use_sub_domain', None) is None:
+ self.fail("Parameter error: expecting custom_domain to have a use_sub_domain "
+ "attribute of type boolean.")
+
+ self.account_dict = self.get_account()
+
+ if self.state == 'present' and self.account_dict and \
+ self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE:
+ self.fail("Error: storage account {0} has not completed provisioning. State is {1}. Expecting state "
+ "to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE))
+
+ if self.account_dict is not None:
+ self.results['state'] = self.account_dict
+ else:
+ self.results['state'] = dict()
+
+ if self.state == 'present':
+ if not self.account_dict:
+ self.results['state'] = self.create_account()
+ else:
+ self.update_account()
+ elif self.state == 'absent' and self.account_dict:
+ self.delete_account()
+ self.results['state'] = dict(Status='Deleted')
+
+ return self.results
+
+ def check_name_availability(self):
+ self.log('Checking name availability for {0}'.format(self.name))
+ try:
+ response = self.storage_client.storage_accounts.check_name_availability(self.name)
+ except CloudError as e:
+ self.log('Error attempting to validate name.')
+ self.fail("Error checking name availability: {0}".format(str(e)))
+ if not response.name_available:
+ self.log('Error name not available.')
+ self.fail("{0} - {1}".format(response.message, response.reason))
+
+ def get_account(self):
+ self.log('Get properties for account {0}'.format(self.name))
+ account_obj = None
+ blob_service_props = None
+ account_dict = None
+
+ try:
+ account_obj = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name)
+ blob_service_props = self.storage_client.blob_services.get_service_properties(self.resource_group, self.name)
+ except CloudError:
+ pass
+
+ if account_obj:
+ account_dict = self.account_obj_to_dict(account_obj, blob_service_props)
+
+ return account_dict
+
+ def account_obj_to_dict(self, account_obj, blob_service_props=None):
+ account_dict = dict(
+ id=account_obj.id,
+ name=account_obj.name,
+ location=account_obj.location,
+ resource_group=self.resource_group,
+ type=account_obj.type,
+ access_tier=(account_obj.access_tier.value
+ if account_obj.access_tier is not None else None),
+ sku_tier=account_obj.sku.tier.value,
+ sku_name=account_obj.sku.name.value,
+ provisioning_state=account_obj.provisioning_state.value,
+ secondary_location=account_obj.secondary_location,
+ status_of_primary=(account_obj.status_of_primary.value
+ if account_obj.status_of_primary is not None else None),
+ status_of_secondary=(account_obj.status_of_secondary.value
+ if account_obj.status_of_secondary is not None else None),
+ primary_location=account_obj.primary_location,
+ https_only=account_obj.enable_https_traffic_only
+ )
+ account_dict['custom_domain'] = None
+ if account_obj.custom_domain:
+ account_dict['custom_domain'] = dict(
+ name=account_obj.custom_domain.name,
+ use_sub_domain=account_obj.custom_domain.use_sub_domain
+ )
+
+ account_dict['primary_endpoints'] = None
+ if account_obj.primary_endpoints:
+ account_dict['primary_endpoints'] = dict(
+ blob=account_obj.primary_endpoints.blob,
+ queue=account_obj.primary_endpoints.queue,
+ table=account_obj.primary_endpoints.table
+ )
+ account_dict['secondary_endpoints'] = None
+ if account_obj.secondary_endpoints:
+ account_dict['secondary_endpoints'] = dict(
+ blob=account_obj.secondary_endpoints.blob,
+ queue=account_obj.secondary_endpoints.queue,
+ table=account_obj.secondary_endpoints.table
+ )
+ account_dict['tags'] = None
+ if account_obj.tags:
+ account_dict['tags'] = account_obj.tags
+ if blob_service_props and blob_service_props.cors and blob_service_props.cors.cors_rules:
+ account_dict['blob_cors'] = [dict(
+ allowed_origins=[to_native(y) for y in x.allowed_origins],
+ allowed_methods=[to_native(y) for y in x.allowed_methods],
+ max_age_in_seconds=x.max_age_in_seconds,
+ exposed_headers=[to_native(y) for y in x.exposed_headers],
+ allowed_headers=[to_native(y) for y in x.allowed_headers]
+ ) for x in blob_service_props.cors.cors_rules]
+ return account_dict
+
+ def update_account(self):
+ self.log('Update storage account {0}'.format(self.name))
+ if bool(self.https_only) != bool(self.account_dict.get('https_only')):
+ self.results['changed'] = True
+ self.account_dict['https_only'] = self.https_only
+ if not self.check_mode:
+ try:
+ parameters = self.storage_models.StorageAccountUpdateParameters(enable_https_traffic_only=self.https_only)
+ self.storage_client.storage_accounts.update(self.resource_group,
+ self.name,
+ parameters)
+ except Exception as exc:
+ self.fail("Failed to update account type: {0}".format(str(exc)))
+
+ if self.account_type:
+ if self.account_type != self.account_dict['sku_name']:
+ # change the account type
+ SkuName = self.storage_models.SkuName
+ if self.account_dict['sku_name'] in [SkuName.premium_lrs, SkuName.standard_zrs]:
+ self.fail("Storage accounts of type {0} and {1} cannot be changed.".format(
+ SkuName.premium_lrs, SkuName.standard_zrs))
+ if self.account_type in [SkuName.premium_lrs, SkuName.standard_zrs]:
+ self.fail("Storage account of type {0} cannot be changed to a type of {1} or {2}.".format(
+ self.account_dict['sku_name'], SkuName.premium_lrs, SkuName.standard_zrs))
+
+ self.results['changed'] = True
+ self.account_dict['sku_name'] = self.account_type
+
+ if self.results['changed'] and not self.check_mode:
+ # Perform the update. The API only allows changing one attribute per call.
+ try:
+ self.log("sku_name: %s" % self.account_dict['sku_name'])
+ self.log("sku_tier: %s" % self.account_dict['sku_tier'])
+ sku = self.storage_models.Sku(name=SkuName(self.account_dict['sku_name']))
+ sku.tier = self.storage_models.SkuTier(self.account_dict['sku_tier'])
+ parameters = self.storage_models.StorageAccountUpdateParameters(sku=sku)
+ self.storage_client.storage_accounts.update(self.resource_group,
+ self.name,
+ parameters)
+ except Exception as exc:
+ self.fail("Failed to update account type: {0}".format(str(exc)))
+
+ if self.custom_domain:
+ if not self.account_dict['custom_domain'] or self.account_dict['custom_domain'] != self.custom_domain:
+ self.results['changed'] = True
+ self.account_dict['custom_domain'] = self.custom_domain
+
+ if self.results['changed'] and not self.check_mode:
+ new_domain = self.storage_models.CustomDomain(name=self.custom_domain['name'],
+ use_sub_domain=self.custom_domain['use_sub_domain'])
+ parameters = self.storage_models.StorageAccountUpdateParameters(custom_domain=new_domain)
+ try:
+ self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
+ except Exception as exc:
+ self.fail("Failed to update custom domain: {0}".format(str(exc)))
+
+ if self.access_tier:
+ if not self.account_dict['access_tier'] or self.account_dict['access_tier'] != self.access_tier:
+ self.results['changed'] = True
+ self.account_dict['access_tier'] = self.access_tier
+
+ if self.results['changed'] and not self.check_mode:
+ parameters = self.storage_models.StorageAccountUpdateParameters(access_tier=self.access_tier)
+ try:
+ self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
+ except Exception as exc:
+ self.fail("Failed to update access tier: {0}".format(str(exc)))
+
+ update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags'])
+ if update_tags:
+ self.results['changed'] = True
+ if not self.check_mode:
+ parameters = self.storage_models.StorageAccountUpdateParameters(tags=self.account_dict['tags'])
+ try:
+ self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
+ except Exception as exc:
+ self.fail("Failed to update tags: {0}".format(str(exc)))
+
+ if self.blob_cors and not compare_cors(self.account_dict.get('blob_cors', []), self.blob_cors):
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.set_blob_cors()
+
+ def create_account(self):
+ self.log("Creating account {0}".format(self.name))
+
+ if not self.location:
+ self.fail('Parameter error: location required when creating a storage account.')
+
+ if not self.account_type:
+ self.fail('Parameter error: account_type required when creating a storage account.')
+
+ if not self.access_tier and self.kind == 'BlobStorage':
+ self.fail('Parameter error: access_tier required when creating a storage account of type BlobStorage.')
+
+ self.check_name_availability()
+ self.results['changed'] = True
+
+ if self.check_mode:
+ account_dict = dict(
+ location=self.location,
+ account_type=self.account_type,
+ name=self.name,
+ resource_group=self.resource_group,
+ enable_https_traffic_only=self.https_only,
+ tags=dict()
+ )
+ if self.tags:
+ account_dict['tags'] = self.tags
+ if self.blob_cors:
+ account_dict['blob_cors'] = self.blob_cors
+ return account_dict
+ sku = self.storage_models.Sku(name=self.storage_models.SkuName(self.account_type))
+ sku.tier = self.storage_models.SkuTier.standard if 'Standard' in self.account_type else \
+ self.storage_models.SkuTier.premium
+ parameters = self.storage_models.StorageAccountCreateParameters(sku=sku,
+ kind=self.kind,
+ location=self.location,
+ tags=self.tags,
+ access_tier=self.access_tier)
+ self.log(str(parameters))
+ try:
+ poller = self.storage_client.storage_accounts.create(self.resource_group, self.name, parameters)
+ self.get_poller_result(poller)
+ except CloudError as e:
+ self.log('Error creating storage account.')
+ self.fail("Failed to create account: {0}".format(str(e)))
+ if self.blob_cors:
+ self.set_blob_cors()
+ # the poller doesn't actually return anything
+ return self.get_account()
+
+ def delete_account(self):
+ if self.account_dict['provisioning_state'] == self.storage_models.ProvisioningState.succeeded.value and \
+ not self.force_delete_nonempty and self.account_has_blob_containers():
+ self.fail("Account contains blob containers. Is it in use? Use the force_delete_nonempty option to attempt deletion.")
+
+ self.log('Delete storage account {0}'.format(self.name))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ status = self.storage_client.storage_accounts.delete(self.resource_group, self.name)
+ self.log("delete status: ")
+ self.log(str(status))
+ except CloudError as e:
+ self.fail("Failed to delete the account: {0}".format(str(e)))
+ return True
+
+ def account_has_blob_containers(self):
+ '''
+ If there are blob containers, then there are likely VMs depending on this account and it should
+ not be deleted.
+ '''
+ self.log('Checking for existing blob containers')
+ blob_service = self.get_blob_client(self.resource_group, self.name)
+ try:
+ response = blob_service.list_containers()
+ except AzureMissingResourceHttpError:
+ # No blob storage available?
+ return False
+
+ if len(response.items) > 0:
+ return True
+ return False
+
+ def set_blob_cors(self):
+ try:
+ cors_rules = self.storage_models.CorsRules(cors_rules=[self.storage_models.CorsRule(**x) for x in self.blob_cors])
+ self.storage_client.blob_services.set_service_properties(self.resource_group,
+ self.name,
+ self.storage_models.BlobServiceProperties(cors=cors_rules))
+ except Exception as exc:
+ self.fail("Failed to set CORS rules: {0}".format(str(exc)))
+
+
+def main():
+ AzureRMStorageAccount()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_webapp.py b/test/support/integration/plugins/modules/azure_rm_webapp.py
new file mode 100644
index 0000000000..4f185f4580
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_webapp.py
@@ -0,0 +1,1070 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_webapp
+version_added: "2.7"
+short_description: Manage Web App instances
+description:
+ - Create, update and delete instance of Web App.
+
+options:
+ resource_group:
+ description:
+ - Name of the resource group to which the resource belongs.
+ required: True
+ name:
+ description:
+ - Unique name of the app to create or update. To create or update a deployment slot, use the {slot} parameter.
+ required: True
+
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+
+ plan:
+ description:
+ - App service plan. Required for creation.
+ - Can be name of existing app service plan in same resource group as web app.
+ - Can be the resource ID of an existing app service plan. For example
+ /subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>.
+ - Can be a dict containing five parameters, defined below.
+ - C(name), name of app service plan.
+ - C(resource_group), resource group of the app service plan.
+ - C(sku), SKU of app service plan, allowed values listed on U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/).
+        - C(is_linux), whether or not the app service plan is Linux. Defaults to C(False).
+ - C(number_of_workers), number of workers for app service plan.
+
+ frameworks:
+ description:
+ - Set of run time framework settings. Each setting is a dictionary.
+ - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
+ suboptions:
+ name:
+ description:
+ - Name of the framework.
+ - Supported framework list for Windows web app and Linux web app is different.
+ - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
+                - Windows web apps support multiple frameworks at the same time.
+ - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
+ - Linux web apps support only one framework.
+ - Java framework is mutually exclusive with others.
+ choices:
+ - java
+ - net_framework
+ - php
+ - python
+ - ruby
+ - dotnetcore
+ - node
+ version:
+ description:
+ - Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info.
+ - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
+ - C(php) supported value sample, C(5.5), C(5.6), C(7.0).
+ - C(python) supported value sample, C(5.5), C(5.6), C(7.0).
+ - C(node) supported value sample, C(6.6), C(6.9).
+ - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
+ - C(ruby) supported value sample, C(2.3).
+ - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
+ settings:
+ description:
+ - List of settings of the framework.
+ suboptions:
+ java_container:
+ description:
+ - Name of Java container.
+ - Supported only when I(frameworks=java). Sample values C(Tomcat), C(Jetty).
+ java_container_version:
+ description:
+ - Version of Java container.
+ - Supported only when I(frameworks=java).
+                        - Sample values for C(Tomcat), C(8.0), C(8.5), C(9.0). For C(Jetty), C(9.1), C(9.3).
+
+ container_settings:
+ description:
+ - Web app container settings.
+ suboptions:
+ name:
+ description:
+ - Name of container, for example C(imagename:tag).
+ registry_server_url:
+ description:
+ - Container registry server URL, for example C(mydockerregistry.io).
+ registry_server_user:
+ description:
+ - The container registry server user name.
+ registry_server_password:
+ description:
+ - The container registry server password.
+
+ scm_type:
+ description:
+ - Repository type of deployment source, for example C(LocalGit), C(GitHub).
+ - List of supported values maintained at U(https://docs.microsoft.com/en-us/rest/api/appservice/webapps/createorupdate#scmtype).
+
+ deployment_source:
+ description:
+ - Deployment source for git.
+ suboptions:
+ url:
+ description:
+                - Repository URL of deployment source.
+
+ branch:
+ description:
+ - The branch name of the repository.
+ startup_file:
+ description:
+ - The web's startup file.
+ - Used only for Linux web apps.
+
+ client_affinity_enabled:
+ description:
+ - Whether or not to send session affinity cookies, which route client requests in the same session to the same instance.
+ type: bool
+ default: True
+
+ https_only:
+ description:
+ - Configures web site to accept only https requests.
+ type: bool
+
+ dns_registration:
+ description:
+ - Whether or not the web app hostname is registered with DNS on creation. Set to C(false) to register.
+ type: bool
+
+ skip_custom_domain_verification:
+ description:
+ - Whether or not to skip verification of custom (non *.azurewebsites.net) domains associated with web app. Set to C(true) to skip.
+ type: bool
+
+ ttl_in_seconds:
+ description:
+ - Time to live in seconds for web app default domain name.
+
+ app_settings:
+ description:
+ - Configure web app application settings. Suboptions are in key value pair format.
+
+ purge_app_settings:
+ description:
+ - Purge any existing application settings. Replace web app application settings with app_settings.
+ type: bool
+
+ app_state:
+ description:
+ - Start/Stop/Restart the web app.
+ type: str
+ choices:
+ - started
+ - stopped
+ - restarted
+ default: started
+
+ state:
+ description:
+ - State of the Web App.
+ - Use C(present) to create or update a Web App and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Yunge Zhu (@yungezz)
+
+'''
+
+EXAMPLES = '''
+    - name: Create a windows web app with a non-existent app service plan
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myWinWebapp
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ is_linux: false
+ sku: S1
+
+    - name: Create a docker web app with some app settings and a docker image
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myDockerWebapp
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ is_linux: true
+ sku: S1
+ number_of_workers: 2
+ app_settings:
+ testkey: testvalue
+ testkey2: testvalue2
+ container_settings:
+ name: ansible/ansible:ubuntu1404
+
+    - name: Create a docker web app with a private ACR registry
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myDockerWebapp
+ plan: myAppServicePlan
+ app_settings:
+ testkey: testvalue
+ container_settings:
+ name: ansible/ubuntu1404
+ registry_server_url: myregistry.io
+ registry_server_user: user
+ registry_server_password: pass
+
+ - name: Create a linux web app with Node 6.6 framework
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myLinuxWebapp
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ app_settings:
+ testkey: testvalue
+ frameworks:
+ - name: "node"
+ version: "6.6"
+
+ - name: Create a windows web app with node, php
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myWinWebapp
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ app_settings:
+ testkey: testvalue
+ frameworks:
+ - name: "node"
+          version: "6.6"
+ - name: "php"
+ version: "7.0"
+
+ - name: Create a stage deployment slot for an existing web app
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myWebapp/slots/stage
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ app_settings:
+        testkey: testvalue
+
+ - name: Create a linux web app with java framework
+ azure_rm_webapp:
+ resource_group: myResourceGroup
+ name: myLinuxWebapp
+ plan:
+ resource_group: myAppServicePlan_rg
+ name: myAppServicePlan
+ app_settings:
+ testkey: testvalue
+ frameworks:
+ - name: "java"
+ version: "8"
+ settings:
+ java_container: "Tomcat"
+ java_container_version: "8.5"
+'''
+
+RETURN = '''
+azure_webapp:
+ description:
+ - ID of current web app.
+ returned: always
+ type: str
+ sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp"
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+ from azure.mgmt.web.models import (
+ site_config, app_service_plan, Site,
+ AppServicePlan, SkuDescription, NameValuePair
+ )
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+container_settings_spec = dict(
+ name=dict(type='str', required=True),
+ registry_server_url=dict(type='str'),
+ registry_server_user=dict(type='str'),
+ registry_server_password=dict(type='str', no_log=True)
+)
+
+deployment_source_spec = dict(
+ url=dict(type='str'),
+ branch=dict(type='str')
+)
+
+
+framework_settings_spec = dict(
+ java_container=dict(type='str', required=True),
+ java_container_version=dict(type='str', required=True)
+)
+
+
+framework_spec = dict(
+ name=dict(
+ type='str',
+ required=True,
+ choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
+ version=dict(type='str', required=True),
+ settings=dict(type='dict', options=framework_settings_spec)
+)
+
+
+def _normalize_sku(sku):
+ if sku is None:
+ return sku
+
+ sku = sku.upper()
+ if sku == 'FREE':
+ return 'F1'
+ elif sku == 'SHARED':
+ return 'D1'
+ return sku
+
+
+def get_sku_name(tier):
+ tier = tier.upper()
+ if tier == 'F1' or tier == "FREE":
+ return 'FREE'
+ elif tier == 'D1' or tier == "SHARED":
+ return 'SHARED'
+ elif tier in ['B1', 'B2', 'B3', 'BASIC']:
+ return 'BASIC'
+ elif tier in ['S1', 'S2', 'S3']:
+ return 'STANDARD'
+ elif tier in ['P1', 'P2', 'P3']:
+ return 'PREMIUM'
+ elif tier in ['P1V2', 'P2V2', 'P3V2']:
+ return 'PREMIUMV2'
+ else:
+ return None
+
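+# A short worked example of the two helpers above, as used by create_app_service_plan() below:
+#   _normalize_sku('Free')  # -> 'F1'
+#   get_sku_name('F1')      # -> 'FREE'
+#   _normalize_sku('S1')    # -> 'S1'
+#   get_sku_name('S1')      # -> 'STANDARD'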
+
+def appserviceplan_to_dict(plan):
+ return dict(
+ id=plan.id,
+ name=plan.name,
+ kind=plan.kind,
+ location=plan.location,
+ reserved=plan.reserved,
+ is_linux=plan.reserved,
+ provisioning_state=plan.provisioning_state,
+ tags=plan.tags if plan.tags else None
+ )
+
+
+def webapp_to_dict(webapp):
+ return dict(
+ id=webapp.id,
+ name=webapp.name,
+ location=webapp.location,
+ client_cert_enabled=webapp.client_cert_enabled,
+ enabled=webapp.enabled,
+ reserved=webapp.reserved,
+ client_affinity_enabled=webapp.client_affinity_enabled,
+ server_farm_id=webapp.server_farm_id,
+ host_names_disabled=webapp.host_names_disabled,
+ https_only=webapp.https_only if hasattr(webapp, 'https_only') else None,
+ skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None,
+ ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None,
+ state=webapp.state,
+ tags=webapp.tags if webapp.tags else None
+ )
+
+
+class Actions:
+ CreateOrUpdate, UpdateAppSettings, Delete = range(3)
+
+
+class AzureRMWebApps(AzureRMModuleBase):
+ """Configuration class for an Azure RM Web App resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ location=dict(
+ type='str'
+ ),
+ plan=dict(
+ type='raw'
+ ),
+ frameworks=dict(
+ type='list',
+ elements='dict',
+ options=framework_spec
+ ),
+ container_settings=dict(
+ type='dict',
+ options=container_settings_spec
+ ),
+ scm_type=dict(
+ type='str',
+ ),
+ deployment_source=dict(
+ type='dict',
+ options=deployment_source_spec
+ ),
+ startup_file=dict(
+ type='str'
+ ),
+ client_affinity_enabled=dict(
+ type='bool',
+ default=True
+ ),
+ dns_registration=dict(
+ type='bool'
+ ),
+ https_only=dict(
+ type='bool'
+ ),
+ skip_custom_domain_verification=dict(
+ type='bool'
+ ),
+ ttl_in_seconds=dict(
+ type='int'
+ ),
+ app_settings=dict(
+ type='dict'
+ ),
+ purge_app_settings=dict(
+ type='bool',
+ default=False
+ ),
+ app_state=dict(
+ type='str',
+ choices=['started', 'stopped', 'restarted'],
+ default='started'
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ mutually_exclusive = [['container_settings', 'frameworks']]
+
+ self.resource_group = None
+ self.name = None
+ self.location = None
+
+ # update in create_or_update as parameters
+ self.client_affinity_enabled = True
+ self.dns_registration = None
+ self.skip_custom_domain_verification = None
+ self.ttl_in_seconds = None
+ self.https_only = None
+
+ self.tags = None
+
+ # site config, e.g app settings, ssl
+ self.site_config = dict()
+ self.app_settings = dict()
+ self.app_settings_strDic = None
+
+ # app service plan
+ self.plan = None
+
+ # siteSourceControl
+ self.deployment_source = dict()
+
+        # site, used at creation or update, e.g. windows/linux, client_affinity and other first-level args
+ self.site = None
+
+ # property for internal usage, not used for sdk
+ self.container_settings = None
+
+ self.purge_app_settings = False
+ self.app_state = 'started'
+
+ self.results = dict(
+ changed=False,
+ id=None,
+ )
+ self.state = None
+ self.to_do = []
+
+ self.frameworks = None
+
+ # set site_config value from kwargs
+ self.site_config_updatable_properties = ["net_framework_version",
+ "java_version",
+ "php_version",
+ "python_version",
+ "scm_type"]
+
+ # updatable_properties
+ self.updatable_properties = ["client_affinity_enabled",
+ "force_dns_registration",
+ "https_only",
+ "skip_custom_domain_verification",
+ "ttl_in_seconds"]
+
+ self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
+ self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']
+
+ super(AzureRMWebApps, self).__init__(derived_arg_spec=self.module_arg_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ supports_tags=True)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+ elif kwargs[key] is not None:
+ if key == "scm_type":
+ self.site_config[key] = kwargs[key]
+
+ old_response = None
+ response = None
+ to_be_updated = False
+
+ # set location
+ resource_group = self.get_resource_group(self.resource_group)
+ if not self.location:
+ self.location = resource_group.location
+
+ # get existing web app
+ old_response = self.get_webapp()
+
+ if old_response:
+ self.results['id'] = old_response['id']
+
+ if self.state == 'present':
+ if not self.plan and not old_response:
+ self.fail("Please specify plan for newly created web app.")
+
+ if not self.plan:
+ self.plan = old_response['server_farm_id']
+
+ self.plan = self.parse_resource_to_dict(self.plan)
+
+ # get app service plan
+ is_linux = False
+ old_plan = self.get_app_service_plan()
+ if old_plan:
+ is_linux = old_plan['reserved']
+ else:
+ is_linux = self.plan['is_linux'] if 'is_linux' in self.plan else False
+
+ if self.frameworks:
+ # java is mutually exclusive with other frameworks
+ if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
+ self.fail('Java is mutually exclusive with other frameworks.')
+
+ if is_linux:
+ if len(self.frameworks) != 1:
+ self.fail('Can specify one framework only for Linux web app.')
+
+ if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
+ self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))
+
+ self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper()
+
+ if self.frameworks[0]['name'] == 'java':
+ if self.frameworks[0]['version'] != '8':
+ self.fail("Linux web app only supports java 8.")
+ if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() != 'tomcat':
+ self.fail("Linux web app only supports tomcat container.")
+
+ if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() == 'tomcat':
+ self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8'
+ else:
+ self.site_config['linux_fx_version'] = 'JAVA|8-jre8'
+ else:
+ for fx in self.frameworks:
+ if fx.get('name') not in self.supported_windows_frameworks:
+ self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name')))
+ else:
+ self.site_config[fx.get('name') + '_version'] = fx.get('version')
+
+ if 'settings' in fx and fx['settings'] is not None:
+ for key, value in fx['settings'].items():
+ self.site_config[key] = value
+
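+                # Example of the framework handling above: on Linux, frameworks
+                # [{'name': 'node', 'version': '6.6'}] becomes linux_fx_version 'NODE|6.6',
+                # and a java framework with a Tomcat 8.5 container becomes 'TOMCAT|8.5-jre8';
+                # on Windows, [{'name': 'php', 'version': '7.0'}] sets php_version = '7.0'.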
+ if not self.app_settings:
+ self.app_settings = dict()
+
+ if self.container_settings:
+ linux_fx_version = 'DOCKER|'
+
+ if self.container_settings.get('registry_server_url'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
+
+ linux_fx_version += self.container_settings['registry_server_url'] + '/'
+
+ linux_fx_version += self.container_settings['name']
+
+ self.site_config['linux_fx_version'] = linux_fx_version
+
+ if self.container_settings.get('registry_server_user'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user']
+
+ if self.container_settings.get('registry_server_password'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password']
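+                # For example, registry_server_url 'myregistry.io' with name 'ansible/ubuntu1404'
+                # yields linux_fx_version 'DOCKER|myregistry.io/ansible/ubuntu1404', with the
+                # registry credentials exposed through the DOCKER_REGISTRY_SERVER_* app settings.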
+
+ # init site
+ self.site = Site(location=self.location, site_config=self.site_config)
+
+ if self.https_only is not None:
+ self.site.https_only = self.https_only
+
+ if self.client_affinity_enabled:
+ self.site.client_affinity_enabled = self.client_affinity_enabled
+
+ # check if the web app already present in the resource group
+ if not old_response:
+ self.log("Web App instance doesn't exist")
+
+ to_be_updated = True
+ self.to_do.append(Actions.CreateOrUpdate)
+ self.site.tags = self.tags
+
+ # service plan is required for creation
+ if not self.plan:
+ self.fail("Please specify app service plan in plan parameter.")
+
+ if not old_plan:
+ # no existing service plan, create one
+ if (not self.plan.get('name') or not self.plan.get('sku')):
+ self.fail('Please specify name, is_linux, sku in plan')
+
+ if 'location' not in self.plan:
+ plan_resource_group = self.get_resource_group(self.plan['resource_group'])
+ self.plan['location'] = plan_resource_group.location
+
+ old_plan = self.create_app_service_plan()
+
+ self.site.server_farm_id = old_plan['id']
+
+ # if linux, setup startup_file
+ if old_plan['is_linux']:
+ if hasattr(self, 'startup_file'):
+ self.site_config['app_command_line'] = self.startup_file
+
+ # set app setting
+ if self.app_settings:
+ app_settings = []
+ for key in self.app_settings.keys():
+ app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))
+
+ self.site_config['app_settings'] = app_settings
+ else:
+ # existing web app, do update
+ self.log("Web App instance already exists")
+
+ self.log('Result: {0}'.format(old_response))
+
+ update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))
+
+ if update_tags:
+ to_be_updated = True
+
+ # check if root level property changed
+ if self.is_updatable_property_changed(old_response):
+ to_be_updated = True
+ self.to_do.append(Actions.CreateOrUpdate)
+
+ # check if site_config changed
+ old_config = self.get_webapp_configuration()
+
+ if self.is_site_config_changed(old_config):
+ to_be_updated = True
+ self.to_do.append(Actions.CreateOrUpdate)
+
+ # check if linux_fx_version changed
+ if old_config.linux_fx_version != self.site_config.get('linux_fx_version', ''):
+ to_be_updated = True
+ self.to_do.append(Actions.CreateOrUpdate)
+
+ self.app_settings_strDic = self.list_app_settings()
+
+ # purge existing app_settings:
+ if self.purge_app_settings:
+ to_be_updated = True
+ self.app_settings_strDic = dict()
+ self.to_do.append(Actions.UpdateAppSettings)
+
+ # check if app settings changed
+ if self.purge_app_settings or self.is_app_settings_changed():
+ to_be_updated = True
+ self.to_do.append(Actions.UpdateAppSettings)
+
+ if self.app_settings:
+ for key in self.app_settings.keys():
+ self.app_settings_strDic[key] = self.app_settings[key]
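+                # For example, with purge_app_settings=true and app_settings {'k': 'v'} the web app
+                # ends up with only k=v; without purge, k=v is merged over the existing settings.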
+
+ elif self.state == 'absent':
+ if old_response:
+ self.log("Delete Web App instance")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_webapp()
+
+ self.log('Web App instance deleted')
+
+ else:
+ self.fail("Web app {0} not exists.".format(self.name))
+
+ if to_be_updated:
+ self.log('Need to Create/Update web app')
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ if Actions.CreateOrUpdate in self.to_do:
+ response = self.create_update_webapp()
+
+ self.results['id'] = response['id']
+
+ if Actions.UpdateAppSettings in self.to_do:
+ update_response = self.update_app_settings()
+ self.results['id'] = update_response.id
+
+ webapp = None
+ if old_response:
+ webapp = old_response
+ if response:
+ webapp = response
+
+ if webapp:
+ if (webapp['state'] != 'Stopped' and self.app_state == 'stopped') or \
+ (webapp['state'] != 'Running' and self.app_state == 'started') or \
+ self.app_state == 'restarted':
+
+ self.results['changed'] = True
+ if self.check_mode:
+ return self.results
+
+ self.set_webapp_state(self.app_state)
+
+ return self.results
+
+    # compare existing web app with input, determine whether it's an update operation
+ def is_updatable_property_changed(self, existing_webapp):
+ for property_name in self.updatable_properties:
+ if hasattr(self, property_name) and getattr(self, property_name) is not None and \
+ getattr(self, property_name) != existing_webapp.get(property_name, None):
+ return True
+
+ return False
+
+ # compare xxx_version
+ def is_site_config_changed(self, existing_config):
+ for fx_version in self.site_config_updatable_properties:
+ if self.site_config.get(fx_version):
+ if not getattr(existing_config, fx_version) or \
+ getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper():
+ return True
+
+ return False
+
+ # comparing existing app setting with input, determine whether it's changed
+ def is_app_settings_changed(self):
+ if self.app_settings:
+ if self.app_settings_strDic:
+ for key in self.app_settings.keys():
+ if self.app_settings[key] != self.app_settings_strDic.get(key, None):
+ return True
+ else:
+ return True
+ return False
+
+    # comparing deployment source with input, determine whether it's changed
+ def is_deployment_source_changed(self, existing_webapp):
+ if self.deployment_source:
+ if self.deployment_source.get('url') \
+ and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']:
+ return True
+
+ if self.deployment_source.get('branch') \
+ and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']:
+ return True
+
+ return False
+
+ def create_update_webapp(self):
+ '''
+ Creates or updates Web App with the specified configuration.
+
+ :return: deserialized Web App instance state dictionary
+ '''
+ self.log(
+ "Creating / Updating the Web App instance {0}".format(self.name))
+
+ try:
+ skip_dns_registration = self.dns_registration
+ force_dns_registration = None if self.dns_registration is None else not self.dns_registration
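+            # Note the mapping: dns_registration=False gives skip_dns_registration=False and
+            # force_dns_registration=True, so the hostname is registered with DNS, matching the
+            # documented 'set to false to register' behaviour of the dns_registration option.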
+
+ response = self.web_client.web_apps.create_or_update(resource_group_name=self.resource_group,
+ name=self.name,
+ site_envelope=self.site,
+ skip_dns_registration=skip_dns_registration,
+ skip_custom_domain_verification=self.skip_custom_domain_verification,
+ force_dns_registration=force_dns_registration,
+ ttl_in_seconds=self.ttl_in_seconds)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the Web App instance.')
+ self.fail(
+ "Error creating the Web App instance: {0}".format(str(exc)))
+ return webapp_to_dict(response)
+
+ def delete_webapp(self):
+ '''
+ Deletes specified Web App instance in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the Web App instance {0}".format(self.name))
+ try:
+ response = self.web_client.web_apps.delete(resource_group_name=self.resource_group,
+ name=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete the Web App instance.')
+ self.fail(
+ "Error deleting the Web App instance: {0}".format(str(e)))
+
+ return True
+
+ def get_webapp(self):
+ '''
+ Gets the properties of the specified Web App.
+
+ :return: deserialized Web App instance state dictionary
+ '''
+ self.log(
+ "Checking if the Web App instance {0} is present".format(self.name))
+
+ response = None
+
+ try:
+ response = self.web_client.web_apps.get(resource_group_name=self.resource_group,
+ name=self.name)
+
+ # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
+ if response is not None:
+ self.log("Response : {0}".format(response))
+ self.log("Web App instance : {0} found".format(response.name))
+ return webapp_to_dict(response)
+
+ except CloudError as ex:
+ pass
+
+ self.log("Didn't find web app {0} in resource group {1}".format(
+ self.name, self.resource_group))
+
+ return False
+
+ def get_app_service_plan(self):
+ '''
+ Gets app service plan
+ :return: deserialized app service plan dictionary
+ '''
+ self.log("Get App Service Plan {0}".format(self.plan['name']))
+
+ try:
+ response = self.web_client.app_service_plans.get(
+ resource_group_name=self.plan['resource_group'],
+ name=self.plan['name'])
+
+ # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
+ if response is not None:
+ self.log("Response : {0}".format(response))
+ self.log("App Service Plan : {0} found".format(response.name))
+
+ return appserviceplan_to_dict(response)
+ except CloudError as ex:
+ pass
+
+ self.log("Didn't find app service plan {0} in resource group {1}".format(
+ self.plan['name'], self.plan['resource_group']))
+
+ return False
+
+ def create_app_service_plan(self):
+ '''
+ Creates app service plan
+ :return: deserialized app service plan dictionary
+ '''
+ self.log("Create App Service Plan {0}".format(self.plan['name']))
+
+ try:
+ # normalize sku
+ sku = _normalize_sku(self.plan['sku'])
+
+ sku_def = SkuDescription(tier=get_sku_name(
+ sku), name=sku, capacity=(self.plan.get('number_of_workers', None)))
+ plan_def = AppServicePlan(
+ location=self.plan['location'], app_service_plan_name=self.plan['name'], sku=sku_def, reserved=(self.plan.get('is_linux', None)))
+
+ poller = self.web_client.app_service_plans.create_or_update(
+ self.plan['resource_group'], self.plan['name'], plan_def)
+
+ if isinstance(poller, LROPoller):
+ response = self.get_poller_result(poller)
+
+ self.log("Response : {0}".format(response))
+
+ return appserviceplan_to_dict(response)
+ except CloudError as ex:
+ self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(
+ self.plan['name'], self.plan['resource_group'], str(ex)))
+
+ def list_app_settings(self):
+ '''
+ List application settings
+ :return: deserialized list response
+ '''
+ self.log("List application setting")
+
+ try:
+
+ response = self.web_client.web_apps.list_application_settings(
+ resource_group_name=self.resource_group, name=self.name)
+ self.log("Response : {0}".format(response))
+
+ return response.properties
+ except CloudError as ex:
+ self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def update_app_settings(self):
+ '''
+ Update application settings
+ :return: deserialized updating response
+ '''
+ self.log("Update application setting")
+
+ try:
+ response = self.web_client.web_apps.update_application_settings(
+ resource_group_name=self.resource_group, name=self.name, properties=self.app_settings_strDic)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.fail("Failed to update application settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def create_or_update_source_control(self):
+ '''
+ Update site source control
+ :return: deserialized updating response
+ '''
+ self.log("Update site source control")
+
+ if self.deployment_source is None:
+ return False
+
+ self.deployment_source['is_manual_integration'] = False
+ self.deployment_source['is_mercurial'] = False
+
+ try:
+            response = self.web_client.web_apps.create_or_update_source_control(
+ self.resource_group, self.name, self.deployment_source)
+ self.log("Response : {0}".format(response))
+
+ return response.as_dict()
+ except CloudError as ex:
+ self.fail("Failed to update site source control for web app {0} in resource group {1}".format(
+ self.name, self.resource_group))
+
+ def get_webapp_configuration(self):
+ '''
+ Get web app configuration
+ :return: deserialized web app configuration response
+ '''
+ self.log("Get web app configuration")
+
+ try:
+
+ response = self.web_client.web_apps.get_configuration(
+ resource_group_name=self.resource_group, name=self.name)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.log("Failed to get configuration for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ return False
+
+ def set_webapp_state(self, appstate):
+ '''
+ Start/stop/restart web app
+ :return: deserialized updating response
+ '''
+ try:
+ if appstate == 'started':
+ response = self.web_client.web_apps.start(resource_group_name=self.resource_group, name=self.name)
+ elif appstate == 'stopped':
+ response = self.web_client.web_apps.stop(resource_group_name=self.resource_group, name=self.name)
+ elif appstate == 'restarted':
+ response = self.web_client.web_apps.restart(resource_group_name=self.resource_group, name=self.name)
+ else:
+ self.fail("Invalid web app state {0}".format(appstate))
+
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ request_id = ex.request_id if ex.request_id else ''
+ self.log("Failed to {0} web app {1} in resource group {2}, request_id {3} - {4}".format(
+ appstate, self.name, self.resource_group, request_id, str(ex)))
+
+
+def main():
+ """Main execution"""
+ AzureRMWebApps()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_webapp_info.py b/test/support/integration/plugins/modules/azure_rm_webapp_info.py
new file mode 100644
index 0000000000..4a3b4cd484
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_webapp_info.py
@@ -0,0 +1,488 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_webapp_info
+
+version_added: "2.9"
+
+short_description: Get Azure web app facts
+
+description:
+    - Get facts for a specific web app, all web apps in a resource group, or all web apps in the current subscription.
+
+options:
+ name:
+ description:
+ - Only show results for a specific web app.
+ resource_group:
+ description:
+ - Limit results by resource group.
+ return_publish_profile:
+ description:
+ - Indicate whether to return publishing profile of the web app.
+ default: False
+ type: bool
+ tags:
+ description:
+ - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - Yunge Zhu (@yungezz)
+'''
+
+EXAMPLES = '''
+ - name: Get facts for web app by name
+ azure_rm_webapp_info:
+ resource_group: myResourceGroup
+ name: winwebapp1
+
+ - name: Get facts for web apps in resource group
+ azure_rm_webapp_info:
+ resource_group: myResourceGroup
+
+ - name: Get facts for web apps with tags
+ azure_rm_webapp_info:
+ tags:
+ - testtag
+ - foo:bar
+'''
+
+RETURN = '''
+webapps:
+ description:
+ - List of web apps.
+ returned: always
+ type: complex
+ contains:
+ id:
+ description:
+ - ID of the web app.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp
+ name:
+ description:
+ - Name of the web app.
+ returned: always
+ type: str
+ sample: winwebapp1
+ resource_group:
+ description:
+ - Resource group of the web app.
+ returned: always
+ type: str
+ sample: myResourceGroup
+ location:
+ description:
+ - Location of the web app.
+ returned: always
+ type: str
+ sample: eastus
+ plan:
+ description:
+ - ID of app service plan used by the web app.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppServicePlan
+ app_settings:
+ description:
+ - App settings of the application. Only returned when web app has app settings.
+ returned: always
+ type: dict
+ sample: {
+ "testkey": "testvalue",
+ "testkey2": "testvalue2"
+ }
+ frameworks:
+ description:
+ - Frameworks of the application. Only returned when web app has frameworks.
+ returned: always
+ type: list
+ sample: [
+ {
+ "name": "net_framework",
+ "version": "v4.0"
+ },
+ {
+ "name": "java",
+ "settings": {
+ "java_container": "tomcat",
+ "java_container_version": "8.5"
+ },
+ "version": "1.7"
+ },
+ {
+ "name": "php",
+ "version": "5.6"
+ }
+ ]
+ availability_state:
+ description:
+ - Availability of this web app.
+ returned: always
+ type: str
+ sample: Normal
+ default_host_name:
+ description:
+ - Host name of the web app.
+ returned: always
+ type: str
+ sample: vxxisurg397winapp4.azurewebsites.net
+ enabled:
+ description:
+ - Indicates the web app enabled or not.
+ returned: always
+ type: bool
+ sample: true
+ enabled_host_names:
+ description:
+ - Enabled host names of the web app.
+ returned: always
+ type: list
+ sample: [
+ "vxxisurg397winapp4.azurewebsites.net",
+ "vxxisurg397winapp4.scm.azurewebsites.net"
+ ]
+ host_name_ssl_states:
+ description:
+ - SSL state per host names of the web app.
+ returned: always
+ type: list
+ sample: [
+ {
+ "hostType": "Standard",
+ "name": "vxxisurg397winapp4.azurewebsites.net",
+ "sslState": "Disabled"
+ },
+ {
+ "hostType": "Repository",
+ "name": "vxxisurg397winapp4.scm.azurewebsites.net",
+ "sslState": "Disabled"
+ }
+ ]
+ host_names:
+ description:
+ - Host names of the web app.
+ returned: always
+ type: list
+ sample: [
+ "vxxisurg397winapp4.azurewebsites.net"
+ ]
+ outbound_ip_addresses:
+ description:
+ - Outbound IP address of the web app.
+ returned: always
+ type: str
+ sample: "40.71.11.131,40.85.166.200,168.62.166.67,137.135.126.248,137.135.121.45"
+ ftp_publish_url:
+ description:
+ - Publishing URL of the web app when deployment type is FTP.
+ returned: always
+ type: str
+ sample: ftp://xxxx.ftp.azurewebsites.windows.net
+ state:
+ description:
+ - State of the web app.
+ returned: always
+ type: str
+ sample: running
+ publishing_username:
+ description:
+ - Publishing profile user name.
+ returned: only when I(return_publish_profile=True).
+ type: str
+ sample: "$vxxisuRG397winapp4"
+ publishing_password:
+ description:
+ - Publishing profile password.
+ returned: only when I(return_publish_profile=True).
+ type: str
+ sample: "uvANsPQpGjWJmrFfm4Ssd5rpBSqGhjMk11pMSgW2vCsQtNx9tcgZ0xN26s9A"
+ tags:
+ description:
+ - Tags assigned to the resource. Dictionary of string:string pairs.
+ returned: always
+ type: dict
+ sample: { tag1: abc }
+'''
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from azure.common import AzureMissingResourceHttpError, AzureHttpError
+except Exception:
+ # This is handled in azure_rm_common
+ pass
+
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+AZURE_OBJECT_CLASS = 'WebApp'
+
+
+class AzureRMWebAppInfo(AzureRMModuleBase):
+
+ def __init__(self):
+
+ self.module_arg_spec = dict(
+ name=dict(type='str'),
+ resource_group=dict(type='str'),
+ tags=dict(type='list'),
+ return_publish_profile=dict(type='bool', default=False),
+ )
+
+ self.results = dict(
+ changed=False,
+ webapps=[],
+ )
+
+ self.name = None
+ self.resource_group = None
+ self.tags = None
+ self.return_publish_profile = False
+
+ self.framework_names = ['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']
+
+ super(AzureRMWebAppInfo, self).__init__(self.module_arg_spec,
+ supports_tags=False,
+ facts_module=True)
+
+ def exec_module(self, **kwargs):
+ is_old_facts = self.module._name == 'azure_rm_webapp_facts'
+ if is_old_facts:
+ self.module.deprecate("The 'azure_rm_webapp_facts' module has been renamed to 'azure_rm_webapp_info'", version='2.13')
+
+ for key in self.module_arg_spec:
+ setattr(self, key, kwargs[key])
+
+ if self.name:
+ self.results['webapps'] = self.list_by_name()
+ elif self.resource_group:
+ self.results['webapps'] = self.list_by_resource_group()
+ else:
+ self.results['webapps'] = self.list_all()
+
+ return self.results
+
+ def list_by_name(self):
+ self.log('Get web app {0}'.format(self.name))
+ item = None
+ result = []
+
+ try:
+ item = self.web_client.web_apps.get(self.resource_group, self.name)
+ except CloudError:
+ pass
+
+ if item and self.has_tags(item.tags, self.tags):
+ curated_result = self.get_curated_webapp(self.resource_group, self.name, item)
+ result = [curated_result]
+
+ return result
+
+ def list_by_resource_group(self):
+        self.log('List web apps in resource group {0}'.format(self.resource_group))
+ try:
+ response = list(self.web_client.web_apps.list_by_resource_group(self.resource_group))
+ except CloudError as exc:
+ request_id = exc.request_id if exc.request_id else ''
+ self.fail("Error listing web apps in resource groups {0}, request id: {1} - {2}".format(self.resource_group, request_id, str(exc)))
+
+ results = []
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ curated_output = self.get_curated_webapp(self.resource_group, item.name, item)
+ results.append(curated_output)
+ return results
+
+ def list_all(self):
+ self.log('List web apps in current subscription')
+ try:
+ response = list(self.web_client.web_apps.list())
+ except CloudError as exc:
+ request_id = exc.request_id if exc.request_id else ''
+ self.fail("Error listing web apps, request id {0} - {1}".format(request_id, str(exc)))
+
+ results = []
+ for item in response:
+ if self.has_tags(item.tags, self.tags):
+ curated_output = self.get_curated_webapp(item.resource_group, item.name, item)
+ results.append(curated_output)
+ return results
+
+ def list_webapp_configuration(self, resource_group, name):
+ self.log('Get web app {0} configuration'.format(name))
+
+ response = []
+
+ try:
+ response = self.web_client.web_apps.get_configuration(resource_group_name=resource_group, name=name)
+ except CloudError as ex:
+ request_id = ex.request_id if ex.request_id else ''
+ self.fail('Error getting web app {0} configuration, request id {1} - {2}'.format(name, request_id, str(ex)))
+
+ return response.as_dict()
+
+ def list_webapp_appsettings(self, resource_group, name):
+ self.log('Get web app {0} app settings'.format(name))
+
+ response = []
+
+ try:
+ response = self.web_client.web_apps.list_application_settings(resource_group_name=resource_group, name=name)
+ except CloudError as ex:
+ request_id = ex.request_id if ex.request_id else ''
+ self.fail('Error getting web app {0} app settings, request id {1} - {2}'.format(name, request_id, str(ex)))
+
+ return response.as_dict()
+
+ def get_publish_credentials(self, resource_group, name):
+ self.log('Get web app {0} publish credentials'.format(name))
+ try:
+ poller = self.web_client.web_apps.list_publishing_credentials(resource_group, name)
+ if isinstance(poller, LROPoller):
+ response = self.get_poller_result(poller)
+ except CloudError as ex:
+ request_id = ex.request_id if ex.request_id else ''
+            self.fail('Error getting web app {0} publishing credentials, request id {1} - {2}'.format(name, request_id, str(ex)))
+ return response
+
+ def get_webapp_ftp_publish_url(self, resource_group, name):
+ import xmltodict
+
+ self.log('Get web app {0} app publish profile'.format(name))
+
+ url = None
+ try:
+ content = self.web_client.web_apps.list_publishing_profile_xml_with_secrets(resource_group_name=resource_group, name=name)
+ if not content:
+ return url
+
+ full_xml = ''
+ for f in content:
+ full_xml += f.decode()
+ profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
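+            # The publishing profile streamed above is XML of the (assumed) shape
+            # <publishData><publishProfile publishMethod="FTP" publishUrl="ftp://..." .../></publishData>;
+            # xmltodict exposes the attributes as '@publishMethod' and '@publishUrl', which the
+            # loop below uses to pick out the FTP endpoint.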
+
+ if not profiles:
+ return url
+
+ for profile in profiles:
+ if profile['@publishMethod'] == 'FTP':
+ url = profile['@publishUrl']
+
+ except CloudError as ex:
+            self.fail('Error getting web app {0} publishing profile - {1}'.format(name, str(ex)))
+
+ return url
+
+ def get_curated_webapp(self, resource_group, name, webapp):
+ pip = self.serialize_obj(webapp, AZURE_OBJECT_CLASS)
+
+ try:
+ site_config = self.list_webapp_configuration(resource_group, name)
+ app_settings = self.list_webapp_appsettings(resource_group, name)
+ publish_cred = self.get_publish_credentials(resource_group, name)
+ ftp_publish_url = self.get_webapp_ftp_publish_url(resource_group, name)
+ except CloudError as ex:
+ pass
+ return self.construct_curated_webapp(webapp=pip,
+ configuration=site_config,
+ app_settings=app_settings,
+ deployment_slot=None,
+ ftp_publish_url=ftp_publish_url,
+ publish_credentials=publish_cred)
+
+ def construct_curated_webapp(self,
+ webapp,
+ configuration=None,
+ app_settings=None,
+ deployment_slot=None,
+ ftp_publish_url=None,
+ publish_credentials=None):
+ curated_output = dict()
+ curated_output['id'] = webapp['id']
+ curated_output['name'] = webapp['name']
+ curated_output['resource_group'] = webapp['properties']['resourceGroup']
+ curated_output['location'] = webapp['location']
+ curated_output['plan'] = webapp['properties']['serverFarmId']
+ curated_output['tags'] = webapp.get('tags', None)
+
+        # important properties from the output; these do not match input arguments.
+ curated_output['app_state'] = webapp['properties']['state']
+ curated_output['availability_state'] = webapp['properties']['availabilityState']
+ curated_output['default_host_name'] = webapp['properties']['defaultHostName']
+ curated_output['host_names'] = webapp['properties']['hostNames']
+ curated_output['enabled'] = webapp['properties']['enabled']
+ curated_output['enabled_host_names'] = webapp['properties']['enabledHostNames']
+ curated_output['host_name_ssl_states'] = webapp['properties']['hostNameSslStates']
+ curated_output['outbound_ip_addresses'] = webapp['properties']['outboundIpAddresses']
+
+ # curated site_config
+ if configuration:
+ curated_output['frameworks'] = []
+ for fx_name in self.framework_names:
+ fx_version = configuration.get(fx_name + '_version', None)
+ if fx_version:
+ fx = {
+ 'name': fx_name,
+ 'version': fx_version
+ }
+ # java container setting
+ if fx_name == 'java':
+ if configuration['java_container'] and configuration['java_container_version']:
+ settings = {
+ 'java_container': configuration['java_container'].lower(),
+ 'java_container_version': configuration['java_container_version']
+ }
+ fx['settings'] = settings
+
+ curated_output['frameworks'].append(fx)
+
+ # linux_fx_version
+ if configuration.get('linux_fx_version', None):
+ tmp = configuration.get('linux_fx_version').split("|")
+ if len(tmp) == 2:
+ curated_output['frameworks'].append({'name': tmp[0].lower(), 'version': tmp[1]})
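+                    # e.g. a linux_fx_version of 'NODE|6.6' is reported as
+                    # {'name': 'node', 'version': '6.6'} in the frameworks list.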
+
+ # curated app_settings
+ if app_settings and app_settings.get('properties', None):
+ curated_output['app_settings'] = dict()
+ for item in app_settings['properties']:
+ curated_output['app_settings'][item] = app_settings['properties'][item]
+
+        # curated deployment_slot
+ if deployment_slot:
+ curated_output['deployment_slot'] = deployment_slot
+
+ # ftp_publish_url
+ if ftp_publish_url:
+ curated_output['ftp_publish_url'] = ftp_publish_url
+
+ # curated publish credentials
+ if publish_credentials and self.return_publish_profile:
+ curated_output['publishing_username'] = publish_credentials.publishing_user_name
+ curated_output['publishing_password'] = publish_credentials.publishing_password
+ return curated_output
+
+
+def main():
+ AzureRMWebAppInfo()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/azure_rm_webappslot.py b/test/support/integration/plugins/modules/azure_rm_webappslot.py
new file mode 100644
index 0000000000..ddba710b9d
--- /dev/null
+++ b/test/support/integration/plugins/modules/azure_rm_webappslot.py
@@ -0,0 +1,1058 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: azure_rm_webappslot
+version_added: "2.8"
+short_description: Manage Azure Web App slot
+description:
+ - Create, update and delete Azure Web App slot.
+
+options:
+ resource_group:
+ description:
+ - Name of the resource group to which the resource belongs.
+ required: True
+ name:
+ description:
+ - Unique name of the deployment slot to create or update.
+ required: True
+ webapp_name:
+ description:
+ - Web app name which this deployment slot belongs to.
+ required: True
+ location:
+ description:
+ - Resource location. If not set, location from the resource group will be used as default.
+ configuration_source:
+ description:
+ - Source slot to clone configurations from when creating slot. Use webapp's name to refer to the production slot.
+ auto_swap_slot_name:
+ description:
+            - Used to configure the target slot name for auto swap, or to disable auto swap.
+            - Set it to the target slot name to enable auto swap.
+            - Set it to False to disable auto slot swap.
+ swap:
+ description:
+ - Swap deployment slots of a web app.
+ suboptions:
+ action:
+ description:
+ - Swap types.
+ - C(preview) is to apply target slot settings on source slot first.
+ - C(swap) is to complete swapping.
+ - C(reset) is to reset the swap.
+ choices:
+ - preview
+ - swap
+ - reset
+ default: preview
+ target_slot:
+ description:
+ - Name of target slot to swap. If set to None, then swap with production slot.
+ preserve_vnet:
+ description:
+ - C(True) to preserve virtual network to the slot during swap. Otherwise C(False).
+ type: bool
+ default: True
+ frameworks:
+ description:
+ - Set of run time framework settings. Each setting is a dictionary.
+ - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
+ suboptions:
+ name:
+ description:
+ - Name of the framework.
+ - Supported framework list for Windows web app and Linux web app is different.
+ - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
+                - Windows web apps support multiple frameworks at the same time.
+ - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
+ - Linux web apps support only one framework.
+ - Java framework is mutually exclusive with others.
+ choices:
+ - java
+ - net_framework
+ - php
+ - python
+ - ruby
+ - dotnetcore
+ - node
+ version:
+ description:
+ - Version of the framework. For Linux web app supported value, see U(https://aka.ms/linux-stacks) for more info.
+ - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
+ - C(php) supported value sample, C(5.5), C(5.6), C(7.0).
+ - C(python) supported value sample, C(5.5), C(5.6), C(7.0).
+ - C(node) supported value sample, C(6.6), C(6.9).
+ - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
+                - C(ruby) supported value sample, C(2.3).
+ - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
+ settings:
+ description:
+ - List of settings of the framework.
+ suboptions:
+ java_container:
+ description:
+                        - Name of Java container. This is supported by the C(java) framework only, for example C(Tomcat), C(Jetty).
+ java_container_version:
+ description:
+                        - Version of Java container. This is supported by the C(java) framework only.
+ - For C(Tomcat), for example C(8.0), C(8.5), C(9.0). For C(Jetty), for example C(9.1), C(9.3).
+ container_settings:
+ description:
+ - Web app slot container settings.
+ suboptions:
+ name:
+ description:
+ - Name of container, for example C(imagename:tag).
+ registry_server_url:
+ description:
+ - Container registry server URL, for example C(mydockerregistry.io).
+ registry_server_user:
+ description:
+ - The container registry server user name.
+ registry_server_password:
+ description:
+ - The container registry server password.
+ startup_file:
+ description:
+ - The slot startup file.
+ - This only applies for Linux web app slot.
+ app_settings:
+ description:
+ - Configure web app slot application settings. Suboptions are in key value pair format.
+ purge_app_settings:
+ description:
+ - Purge any existing application settings. Replace slot application settings with app_settings.
+ type: bool
+ deployment_source:
+ description:
+ - Deployment source for git.
+ suboptions:
+ url:
+ description:
+ - Repository URL of deployment source.
+ branch:
+ description:
+ - The branch name of the repository.
+ app_state:
+ description:
+ - Start/Stop/Restart the slot.
+ type: str
+ choices:
+ - started
+ - stopped
+ - restarted
+ default: started
+ state:
+ description:
+ - State of the Web App deployment slot.
+ - Use C(present) to create or update a slot and C(absent) to delete it.
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+ - azure
+ - azure_tags
+
+author:
+ - Yunge Zhu(@yungezz)
+
+'''
+
+EXAMPLES = '''
+ - name: Create a webapp slot
+ azure_rm_webappslot:
+ resource_group: myResourceGroup
+ webapp_name: myJavaWebApp
+ name: stage
+ configuration_source: myJavaWebApp
+ app_settings:
+ testkey: testvalue
+
+    - name: Swap the slot with the production slot
+ azure_rm_webappslot:
+ resource_group: myResourceGroup
+ webapp_name: myJavaWebApp
+ name: stage
+ swap:
+ action: swap
+
+    - name: Stop the slot
+ azure_rm_webappslot:
+ resource_group: myResourceGroup
+ webapp_name: myJavaWebApp
+ name: stage
+ app_state: stopped
+
+    - name: Update a webapp slot app settings
+ azure_rm_webappslot:
+ resource_group: myResourceGroup
+ webapp_name: myJavaWebApp
+ name: stage
+ app_settings:
+ testkey: testvalue2
+
+    - name: Update a webapp slot frameworks
+ azure_rm_webappslot:
+ resource_group: myResourceGroup
+ webapp_name: myJavaWebApp
+ name: stage
+ frameworks:
+ - name: "node"
+ version: "10.1"
+'''
+
+RETURN = '''
+id:
+ description:
+ - ID of current slot.
+ returned: always
+ type: str
+ sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/testapp/slots/stage1
+'''
+
+import time
+from ansible.module_utils.azure_rm_common import AzureRMModuleBase
+
+try:
+ from msrestazure.azure_exceptions import CloudError
+ from msrest.polling import LROPoller
+ from msrest.serialization import Model
+ from azure.mgmt.web.models import (
+ site_config, app_service_plan, Site,
+ AppServicePlan, SkuDescription, NameValuePair
+ )
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+swap_spec = dict(
+ action=dict(
+ type='str',
+ choices=[
+ 'preview',
+ 'swap',
+ 'reset'
+ ],
+ default='preview'
+ ),
+ target_slot=dict(
+ type='str'
+ ),
+ preserve_vnet=dict(
+ type='bool',
+ default=True
+ )
+)
+
+container_settings_spec = dict(
+ name=dict(type='str', required=True),
+ registry_server_url=dict(type='str'),
+ registry_server_user=dict(type='str'),
+ registry_server_password=dict(type='str', no_log=True)
+)
+
+deployment_source_spec = dict(
+ url=dict(type='str'),
+ branch=dict(type='str')
+)
+
+
+framework_settings_spec = dict(
+ java_container=dict(type='str', required=True),
+ java_container_version=dict(type='str', required=True)
+)
+
+
+framework_spec = dict(
+ name=dict(
+ type='str',
+ required=True,
+ choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
+ version=dict(type='str', required=True),
+ settings=dict(type='dict', options=framework_settings_spec)
+)
+
+
+def webapp_to_dict(webapp):
+ return dict(
+ id=webapp.id,
+ name=webapp.name,
+ location=webapp.location,
+ client_cert_enabled=webapp.client_cert_enabled,
+ enabled=webapp.enabled,
+ reserved=webapp.reserved,
+ client_affinity_enabled=webapp.client_affinity_enabled,
+ server_farm_id=webapp.server_farm_id,
+ host_names_disabled=webapp.host_names_disabled,
+ https_only=webapp.https_only if hasattr(webapp, 'https_only') else None,
+ skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None,
+ ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None,
+ state=webapp.state,
+ tags=webapp.tags if webapp.tags else None
+ )
+
+
+def slot_to_dict(slot):
+ return dict(
+ id=slot.id,
+ resource_group=slot.resource_group,
+ server_farm_id=slot.server_farm_id,
+ target_swap_slot=slot.target_swap_slot,
+ enabled_host_names=slot.enabled_host_names,
+ slot_swap_status=slot.slot_swap_status,
+ name=slot.name,
+ location=slot.location,
+ enabled=slot.enabled,
+ reserved=slot.reserved,
+ host_names_disabled=slot.host_names_disabled,
+ state=slot.state,
+ repository_site_name=slot.repository_site_name,
+ default_host_name=slot.default_host_name,
+ kind=slot.kind,
+ site_config=slot.site_config,
+ tags=slot.tags if slot.tags else None
+ )
+
+
+class Actions:
+ NoAction, CreateOrUpdate, UpdateAppSettings, Delete = range(4)
+
+
+class AzureRMWebAppSlots(AzureRMModuleBase):
+ """Configuration class for an Azure RM Web App slot resource"""
+
+ def __init__(self):
+ self.module_arg_spec = dict(
+ resource_group=dict(
+ type='str',
+ required=True
+ ),
+ name=dict(
+ type='str',
+ required=True
+ ),
+ webapp_name=dict(
+ type='str',
+ required=True
+ ),
+ location=dict(
+ type='str'
+ ),
+ configuration_source=dict(
+ type='str'
+ ),
+ auto_swap_slot_name=dict(
+ type='raw'
+ ),
+ swap=dict(
+ type='dict',
+ options=swap_spec
+ ),
+ frameworks=dict(
+ type='list',
+ elements='dict',
+ options=framework_spec
+ ),
+ container_settings=dict(
+ type='dict',
+ options=container_settings_spec
+ ),
+ deployment_source=dict(
+ type='dict',
+ options=deployment_source_spec
+ ),
+ startup_file=dict(
+ type='str'
+ ),
+ app_settings=dict(
+ type='dict'
+ ),
+ purge_app_settings=dict(
+ type='bool',
+ default=False
+ ),
+ app_state=dict(
+ type='str',
+ choices=['started', 'stopped', 'restarted'],
+ default='started'
+ ),
+ state=dict(
+ type='str',
+ default='present',
+ choices=['present', 'absent']
+ )
+ )
+
+ mutually_exclusive = [['container_settings', 'frameworks']]
+
+ self.resource_group = None
+ self.name = None
+ self.webapp_name = None
+ self.location = None
+
+ self.auto_swap_slot_name = None
+ self.swap = None
+ self.tags = None
+ self.startup_file = None
+ self.configuration_source = None
+ self.clone = False
+
+ # site config, e.g app settings, ssl
+ self.site_config = dict()
+ self.app_settings = dict()
+ self.app_settings_strDic = None
+
+ # siteSourceControl
+ self.deployment_source = dict()
+
+        # site, used at creation or update.
+ self.site = None
+
+ # property for internal usage, not used for sdk
+ self.container_settings = None
+
+ self.purge_app_settings = False
+ self.app_state = 'started'
+
+ self.results = dict(
+ changed=False,
+ id=None,
+ )
+ self.state = None
+ self.to_do = Actions.NoAction
+
+ self.frameworks = None
+
+ # set site_config value from kwargs
+ self.site_config_updatable_frameworks = ["net_framework_version",
+ "java_version",
+ "php_version",
+ "python_version",
+ "linux_fx_version"]
+
+ self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
+ self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']
+
+ super(AzureRMWebAppSlots, self).__init__(derived_arg_spec=self.module_arg_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ supports_tags=True)
+
+ def exec_module(self, **kwargs):
+ """Main module execution method"""
+
+ for key in list(self.module_arg_spec.keys()) + ['tags']:
+ if hasattr(self, key):
+ setattr(self, key, kwargs[key])
+ elif kwargs[key] is not None:
+ if key == "scm_type":
+ self.site_config[key] = kwargs[key]
+
+ old_response = None
+ response = None
+ to_be_updated = False
+
+ # set location
+ resource_group = self.get_resource_group(self.resource_group)
+ if not self.location:
+ self.location = resource_group.location
+
+ # get web app
+ webapp_response = self.get_webapp()
+
+ if not webapp_response:
+ self.fail("Web app {0} does not exist in resource group {1}.".format(self.webapp_name, self.resource_group))
+
+ # get slot
+ old_response = self.get_slot()
+
+ # set is_linux
+ is_linux = True if webapp_response['reserved'] else False
+
+ if self.state == 'present':
+ if self.frameworks:
+ # java is mutually exclusive with other frameworks
+ if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
+ self.fail('Java is mutually exclusive with other frameworks.')
+
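+ # For Linux apps the chosen framework is encoded into
+ # site_config['linux_fx_version'] below, e.g. (illustrative values)
+ # 'PHP|7.2', 'TOMCAT|8.5-jre8' for java with a tomcat container, or
+ # 'JAVA|8-jre8' for plain java.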
+ if is_linux:
+ if len(self.frameworks) != 1:
+ self.fail('Only one framework can be specified for a Linux web app.')
+
+ if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
+ self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))
+
+ self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper()
+
+ if self.frameworks[0]['name'] == 'java':
+ if self.frameworks[0]['version'] != '8':
+ self.fail("Linux web app only supports java 8.")
+
+ if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
+ self.frameworks[0]['settings']['java_container'].lower() != 'tomcat':
+ self.fail("Linux web app only supports tomcat container.")
+
+ if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
+ self.frameworks[0]['settings']['java_container'].lower() == 'tomcat':
+ self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8'
+ else:
+ self.site_config['linux_fx_version'] = 'JAVA|8-jre8'
+ else:
+ for fx in self.frameworks:
+ if fx.get('name') not in self.supported_windows_frameworks:
+ self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name')))
+ else:
+ self.site_config[fx.get('name') + '_version'] = fx.get('version')
+
+ if 'settings' in fx and fx['settings'] is not None:
+ for key, value in fx['settings'].items():
+ self.site_config[key] = value
+
+ if not self.app_settings:
+ self.app_settings = dict()
+
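+ # Container settings are likewise encoded into linux_fx_version as
+ # 'DOCKER|<registry_server_url>/<name>', e.g. (illustrative value)
+ # 'DOCKER|myregistry.azurecr.io/myimage:latest'; registry credentials are
+ # exposed to the app via the DOCKER_REGISTRY_SERVER_* app settings.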
+ if self.container_settings:
+ linux_fx_version = 'DOCKER|'
+
+ if self.container_settings.get('registry_server_url'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
+
+ linux_fx_version += self.container_settings['registry_server_url'] + '/'
+
+ linux_fx_version += self.container_settings['name']
+
+ self.site_config['linux_fx_version'] = linux_fx_version
+
+ if self.container_settings.get('registry_server_user'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user']
+
+ if self.container_settings.get('registry_server_password'):
+ self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password']
+
+ # set auto_swap_slot_name
+ if self.auto_swap_slot_name and isinstance(self.auto_swap_slot_name, str):
+ self.site_config['auto_swap_slot_name'] = self.auto_swap_slot_name
+ if self.auto_swap_slot_name is False:
+ self.site_config['auto_swap_slot_name'] = None
+
+ # init site
+ self.site = Site(location=self.location, site_config=self.site_config)
+
+ # check if the slot is already present in the webapp
+ if not old_response:
+ self.log("Web App slot doesn't exist")
+
+ to_be_updated = True
+ self.to_do = Actions.CreateOrUpdate
+ self.site.tags = self.tags
+
+ # if linux, setup startup_file
+ if self.startup_file:
+ self.site_config['app_command_line'] = self.startup_file
+
+ # set app setting
+ if self.app_settings:
+ app_settings = []
+ for key in self.app_settings.keys():
+ app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))
+
+ self.site_config['app_settings'] = app_settings
+
+ # clone slot
+ if self.configuration_source:
+ self.clone = True
+
+ else:
+ # existing slot, do update
+ self.log("Web App slot already exists")
+
+ self.log('Result: {0}'.format(old_response))
+
+ update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))
+
+ if update_tags:
+ to_be_updated = True
+
+ # check if site_config changed
+ old_config = self.get_configuration_slot(self.name)
+
+ if self.is_site_config_changed(old_config):
+ to_be_updated = True
+ self.to_do = Actions.CreateOrUpdate
+
+ self.app_settings_strDic = self.list_app_settings_slot(self.name)
+
+ # purge existing app_settings:
+ if self.purge_app_settings:
+ to_be_updated = True
+ self.to_do = Actions.UpdateAppSettings
+ self.app_settings_strDic = dict()
+
+ # check if app settings changed
+ if self.purge_app_settings or self.is_app_settings_changed():
+ to_be_updated = True
+ self.to_do = Actions.UpdateAppSettings
+
+ if self.app_settings:
+ for key in self.app_settings.keys():
+ self.app_settings_strDic[key] = self.app_settings[key]
+
+ elif self.state == 'absent':
+ if old_response:
+ self.log("Delete Web App slot")
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ self.delete_slot()
+
+ self.log('Web App slot deleted')
+
+ else:
+ self.log("Web app slot {0} not exists.".format(self.name))
+
+ if to_be_updated:
+ self.log('Need to create/update web app slot')
+ self.results['changed'] = True
+
+ if self.check_mode:
+ return self.results
+
+ if self.to_do == Actions.CreateOrUpdate:
+ response = self.create_update_slot()
+
+ self.results['id'] = response['id']
+
+ if self.clone:
+ self.clone_slot()
+
+ if self.to_do == Actions.UpdateAppSettings:
+ self.update_app_settings_slot()
+
+ slot = None
+ if response:
+ slot = response
+ if old_response:
+ slot = old_response
+
+ if slot:
+ if (slot['state'] != 'Stopped' and self.app_state == 'stopped') or \
+ (slot['state'] != 'Running' and self.app_state == 'started') or \
+ self.app_state == 'restarted':
+
+ self.results['changed'] = True
+ if self.check_mode:
+ return self.results
+
+ self.set_state_slot(self.app_state)
+
+ if self.swap:
+ self.results['changed'] = True
+ if self.check_mode:
+ return self.results
+
+ self.swap_slot()
+
+ return self.results
+
+ # compare site config
+ def is_site_config_changed(self, existing_config):
+ for fx_version in self.site_config_updatable_frameworks:
+ if self.site_config.get(fx_version):
+ if not getattr(existing_config, fx_version) or \
+ getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper():
+ return True
+
+ if self.auto_swap_slot_name is False and existing_config.auto_swap_slot_name is not None:
+ return True
+ elif self.auto_swap_slot_name and self.auto_swap_slot_name != getattr(existing_config, 'auto_swap_slot_name', None):
+ return True
+ return False
+
+ # compare existing app settings with the input to determine whether they have changed
+ def is_app_settings_changed(self):
+ if self.app_settings:
+ if len(self.app_settings_strDic) != len(self.app_settings):
+ return True
+
+ if self.app_settings_strDic != self.app_settings:
+ return True
+ return False
+
+ # compare the existing deployment source with the input to determine whether it has changed
+ def is_deployment_source_changed(self, existing_webapp):
+ if self.deployment_source:
+ if self.deployment_source.get('url') \
+ and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']:
+ return True
+
+ if self.deployment_source.get('branch') \
+ and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']:
+ return True
+
+ return False
+
+ def create_update_slot(self):
+ '''
+ Creates or updates Web App slot with the specified configuration.
+
+ :return: deserialized Web App instance state dictionary
+ '''
+ self.log(
+ "Creating / Updating the Web App slot {0}".format(self.name))
+
+ try:
+ response = self.web_client.web_apps.create_or_update_slot(resource_group_name=self.resource_group,
+ slot=self.name,
+ name=self.webapp_name,
+ site_envelope=self.site)
+ if isinstance(response, LROPoller):
+ response = self.get_poller_result(response)
+
+ except CloudError as exc:
+ self.log('Error attempting to create the Web App slot instance.')
+ self.fail("Error creating the Web App slot: {0}".format(str(exc)))
+ return slot_to_dict(response)
+
+ def delete_slot(self):
+ '''
+ Deletes specified Web App slot in the specified subscription and resource group.
+
+ :return: True
+ '''
+ self.log("Deleting the Web App slot {0}".format(self.name))
+ try:
+ response = self.web_client.web_apps.delete_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.name)
+ except CloudError as e:
+ self.log('Error attempting to delete the Web App slot.')
+ self.fail(
+ "Error deleting the Web App slots: {0}".format(str(e)))
+
+ return True
+
+ def get_webapp(self):
+ '''
+ Gets the properties of the specified Web App.
+
+ :return: deserialized Web App instance state dictionary
+ '''
+ self.log(
+ "Checking if the Web App instance {0} is present".format(self.webapp_name))
+
+ response = None
+
+ try:
+ response = self.web_client.web_apps.get(resource_group_name=self.resource_group,
+ name=self.webapp_name)
+
+ # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
+ if response is not None:
+ self.log("Response : {0}".format(response))
+ self.log("Web App instance : {0} found".format(response.name))
+ return webapp_to_dict(response)
+
+ except CloudError as ex:
+ pass
+
+ self.log("Didn't find web app {0} in resource group {1}".format(
+ self.webapp_name, self.resource_group))
+
+ return False
+
+ def get_slot(self):
+ '''
+ Gets the properties of the specified Web App slot.
+
+ :return: deserialized Web App slot state dictionary
+ '''
+ self.log(
+ "Checking if the Web App slot {0} is present".format(self.name))
+
+ response = None
+
+ try:
+ response = self.web_client.web_apps.get_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.name)
+
+ # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
+ if response is not None:
+ self.log("Response : {0}".format(response))
+ self.log("Web App slot: {0} found".format(response.name))
+ return slot_to_dict(response)
+
+ except CloudError as ex:
+ pass
+
+ self.log("Does not find web app slot {0} in resource group {1}".format(self.name, self.resource_group))
+
+ return False
+
+ def list_app_settings(self):
+ '''
+ List webapp application settings
+ :return: deserialized list response
+ '''
+ self.log("List webapp application setting")
+
+ try:
+
+ response = self.web_client.web_apps.list_application_settings(
+ resource_group_name=self.resource_group, name=self.webapp_name)
+ self.log("Response : {0}".format(response))
+
+ return response.properties
+ except CloudError as ex:
+ self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def list_app_settings_slot(self, slot_name):
+ '''
+ List application settings
+ :return: deserialized list response
+ '''
+ self.log("List application setting")
+
+ try:
+
+ response = self.web_client.web_apps.list_application_settings_slot(
+ resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
+ self.log("Response : {0}".format(response))
+
+ return response.properties
+ except CloudError as ex:
+ self.fail("Failed to list application settings for web app slot {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def update_app_settings_slot(self, slot_name=None, app_settings=None):
+ '''
+ Update application settings
+ :return: deserialized updating response
+ '''
+ self.log("Update application setting")
+
+ if slot_name is None:
+ slot_name = self.name
+ if app_settings is None:
+ app_settings = self.app_settings_strDic
+ try:
+ response = self.web_client.web_apps.update_application_settings_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=slot_name,
+ kind=None,
+ properties=app_settings)
+ self.log("Response : {0}".format(response))
+
+ return response.as_dict()
+ except CloudError as ex:
+ self.fail("Failed to update application settings for web app slot {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ return response
+
+ def create_or_update_source_control_slot(self):
+ '''
+ Update site source control
+ :return: deserialized updating response
+ '''
+ self.log("Update site source control")
+
+ if self.deployment_source is None:
+ return False
+
+ self.deployment_source['is_manual_integration'] = False
+ self.deployment_source['is_mercurial'] = False
+
+ try:
+ response = self.web_client.web_apps.create_or_update_source_control_slot(
+ resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ site_source_control=self.deployment_source,
+ slot=self.name)
+ self.log("Response : {0}".format(response))
+
+ return response.as_dict()
+ except CloudError as ex:
+ self.fail("Failed to update site source control for web app slot {0} in resource group {1}: {2}".format(
+ self.name, self.resource_group, str(ex)))
+
+ def get_configuration(self):
+ '''
+ Get web app configuration
+ :return: deserialized web app configuration response
+ '''
+ self.log("Get web app configuration")
+
+ try:
+
+ response = self.web_client.web_apps.get_configuration(
+ resource_group_name=self.resource_group, name=self.webapp_name)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.fail("Failed to get configuration for web app {0} in resource group {1}: {2}".format(
+ self.webapp_name, self.resource_group, str(ex)))
+
+ def get_configuration_slot(self, slot_name):
+ '''
+ Get slot configuration
+ :return: deserialized slot configuration response
+ '''
+ self.log("Get web app slot configuration")
+
+ try:
+
+ response = self.web_client.web_apps.get_configuration_slot(
+ resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.fail("Failed to get configuration for web app slot {0} in resource group {1}: {2}".format(
+ slot_name, self.resource_group, str(ex)))
+
+ def update_configuration_slot(self, slot_name=None, site_config=None):
+ '''
+ Update slot configuration
+ :return: deserialized slot configuration response
+ '''
+ self.log("Update web app slot configuration")
+
+ if slot_name is None:
+ slot_name = self.name
+ if site_config is None:
+ site_config = self.site_config
+ try:
+
+ response = self.web_client.web_apps.update_configuration_slot(
+ resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name, site_config=site_config)
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.fail("Failed to update configuration for web app slot {0} in resource group {1}: {2}".format(
+ slot_name, self.resource_group, str(ex)))
+
+ def set_state_slot(self, appstate):
+ '''
+ Start/stop/restart web app slot
+ :return: deserialized updating response
+ '''
+ try:
+ if appstate == 'started':
+ response = self.web_client.web_apps.start_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
+ elif appstate == 'stopped':
+ response = self.web_client.web_apps.stop_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
+ elif appstate == 'restarted':
+ response = self.web_client.web_apps.restart_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
+ else:
+ self.fail("Invalid web app slot state {0}".format(appstate))
+
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ request_id = ex.request_id if ex.request_id else ''
+ self.fail("Failed to {0} web app slot {1} in resource group {2}, request_id {3} - {4}".format(
+ appstate, self.name, self.resource_group, request_id, str(ex)))
+
+ def swap_slot(self):
+ '''
+ Swap slot
+ :return: deserialized response
+ '''
+ self.log("Swap slot")
+
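+ # Rough mapping of swap actions to the SDK calls below: 'swap' performs the
+ # slot swap (with production or with target_slot), 'preview' applies the slot
+ # configuration without completing the swap, and 'reset' discards a pending
+ # swap preview on the affected slot(s).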
+ try:
+ if self.swap['action'] == 'swap':
+ if self.swap['target_slot'] is None:
+ response = self.web_client.web_apps.swap_slot_with_production(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ target_slot=self.name,
+ preserve_vnet=self.swap['preserve_vnet'])
+ else:
+ response = self.web_client.web_apps.swap_slot_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.name,
+ target_slot=self.swap['target_slot'],
+ preserve_vnet=self.swap['preserve_vnet'])
+ elif self.swap['action'] == 'preview':
+ if self.swap['target_slot'] is None:
+ response = self.web_client.web_apps.apply_slot_config_to_production(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ target_slot=self.name,
+ preserve_vnet=self.swap['preserve_vnet'])
+ else:
+ response = self.web_client.web_apps.apply_slot_configuration_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.name,
+ target_slot=self.swap['target_slot'],
+ preserve_vnet=self.swap['preserve_vnet'])
+ elif self.swap['action'] == 'reset':
+ if self.swap['target_slot'] is None:
+ response = self.web_client.web_apps.reset_production_slot_config(resource_group_name=self.resource_group,
+ name=self.webapp_name)
+ else:
+ response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.swap['target_slot'])
+ response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
+ name=self.webapp_name,
+ slot=self.name)
+
+ self.log("Response : {0}".format(response))
+
+ return response
+ except CloudError as ex:
+ self.fail("Failed to swap web app slot {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))
+
+ def clone_slot(self):
+ if self.configuration_source:
+ src_slot = None if self.configuration_source.lower() == self.webapp_name.lower() else self.configuration_source
+
+ if src_slot is None:
+ site_config_clone_from = self.get_configuration()
+ else:
+ site_config_clone_from = self.get_configuration_slot(slot_name=src_slot)
+
+ self.update_configuration_slot(site_config=site_config_clone_from)
+
+ if src_slot is None:
+ app_setting_clone_from = self.list_app_settings()
+ else:
+ app_setting_clone_from = self.list_app_settings_slot(src_slot)
+
+ if self.app_settings:
+ app_setting_clone_from.update(self.app_settings)
+
+ self.update_app_settings_slot(app_settings=app_setting_clone_from)
+
+
+def main():
+ """Main execution"""
+ AzureRMWebAppSlots()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/cloudformation.py b/test/support/integration/plugins/modules/cloudformation.py
new file mode 100644
index 0000000000..cd03146501
--- /dev/null
+++ b/test/support/integration/plugins/modules/cloudformation.py
@@ -0,0 +1,837 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation
+short_description: Create or delete an AWS CloudFormation stack
+description:
+ - Launches or updates an AWS CloudFormation stack and waits for it to complete.
+notes:
+ - CloudFormation features change often, and this module tries to keep up. That means your botocore version should be fresh.
+ The version listed in the requirements is the oldest version that works with the module as a whole.
+ Some features may require recent versions, and we do not pinpoint a minimum version for each feature.
+ Instead of relying on the minimum version, keep botocore up to date. AWS is always releasing features and fixing bugs.
+version_added: "1.1"
+options:
+ stack_name:
+ description:
+ - Name of the CloudFormation stack.
+ required: true
+ type: str
+ disable_rollback:
+ description:
+ - If a stack fails to form, rollback will remove the stack.
+ default: false
+ type: bool
+ on_create_failure:
+ description:
+ - Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option.
+ choices:
+ - DO_NOTHING
+ - ROLLBACK
+ - DELETE
+ version_added: "2.8"
+ type: str
+ create_timeout:
+ description:
+ - The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED
+ version_added: "2.6"
+ type: int
+ template_parameters:
+ description:
+ - A dictionary of all the template variables for the stack. Each value can be a string or a dict.
+ - Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
+ default: {}
+ type: dict
+ state:
+ description:
+ - If I(state=present), stack will be created.
+ - If I(state=present) and if stack exists and template has changed, it will be updated.
+ - If I(state=absent), stack will be removed.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ template:
+ description:
+ - The local path of the CloudFormation template.
+ - This must be the full path to the file, relative to the working directory. If using roles this may look
+ like C(roles/cloudformation/files/cloudformation-example.json).
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template),
+ I(template_body) nor I(template_url) are specified, the previous template will be reused.
+ type: path
+ notification_arns:
+ description:
+ - A comma-separated list of Simple Notification Service (SNS) topic ARNs for publishing stack-related events.
+ version_added: "2.0"
+ type: str
+ stack_policy:
+ description:
+ - The path of the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified;
+ for instance, to allow all updates see U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
+ version_added: "1.9"
+ type: str
+ tags:
+ description:
+ - Dictionary of tags to associate with stack and its resources during stack creation.
+ - Can be updated later; updating tags removes previous entries.
+ version_added: "1.4"
+ type: dict
+ template_url:
+ description:
+ - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an
+ S3 bucket in the same region as the stack.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified,
+ the previous template will be reused.
+ version_added: "2.0"
+ type: str
+ create_changeset:
+ description:
+ - "If stack already exists create a changeset instead of directly applying changes. See the AWS Change Sets docs
+ U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)."
+ - "WARNING: if the stack does not exist, it will be created without changeset. If I(state=absent), the stack will be
+ deleted immediately with no changeset."
+ type: bool
+ default: false
+ version_added: "2.4"
+ changeset_name:
+ description:
+ - Name given to the changeset when creating a changeset.
+ - Only used when I(create_changeset=true).
+ - By default a name prefixed with Ansible-STACKNAME is generated based on input parameters.
+ See the AWS Change Sets docs for more information
+ U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
+ version_added: "2.4"
+ type: str
+ template_format:
+ description:
+ - This parameter is ignored since Ansible 2.3 and will be removed in Ansible 2.14.
+ - Templates are now passed raw to CloudFormation regardless of format.
+ version_added: "2.0"
+ type: str
+ role_arn:
+ description:
+ - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
+ docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
+ version_added: "2.3"
+ type: str
+ termination_protection:
+ description:
+ - Enable or disable termination protection on the stack. Only works with botocore >= 1.7.18.
+ type: bool
+ version_added: "2.5"
+ template_body:
+ description:
+ - Template body. Use this to pass in the actual body of the CloudFormation template.
+ - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
+ must be specified (but only one of them).
+ - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
+ are specified, the previous template will be reused.
+ version_added: "2.5"
+ type: str
+ events_limit:
+ description:
+ - Maximum number of CloudFormation events to fetch from a stack when creating or updating it.
+ default: 200
+ version_added: "2.7"
+ type: int
+ backoff_delay:
+ description:
+ - Number of seconds to wait for the next retry.
+ default: 3
+ version_added: "2.8"
+ type: int
+ required: False
+ backoff_max_delay:
+ description:
+ - Maximum amount of time to wait between retries.
+ default: 30
+ version_added: "2.8"
+ type: int
+ required: False
+ backoff_retries:
+ description:
+ - Number of times to retry operation.
+ - The AWS API throttling mechanism can cause CloudFormation calls to fail, so the module retries a couple of times.
+ default: 10
+ version_added: "2.8"
+ type: int
+ required: False
+ capabilities:
+ description:
+ - Specify capabilities that stack template contains.
+ - Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND).
+ type: list
+ elements: str
+ version_added: "2.8"
+ default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ]
+
+author: "James S. Martin (@jsmartin)"
+extends_documentation_fragment:
+- aws
+- ec2
+requirements: [ boto3, botocore>=1.5.45 ]
+'''
+
+EXAMPLES = '''
+- name: create a cloudformation stack
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ disable_rollback: true
+ template: "files/cloudformation-example.json"
+ template_parameters:
+ KeyName: "jmartin"
+ DiskType: "ephemeral"
+ InstanceType: "m1.small"
+ ClusterSize: 3
+ tags:
+ Stack: "ansible-cloudformation"
+
+# Basic role example
+- name: create a stack, specify role that cloudformation assumes
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ disable_rollback: true
+ template: "roles/cloudformation/files/cloudformation-example.json"
+ role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
+
+- name: delete a stack
+ cloudformation:
+ stack_name: "ansible-cloudformation-old"
+ state: "absent"
+
+# Create a stack, pass in template from a URL, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template via an URL
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ template_parameters:
+ KeyName: jmartin
+ DiskType: ephemeral
+ InstanceType: m1.small
+ ClusterSize: 3
+ tags:
+ Stack: ansible-cloudformation
+
+# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails,
+# pass in some parameters to the template, provide tags for resources created
+- name: create a stack, pass in the template body via lookup template
+ cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: present
+ region: us-east-1
+ disable_rollback: true
+ template_body: "{{ lookup('template', 'cloudformation.j2') }}"
+ template_parameters:
+ KeyName: jmartin
+ DiskType: ephemeral
+ InstanceType: m1.small
+ ClusterSize: 3
+ tags:
+ Stack: ansible-cloudformation
+
+# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute
+# When use_previous_value is set to True, the given value will be ignored and
+# CloudFormation will use the value from a previously submitted template.
+# If use_previous_value is set to False (default) the given value is used.
+- cloudformation:
+ stack_name: "ansible-cloudformation"
+ state: "present"
+ region: "us-east-1"
+ template: "files/cloudformation-example.json"
+ template_parameters:
+ DBSnapshotIdentifier:
+ use_previous_value: True
+ value: arn:aws:rds:es-east-1:000000000000:snapshot:rds:my-db-snapshot
+ DBName:
+ use_previous_value: True
+ tags:
+ Stack: "ansible-cloudformation"
+
+# Enable termination protection on a stack.
+# If the stack already exists, this will update its termination protection
+- name: enable termination protection during stack creation
+ cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ termination_protection: yes
+
+# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED
+# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back.
+- name: enable termination protection during stack creation
+ cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ create_timeout: 5
+
+# Configure rollback behaviour on the unsuccessful creation of a stack allowing
+# CloudFormation to clean up, or do nothing in the event of an unsuccessful
+# deployment
+# In this case, if on_create_failure is set to "DELETE", it will clean up the stack if
+# it fails to create
+- name: create stack which will delete on creation failure
+ cloudformation:
+ stack_name: my_stack
+ state: present
+ template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
+ on_create_failure: DELETE
+'''
+
+RETURN = '''
+events:
+ type: list
+ description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
+ returned: always
+ sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
+log:
+ description: Debugging logs. Useful when modifying or finding an error.
+ returned: always
+ type: list
+ sample: ["updating stack"]
+change_set_id:
+ description: The ID of the stack change set if one was created
+ returned: I(state=present) and I(create_changeset=true)
+ type: str
+ sample: "arn:aws:cloudformation:us-east-1:012345678901:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0"
+stack_resources:
+ description: AWS stack resources and their status. List of dictionaries, one dict per resource.
+ returned: state == present
+ type: list
+ sample: [
+ {
+ "last_updated_time": "2016-10-11T19:40:14.979000+00:00",
+ "logical_resource_id": "CFTestSg",
+ "physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
+ "resource_type": "AWS::EC2::SecurityGroup",
+ "status": "UPDATE_COMPLETE",
+ "status_reason": null
+ }
+ ]
+stack_outputs:
+ type: dict
+ description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
+ returned: state == present
+ sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
+''' # NOQA
+
+import json
+import time
+import uuid
+import traceback
+from hashlib import sha1
+
+try:
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, boto3_conn, boto_exception, ec2_argument_spec, get_aws_connection_info
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
+ '''This event data was never correct; it worked as a side effect. So the v2.3 format is different.'''
+ ret = {'events': [], 'log': []}
+
+ try:
+ pg = cfn.get_paginator(
+ 'describe_stack_events'
+ ).paginate(
+ StackName=stack_name,
+ PaginationConfig={'MaxItems': events_limit}
+ )
+ if token_filter is not None:
+ events = list(pg.search(
+ "StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
+ ))
+ else:
+ events = list(pg.search("StackEvents[*]"))
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+ error_msg = boto_exception(err)
+ if 'does not exist' in error_msg:
+ # missing stack, don't bail.
+ ret['log'].append('Stack does not exist.')
+ return ret
+ ret['log'].append('Unknown error: ' + str(error_msg))
+ return ret
+
+ for e in events:
+ eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e)
+ ret['events'].append(eventline)
+
+ if e['ResourceStatus'].endswith('FAILED'):
+ failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e)
+ ret['log'].append(failline)
+
+ return ret
+
+
+def create_stack(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.")
+
+ # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and
+ # 'OnFailure' only apply on creation, not update.
+ if module.params.get('on_create_failure') is not None:
+ stack_params['OnFailure'] = module.params['on_create_failure']
+ else:
+ stack_params['DisableRollback'] = module.params['disable_rollback']
+
+ if module.params.get('create_timeout') is not None:
+ stack_params['TimeoutInMinutes'] = module.params['create_timeout']
+ if module.params.get('termination_protection') is not None:
+ if boto_supports_termination_protection(cfn):
+ stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
+ else:
+ module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
+
+ try:
+ response = cfn.create_stack(**stack_params)
+ # Use stack ID to follow stack state in case of on_create_failure = DELETE
+ result = stack_operation(cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None))
+ except Exception as err:
+ error_msg = boto_exception(err)
+ module.fail_json(msg="Failed to create stack {0}: {1}.".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def list_changesets(cfn, stack_name):
+ res = cfn.list_change_sets(StackName=stack_name)
+ return [cs['ChangeSetName'] for cs in res['Summaries']]
+
+
+def create_changeset(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ module.fail_json(msg="Either 'template' or 'template_url' is required.")
+ if module.params['changeset_name'] is not None:
+ stack_params['ChangeSetName'] = module.params['changeset_name']
+
+ # changesets don't accept ClientRequestToken parameters
+ stack_params.pop('ClientRequestToken', None)
+
+ try:
+ changeset_name = build_changeset_name(stack_params)
+ stack_params['ChangeSetName'] = changeset_name
+
+ # Determine if this changeset already exists
+ pending_changesets = list_changesets(cfn, stack_params['StackName'])
+ if changeset_name in pending_changesets:
+ warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
+ result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
+ else:
+ cs = cfn.create_change_set(**stack_params)
+ # Make sure we don't enter an infinite loop
+ time_end = time.time() + 600
+ while time.time() < time_end:
+ try:
+ newcs = cfn.describe_change_set(ChangeSetName=cs['Id'])
+ except botocore.exceptions.BotoCoreError as err:
+ error_msg = boto_exception(err)
+ module.fail_json(msg=error_msg)
+ if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
+ time.sleep(1)
+ elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']:
+ cfn.delete_change_set(ChangeSetName=cs['Id'])
+ result = dict(changed=False,
+ output='The created Change Set did not contain any changes to this stack and was deleted.')
+ # a failed change set does not trigger any stack events so we just want to
+ # skip any further processing of result and just return it directly
+ return result
+ else:
+ break
+ # Let's not hog the CPU or spam the AWS API
+ time.sleep(1)
+ result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit)
+ result['change_set_id'] = cs['Id']
+ result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
+ 'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
+ 'NOTE that dependencies on this stack might fail due to pending changes!']
+ except Exception as err:
+ error_msg = boto_exception(err)
+ if 'No updates are to be performed.' in error_msg:
+ result = dict(changed=False, output='Stack is already up-to-date.')
+ else:
+ module.fail_json(msg="Failed to create change set: {0}".format(error_msg), exception=traceback.format_exc())
+
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def update_stack(module, stack_params, cfn, events_limit):
+ if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
+ stack_params['UsePreviousTemplate'] = True
+
+ # if the state is present and the stack already exists, we try to update it.
+ # AWS will tell us if the stack template and parameters are the same and
+ # don't need to be updated.
+ try:
+ cfn.update_stack(**stack_params)
+ result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None))
+ except Exception as err:
+ error_msg = boto_exception(err)
+ if 'No updates are to be performed.' in error_msg:
+ result = dict(changed=False, output='Stack is already up-to-date.')
+ else:
+ module.fail_json(msg="Failed to update stack {0}: {1}".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
+ if not result:
+ module.fail_json(msg="empty result")
+ return result
+
+
+def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state):
+ '''updates termination protection of a stack'''
+ if not boto_supports_termination_protection(cfn):
+ module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
+ stack = get_stack_facts(cfn, stack_name)
+ if stack:
+ if stack['EnableTerminationProtection'] is not desired_termination_protection_state:
+ try:
+ cfn.update_termination_protection(
+ EnableTerminationProtection=desired_termination_protection_state,
+ StackName=stack_name)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json(msg=boto_exception(e), exception=traceback.format_exc())
+
+
+def boto_supports_termination_protection(cfn):
+ '''termination protection was added in botocore 1.7.18'''
+ return hasattr(cfn, "update_termination_protection")
+
+
+def stack_operation(cfn, stack_name, operation, events_limit, op_token=None):
+ '''gets the status of a stack while it is created/updated/deleted'''
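+ # 'existed' records whether the stack was ever observed during polling, so a
+ # later lookup failure can be treated as a successful delete rather than an error.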
+ existed = []
+ while True:
+ try:
+ stack = get_stack_facts(cfn, stack_name)
+ existed.append('yes')
+ except Exception:
+ # If the stack previously existed, and now can't be found then it's
+ # been deleted successfully.
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ ret.update({'changed': True, 'output': 'Stack Deleted'})
+ return ret
+ else:
+ return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ if not stack:
+ if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
+ ret = get_stack_events(cfn, stack_name, events_limit, op_token)
+ ret.update({'changed': True, 'output': 'Stack Deleted'})
+ return ret
+ else:
+ ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
+ return ret
+ # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
+ # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
+ elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
+ ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
+ return ret
+ elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE':
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'})
+ return ret
+ # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases.
+ elif stack['StackStatus'].endswith('_COMPLETE'):
+ ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
+ return ret
+ elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
+ return ret
+ # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
+ elif stack['StackStatus'].endswith('_FAILED'):
+ ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
+ return ret
+ else:
+ # this can loop forever :/
+ time.sleep(5)
+ return {'failed': True, 'output': 'Failed for unknown reasons.'}
+
+
+def build_changeset_name(stack_params):
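+ '''Return a deterministic change set name for the given stack parameters.
+ An explicit ChangeSetName is returned as-is; otherwise the name is built as
+ 'Ansible-<StackName>-<sha1 of the sorted JSON-encoded parameters>', for
+ example (illustrative value) 'Ansible-mystack-3f785...'.
+ '''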
+ if 'ChangeSetName' in stack_params:
+ return stack_params['ChangeSetName']
+
+ json_params = json.dumps(stack_params, sort_keys=True)
+
+ return 'Ansible-{0}-{1}'.format(
+ stack_params['StackName'],
+ sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest()
+ )
+
+
+def check_mode_changeset(module, stack_params, cfn):
+ """Create a change set, describe it and delete it before returning check mode outputs."""
+ stack_params['ChangeSetName'] = build_changeset_name(stack_params)
+ # changesets don't accept ClientRequestToken parameters
+ stack_params.pop('ClientRequestToken', None)
+
+ try:
+ change_set = cfn.create_change_set(**stack_params)
+ for i in range(60): # total time 5 min
+ description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
+ if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
+ break
+ time.sleep(5)
+ else:
+ # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail
+ module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName'])
+
+ cfn.delete_change_set(ChangeSetName=change_set['Id'])
+
+ reason = description.get('StatusReason')
+
+ if description['Status'] == 'FAILED' and "didn't contain changes" in description['StatusReason']:
+ return {'changed': False, 'msg': reason, 'meta': description['StatusReason']}
+ return {'changed': True, 'msg': reason, 'meta': description['Changes']}
+
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+ error_msg = boto_exception(err)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def get_stack_facts(cfn, stack_name):
+ try:
+ stack_response = cfn.describe_stacks(StackName=stack_name)
+ stack_info = stack_response['Stacks'][0]
+ except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
+ error_msg = boto_exception(err)
+ if 'does not exist' in error_msg:
+ # missing stack, don't bail.
+ return None
+
+ # other error, bail.
+ raise err
+
+ if stack_response and stack_response.get('Stacks', None):
+ stacks = stack_response['Stacks']
+ if len(stacks):
+ stack_info = stacks[0]
+
+ return stack_info
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ stack_name=dict(required=True),
+ template_parameters=dict(required=False, type='dict', default={}),
+ state=dict(default='present', choices=['present', 'absent']),
+ template=dict(default=None, required=False, type='path'),
+ notification_arns=dict(default=None, required=False),
+ stack_policy=dict(default=None, required=False),
+ disable_rollback=dict(default=False, type='bool'),
+ on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
+ create_timeout=dict(default=None, type='int'),
+ template_url=dict(default=None, required=False),
+ template_body=dict(default=None, required=False),
+ template_format=dict(removed_in_version='2.14'),
+ create_changeset=dict(default=False, type='bool'),
+ changeset_name=dict(default=None, required=False),
+ role_arn=dict(default=None, required=False),
+ tags=dict(default=None, type='dict'),
+ termination_protection=dict(default=None, type='bool'),
+ events_limit=dict(default=200, type='int'),
+ backoff_retries=dict(type='int', default=10, required=False),
+ backoff_delay=dict(type='int', default=3, required=False),
+ backoff_max_delay=dict(type='int', default=30, required=False),
+ capabilities=dict(type='list', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['template_url', 'template', 'template_body'],
+ ['disable_rollback', 'on_create_failure']],
+ supports_check_mode=True
+ )
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 and botocore are required for this module')
+
+ invalid_capabilities = []
+ user_capabilities = module.params.get('capabilities')
+ for user_cap in user_capabilities:
+ if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
+ invalid_capabilities.append(user_cap)
+
+ if invalid_capabilities:
+ module.fail_json(msg="Specified capabilities are invalid : %r,"
+ " please check documentation for valid capabilities" % invalid_capabilities)
+
+ # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
+ stack_params = {
+ 'Capabilities': user_capabilities,
+ 'ClientRequestToken': to_native(uuid.uuid4()),
+ }
+ state = module.params['state']
+ stack_params['StackName'] = module.params['stack_name']
+
+ if module.params['template'] is not None:
+ with open(module.params['template'], 'r') as template_fh:
+ stack_params['TemplateBody'] = template_fh.read()
+ elif module.params['template_body'] is not None:
+ stack_params['TemplateBody'] = module.params['template_body']
+ elif module.params['template_url'] is not None:
+ stack_params['TemplateURL'] = module.params['template_url']
+
+ if module.params.get('notification_arns'):
+ stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
+ else:
+ stack_params['NotificationARNs'] = []
+
+ # can't check the policy when verifying.
+ if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
+ with open(module.params['stack_policy'], 'r') as stack_policy_fh:
+ stack_params['StackPolicyBody'] = stack_policy_fh.read()
+
+ template_parameters = module.params['template_parameters']
+
+ stack_params['Parameters'] = []
+ for k, v in template_parameters.items():
+ if isinstance(v, dict):
+ # set parameter based on a dict to allow additional CFN Parameter Attributes
+ param = dict(ParameterKey=k)
+
+ if 'value' in v:
+ param['ParameterValue'] = str(v['value'])
+
+ if 'use_previous_value' in v and bool(v['use_previous_value']):
+ param['UsePreviousValue'] = True
+ param.pop('ParameterValue', None)
+
+ stack_params['Parameters'].append(param)
+ else:
+ # allow default k/v configuration to set a template parameter
+ stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
+
+ if isinstance(module.params.get('tags'), dict):
+ stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
+
+ if module.params.get('role_arn'):
+ stack_params['RoleARN'] = module.params['role_arn']
+
+ result = {}
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ cfn = boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except botocore.exceptions.NoCredentialsError as e:
+ module.fail_json(msg=boto_exception(e))
+
+ # Wrap the cloudformation client methods that this module uses with
+ # automatic backoff / retry for throttling error codes
+ backoff_wrapper = AWSRetry.jittered_backoff(
+ retries=module.params.get('backoff_retries'),
+ delay=module.params.get('backoff_delay'),
+ max_delay=module.params.get('backoff_max_delay')
+ )
+ cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
+ cfn.create_stack = backoff_wrapper(cfn.create_stack)
+ cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
+ cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
+ cfn.update_stack = backoff_wrapper(cfn.update_stack)
+ cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
+ cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
+ cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
+ if boto_supports_termination_protection(cfn):
+ cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)
+
+ stack_info = get_stack_facts(cfn, stack_params['StackName'])
+
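+ # In check mode, report what would happen without touching the stack; for an
+ # existing stack with state=present, check_mode_changeset() creates (and then
+ # deletes) a temporary change set to compute the would-be changes.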
+ if module.check_mode:
+ if state == 'absent' and stack_info:
+ module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
+ elif state == 'absent' and not stack_info:
+ module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
+ elif state == 'present' and not stack_info:
+ module.exit_json(changed=True, msg='New stack would be created', meta=[])
+ else:
+ module.exit_json(**check_mode_changeset(module, stack_params, cfn))
+
+ if state == 'present':
+ if not stack_info:
+ result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
+ elif module.params.get('create_changeset'):
+ result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
+ else:
+ if module.params.get('termination_protection') is not None:
+ update_termination_protection(module, cfn, stack_params['StackName'],
+ bool(module.params.get('termination_protection')))
+ result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))
+
+ # format the stack output
+
+ stack = get_stack_facts(cfn, stack_params['StackName'])
+ if stack is not None:
+ if result.get('stack_outputs') is None:
+ # always define stack_outputs, but it may be empty
+ result['stack_outputs'] = {}
+ for output in stack.get('Outputs', []):
+ result['stack_outputs'][output['OutputKey']] = output['OutputValue']
+ stack_resources = []
+ reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
+ for res in reslist.get('StackResourceSummaries', []):
+ stack_resources.append({
+ "logical_resource_id": res['LogicalResourceId'],
+ "physical_resource_id": res.get('PhysicalResourceId', ''),
+ "resource_type": res['ResourceType'],
+ "last_updated_time": res['LastUpdatedTimestamp'],
+ "status": res['ResourceStatus'],
+ "status_reason": res.get('ResourceStatusReason') # can be blank, apparently
+ })
+ result['stack_resources'] = stack_resources
+
+ elif state == 'absent':
+ # absent state is different because of the way delete_stack works.
+ # the problem is it doesn't give an error if the stack isn't found,
+ # so we must describe the stack first
+
+ try:
+ stack = get_stack_facts(cfn, stack_params['StackName'])
+ if not stack:
+ result = {'changed': False, 'output': 'Stack not found.'}
+ else:
+ if stack_params.get('RoleARN') is None:
+ cfn.delete_stack(StackName=stack_params['StackName'])
+ else:
+ cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
+ result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
+ stack_params.get('ClientRequestToken', None))
+ except Exception as err:
+ module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/cloudformation_info.py b/test/support/integration/plugins/modules/cloudformation_info.py
new file mode 100644
index 0000000000..f62b80235d
--- /dev/null
+++ b/test/support/integration/plugins/modules/cloudformation_info.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cloudformation_info
+short_description: Obtain information about an AWS CloudFormation stack
+description:
+ - Gets information about an AWS CloudFormation stack.
+ - This module was called C(cloudformation_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(cloudformation_info) module no longer returns C(ansible_facts)!
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+version_added: "2.2"
+author:
+ - Justin Menga (@jmenga)
+ - Kevin Coming (@waffie1)
+options:
+ stack_name:
+ description:
+ - The name or id of the CloudFormation stack. Gathers information on all stacks by default.
+ type: str
+ all_facts:
+ description:
+ - Get all stack information for the stack.
+ type: bool
+ default: false
+ stack_events:
+ description:
+ - Get stack events for the stack.
+ type: bool
+ default: false
+ stack_template:
+ description:
+ - Get stack template body for the stack.
+ type: bool
+ default: false
+ stack_resources:
+ description:
+ - Get stack resources for the stack.
+ type: bool
+ default: false
+ stack_policy:
+ description:
+ - Get stack policy for the stack.
+ type: bool
+ default: false
+ stack_change_sets:
+ description:
+ - Get stack change sets for the stack.
+ type: bool
+ default: false
+ version_added: '2.10'
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Get summary information about a stack
+- cloudformation_info:
+ stack_name: my-cloudformation-stack
+ register: output
+
+- debug:
+ msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}"
+
+# When the module is called as cloudformation_facts, return values are published
+# in ansible_facts['cloudformation'][<stack_name>] and can be used as follows.
+# Note that this is deprecated and will stop working in Ansible 2.13.
+
+- cloudformation_facts:
+ stack_name: my-cloudformation-stack
+
+- debug:
+ msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}"
+
+# Get stack outputs, when you have the stack name available as a fact
+- set_fact:
+ stack_name: my-awesome-stack
+
+- cloudformation_info:
+ stack_name: "{{ stack_name }}"
+ register: my_stack
+
+- debug:
+ msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}"
+
+# Get all stack information about a stack
+- cloudformation_info:
+ stack_name: my-cloudformation-stack
+ all_facts: true
+
+# Get stack resource and stack policy information about a stack
+- cloudformation_info:
+ stack_name: my-cloudformation-stack
+ stack_resources: true
+ stack_policy: true
+
+# Fail if the stack doesn't exist
+- name: try to get facts about a stack but fail if it doesn't exist
+ cloudformation_info:
+ stack_name: nonexistent-stack
+ all_facts: yes
+ failed_when: cloudformation['nonexistent-stack'] is undefined
+'''
+
+RETURN = '''
+stack_description:
+ description: Summary facts about the stack
+ returned: if the stack exists
+ type: dict
+stack_outputs:
+ description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each
+ output 'OutputValue' parameter
+ returned: if the stack exists
+ type: dict
+ sample:
+ ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com
+stack_parameters:
+ description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of
+ each parameter 'ParameterValue' parameter
+ returned: if the stack exists
+ type: dict
+ sample:
+ DatabaseEngine: mysql
+ DatabasePassword: "***"
+stack_events:
+ description: All stack events for the stack
+ returned: only if all_facts or stack_events is true and the stack exists
+ type: list
+stack_policy:
+ description: Describes the stack policy for the stack
+ returned: only if all_facts or stack_policy is true and the stack exists
+ type: dict
+stack_template:
+ description: Describes the stack template for the stack
+ returned: only if all_facts or stack_template is true and the stack exists
+ type: dict
+stack_resource_list:
+ description: Describes stack resources for the stack
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: list
+stack_resources:
+ description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each
+ resource 'PhysicalResourceId' parameter
+ returned: only if all_facts or stack_resources is true and the stack exists
+ type: dict
+ sample:
+ AutoScalingGroup: "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7"
+ AutoScalingSecurityGroup: "sg-abcd1234"
+ ApplicationDatabase: "dazvlpr01xj55a"
+stack_change_sets:
+ description: A list of stack change sets. Each item in the list represents the details of a specific changeset.
+ returned: only if all_facts or stack_change_sets is true and the stack exists
+ type: list
+'''
+
+import json
+import traceback
+
+from functools import partial
+from ansible.module_utils._text import to_native
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry, boto3_tag_list_to_ansible_dict)
+
+try:
+ import botocore
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+class CloudFormationServiceManager:
+ """Handles CloudFormation Services"""
+
+ def __init__(self, module):
+ self.module = module
+ self.client = module.client('cloudformation')
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stacks_with_backoff(self, **kwargs):
+ paginator = self.client.get_paginator('describe_stacks')
+ return paginator.paginate(**kwargs).build_full_result()['Stacks']
+
+ def describe_stacks(self, stack_name=None):
+ try:
+ kwargs = {'StackName': stack_name} if stack_name else {}
+ response = self.describe_stacks_with_backoff(**kwargs)
+ if response is not None:
+ return response
+ self.module.fail_json(msg="Error describing stack(s) - an empty response was returned")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ if 'does not exist' in e.response['Error']['Message']:
+ # missing stack, don't bail.
+ return {}
+ self.module.fail_json_aws(e, msg="Error describing stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def list_stack_resources_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('list_stack_resources')
+ return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries']
+
+ def list_stack_resources(self, stack_name):
+ try:
+ return self.list_stack_resources_with_backoff(stack_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stack_events_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('describe_stack_events')
+ return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents']
+
+ def describe_stack_events(self, stack_name):
+ try:
+ return self.describe_stack_events_with_backoff(stack_name)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def list_stack_change_sets_with_backoff(self, stack_name):
+ paginator = self.client.get_paginator('list_change_sets')
+ return paginator.paginate(StackName=stack_name).build_full_result()['Summaries']
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def describe_stack_change_set_with_backoff(self, **kwargs):
+ paginator = self.client.get_paginator('describe_change_set')
+ return paginator.paginate(**kwargs).build_full_result()
+
+ def describe_stack_change_sets(self, stack_name):
+ changes = []
+ try:
+ change_sets = self.list_stack_change_sets_with_backoff(stack_name)
+ for item in change_sets:
+ changes.append(self.describe_stack_change_set_with_backoff(
+ StackName=stack_name,
+ ChangeSetName=item['ChangeSetName']))
+ return changes
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def get_stack_policy_with_backoff(self, stack_name):
+ return self.client.get_stack_policy(StackName=stack_name)
+
+ def get_stack_policy(self, stack_name):
+ try:
+ response = self.get_stack_policy_with_backoff(stack_name)
+ stack_policy = response.get('StackPolicyBody')
+ if stack_policy:
+ return json.loads(stack_policy)
+ return dict()
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name)
+
+ @AWSRetry.exponential_backoff(retries=5, delay=5)
+ def get_template_with_backoff(self, stack_name):
+ return self.client.get_template(StackName=stack_name)
+
+ def get_template(self, stack_name):
+ try:
+ response = self.get_template_with_backoff(stack_name)
+ return response.get('TemplateBody')
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name)
+
+
+def to_dict(items, key, value):
+ ''' Transforms a list of items to a Key/Value dictionary '''
+ if items:
+ return dict(zip([i.get(key) for i in items], [i.get(value) for i in items]))
+ else:
+ return dict()
+
+
+def main():
+ argument_spec = dict(
+ stack_name=dict(),
+ all_facts=dict(required=False, default=False, type='bool'),
+ stack_policy=dict(required=False, default=False, type='bool'),
+ stack_events=dict(required=False, default=False, type='bool'),
+ stack_resources=dict(required=False, default=False, type='bool'),
+ stack_template=dict(required=False, default=False, type='bool'),
+ stack_change_sets=dict(required=False, default=False, type='bool'),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ is_old_facts = module._name == 'cloudformation_facts'
+ if is_old_facts:
+ module.deprecate("The 'cloudformation_facts' module has been renamed to 'cloudformation_info', "
+ "and the renamed one no longer returns ansible_facts", version='2.13')
+
+ service_mgr = CloudFormationServiceManager(module)
+
+ if is_old_facts:
+ result = {'ansible_facts': {'cloudformation': {}}}
+ else:
+ result = {'cloudformation': {}}
+
+ for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')):
+ facts = {'stack_description': stack_description}
+ stack_name = stack_description.get('StackName')
+
+ # Create stack output and stack parameter dictionaries
+ if facts['stack_description']:
+ facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
+ facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'),
+ 'ParameterKey', 'ParameterValue')
+ facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags'))
+
+ # Create optional stack outputs
+ all_facts = module.params.get('all_facts')
+ if all_facts or module.params.get('stack_resources'):
+ facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
+ facts['stack_resources'] = to_dict(facts.get('stack_resource_list'),
+ 'LogicalResourceId', 'PhysicalResourceId')
+ if all_facts or module.params.get('stack_template'):
+ facts['stack_template'] = service_mgr.get_template(stack_name)
+ if all_facts or module.params.get('stack_policy'):
+ facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
+ if all_facts or module.params.get('stack_events'):
+ facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
+ if all_facts or module.params.get('stack_change_sets'):
+ facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name)
+
+ if is_old_facts:
+ result['ansible_facts']['cloudformation'][stack_name] = facts
+ else:
+ result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs',
+ 'stack_parameters',
+ 'stack_policy',
+ 'stack_resources',
+ 'stack_tags',
+ 'stack_template'))
+
+ module.exit_json(changed=False, **result)
+
+
+if __name__ == '__main__':
+ main()
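
The to_dict() helper above is what builds the stack_outputs, stack_parameters and stack_resources dictionaries documented in RETURN. A minimal sketch of that transformation, using invented output values:

    outputs = [
        {'OutputKey': 'ApplicationDatabaseName', 'OutputValue': 'dazvlpr01xj55a.example.com'},
        {'OutputKey': 'VpcId', 'OutputValue': 'vpc-abcd1234'},
    ]
    # Equivalent to to_dict(outputs, 'OutputKey', 'OutputValue')
    stack_outputs = dict(zip([o.get('OutputKey') for o in outputs],
                             [o.get('OutputValue') for o in outputs]))
    # {'ApplicationDatabaseName': 'dazvlpr01xj55a.example.com', 'VpcId': 'vpc-abcd1234'}
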
diff --git a/test/support/integration/plugins/modules/cs_role.py b/test/support/integration/plugins/modules/cs_role.py
new file mode 100644
index 0000000000..6db295bd81
--- /dev/null
+++ b/test/support/integration/plugins/modules/cs_role.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: cs_role
+short_description: Manages user roles on Apache CloudStack based clouds.
+description:
+ - Create, update and delete user roles.
+version_added: '2.3'
+author: René Moser (@resmo)
+options:
+ name:
+ description:
+ - Name of the role.
+ type: str
+ required: true
+ uuid:
+ description:
+ - ID of the role.
+ - If provided, I(uuid) is used as key.
+ type: str
+ aliases: [ id ]
+ role_type:
+ description:
+ - Type of the role.
+ - Only considered for creation.
+ type: str
+ default: User
+ choices: [ User, DomainAdmin, ResourceAdmin, Admin ]
+ description:
+ description:
+ - Description of the role.
+ type: str
+ state:
+ description:
+ - State of the role.
+ type: str
+ default: present
+ choices: [ present, absent ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+- name: Ensure a user role is present
+ cs_role:
+ name: myrole_user
+ delegate_to: localhost
+
+- name: Ensure a role with a particular ID is named myrole_user
+ cs_role:
+ name: myrole_user
+ id: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+ delegate_to: localhost
+
+- name: Ensure a role is absent
+ cs_role:
+ name: myrole_user
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the role.
+ returned: success
+ type: str
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the role.
+ returned: success
+ type: str
+ sample: myrole
+description:
+ description: Description of the role.
+ returned: success
+ type: str
+ sample: "This is my role description"
+role_type:
+ description: Type of the role.
+ returned: success
+ type: str
+ sample: User
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.cloudstack import (
+ AnsibleCloudStack,
+ cs_argument_spec,
+ cs_required_together,
+)
+
+
+class AnsibleCloudStackRole(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackRole, self).__init__(module)
+ self.returns = {
+ 'type': 'role_type',
+ }
+
+ def get_role(self):
+ uuid = self.module.params.get('uuid')
+ if uuid:
+ args = {
+ 'id': uuid,
+ }
+ roles = self.query_api('listRoles', **args)
+ if roles:
+ return roles['role'][0]
+ else:
+ args = {
+ 'name': self.module.params.get('name'),
+ }
+ roles = self.query_api('listRoles', **args)
+ if roles:
+ return roles['role'][0]
+ return None
+
+ def present_role(self):
+ role = self.get_role()
+ if role:
+ role = self._update_role(role)
+ else:
+ role = self._create_role(role)
+ return role
+
+ def _create_role(self, role):
+ self.result['changed'] = True
+ args = {
+ 'name': self.module.params.get('name'),
+ 'type': self.module.params.get('role_type'),
+ 'description': self.module.params.get('description'),
+ }
+ if not self.module.check_mode:
+ res = self.query_api('createRole', **args)
+ role = res['role']
+ return role
+
+ def _update_role(self, role):
+ args = {
+ 'id': role['id'],
+ 'name': self.module.params.get('name'),
+ 'description': self.module.params.get('description'),
+ }
+ if self.has_changed(args, role):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.query_api('updateRole', **args)
+
+ # The API as of 4.9 does not return the updated role yet
+ if 'role' not in res:
+ role = self.get_role()
+ else:
+ role = res['role']
+ return role
+
+ def absent_role(self):
+ role = self.get_role()
+ if role:
+ self.result['changed'] = True
+ args = {
+ 'id': role['id'],
+ }
+ if not self.module.check_mode:
+ self.query_api('deleteRole', **args)
+ return role
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ uuid=dict(aliases=['id']),
+ name=dict(required=True),
+ description=dict(),
+ role_type=dict(choices=['User', 'DomainAdmin', 'ResourceAdmin', 'Admin'], default='User'),
+ state=dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ acs_role = AnsibleCloudStackRole(module)
+ state = module.params.get('state')
+ if state == 'absent':
+ role = acs_role.absent_role()
+ else:
+ role = acs_role.present_role()
+
+ result = acs_role.get_result(role)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
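
cs_role follows the usual CloudStack idempotency pattern: look the role up (by uuid first, falling back to name), mark the result changed only when something would actually differ, and skip the API call in check mode. A condensed standalone sketch of that flow, with ensure_role and query_api as invented stand-ins for the module plumbing shown above:

    def ensure_role(params, existing, check_mode, query_api):
        # Sketch only; mirrors present_role()/absent_role() above.
        changed = False
        if params['state'] == 'absent':
            if existing:
                changed = True
                if not check_mode:
                    query_api('deleteRole', id=existing['id'])
            return existing, changed
        if not existing:
            changed = True
            if not check_mode:
                existing = query_api('createRole', name=params['name'],
                                     type=params['role_type'])['role']
        return existing, changed
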
diff --git a/test/support/integration/plugins/modules/cs_role_permission.py b/test/support/integration/plugins/modules/cs_role_permission.py
new file mode 100644
index 0000000000..30392b2f87
--- /dev/null
+++ b/test/support/integration/plugins/modules/cs_role_permission.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017, David Passante (@dpassante)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cs_role_permission
+short_description: Manages role permissions on Apache CloudStack based clouds.
+description:
+ - Create, update and remove CloudStack role permissions.
+ - Managing role permissions is only supported in CloudStack >= 4.9.
+version_added: '2.6'
+author: David Passante (@dpassante)
+options:
+ name:
+ description:
+ - The API name of the permission.
+ type: str
+ required: true
+ role:
+ description:
+ - Name or ID of the role.
+ type: str
+ required: true
+ permission:
+ description:
+ - The rule permission, allow or deny. Defaults to deny.
+ type: str
+ choices: [ allow, deny ]
+ default: deny
+ state:
+ description:
+ - State of the role permission.
+ type: str
+ choices: [ present, absent ]
+ default: present
+ description:
+ description:
+ - The description of the role permission.
+ type: str
+ parent:
+ description:
+ - The parent role permission UUID. Use 0 to move this rule to the top of the list.
+ type: str
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+- name: Create a role permission
+ cs_role_permission:
+ role: My_Custom_role
+ name: createVPC
+ permission: allow
+ description: My comments
+ delegate_to: localhost
+
+- name: Remove a role permission
+ cs_role_permission:
+ state: absent
+ role: My_Custom_role
+ name: createVPC
+ delegate_to: localhost
+
+- name: Update a system role permission
+ cs_role_permission:
+ role: Domain Admin
+ name: createVPC
+ permission: deny
+ delegate_to: localhost
+
+- name: Update rule order. Move the rule to the top of the list
+ cs_role_permission:
+ role: Domain Admin
+ name: createVPC
+ parent: 0
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+ description: The ID of the role permission.
+ returned: success
+ type: str
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: The API name of the permission.
+ returned: success
+ type: str
+ sample: createVPC
+permission:
+ description: The permission type of the api name.
+ returned: success
+ type: str
+ sample: allow
+role_id:
+ description: The ID of the role to which the role permission belongs.
+ returned: success
+ type: str
+ sample: c6f7a5fc-43f8-11e5-a151-feff819cdc7f
+description:
+ description: The description of the role permission.
+ returned: success
+ type: str
+ sample: Deny createVPC for users
+'''
+
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.cloudstack import (
+ AnsibleCloudStack,
+ cs_argument_spec,
+ cs_required_together,
+)
+
+
+class AnsibleCloudStackRolePermission(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackRolePermission, self).__init__(module)
+ cloudstack_min_version = LooseVersion('4.9.2')
+
+ self.returns = {
+ 'id': 'id',
+ 'roleid': 'role_id',
+ 'rule': 'name',
+ 'permission': 'permission',
+ 'description': 'description',
+ }
+ self.role_permission = None
+
+ self.cloudstack_version = self._cloudstack_ver()
+
+ if self.cloudstack_version < cloudstack_min_version:
+ self.fail_json(msg="This module requires CloudStack >= %s." % cloudstack_min_version)
+
+ def _cloudstack_ver(self):
+ capabilities = self.get_capabilities()
+ return LooseVersion(capabilities['cloudstackversion'])
+
+ def _get_role_id(self):
+ role = self.module.params.get('role')
+ if not role:
+ return None
+
+ res = self.query_api('listRoles')
+ roles = res['role']
+ if roles:
+ for r in roles:
+ if role in [r['name'], r['id']]:
+ return r['id']
+ self.fail_json(msg="Role '%s' not found" % role)
+
+ def _get_role_perm(self):
+ role_permission = self.role_permission
+
+ args = {
+ 'roleid': self._get_role_id(),
+ }
+
+ rp = self.query_api('listRolePermissions', **args)
+
+ if rp:
+ role_permission = rp['rolepermission']
+
+ return role_permission
+
+ def _get_rule(self, rule=None):
+ if not rule:
+ rule = self.module.params.get('name')
+
+ if self._get_role_perm():
+ for _rule in self._get_role_perm():
+ if rule == _rule['rule'] or rule == _rule['id']:
+ return _rule
+
+ return None
+
+ def _get_rule_order(self):
+ perms = self._get_role_perm()
+ rules = []
+
+ if perms:
+ for i, rule in enumerate(perms):
+ rules.append(rule['id'])
+
+ return rules
+
+ def replace_rule(self):
+ old_rule = self._get_rule()
+
+ if old_rule:
+ rules_order = self._get_rule_order()
+ old_pos = rules_order.index(old_rule['id'])
+
+ self.remove_role_perm()
+
+ new_rule = self.create_role_perm()
+
+ if new_rule:
+ perm_order = self.order_permissions(int(old_pos - 1), new_rule['id'])
+
+ return perm_order
+
+ return None
+
+ def order_permissions(self, parent, rule_id):
+ rules = self._get_rule_order()
+
+ if isinstance(parent, int):
+ parent_pos = parent
+ elif parent == '0':
+ parent_pos = -1
+ else:
+ parent_rule = self._get_rule(parent)
+ if not parent_rule:
+ self.fail_json(msg="Parent rule '%s' not found" % parent)
+
+ parent_pos = rules.index(parent_rule['id'])
+
+ r_id = rules.pop(rules.index(rule_id))
+
+ rules.insert((parent_pos + 1), r_id)
+ rules = ','.join(map(str, rules))
+
+ return rules
+
+ def create_or_update_role_perm(self):
+ role_permission = self._get_rule()
+
+ if not role_permission:
+ role_permission = self.create_role_perm()
+ else:
+ role_permission = self.update_role_perm(role_permission)
+
+ return role_permission
+
+ def create_role_perm(self):
+ role_permission = None
+
+ self.result['changed'] = True
+
+ args = {
+ 'rule': self.module.params.get('name'),
+ 'description': self.module.params.get('description'),
+ 'roleid': self._get_role_id(),
+ 'permission': self.module.params.get('permission'),
+ }
+
+ if not self.module.check_mode:
+ res = self.query_api('createRolePermission', **args)
+ role_permission = res['rolepermission']
+
+ return role_permission
+
+ def update_role_perm(self, role_perm):
+ perm_order = None
+
+ if not self.module.params.get('parent'):
+ args = {
+ 'ruleid': role_perm['id'],
+ 'roleid': role_perm['roleid'],
+ 'permission': self.module.params.get('permission'),
+ }
+
+ if self.has_changed(args, role_perm, only_keys=['permission']):
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ if self.cloudstack_version >= LooseVersion('4.11.0'):
+ self.query_api('updateRolePermission', **args)
+ role_perm = self._get_rule()
+ else:
+ perm_order = self.replace_rule()
+ else:
+ perm_order = self.order_permissions(self.module.params.get('parent'), role_perm['id'])
+
+ if perm_order:
+ args = {
+ 'roleid': role_perm['roleid'],
+ 'ruleorder': perm_order,
+ }
+
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ self.query_api('updateRolePermission', **args)
+ role_perm = self._get_rule()
+
+ return role_perm
+
+ def remove_role_perm(self):
+ role_permission = self._get_rule()
+
+ if role_permission:
+ self.result['changed'] = True
+
+ args = {
+ 'id': role_permission['id'],
+ }
+
+ if not self.module.check_mode:
+ self.query_api('deleteRolePermission', **args)
+
+ return role_permission
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ role=dict(required=True),
+ name=dict(required=True),
+ permission=dict(choices=['allow', 'deny'], default='deny'),
+ description=dict(),
+ state=dict(choices=['present', 'absent'], default='present'),
+ parent=dict(),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ mutually_exclusive=(
+ ['permission', 'parent'],
+ ),
+ supports_check_mode=True
+ )
+
+ acs_role_perm = AnsibleCloudStackRolePermission(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ role_permission = acs_role_perm.remove_role_perm()
+ else:
+ role_permission = acs_role_perm.create_or_update_role_perm()
+
+ result = acs_role_perm.get_result(role_permission)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
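
The reordering done by order_permissions() is easiest to follow with concrete data. A small worked sketch with invented rule IDs (a parent of '0' would yield parent_pos = -1, i.e. the top of the list):

    rules = ['a', 'b', 'c', 'd']   # current order from listRolePermissions
    rule_id, parent_pos = 'd', 1   # move rule 'd' directly below the rule at index 1 ('b')
    rules.insert(parent_pos + 1, rules.pop(rules.index(rule_id)))
    ruleorder = ','.join(rules)    # 'a,b,d,c', sent to updateRolePermission as ruleorder
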
diff --git a/test/support/integration/plugins/modules/cs_service_offering.py b/test/support/integration/plugins/modules/cs_service_offering.py
new file mode 100644
index 0000000000..3b15fe7f1e
--- /dev/null
+++ b/test/support/integration/plugins/modules/cs_service_offering.py
@@ -0,0 +1,583 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: cs_service_offering
+description:
+ - Create and delete service offerings for guest and system VMs.
+ - Update the display_text of an existing service offering.
+short_description: Manages service offerings on Apache CloudStack based clouds.
+version_added: '2.5'
+author: René Moser (@resmo)
+options:
+ disk_bytes_read_rate:
+ description:
+ - Bytes read rate of the disk offering.
+ type: int
+ aliases: [ bytes_read_rate ]
+ disk_bytes_write_rate:
+ description:
+ - Bytes write rate of the disk offering.
+ type: int
+ aliases: [ bytes_write_rate ]
+ cpu_number:
+ description:
+ - The number of CPUs of the service offering.
+ type: int
+ cpu_speed:
+ description:
+ - The CPU speed of the service offering in MHz.
+ type: int
+ limit_cpu_usage:
+ description:
+ - Restrict the CPU usage to committed service offering.
+ type: bool
+ deployment_planner:
+ description:
+ - The deployment planner heuristics used to deploy a VM of this offering.
+ - If not set, the value of global config I(vm.deployment.planner) is used.
+ type: str
+ display_text:
+ description:
+ - Display text of the service offering.
+ - If not set, I(name) will be used as I(display_text) while creating.
+ type: str
+ domain:
+ description:
+ - Domain the service offering is related to.
+ - Public for all domains and subdomains if not set.
+ type: str
+ host_tags:
+ description:
+ - The host tags for this service offering.
+ type: list
+ aliases:
+ - host_tag
+ hypervisor_snapshot_reserve:
+ description:
+ - Hypervisor snapshot reserve space as a percent of a volume.
+ - Only for managed storage using Xen or VMware.
+ type: int
+ is_iops_customized:
+ description:
+ - Whether the compute offering IOPS are customized or not.
+ type: bool
+ aliases: [ disk_iops_customized ]
+ disk_iops_read_rate:
+ description:
+ - IO requests read rate of the disk offering.
+ type: int
+ disk_iops_write_rate:
+ description:
+ - IO requests write rate of the disk offering.
+ type: int
+ disk_iops_max:
+ description:
+ - Max. iops of the compute offering.
+ type: int
+ disk_iops_min:
+ description:
+ - Min. iops of the compute offering.
+ type: int
+ is_system:
+ description:
+ - Whether it is a system VM offering or not.
+ type: bool
+ default: no
+ is_volatile:
+ description:
+ - Whether the virtual machine needs to be volatile or not.
+ - On every reboot of the VM, the root disk is detached and destroyed, and a fresh root disk is created and attached to the VM.
+ type: bool
+ memory:
+ description:
+ - The total memory of the service offering in MB.
+ type: int
+ name:
+ description:
+ - Name of the service offering.
+ type: str
+ required: true
+ network_rate:
+ description:
+ - Data transfer rate in Mb/s allowed.
+ - Supported only for non-system offerings and system offerings having I(system_vm_type=domainrouter).
+ type: int
+ offer_ha:
+ description:
+ - Whether HA is set for the service offering.
+ type: bool
+ default: no
+ provisioning_type:
+ description:
+ - Provisioning type used to create volumes.
+ type: str
+ choices:
+ - thin
+ - sparse
+ - fat
+ service_offering_details:
+ description:
+ - Details for planner, used to store specific parameters.
+ - A list of dictionaries having keys C(key) and C(value).
+ type: list
+ state:
+ description:
+ - State of the service offering.
+ type: str
+ choices:
+ - present
+ - absent
+ default: present
+ storage_type:
+ description:
+ - The storage type of the service offering.
+ type: str
+ choices:
+ - local
+ - shared
+ system_vm_type:
+ description:
+ - The system VM type.
+ - Required if I(is_system=yes).
+ type: str
+ choices:
+ - domainrouter
+ - consoleproxy
+ - secondarystoragevm
+ storage_tags:
+ description:
+ - The storage tags for this service offering.
+ type: list
+ aliases:
+ - storage_tag
+ is_customized:
+ description:
+ - Whether the offering is customizable or not.
+ type: bool
+ version_added: '2.8'
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+- name: Create a non-volatile compute service offering with local storage
+ cs_service_offering:
+ name: Micro
+ display_text: Micro 512mb 1cpu
+ cpu_number: 1
+ cpu_speed: 2198
+ memory: 512
+ host_tags: eco
+ storage_type: local
+ delegate_to: localhost
+
+- name: Create a volatile compute service offering with shared storage
+ cs_service_offering:
+ name: Tiny
+ display_text: Tiny 1gb 1cpu
+ cpu_number: 1
+ cpu_speed: 2198
+ memory: 1024
+ storage_type: shared
+ is_volatile: yes
+ host_tags: eco
+ storage_tags: eco
+ delegate_to: localhost
+
+- name: Create or update a volatile compute service offering with shared storage
+ cs_service_offering:
+ name: Tiny
+ display_text: Tiny 1gb 1cpu
+ cpu_number: 1
+ cpu_speed: 2198
+ memory: 1024
+ storage_type: shared
+ is_volatile: yes
+ host_tags: eco
+ storage_tags: eco
+ delegate_to: localhost
+
+- name: Create or update a custom compute service offering
+ cs_service_offering:
+ name: custom
+ display_text: custom compute offer
+ is_customized: yes
+ storage_type: shared
+ host_tags: eco
+ storage_tags: eco
+ delegate_to: localhost
+
+- name: Remove a compute service offering
+ cs_service_offering:
+ name: Tiny
+ state: absent
+ delegate_to: localhost
+
+- name: Create or update a system offering for the console proxy
+ cs_service_offering:
+ name: System Offering for Console Proxy 2GB
+ display_text: System Offering for Console Proxy 2GB RAM
+ is_system: yes
+ system_vm_type: consoleproxy
+ cpu_number: 1
+ cpu_speed: 2198
+ memory: 2048
+ storage_type: shared
+ storage_tags: perf
+ delegate_to: localhost
+
+- name: Remove a system offering
+ cs_service_offering:
+ name: System Offering for Console Proxy 2GB
+ is_system: yes
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the service offering
+ returned: success
+ type: str
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+cpu_number:
+ description: Number of CPUs in the service offering
+ returned: success
+ type: int
+ sample: 4
+cpu_speed:
+ description: Speed of CPUs in MHz in the service offering
+ returned: success
+ type: int
+ sample: 2198
+disk_iops_max:
+ description: Max iops of the disk offering
+ returned: success
+ type: int
+ sample: 1000
+disk_iops_min:
+ description: Min iops of the disk offering
+ returned: success
+ type: int
+ sample: 500
+disk_bytes_read_rate:
+ description: Bytes read rate of the service offering
+ returned: success
+ type: int
+ sample: 1000
+disk_bytes_write_rate:
+ description: Bytes write rate of the service offering
+ returned: success
+ type: int
+ sample: 1000
+disk_iops_read_rate:
+ description: IO requests per second read rate of the service offering
+ returned: success
+ type: int
+ sample: 1000
+disk_iops_write_rate:
+ description: IO requests per second write rate of the service offering
+ returned: success
+ type: int
+ sample: 1000
+created:
+ description: Date the offering was created
+ returned: success
+ type: str
+ sample: 2017-11-19T10:48:59+0000
+display_text:
+ description: Display text of the offering
+ returned: success
+ type: str
+ sample: Micro 512mb 1cpu
+domain:
+ description: Domain the offering belongs to
+ returned: success
+ type: str
+ sample: ROOT
+host_tags:
+ description: List of host tags
+ returned: success
+ type: list
+ sample: [ 'eco' ]
+storage_tags:
+ description: List of storage tags
+ returned: success
+ type: list
+ sample: [ 'eco' ]
+is_system:
+ description: Whether the offering is for system VMs or not
+ returned: success
+ type: bool
+ sample: false
+is_iops_customized:
+ description: Whether the offering uses custom IOPS or not
+ returned: success
+ type: bool
+ sample: false
+is_volatile:
+ description: Whether the offering is volatile or not
+ returned: success
+ type: bool
+ sample: false
+limit_cpu_usage:
+ description: Whether the CPU usage is restricted to committed service offering
+ returned: success
+ type: bool
+ sample: false
+memory:
+ description: Memory of the system offering
+ returned: success
+ type: int
+ sample: 512
+name:
+ description: Name of the system offering
+ returned: success
+ type: str
+ sample: Micro
+offer_ha:
+ description: Whether HA support is enabled in the offering or not
+ returned: success
+ type: bool
+ sample: false
+provisioning_type:
+ description: Provisioning type used to create volumes
+ returned: success
+ type: str
+ sample: thin
+storage_type:
+ description: Storage type used to create volumes
+ returned: success
+ type: str
+ sample: shared
+system_vm_type:
+ description: System VM type of this offering
+ returned: success
+ type: str
+ sample: consoleproxy
+service_offering_details:
+ description: Additional service offering details
+ returned: success
+ type: dict
+ sample: "{'vgpuType': 'GRID K180Q','pciDevice':'Group of NVIDIA Corporation GK107GL [GRID K1] GPUs'}"
+network_rate:
+ description: Data transfer rate in megabits per second allowed
+ returned: success
+ type: int
+ sample: 1000
+is_customized:
+ description: Whether the offering is customizable or not
+ returned: success
+ type: bool
+ sample: false
+ version_added: '2.8'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.cloudstack import (
+ AnsibleCloudStack,
+ cs_argument_spec,
+ cs_required_together,
+)
+
+
+class AnsibleCloudStackServiceOffering(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackServiceOffering, self).__init__(module)
+ self.returns = {
+ 'cpunumber': 'cpu_number',
+ 'cpuspeed': 'cpu_speed',
+ 'deploymentplanner': 'deployment_planner',
+ 'diskBytesReadRate': 'disk_bytes_read_rate',
+ 'diskBytesWriteRate': 'disk_bytes_write_rate',
+ 'diskIopsReadRate': 'disk_iops_read_rate',
+ 'diskIopsWriteRate': 'disk_iops_write_rate',
+ 'maxiops': 'disk_iops_max',
+ 'miniops': 'disk_iops_min',
+ 'hypervisorsnapshotreserve': 'hypervisor_snapshot_reserve',
+ 'iscustomized': 'is_customized',
+ 'iscustomizediops': 'is_iops_customized',
+ 'issystem': 'is_system',
+ 'isvolatile': 'is_volatile',
+ 'limitcpuuse': 'limit_cpu_usage',
+ 'memory': 'memory',
+ 'networkrate': 'network_rate',
+ 'offerha': 'offer_ha',
+ 'provisioningtype': 'provisioning_type',
+ 'serviceofferingdetails': 'service_offering_details',
+ 'storagetype': 'storage_type',
+ 'systemvmtype': 'system_vm_type',
+ 'tags': 'storage_tags',
+ }
+
+ def get_service_offering(self):
+ args = {
+ 'name': self.module.params.get('name'),
+ 'domainid': self.get_domain(key='id'),
+ 'issystem': self.module.params.get('is_system'),
+ 'systemvmtype': self.module.params.get('system_vm_type'),
+ }
+ service_offerings = self.query_api('listServiceOfferings', **args)
+ if service_offerings:
+ return service_offerings['serviceoffering'][0]
+
+ def present_service_offering(self):
+ service_offering = self.get_service_offering()
+ if not service_offering:
+ service_offering = self._create_offering(service_offering)
+ else:
+ service_offering = self._update_offering(service_offering)
+
+ return service_offering
+
+ def absent_service_offering(self):
+ service_offering = self.get_service_offering()
+ if service_offering:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ args = {
+ 'id': service_offering['id'],
+ }
+ self.query_api('deleteServiceOffering', **args)
+ return service_offering
+
+ def _create_offering(self, service_offering):
+ self.result['changed'] = True
+
+ system_vm_type = self.module.params.get('system_vm_type')
+ is_system = self.module.params.get('is_system')
+
+ required_params = []
+ if is_system and not system_vm_type:
+ required_params.append('system_vm_type')
+ self.module.fail_on_missing_params(required_params=required_params)
+
+ args = {
+ 'name': self.module.params.get('name'),
+ 'displaytext': self.get_or_fallback('display_text', 'name'),
+ 'bytesreadrate': self.module.params.get('disk_bytes_read_rate'),
+ 'byteswriterate': self.module.params.get('disk_bytes_write_rate'),
+ 'cpunumber': self.module.params.get('cpu_number'),
+ 'cpuspeed': self.module.params.get('cpu_speed'),
+ 'customizediops': self.module.params.get('is_iops_customized'),
+ 'deploymentplanner': self.module.params.get('deployment_planner'),
+ 'domainid': self.get_domain(key='id'),
+ 'hosttags': self.module.params.get('host_tags'),
+ 'hypervisorsnapshotreserve': self.module.params.get('hypervisor_snapshot_reserve'),
+ 'iopsreadrate': self.module.params.get('disk_iops_read_rate'),
+ 'iopswriterate': self.module.params.get('disk_iops_write_rate'),
+ 'maxiops': self.module.params.get('disk_iops_max'),
+ 'miniops': self.module.params.get('disk_iops_min'),
+ 'issystem': is_system,
+ 'isvolatile': self.module.params.get('is_volatile'),
+ 'memory': self.module.params.get('memory'),
+ 'networkrate': self.module.params.get('network_rate'),
+ 'offerha': self.module.params.get('offer_ha'),
+ 'provisioningtype': self.module.params.get('provisioning_type'),
+ 'serviceofferingdetails': self.module.params.get('service_offering_details'),
+ 'storagetype': self.module.params.get('storage_type'),
+ 'systemvmtype': system_vm_type,
+ 'tags': self.module.params.get('storage_tags'),
+ 'limitcpuuse': self.module.params.get('limit_cpu_usage'),
+ 'customized': self.module.params.get('is_customized')
+ }
+ if not self.module.check_mode:
+ res = self.query_api('createServiceOffering', **args)
+ service_offering = res['serviceoffering']
+ return service_offering
+
+ def _update_offering(self, service_offering):
+ args = {
+ 'id': service_offering['id'],
+ 'name': self.module.params.get('name'),
+ 'displaytext': self.get_or_fallback('display_text', 'name'),
+ }
+ if self.has_changed(args, service_offering):
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ res = self.query_api('updateServiceOffering', **args)
+ service_offering = res['serviceoffering']
+ return service_offering
+
+ def get_result(self, service_offering):
+ super(AnsibleCloudStackServiceOffering, self).get_result(service_offering)
+ if service_offering:
+ if 'hosttags' in service_offering:
+ self.result['host_tags'] = service_offering['hosttags'].split(',') or [service_offering['hosttags']]
+
+ # Prevent confusion, the api returns a tags key for storage tags.
+ if 'tags' in service_offering:
+ self.result['storage_tags'] = service_offering['tags'].split(',') or [service_offering['tags']]
+ if 'tags' in self.result:
+ del self.result['tags']
+
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ display_text=dict(),
+ cpu_number=dict(type='int'),
+ cpu_speed=dict(type='int'),
+ limit_cpu_usage=dict(type='bool'),
+ deployment_planner=dict(),
+ domain=dict(),
+ host_tags=dict(type='list', aliases=['host_tag']),
+ hypervisor_snapshot_reserve=dict(type='int'),
+ disk_bytes_read_rate=dict(type='int', aliases=['bytes_read_rate']),
+ disk_bytes_write_rate=dict(type='int', aliases=['bytes_write_rate']),
+ disk_iops_read_rate=dict(type='int'),
+ disk_iops_write_rate=dict(type='int'),
+ disk_iops_max=dict(type='int'),
+ disk_iops_min=dict(type='int'),
+ is_system=dict(type='bool', default=False),
+ is_volatile=dict(type='bool'),
+ is_iops_customized=dict(type='bool', aliases=['disk_iops_customized']),
+ memory=dict(type='int'),
+ network_rate=dict(type='int'),
+ offer_ha=dict(type='bool'),
+ provisioning_type=dict(choices=['thin', 'sparse', 'fat']),
+ service_offering_details=dict(type='list'),
+ storage_type=dict(choices=['local', 'shared']),
+ system_vm_type=dict(choices=['domainrouter', 'consoleproxy', 'secondarystoragevm']),
+ storage_tags=dict(type='list', aliases=['storage_tag']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ is_customized=dict(type='bool'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ acs_so = AnsibleCloudStackServiceOffering(module)
+
+ state = module.params.get('state')
+ if state == "absent":
+ service_offering = acs_so.absent_service_offering()
+ else:
+ service_offering = acs_so.present_service_offering()
+
+ result = acs_so.get_result(service_offering)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
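
get_result() above reshapes the API's comma-separated tag strings into lists and exposes the ambiguous 'tags' key as storage_tags. Roughly, with invented sample values:

    api_offering = {'hosttags': 'eco,ssd', 'tags': 'perf'}
    host_tags = api_offering['hosttags'].split(',')   # ['eco', 'ssd']
    storage_tags = api_offering['tags'].split(',')    # ['perf'], returned as storage_tags
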
diff --git a/test/support/integration/plugins/modules/ec2.py b/test/support/integration/plugins/modules/ec2.py
new file mode 100644
index 0000000000..91503bbf8e
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2.py
@@ -0,0 +1,1766 @@
+#!/usr/bin/python
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2
+short_description: create, terminate, start or stop an instance in ec2
+description:
+ - Creates or terminates ec2 instances.
+ - >
+ Note: This module uses the older boto Python module to interact with the EC2 API.
+ M(ec2) will still receive bug fixes, but no new features.
+ Consider using the M(ec2_instance) module instead.
+ If M(ec2_instance) does not support a feature you need that is available in M(ec2), please
+ file a feature request.
+version_added: "0.9"
+options:
+ key_name:
+ description:
+ - Key pair to use on the instance.
+ - The SSH key must already exist in AWS in order to use this argument.
+ - Keys can be created / deleted using the M(ec2_key) module.
+ aliases: ['keypair']
+ type: str
+ id:
+ version_added: "1.1"
+ description:
+ - Identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances.
+ - This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on.
+ - For details, see the description of client token at U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
+ type: str
+ group:
+ description:
+ - Security group (or list of groups) to use with the instance.
+ aliases: [ 'groups' ]
+ type: list
+ elements: str
+ group_id:
+ version_added: "1.1"
+ description:
+ - Security group id (or list of ids) to use with the instance.
+ type: list
+ elements: str
+ zone:
+ version_added: "1.2"
+ description:
+ - AWS availability zone in which to launch the instance.
+ aliases: [ 'aws_zone', 'ec2_zone' ]
+ type: str
+ instance_type:
+ description:
+ - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ - Required when creating a new instance.
+ type: str
+ aliases: ['type']
+ tenancy:
+ version_added: "1.9"
+ description:
+ - An instance with a tenancy of C(dedicated) runs on single-tenant hardware and can only be launched into a VPC.
+ - Note that to use dedicated tenancy you MUST specify a I(vpc_subnet_id) as well.
+ - Dedicated tenancy is not available for EC2 "micro" instances.
+ default: default
+ choices: [ "default", "dedicated" ]
+ type: str
+ spot_price:
+ version_added: "1.5"
+ description:
+ - Maximum spot price to bid. If not set, a regular on-demand instance is requested.
+ - A spot request is made with this maximum bid. When it is filled, the instance is started.
+ type: str
+ spot_type:
+ version_added: "2.0"
+ description:
+ - The type of spot request.
+ - After being interrupted a C(persistent) spot instance will be started once there is capacity to fill the request again.
+ default: "one-time"
+ choices: [ "one-time", "persistent" ]
+ type: str
+ image:
+ description:
+ - I(ami) ID to use for the instance.
+ - Required when I(state=present).
+ type: str
+ kernel:
+ description:
+ - Kernel eki to use for the instance.
+ type: str
+ ramdisk:
+ description:
+ - Ramdisk eri to use for the instance.
+ type: str
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ - Does not wait for SSH, see the 'wait_for_connection' example for details.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 300
+ type: int
+ spot_wait_timeout:
+ version_added: "1.5"
+ description:
+ - How long to wait for the spot instance request to be fulfilled. Affects 'Request valid until' for setting spot request lifespan.
+ default: 600
+ type: int
+ count:
+ description:
+ - Number of instances to launch.
+ default: 1
+ type: int
+ monitoring:
+ version_added: "1.1"
+ description:
+ - Enable detailed monitoring (CloudWatch) for instance.
+ type: bool
+ default: false
+ user_data:
+ version_added: "0.9"
+ description:
+ - Opaque blob of data which is made available to the EC2 instance.
+ type: str
+ instance_tags:
+ version_added: "1.0"
+ description:
+ - A hash/dictionary of tags to add to the new instance or for starting/stopping instances by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'.
+ type: dict
+ placement_group:
+ version_added: "1.3"
+ description:
+ - Placement group for the instance when using EC2 Clustered Compute.
+ type: str
+ vpc_subnet_id:
+ version_added: "1.1"
+ description:
+ - The subnet ID in which to launch the instance (VPC).
+ type: str
+ assign_public_ip:
+ version_added: "1.5"
+ description:
+ - When provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+.
+ type: bool
+ private_ip:
+ version_added: "1.2"
+ description:
+ - The private ip address to assign the instance (from the vpc subnet).
+ type: str
+ instance_profile_name:
+ version_added: "1.3"
+ description:
+ - Name of the IAM instance profile (i.e. what the EC2 console refers to as an "IAM Role") to use. Boto library must be 2.5.0+.
+ type: str
+ instance_ids:
+ version_added: "1.3"
+ description:
+ - "list of instance ids, currently used for states: absent, running, stopped"
+ aliases: ['instance_id']
+ type: list
+ elements: str
+ source_dest_check:
+ version_added: "1.6"
+ description:
+ - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers).
+ When initially creating an instance the EC2 API defaults this to C(True).
+ type: bool
+ termination_protection:
+ version_added: "2.0"
+ description:
+ - Enable or Disable the Termination Protection.
+ type: bool
+ default: false
+ instance_initiated_shutdown_behavior:
+ version_added: "2.2"
+ description:
+ - Set whether AWS will Stop or Terminate an instance on shutdown. This parameter is ignored when using instance-store
+ images (which require termination on shutdown).
+ default: 'stop'
+ choices: [ "stop", "terminate" ]
+ type: str
+ state:
+ version_added: "1.3"
+ description:
+ - Create, terminate, start, stop or restart instances. The state 'restarted' was added in Ansible 2.2.
+ - When I(state=absent), I(instance_ids) is required.
+ - When I(state=running), I(state=stopped) or I(state=restarted) then either I(instance_ids) or I(instance_tags) is required.
+ default: 'present'
+ choices: ['absent', 'present', 'restarted', 'running', 'stopped']
+ type: str
+ volumes:
+ version_added: "1.5"
+ description:
+ - A list of hash/dictionaries of volumes to add to the new instance.
+ type: list
+ elements: dict
+ suboptions:
+ device_name:
+ type: str
+ required: true
+ description:
+ - A name for the device (For example C(/dev/sda)).
+ delete_on_termination:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be automatically deleted when the instance is terminated.
+ ephemeral:
+ type: str
+ description:
+ - Whether the volume should be ephemeral.
+ - Data on ephemeral volumes is lost when the instance is stopped.
+ - Mutually exclusive with the I(snapshot) parameter.
+ encrypted:
+ type: bool
+ default: false
+ description:
+ - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK.
+ snapshot:
+ type: str
+ description:
+ - The ID of an EBS snapshot to copy when creating the volume.
+ - Mutually exclusive with the I(ephemeral) parameter.
+ volume_type:
+ type: str
+ description:
+ - The type of volume to create.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types.
+ volume_size:
+ type: int
+ description:
+ - The size of the volume (in GiB).
+ iops:
+ type: int
+ description:
+ - The number of IOPS per second to provision for the volume.
+ - Required when I(volume_type=io1).
+ ebs_optimized:
+ version_added: "1.6"
+ description:
+ - Whether instance is using optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+ default: false
+ type: bool
+ exact_count:
+ version_added: "1.5"
+ description:
+ - An integer value which indicates how many instances matching the 'count_tag' parameter should be running.
+ Instances are either created or terminated based on this value.
+ type: int
+ count_tag:
+ version_added: "1.5"
+ description:
+ - Used with I(exact_count) to determine how many nodes based on a specific tag criteria should be running.
+ This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers
+ that are tagged with "class=webserver". The specified tag must already exist or be passed in as the I(instance_tags) option.
+ type: raw
+ network_interfaces:
+ version_added: "2.0"
+ description:
+ - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces,
+ none of the I(assign_public_ip), I(private_ip), I(vpc_subnet_id), I(group), or I(group_id) parameters may be used. (Those parameters are
+ for creating a new network interface at launch.)
+ aliases: ['network_interface']
+ type: list
+ elements: str
+ spot_launch_group:
+ version_added: "2.1"
+ description:
+ - Launch group for spot requests, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group).
+ type: str
+author:
+ - "Tim Gerla (@tgerla)"
+ - "Lester Wade (@lwade)"
+ - "Seth Vidal (@skvidal)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic provisioning example
+- ec2:
+ key_name: mykey
+ instance_type: t2.micro
+ image: ami-123456
+ wait: yes
+ group: webserver
+ count: 3
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Advanced example with tagging and CloudWatch
+- ec2:
+ key_name: mykey
+ group: databases
+ instance_type: t2.micro
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ instance_tags:
+ db: postgres
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Single instance with additional IOPS volume from snapshot and volume delete on termination
+- ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: c3.medium
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ volumes:
+ - device_name: /dev/sdb
+ snapshot: snap-abcdef12
+ volume_type: io1
+ iops: 1000
+ volume_size: 100
+ delete_on_termination: true
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Single instance with ssd gp2 root volume
+- ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: c3.medium
+ image: ami-123456
+ wait: yes
+ wait_timeout: 500
+ volumes:
+ - device_name: /dev/xvda
+ volume_type: gp2
+ volume_size: 8
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ count_tag:
+ Name: dbserver
+ exact_count: 1
+
+# Multiple groups example
+- ec2:
+ key_name: mykey
+ group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
+ instance_type: m1.large
+ image: ami-6e649707
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ instance_tags:
+ db: postgres
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Multiple instances with additional volume from snapshot
+- ec2:
+ key_name: mykey
+ group: webserver
+ instance_type: m1.large
+ image: ami-6e649707
+ wait: yes
+ wait_timeout: 500
+ count: 5
+ volumes:
+ - device_name: /dev/sdb
+ snapshot: snap-abcdef12
+ volume_size: 10
+ monitoring: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+# Dedicated tenancy example
+- local_action:
+ module: ec2
+ assign_public_ip: yes
+ group_id: sg-1dc53f72
+ key_name: mykey
+ image: ami-6e649707
+ instance_type: m1.small
+ tenancy: dedicated
+ vpc_subnet_id: subnet-29e63245
+ wait: yes
+
+# Spot instance example
+- ec2:
+ spot_price: 0.24
+ spot_wait_timeout: 600
+ keypair: mykey
+ group_id: sg-1dc53f72
+ instance_type: m1.small
+ image: ami-6e649707
+ wait: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ spot_launch_group: report_generators
+ instance_initiated_shutdown_behavior: terminate
+
+# Examples using pre-existing network interfaces
+- ec2:
+ key_name: mykey
+ instance_type: t2.small
+ image: ami-f005ba11
+ network_interface: eni-deadbeef
+
+- ec2:
+ key_name: mykey
+ instance_type: t2.small
+ image: ami-f005ba11
+ network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']
+
+# Launch instances, run some tasks
+# and then terminate them
+
+- name: Create a sandbox instance
+ hosts: localhost
+ gather_facts: False
+ vars:
+ keypair: my_keypair
+ instance_type: m1.small
+ security_group: my_securitygroup
+ image: my_ami_id
+ region: us-east-1
+ tasks:
+ - name: Launch instance
+ ec2:
+ key_name: "{{ keypair }}"
+ group: "{{ security_group }}"
+ instance_type: "{{ instance_type }}"
+ image: "{{ image }}"
+ wait: true
+ region: "{{ region }}"
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ register: ec2
+
+ - name: Add new instance to host group
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: launched
+ loop: "{{ ec2.instances }}"
+
+ - name: Wait for SSH to come up
+ delegate_to: "{{ item.public_dns_name }}"
+ wait_for_connection:
+ delay: 60
+ timeout: 320
+ loop: "{{ ec2.instances }}"
+
+- name: Configure instance(s)
+ hosts: launched
+ become: True
+ gather_facts: True
+ roles:
+ - my_awesome_role
+ - my_awesome_test
+
+- name: Terminate instances
+ hosts: localhost
+ tasks:
+ - name: Terminate instances that were previously launched
+ ec2:
+ state: 'absent'
+ instance_ids: '{{ ec2.instance_ids }}'
+
+# Start a few existing instances, run some tasks
+# and stop the instances
+
+- name: Start sandbox instances
+ hosts: localhost
+ gather_facts: false
+ vars:
+ instance_ids:
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ region: us-east-1
+ tasks:
+ - name: Start the sandbox instances
+ ec2:
+ instance_ids: '{{ instance_ids }}'
+ region: '{{ region }}'
+ state: running
+ wait: True
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+ roles:
+ - do_neat_stuff
+ - do_more_neat_stuff
+
+- name: Stop sandbox instances
+ hosts: localhost
+ gather_facts: false
+ vars:
+ instance_ids:
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ - 'i-xxxxxx'
+ region: us-east-1
+ tasks:
+ - name: Stop the sandbox instances
+ ec2:
+ instance_ids: '{{ instance_ids }}'
+ region: '{{ region }}'
+ state: stopped
+ wait: True
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# Start stopped instances specified by tag
+#
+- local_action:
+ module: ec2
+ instance_tags:
+ Name: ExtraPower
+ state: running
+
+#
+# Restart instances specified by tag
+#
+- local_action:
+ module: ec2
+ instance_tags:
+ Name: ExtraPower
+ state: restarted
+
+#
+# Enforce that 5 instances with a tag "foo" are running
+# (Highly recommended!)
+#
+
+- ec2:
+ key_name: mykey
+ instance_type: c1.medium
+ image: ami-40603AD1
+ wait: yes
+ group: webserver
+ instance_tags:
+ foo: bar
+ exact_count: 5
+ count_tag: foo
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# Enforce that 5 instances named "database" with a "dbtype" of "postgres" are running
+#
+
+- ec2:
+ key_name: mykey
+ instance_type: c1.medium
+ image: ami-40603AD1
+ wait: yes
+ group: webserver
+ instance_tags:
+ Name: database
+ dbtype: postgres
+ exact_count: 5
+ count_tag:
+ Name: database
+ dbtype: postgres
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
+#
+# count_tag complex argument examples
+#
+
+ # instances with tag foo
+- ec2:
+ count_tag:
+ foo:
+
+ # instances with tag foo=bar
+- ec2:
+ count_tag:
+ foo: bar
+
+ # instances with tags foo=bar & baz
+- ec2:
+ count_tag:
+ foo: bar
+ baz:
+
+ # instances with tags foo & bar & baz=bang
+- ec2:
+ count_tag:
+ - foo
+ - bar
+ - baz: bang
+
+'''
+
+import time
+import datetime
+import traceback
+from ast import literal_eval
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, ec2_connect
+from ansible.module_utils.six import get_function_code, string_types
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+ import boto.ec2
+ from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
+ from boto.exception import EC2ResponseError
+ from boto import connect_ec2_endpoint
+ from boto import connect_vpc
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone=None):
+
+ # get reservations for instances that match tag(s) and are in the desired state
+ state = module.params.get('state')
+ if state not in ['running', 'stopped']:
+ state = None
+ reservations = get_reservations(module, ec2, vpc, tags=count_tag, state=state, zone=zone)
+
+ instances = []
+ for res in reservations:
+ if hasattr(res, 'instances'):
+ for inst in res.instances:
+ if inst.state == 'terminated' or inst.state == 'shutting-down':
+ continue
+ instances.append(inst)
+
+ return reservations, instances
+
+
+def _set_none_to_blank(dictionary):
+ result = dictionary
+ for k in result:
+ if isinstance(result[k], dict):
+ result[k] = _set_none_to_blank(result[k])
+ elif not result[k]:
+ result[k] = ""
+ return result
+
+
+def get_reservations(module, ec2, vpc, tags=None, state=None, zone=None):
+ # TODO: filters do not work with tags that have underscores
+ filters = dict()
+
+ vpc_subnet_id = module.params.get('vpc_subnet_id')
+ vpc_id = None
+ if vpc_subnet_id:
+ filters.update({"subnet-id": vpc_subnet_id})
+ if vpc:
+ vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
+
+ if vpc_id:
+ filters.update({"vpc-id": vpc_id})
+
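+ # Roughly, the supported tag shapes map to EC2 filters as follows
+ # (illustrative values only):
+ # 'foo' -> {'tag-key': 'foo'}
+ # {'foo': 'bar'} -> {'tag:foo': 'bar'}
+ # ['foo', {'baz': 'bang'}] -> {'tag-key': 'foo', 'tag:baz': 'bang'}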
+ if tags is not None:
+
+ if isinstance(tags, str):
+ try:
+ tags = literal_eval(tags)
+ except Exception:
+ pass
+
+ # if not a string type, convert and make sure it's a text string
+ if isinstance(tags, int):
+ tags = to_text(tags)
+
+ # if string, we only care that a tag of that name exists
+ if isinstance(tags, str):
+ filters.update({"tag-key": tags})
+
+ # if list, append each item to filters
+ if isinstance(tags, list):
+ for x in tags:
+ if isinstance(x, dict):
+ x = _set_none_to_blank(x)
+ filters.update(dict(("tag:" + tn, tv) for (tn, tv) in x.items()))
+ else:
+ filters.update({"tag-key": x})
+
+ # if dict, add the key and value to the filter
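+ # e.g. tags={'Name': 'database', 'dbtype': None} becomes
+ # filters={'tag:Name': 'database', 'tag:dbtype': ''}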
+ if isinstance(tags, dict):
+ tags = _set_none_to_blank(tags)
+ filters.update(dict(("tag:" + tn, tv) for (tn, tv) in tags.items()))
+
+ # let's check whether the filters dict is empty; if so, stop here
+ if not filters:
+ module.fail_json(msg="Filters based on tag is empty => tags: %s" % (tags))
+
+ if state:
+ # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
+ filters.update({'instance-state-name': state})
+
+ if zone:
+ filters.update({'availability-zone': zone})
+
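+ # the module's 'id' parameter is sent to EC2 as a client token at launch
+ # time, so it can be used here to find the matching reservation again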
+ if module.params.get('id'):
+ filters['client-token'] = module.params['id']
+
+ results = ec2.get_all_instances(filters=filters)
+
+ return results
+
+
+def get_instance_info(inst):
+ """
+ Retrieves instance information from a boto instance
+ object and returns it as a dictionary
+ """
+ instance_info = {'id': inst.id,
+ 'ami_launch_index': inst.ami_launch_index,
+ 'private_ip': inst.private_ip_address,
+ 'private_dns_name': inst.private_dns_name,
+ 'public_ip': inst.ip_address,
+ 'dns_name': inst.dns_name,
+ 'public_dns_name': inst.public_dns_name,
+ 'state_code': inst.state_code,
+ 'architecture': inst.architecture,
+ 'image_id': inst.image_id,
+ 'key_name': inst.key_name,
+ 'placement': inst.placement,
+ 'region': inst.placement[:-1],
+ 'kernel': inst.kernel,
+ 'ramdisk': inst.ramdisk,
+ 'launch_time': inst.launch_time,
+ 'instance_type': inst.instance_type,
+ 'root_device_type': inst.root_device_type,
+ 'root_device_name': inst.root_device_name,
+ 'state': inst.state,
+ 'hypervisor': inst.hypervisor,
+ 'tags': inst.tags,
+ 'groups': dict((group.id, group.name) for group in inst.groups),
+ }
+ try:
+ instance_info['virtualization_type'] = getattr(inst, 'virtualization_type')
+ except AttributeError:
+ instance_info['virtualization_type'] = None
+
+ try:
+ instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized')
+ except AttributeError:
+ instance_info['ebs_optimized'] = False
+
+ try:
+ bdm_dict = {}
+ bdm = getattr(inst, 'block_device_mapping')
+ for device_name in bdm.keys():
+ bdm_dict[device_name] = {
+ 'status': bdm[device_name].status,
+ 'volume_id': bdm[device_name].volume_id,
+ 'delete_on_termination': bdm[device_name].delete_on_termination
+ }
+ instance_info['block_device_mapping'] = bdm_dict
+ except AttributeError:
+ instance_info['block_device_mapping'] = False
+
+ try:
+ instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
+ except AttributeError:
+ instance_info['tenancy'] = 'default'
+
+ return instance_info
+
+
+def boto_supports_associate_public_ip_address(ec2):
+ """
+ Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification
+ class. Added in Boto 2.13.0
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if Boto library accepts associate_public_ip_address argument, else False
+ """
+
+ try:
+ network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification()
+ getattr(network_interface, "associate_public_ip_address")
+ return True
+ except AttributeError:
+ return False
+
+
+def boto_supports_profile_name_arg(ec2):
+ """
+ Check if Boto library has instance_profile_name argument. instance_profile_name was added in Boto 2.5.0
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if Boto library accepts instance_profile_name argument, else False
+ """
+ run_instances_method = getattr(ec2, 'run_instances')
+ return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames
+
+
+def boto_supports_volume_encryption():
+ """
+ Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
+
+ Returns:
+ True if the installed boto version is at least 2.29.0, else False
+ """
+ return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
+
+
+def create_block_device(module, ec2, volume):
+ # Not aware of a way to determine this programmatically
+ # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
+ MAX_IOPS_TO_SIZE_RATIO = 30
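+ # A 'volume' entry is a dict such as (illustrative values only):
+ # {'device_name': '/dev/sdf', 'volume_size': 100, 'volume_type': 'io1', 'iops': 3000}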
+
+ volume_type = volume.get('volume_type')
+
+ if 'snapshot' not in volume and 'ephemeral' not in volume:
+ if 'volume_size' not in volume:
+ module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
+ if 'snapshot' in volume:
+ if volume_type == 'io1' and 'iops' not in volume:
+ module.fail_json(msg='io1 volumes must have an iops value set')
+ if 'iops' in volume:
+ snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
+ size = volume.get('volume_size', snapshot.volume_size)
+ if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
+ module.fail_json(msg='IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
+ if 'ephemeral' in volume:
+ if 'snapshot' in volume:
+ module.fail_json(msg='Cannot set both ephemeral and snapshot')
+ if boto_supports_volume_encryption():
+ return BlockDeviceType(snapshot_id=volume.get('snapshot'),
+ ephemeral_name=volume.get('ephemeral'),
+ size=volume.get('volume_size'),
+ volume_type=volume_type,
+ delete_on_termination=volume.get('delete_on_termination', False),
+ iops=volume.get('iops'),
+ encrypted=volume.get('encrypted', None))
+ else:
+ return BlockDeviceType(snapshot_id=volume.get('snapshot'),
+ ephemeral_name=volume.get('ephemeral'),
+ size=volume.get('volume_size'),
+ volume_type=volume_type,
+ delete_on_termination=volume.get('delete_on_termination', False),
+ iops=volume.get('iops'))
+
+
+def boto_supports_param_in_spot_request(ec2, param):
+ """
+ Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if boto library has the named param as an argument on the request_spot_instances method, else False
+ """
+ method = getattr(ec2, 'request_spot_instances')
+ return param in get_function_code(method).co_varnames
+
+
+def await_spot_requests(module, ec2, spot_requests, count):
+ """
+ Wait for a group of spot requests to be fulfilled, or fail.
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ spot_requests: boto.ec2.spotinstancerequest.SpotInstanceRequest object returned by ec2.request_spot_instances
+ count: Total number of instances to be created by the spot requests
+
+ Returns:
+ list of instance IDs created by the spot request(s)
+ """
+ spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
+ wait_complete = time.time() + spot_wait_timeout
+
+ spot_req_inst_ids = dict()
+ while time.time() < wait_complete:
+ reqs = ec2.get_all_spot_instance_requests()
+ for sirb in spot_requests:
+ if sirb.id in spot_req_inst_ids:
+ continue
+ for sir in reqs:
+ if sir.id != sirb.id:
+ continue # this is not our spot instance
+ if sir.instance_id is not None:
+ spot_req_inst_ids[sirb.id] = sir.instance_id
+ elif sir.state == 'open':
+ continue # still waiting, nothing to do here
+ elif sir.state == 'active':
+ continue # Instance is created already, nothing to do here
+ elif sir.state == 'failed':
+ module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
+ sir.id, sir.status.code, sir.fault.code, sir.fault.message))
+ elif sir.state == 'cancelled':
+ module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
+ elif sir.state == 'closed':
+ # instance is terminating or marked for termination
+ # this may be intentional on the part of the operator,
+ # or it may have been terminated by AWS due to capacity,
+ # price, or group constraints. In this case, we'll fail
+ # the module if the reason for the state is anything
+ # other than termination by user. Codes are documented at
+ # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
+ if sir.status.code == 'instance-terminated-by-user':
+ # do nothing, since the user likely did this on purpose
+ pass
+ else:
+ spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
+ module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))
+
+ if len(spot_req_inst_ids) < count:
+ time.sleep(5)
+ else:
+ return list(spot_req_inst_ids.values())
+ module.fail_json(msg="wait for spot requests timeout on %s" % time.asctime())
+
+
+def enforce_count(module, ec2, vpc):
+
+ exact_count = module.params.get('exact_count')
+ count_tag = module.params.get('count_tag')
+ zone = module.params.get('zone')
+
+ # fail here if the exact count was specified without filtering
+ # on a tag, as this may lead to an undesired removal of instances
+ if exact_count and count_tag is None:
+ module.fail_json(msg="you must use the 'count_tag' option with exact_count")
+
+ reservations, instances = find_running_instances_by_count_tag(module, ec2, vpc, count_tag, zone)
+
+ changed = None
+ checkmode = False
+ instance_dict_array = []
+ changed_instance_ids = None
+
+ if len(instances) == exact_count:
+ changed = False
+ elif len(instances) < exact_count:
+ changed = True
+ to_create = exact_count - len(instances)
+ if not checkmode:
+ (instance_dict_array, changed_instance_ids, changed) \
+ = create_instances(module, ec2, vpc, override_count=to_create)
+
+ for inst in instance_dict_array:
+ instances.append(inst)
+ elif len(instances) > exact_count:
+ changed = True
+ to_remove = len(instances) - exact_count
+ if not checkmode:
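+ # sort the ids so the choice of instances to terminate is deterministic:
+ # the lexically smallest instance ids are removed first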
+ all_instance_ids = sorted([x.id for x in instances])
+ remove_ids = all_instance_ids[0:to_remove]
+
+ instances = [x for x in instances if x.id not in remove_ids]
+
+ (changed, instance_dict_array, changed_instance_ids) \
+ = terminate_instances(module, ec2, remove_ids)
+ terminated_list = []
+ for inst in instance_dict_array:
+ inst['state'] = "terminated"
+ terminated_list.append(inst)
+ instance_dict_array = terminated_list
+
+ # ensure all instances are dictionaries
+ all_instances = []
+ for inst in instances:
+
+ if not isinstance(inst, dict):
+ warn_if_public_ip_assignment_changed(module, inst)
+ inst = get_instance_info(inst)
+ all_instances.append(inst)
+
+ return (all_instances, instance_dict_array, changed_instance_ids, changed)
+
+
+def create_instances(module, ec2, vpc, override_count=None):
+ """
+ Creates new instances
+
+ module : AnsibleModule object
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ A list of dictionaries with instance information
+ about the instances that were launched
+ """
+
+ key_name = module.params.get('key_name')
+ id = module.params.get('id')
+ group_name = module.params.get('group')
+ group_id = module.params.get('group_id')
+ zone = module.params.get('zone')
+ instance_type = module.params.get('instance_type')
+ tenancy = module.params.get('tenancy')
+ spot_price = module.params.get('spot_price')
+ spot_type = module.params.get('spot_type')
+ image = module.params.get('image')
+ if override_count:
+ count = override_count
+ else:
+ count = module.params.get('count')
+ monitoring = module.params.get('monitoring')
+ kernel = module.params.get('kernel')
+ ramdisk = module.params.get('ramdisk')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
+ placement_group = module.params.get('placement_group')
+ user_data = module.params.get('user_data')
+ instance_tags = module.params.get('instance_tags')
+ vpc_subnet_id = module.params.get('vpc_subnet_id')
+ assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+ private_ip = module.params.get('private_ip')
+ instance_profile_name = module.params.get('instance_profile_name')
+ volumes = module.params.get('volumes')
+ ebs_optimized = module.params.get('ebs_optimized')
+ exact_count = module.params.get('exact_count')
+ count_tag = module.params.get('count_tag')
+ source_dest_check = module.boolean(module.params.get('source_dest_check'))
+ termination_protection = module.boolean(module.params.get('termination_protection'))
+ network_interfaces = module.params.get('network_interfaces')
+ spot_launch_group = module.params.get('spot_launch_group')
+ instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')
+
+ vpc_id = None
+ if vpc_subnet_id:
+ if not vpc:
+ module.fail_json(msg="region must be specified")
+ else:
+ vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
+ else:
+ vpc_id = None
+
+ try:
+ # Here we try to lookup the group id from the security group name - if group is set.
+ if group_name:
+ if vpc_id:
+ grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
+ else:
+ grp_details = ec2.get_all_security_groups()
+ if isinstance(group_name, string_types):
+ group_name = [group_name]
+ unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
+ if len(unmatched) > 0:
+ module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
+ group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
+ # Now we try to lookup the group id testing if group exists.
+ elif group_id:
+ # wrap the group_id in a list if it's not one already
+ if isinstance(group_id, string_types):
+ group_id = [group_id]
+ grp_details = ec2.get_all_security_groups(group_ids=group_id)
+ group_name = [grp_item.name for grp_item in grp_details]
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ # Look up any instances that match our run id.
+
+ running_instances = []
+ count_remaining = int(count)
+
+ if id is not None:
+ filter_dict = {'client-token': id, 'instance-state-name': 'running'}
+ previous_reservations = ec2.get_all_instances(None, filter_dict)
+ for res in previous_reservations:
+ for prev_instance in res.instances:
+ running_instances.append(prev_instance)
+ count_remaining = count_remaining - len(running_instances)
+
+ # Both min_count and max_count are set to the requested count, so the launch request is explicit: we get exactly that many instances, or the request fails.
+
+ if count_remaining == 0:
+ changed = False
+ else:
+ changed = True
+ try:
+ params = {'image_id': image,
+ 'key_name': key_name,
+ 'monitoring_enabled': monitoring,
+ 'placement': zone,
+ 'instance_type': instance_type,
+ 'kernel_id': kernel,
+ 'ramdisk_id': ramdisk}
+ if user_data is not None:
+ params['user_data'] = to_bytes(user_data, errors='surrogate_or_strict')
+
+ if ebs_optimized:
+ params['ebs_optimized'] = ebs_optimized
+
+ # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
+ if not spot_price:
+ params['tenancy'] = tenancy
+
+ if boto_supports_profile_name_arg(ec2):
+ params['instance_profile_name'] = instance_profile_name
+ else:
+ if instance_profile_name is not None:
+ module.fail_json(
+ msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
+
+ if assign_public_ip is not None:
+ if not boto_supports_associate_public_ip_address(ec2):
+ module.fail_json(
+ msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
+ elif not vpc_subnet_id:
+ module.fail_json(
+ msg="assign_public_ip only available with vpc_subnet_id")
+
+ else:
+ if private_ip:
+ interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ subnet_id=vpc_subnet_id,
+ private_ip_address=private_ip,
+ groups=group_id,
+ associate_public_ip_address=assign_public_ip)
+ else:
+ interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ subnet_id=vpc_subnet_id,
+ groups=group_id,
+ associate_public_ip_address=assign_public_ip)
+ interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
+ params['network_interfaces'] = interfaces
+ else:
+ if network_interfaces:
+ if isinstance(network_interfaces, string_types):
+ network_interfaces = [network_interfaces]
+ interfaces = []
+ for i, network_interface_id in enumerate(network_interfaces):
+ interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ network_interface_id=network_interface_id,
+ device_index=i)
+ interfaces.append(interface)
+ params['network_interfaces'] = \
+ boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
+ else:
+ params['subnet_id'] = vpc_subnet_id
+ if vpc_subnet_id:
+ params['security_group_ids'] = group_id
+ else:
+ params['security_groups'] = group_name
+
+ if volumes:
+ bdm = BlockDeviceMapping()
+ for volume in volumes:
+ if 'device_name' not in volume:
+ module.fail_json(msg='Device name must be set for volume')
+ # Minimum volume size is 1GiB. We'll use volume size explicitly set to 0
+ # to be a signal not to create this volume
+ if 'volume_size' not in volume or int(volume['volume_size']) > 0:
+ bdm[volume['device_name']] = create_block_device(module, ec2, volume)
+
+ params['block_device_map'] = bdm
+
+ # check to see if we're using spot pricing first before starting instances
+ if not spot_price:
+ if assign_public_ip is not None and private_ip:
+ params.update(
+ dict(
+ min_count=count_remaining,
+ max_count=count_remaining,
+ client_token=id,
+ placement_group=placement_group,
+ )
+ )
+ else:
+ params.update(
+ dict(
+ min_count=count_remaining,
+ max_count=count_remaining,
+ client_token=id,
+ placement_group=placement_group,
+ private_ip_address=private_ip,
+ )
+ )
+
+ # For ordinary (not spot) instances, we can select 'stop'
+ # (the default) or 'terminate' here.
+ params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
+
+ try:
+ res = ec2.run_instances(**params)
+ except boto.exception.EC2ResponseError as e:
+ if (params['instance_initiated_shutdown_behavior'] != 'terminate' and
+ "InvalidParameterCombination" == e.error_code):
+ params['instance_initiated_shutdown_behavior'] = 'terminate'
+ res = ec2.run_instances(**params)
+ else:
+ raise
+
+ instids = [i.id for i in res.instances]
+ while True:
+ try:
+ ec2.get_all_instances(instids)
+ break
+ except boto.exception.EC2ResponseError as e:
+ if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
+ # there's a race between start and get an instance
+ continue
+ else:
+ module.fail_json(msg=str(e))
+
+ # The instances returned through ec2.run_instances above can be in
+ # terminated state due to idempotency. See commit 7f11c3d for a complete
+ # explanation.
+ terminated_instances = [
+ str(instance.id) for instance in res.instances if instance.state == 'terminated'
+ ]
+ if terminated_instances:
+ module.fail_json(msg="Instances with id(s) %s " % terminated_instances +
+ "were created previously but have since been terminated - " +
+ "use a (possibly different) 'instanceid' parameter")
+
+ else:
+ if private_ip:
+ module.fail_json(
+ msg='private_ip only available with on-demand (non-spot) instances')
+ if boto_supports_param_in_spot_request(ec2, 'placement_group'):
+ params['placement_group'] = placement_group
+ elif placement_group:
+ module.fail_json(
+ msg="placement_group parameter requires Boto version 2.3.0 or higher.")
+
+ # You can't tell spot instances to 'stop'; they will always be
+ # 'terminate'd. For convenience, we'll ignore the latter value.
+ if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
+ module.fail_json(
+ msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
+
+ if spot_launch_group and isinstance(spot_launch_group, string_types):
+ params['launch_group'] = spot_launch_group
+
+ params.update(dict(
+ count=count_remaining,
+ type=spot_type,
+ ))
+
+ # Set spot ValidUntil
+ # ValidUntil -> (timestamp). The end date of the request, in
+ # UTC format (for example, YYYY-MM-DDTHH:MM:SSZ).
+ utc_valid_until = (
+ datetime.datetime.utcnow()
+ + datetime.timedelta(seconds=spot_wait_timeout))
+ params['valid_until'] = utc_valid_until.strftime('%Y-%m-%dT%H:%M:%S.000Z')
+
+ res = ec2.request_spot_instances(spot_price, **params)
+
+ # Now we have to do the intermediate waiting
+ if wait:
+ instids = await_spot_requests(module, ec2, res, count)
+ else:
+ instids = []
+ except boto.exception.BotoServerError as e:
+ module.fail_json(msg="Instance creation failed => %s: %s" % (e.error_code, e.error_message))
+
+ # wait here until the instances are up
+ num_running = 0
+ wait_timeout = time.time() + wait_timeout
+ res_list = ()
+ while wait_timeout > time.time() and num_running < len(instids):
+ try:
+ res_list = ec2.get_all_instances(instids)
+ except boto.exception.BotoServerError as e:
+ if e.error_code == 'InvalidInstanceID.NotFound':
+ time.sleep(1)
+ continue
+ else:
+ raise
+
+ num_running = 0
+ for res in res_list:
+ num_running += len([i for i in res.instances if i.state == 'running'])
+ if len(res_list) <= 0:
+ # got a bad response of some sort, possibly due to
+ # stale/cached data. Wait a second and then try again
+ time.sleep(1)
+ continue
+ if wait and num_running < len(instids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
+
+ # We do this after the loop ends so that we end up with one list
+ for res in res_list:
+ running_instances.extend(res.instances)
+
+ # Enabled by default by AWS
+ if source_dest_check is False:
+ for inst in res.instances:
+ inst.modify_attribute('sourceDestCheck', False)
+
+ # Disabled by default by AWS
+ if termination_protection is True:
+ for inst in res.instances:
+ inst.modify_attribute('disableApiTermination', True)
+
+ # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
+ if instance_tags and instids:
+ try:
+ ec2.create_tags(instids, instance_tags)
+ except boto.exception.EC2ResponseError as e:
+ module.fail_json(msg="Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
+
+ instance_dict_array = []
+ created_instance_ids = []
+ for inst in running_instances:
+ inst.update()
+ d = get_instance_info(inst)
+ created_instance_ids.append(inst.id)
+ instance_dict_array.append(d)
+
+ return (instance_dict_array, created_instance_ids, changed)
+
+
+def terminate_instances(module, ec2, instance_ids):
+ """
+ Terminates a list of instances
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ instance_ids: a list of instance IDs to terminate in the form of
+ [ <inst-id>, ...]
+
+ Returns a dictionary of instance information
+ about the instances terminated.
+
+ If no instance is in a running or stopped state (so nothing
+ is terminated), "changed" will be set to False.
+
+ """
+
+ # Whether to wait for termination to complete before returning
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ changed = False
+ instance_dict_array = []
+
+ if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ terminated_instance_ids = []
+ for res in ec2.get_all_instances(instance_ids):
+ for inst in res.instances:
+ if inst.state == 'running' or inst.state == 'stopped':
+ terminated_instance_ids.append(inst.id)
+ instance_dict_array.append(get_instance_info(inst))
+ try:
+ ec2.terminate_instances([inst.id])
+ except EC2ResponseError as e:
+ module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
+ changed = True
+
+ # wait here until the instances are 'terminated'
+ if wait:
+ num_terminated = 0
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
+ response = ec2.get_all_instances(instance_ids=terminated_instance_ids,
+ filters={'instance-state-name': 'terminated'})
+ try:
+ num_terminated = sum([len(res.instances) for res in response])
+ except Exception as e:
+ # got a bad response of some sort, possibly due to
+ # stale/cached data. Wait a second and then try again
+ time.sleep(1)
+ continue
+
+ if num_terminated < len(terminated_instance_ids):
+ time.sleep(5)
+
+ # waiting took too long
+ if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
+ module.fail_json(msg="wait for instance termination timeout on %s" % time.asctime())
+ # Let's get the current state of the instances after terminating - issue600
+ instance_dict_array = []
+ for res in ec2.get_all_instances(instance_ids=terminated_instance_ids, filters={'instance-state-name': 'terminated'}):
+ for inst in res.instances:
+ instance_dict_array.append(get_instance_info(inst))
+
+ return (changed, instance_dict_array, terminated_instance_ids)
+
+
+def startstop_instances(module, ec2, instance_ids, state, instance_tags):
+ """
+ Starts or stops a list of existing instances
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ instance_ids: The list of instance IDs to start or stop, in the form of
+ [ <inst-id>, ...]
+ instance_tags: A dict of tag keys and values in the form of
+ {key: value, ... }
+ state: Intended state ("running" or "stopped")
+
+ Returns a dictionary of instance information
+ about the instances started/stopped.
+
+ If the instance was not able to change state,
+ "changed" will be set to False.
+
+ Note that if instance_ids and instance_tags are both non-empty,
+ this method will process the intersection of the two
+ """
+
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ group_id = module.params.get('group_id')
+ group_name = module.params.get('group')
+ changed = False
+ instance_dict_array = []
+
+ if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ # Fail unless the user defined instance tags
+ if not instance_tags:
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
+ # An empty filter does no filtering, so it's safe to pass it to the
+ # get_all_instances method even if the user did not specify instance_tags
+ filters = {}
+ if instance_tags:
+ for key, value in instance_tags.items():
+ filters["tag:" + key] = value
+
+ if module.params.get('id'):
+ filters['client-token'] = module.params['id']
+ # Check that our instances are not already in the requested state
+
+ # Check (and possibly change) instance attributes and instance state
+ existing_instances_array = []
+ for res in ec2.get_all_instances(instance_ids, filters=filters):
+ for inst in res.instances:
+
+ warn_if_public_ip_assignment_changed(module, inst)
+
+ changed = (check_source_dest_attr(module, inst, ec2) or
+ check_termination_protection(module, inst) or changed)
+
+ # Check security groups if we're using ec2-vpc; ec2-classic security groups may not be modified
+ if inst.vpc_id and group_name:
+ grp_details = ec2.get_all_security_groups(filters={'vpc_id': inst.vpc_id})
+ if isinstance(group_name, string_types):
+ group_name = [group_name]
+ unmatched = set(group_name) - set(to_text(grp.name) for grp in grp_details)
+ if unmatched:
+ module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
+ group_ids = [to_text(grp.id) for grp in grp_details if to_text(grp.name) in group_name]
+ elif inst.vpc_id and group_id:
+ if isinstance(group_id, string_types):
+ group_id = [group_id]
+ grp_details = ec2.get_all_security_groups(group_ids=group_id)
+ group_ids = [grp_item.id for grp_item in grp_details]
+ if inst.vpc_id and (group_name or group_id):
+ if set(sg.id for sg in inst.groups) != set(group_ids):
+ changed = inst.modify_attribute('groupSet', group_ids)
+
+ # Check instance state
+ if inst.state != state:
+ instance_dict_array.append(get_instance_info(inst))
+ try:
+ if state == 'running':
+ inst.start()
+ else:
+ inst.stop()
+ except EC2ResponseError as e:
+ module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
+ changed = True
+ existing_instances_array.append(inst.id)
+
+ instance_ids = list(set(existing_instances_array + (instance_ids or [])))
+ # Wait for all the instances to finish starting or stopping
+ wait_timeout = time.time() + wait_timeout
+ while wait and wait_timeout > time.time():
+ instance_dict_array = []
+ matched_instances = []
+ for res in ec2.get_all_instances(instance_ids):
+ for i in res.instances:
+ if i.state == state:
+ instance_dict_array.append(get_instance_info(i))
+ matched_instances.append(i)
+ if len(matched_instances) < len(instance_ids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for instances running timeout on %s" % time.asctime())
+
+ return (changed, instance_dict_array, instance_ids)
+
+
+def restart_instances(module, ec2, instance_ids, state, instance_tags):
+ """
+ Restarts a list of existing instances
+
+ module: Ansible module object
+ ec2: authenticated ec2 connection object
+ instance_ids: The list of instance IDs to restart, in the form of
+ [ <inst-id>, ...]
+ instance_tags: A dict of tag keys and values in the form of
+ {key: value, ... }
+ state: Intended state ("restarted")
+
+ Returns a dictionary of instance information
+ about the instances.
+
+ If the instance was not able to change state,
+ "changed" will be set to False.
+
+ Wait does not apply here, as this is an OS-level operation.
+
+ Note that if instance_ids and instance_tags are both non-empty,
+ this method will process the intersection of the two.
+ """
+
+ changed = False
+ instance_dict_array = []
+
+ if not isinstance(instance_ids, list) or len(instance_ids) < 1:
+ # Fail unless the user defined instance tags
+ if not instance_tags:
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
+ # An empty filter does no filtering, so it's safe to pass it to the
+ # get_all_instances method even if the user did not specify instance_tags
+ filters = {}
+ if instance_tags:
+ for key, value in instance_tags.items():
+ filters["tag:" + key] = value
+ if module.params.get('id'):
+ filters['client-token'] = module.params['id']
+
+ # Check that our instances are not already in the requested state
+
+ # Check (and possibly change) instance attributes and instance state
+ for res in ec2.get_all_instances(instance_ids, filters=filters):
+ for inst in res.instances:
+
+ warn_if_public_ip_assignment_changed(module, inst)
+
+ changed = (check_source_dest_attr(module, inst, ec2) or
+ check_termination_protection(module, inst) or changed)
+
+ # Check instance state
+ if inst.state != state:
+ instance_dict_array.append(get_instance_info(inst))
+ try:
+ inst.reboot()
+ except EC2ResponseError as e:
+ module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
+ changed = True
+
+ return (changed, instance_dict_array, instance_ids)
+
+
+def check_termination_protection(module, inst):
+ """
+ Check the instance disableApiTermination attribute.
+
+ module: Ansible module object
+ inst: EC2 instance object
+
+ returns: True if state changed, None otherwise
+ """
+
+ termination_protection = module.params.get('termination_protection')
+
+ if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None):
+ inst.modify_attribute('disableApiTermination', termination_protection)
+ return True
+
+
+def check_source_dest_attr(module, inst, ec2):
+ """
+ Check the instance sourceDestCheck attribute.
+
+ module: Ansible module object
+ inst: EC2 instance object
+
+ returns: True if state changed, None otherwise
+ """
+
+ source_dest_check = module.params.get('source_dest_check')
+
+ if source_dest_check is not None:
+ try:
+ if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
+ inst.modify_attribute('sourceDestCheck', source_dest_check)
+ return True
+ except boto.exception.EC2ResponseError as exc:
+ # instances with more than one Elastic Network Interface will
+ # fail, because they have the sourceDestCheck attribute defined
+ # per-interface
+ if exc.code == 'InvalidInstanceID':
+ for interface in inst.interfaces:
+ if interface.source_dest_check != source_dest_check:
+ ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
+ return True
+ else:
+ module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
+ exception=traceback.format_exc())
+
+
+def warn_if_public_ip_assignment_changed(module, instance):
+ # This is a non-modifiable attribute.
+ assign_public_ip = module.params.get('assign_public_ip')
+
+ # Check that public ip assignment is the same and warn if not
+ public_dns_name = getattr(instance, 'public_dns_name', None)
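+ # the warning fires when a public IP was requested but the instance has no
+ # public DNS name, or when assign_public_ip is explicitly False yet the
+ # instance already has one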
+ if (assign_public_ip or public_dns_name) and (not public_dns_name or assign_public_ip is False):
+ module.warn("Unable to modify public ip assignment to {0} for instance {1}. "
+ "Whether or not to assign a public IP is determined during instance creation.".format(assign_public_ip, instance.id))
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ key_name=dict(aliases=['keypair']),
+ id=dict(),
+ group=dict(type='list', aliases=['groups']),
+ group_id=dict(type='list'),
+ zone=dict(aliases=['aws_zone', 'ec2_zone']),
+ instance_type=dict(aliases=['type']),
+ spot_price=dict(),
+ spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
+ spot_launch_group=dict(),
+ image=dict(),
+ kernel=dict(),
+ count=dict(type='int', default='1'),
+ monitoring=dict(type='bool', default=False),
+ ramdisk=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ spot_wait_timeout=dict(type='int', default=600),
+ placement_group=dict(),
+ user_data=dict(),
+ instance_tags=dict(type='dict'),
+ vpc_subnet_id=dict(),
+ assign_public_ip=dict(type='bool'),
+ private_ip=dict(),
+ instance_profile_name=dict(),
+ instance_ids=dict(type='list', aliases=['instance_id']),
+ source_dest_check=dict(type='bool', default=None),
+ termination_protection=dict(type='bool', default=None),
+ state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
+ instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']),
+ exact_count=dict(type='int', default=None),
+ count_tag=dict(type='raw'),
+ volumes=dict(type='list'),
+ ebs_optimized=dict(type='bool', default=False),
+ tenancy=dict(default='default', choices=['default', 'dedicated']),
+ network_interfaces=dict(type='list', aliases=['network_interface'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ # Can be uncommented when we finish the deprecation cycle.
+ # ['group', 'group_id'],
+ ['exact_count', 'count'],
+ ['exact_count', 'state'],
+ ['exact_count', 'instance_ids'],
+ ['network_interfaces', 'assign_public_ip'],
+ ['network_interfaces', 'group'],
+ ['network_interfaces', 'group_id'],
+ ['network_interfaces', 'private_ip'],
+ ['network_interfaces', 'vpc_subnet_id'],
+ ],
+ )
+
+ if module.params.get('group') and module.params.get('group_id'):
+ module.deprecate(
+ msg='Support for passing both group and group_id has been deprecated. '
+ 'Currently group_id is ignored, in future passing both will result in an error',
+ version='2.14')
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+ if module.params.get('region') or not module.params.get('ec2_url'):
+ ec2 = ec2_connect(module)
+ elif module.params.get('ec2_url'):
+ ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
+
+ if 'region' not in aws_connect_kwargs:
+ aws_connect_kwargs['region'] = ec2.region
+
+ vpc = connect_vpc(**aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc())
+
+ tagged_instances = []
+
+ state = module.params['state']
+
+ if state == 'absent':
+ instance_ids = module.params['instance_ids']
+ if not instance_ids:
+ module.fail_json(msg='instance_ids list is required for absent state')
+
+ (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
+
+ elif state in ('running', 'stopped'):
+ instance_ids = module.params.get('instance_ids')
+ instance_tags = module.params.get('instance_tags')
+ if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+ module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
+
+ (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
+
+ elif state == 'restarted':
+ instance_ids = module.params.get('instance_ids')
+ instance_tags = module.params.get('instance_tags')
+ if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
+ module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
+
+ (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
+
+ elif state == 'present':
+ # Changed is always set to true when provisioning new instances
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+
+ if module.params.get('exact_count') is None:
+ (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
+ else:
+ (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
+
+ # Always return instances in the same order
+ if new_instance_ids:
+ new_instance_ids.sort()
+ if instance_dict_array:
+ instance_dict_array.sort(key=lambda x: x['id'])
+ if tagged_instances:
+ tagged_instances.sort(key=lambda x: x['id'])
+
+ module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_ami_info.py b/test/support/integration/plugins/modules/ec2_ami_info.py
new file mode 100644
index 0000000000..41e1aa83f9
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_ami_info.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_info
+version_added: '2.5'
+short_description: Gather information about ec2 AMIs
+description:
+ - Gather information about ec2 AMIs
+ - This module was called C(ec2_ami_facts) before Ansible 2.9. The usage did not change.
+author:
+ - Prasad Katti (@prasadkatti)
+requirements: [ boto3 ]
+options:
+ image_ids:
+ description: One or more image IDs.
+ aliases: [image_id]
+ type: list
+ elements: str
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters.
+ - Filter names and values are case sensitive.
+ type: dict
+ owners:
+ description:
+ - Filter the images by the owner. Valid options are an AWS account ID, self,
+ or an AWS owner alias ( amazon | aws-marketplace | microsoft ).
+ aliases: [owner]
+ type: list
+ elements: str
+ executable_users:
+ description:
+ - Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs).
+ aliases: [executable_user]
+ type: list
+ elements: str
+ describe_image_attributes:
+ description:
+ - Describe attributes (like launchPermission) of the images found.
+ default: no
+ type: bool
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: gather information about an AMI using ami-id
+ ec2_ami_info:
+ image_ids: ami-5b488823
+
+- name: gather information about all AMIs with tag key Name and value webapp
+ ec2_ami_info:
+ filters:
+ "tag:Name": webapp
+
+- name: gather information about an AMI with 'AMI Name' equal to foobar
+ ec2_ami_info:
+ filters:
+ name: foobar
+
+- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477)
+ ec2_ami_info:
+ owners: 099720109477
+ filters:
+ name: "ubuntu/images/ubuntu-zesty-17.04-*"
+'''
+
+RETURN = '''
+images:
+ description: A list of images.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ architecture:
+ description: The architecture of the image.
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ device_name:
+ description: The device name exposed to the instance.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ ebs:
+ description: EBS volumes
+ returned: always
+ type: complex
+ creation_date:
+ description: The date and time the image was created.
+ returned: always
+ type: str
+ sample: '2017-10-16T19:22:13.000Z'
+ description:
+ description: The description of the AMI.
+ returned: always
+ type: str
+ sample: ''
+ ena_support:
+ description: Whether enhanced networking with ENA is enabled.
+ returned: always
+ type: bool
+ sample: true
+ hypervisor:
+ description: The hypervisor type of the image.
+ returned: always
+ type: str
+ sample: xen
+ image_id:
+ description: The ID of the AMI.
+ returned: always
+ type: str
+ sample: ami-5b466623
+ image_location:
+ description: The location of the AMI.
+ returned: always
+ type: str
+ sample: 408466080000/Webapp
+ image_type:
+ description: The type of image.
+ returned: always
+ type: str
+ sample: machine
+ launch_permissions:
+ description: A list of AWS accounts that may launch the AMI.
+ returned: When the image is owned by the calling account and I(describe_image_attributes) is yes.
+ type: list
+ elements: dict
+ contains:
+ group:
+ description: A value of 'all' means the AMI is public.
+ type: str
+ user_id:
+ description: An AWS account ID with permissions to launch the AMI.
+ type: str
+ sample: [{"group": "all"}, {"user_id": "408466080000"}]
+ name:
+ description: The name of the AMI that was provided during image creation.
+ returned: always
+ type: str
+ sample: Webapp
+ owner_id:
+ description: The AWS account ID of the image owner.
+ returned: always
+ type: str
+ sample: '408466080000'
+ public:
+ description: Whether the image has public launch permissions.
+ returned: always
+ type: bool
+ sample: true
+ root_device_name:
+ description: The device name of the root device.
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ sriov_net_support:
+ description: Whether enhanced networking is enabled.
+ returned: always
+ type: str
+ sample: simple
+ state:
+ description: The current state of the AMI.
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: Any tags assigned to the image.
+ returned: always
+ type: dict
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+'''
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
+
+
+def list_ec2_images(ec2_client, module):
+
+ image_ids = module.params.get("image_ids")
+ owners = module.params.get("owners")
+ executable_users = module.params.get("executable_users")
+ filters = module.params.get("filters")
+ owner_param = []
+
+ # describe_images is *very* slow if you pass the `Owners`
+ # param (unless it's self), for some reason.
+ # Converting the owners to filters and removing from the
+ # owners param greatly speeds things up.
+ # Implementation based on aioue's suggestion in #24886
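+ # e.g. owners=['099720109477', 'self'] ends up as
+ # filters={'owner-id': ['099720109477']} and owner_param=['self']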
+ for owner in owners:
+ if owner.isdigit():
+ if 'owner-id' not in filters:
+ filters['owner-id'] = list()
+ filters['owner-id'].append(owner)
+ elif owner == 'self':
+ # 'self' is not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
+ owner_param.append(owner)
+ else:
+ if 'owner-alias' not in filters:
+ filters['owner-alias'] = list()
+ filters['owner-alias'].append(owner)
+
+ filters = ansible_dict_to_boto3_filter_list(filters)
+
+ try:
+ images = ec2_client.describe_images(ImageIds=image_ids, Filters=filters, Owners=owner_param, ExecutableUsers=executable_users)
+ images = [camel_dict_to_snake_dict(image) for image in images["Images"]]
+ except (ClientError, BotoCoreError) as err:
+ module.fail_json_aws(err, msg="error describing images")
+ for image in images:
+ try:
+ image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))
+ if module.params.get("describe_image_attributes"):
+ launch_permissions = ec2_client.describe_image_attribute(Attribute='launchPermission', ImageId=image['image_id'])['LaunchPermissions']
+ image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]
+ except (ClientError, BotoCoreError) as err:
+ # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures
+ pass
+
+ images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist
+ module.exit_json(images=images)
+
+
+def main():
+
+ argument_spec = dict(
+ image_ids=dict(default=[], type='list', aliases=['image_id']),
+ filters=dict(default={}, type='dict'),
+ owners=dict(default=[], type='list', aliases=['owner']),
+ executable_users=dict(default=[], type='list', aliases=['executable_user']),
+ describe_image_attributes=dict(default=False, type='bool')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+ if module._module._name == 'ec2_ami_facts':
+ module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'", version='2.13')
+
+ ec2_client = module.client('ec2')
+
+ list_ec2_images(ec2_client, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_eni.py b/test/support/integration/plugins/modules/ec2_eni.py
new file mode 100644
index 0000000000..8b6dbd1c32
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_eni.py
@@ -0,0 +1,633 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eni
+short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
+description:
+ - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is
+ provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status
+ of the network interface.
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+options:
+ eni_id:
+ description:
+ - The ID of the ENI (to modify).
+ - If I(eni_id=None) and I(state=present), a new eni will be created.
+ type: str
+ instance_id:
+ description:
+ - Instance ID that you wish to attach ENI to.
+ - Since version 2.2, use the I(attached) parameter to attach or detach an ENI. Prior to 2.2, to detach an ENI from an instance, use C(None).
+ type: str
+ private_ip_address:
+ description:
+ - Private IP address.
+ type: str
+ subnet_id:
+ description:
+ - ID of subnet in which to create the ENI.
+ type: str
+ description:
+ description:
+ - Optional description of the ENI.
+ type: str
+ security_groups:
+ description:
+ - List of security groups associated with the interface. Only used when I(state=present).
+ - Since version 2.2, you can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.
+ type: list
+ elements: str
+ state:
+ description:
+ - Create or delete ENI.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ device_index:
+ description:
+ - The index of the device for the network interface attachment on the instance.
+ default: 0
+ type: int
+ attached:
+ description:
+ - Specifies if network interface should be attached or detached from instance. If omitted, attachment status
+ won't change
+ version_added: 2.2
+ type: bool
+ force_detach:
+ description:
+ - Force detachment of the interface. This applies either when explicitly detaching the interface by setting I(instance_id=None)
+ or when deleting an interface with I(state=absent).
+ default: false
+ type: bool
+ delete_on_termination:
+ description:
+ - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the
+ interface is being modified, not on creation.
+ required: false
+ type: bool
+ source_dest_check:
+ description:
+ - By default, interfaces perform source/destination checks. NAT instances, however, need this check to be disabled.
+ You can only specify this flag when the interface is being modified, not on creation.
+ required: false
+ type: bool
+ secondary_private_ip_addresses:
+ description:
+ - A list of IP addresses to assign as secondary IP addresses to the network interface.
+ This option is mutually exclusive of I(secondary_private_ip_address_count)
+ required: false
+ version_added: 2.2
+ type: list
+ elements: str
+ purge_secondary_private_ip_addresses:
+ description:
+ - To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.
+ - Set I(secondary_private_ip_addresses=[]) to purge all secondary addresses.
+ default: false
+ type: bool
+ version_added: 2.5
+ secondary_private_ip_address_count:
+ description:
+ - The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive of I(secondary_private_ip_addresses)
+ required: false
+ version_added: 2.2
+ type: int
+ allow_reassignment:
+ description:
+ - Indicates whether to allow an IP address that is already assigned to another network interface or instance
+ to be reassigned to the specified network interface.
+ required: false
+ default: false
+ type: bool
+ version_added: 2.7
+extends_documentation_fragment:
+ - aws
+ - ec2
+notes:
+ - This module identifies an ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id),
+ or a combination of I(instance_id) and I(device_index). Any of these options will let you specify a particular ENI.
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an ENI. As no security group is defined, ENI will be created in default security group
+- ec2_eni:
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI and attach it to an instance
+- ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI with two secondary addresses
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ secondary_private_ip_address_count: 2
+
+# Assign a secondary IP address to an existing ENI
+# This will purge any existing IPs
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+ secondary_private_ip_addresses:
+ - 172.16.1.1
+
+# Remove any secondary IP addresses from an existing ENI
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+ secondary_private_ip_address_count: 0
+
+# Destroy an ENI, detaching it from any instance if necessary
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ force_detach: true
+ state: absent
+
+# Update an ENI
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ description: "My new description"
+ state: present
+
+# Update an ENI identifying it by private_ip_address and subnet_id
+- ec2_eni:
+ subnet_id: subnet-xxxxxxx
+ private_ip_address: 172.16.1.1
+ description: "My new description"
+
+# Detach an ENI from an instance
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ instance_id: None
+ state: present
+
+### Delete an interface on termination
+# First create the interface
+- ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ register: eni
+
+# Modify the interface to enable the delete_on_termination flag
+- ec2_eni:
+ eni_id: "{{ eni.interface.id }}"
+ delete_on_termination: true
+
+'''
+
+
+RETURN = '''
+interface:
+ description: Network interface attributes
+ returned: when state != absent
+ type: complex
+ contains:
+ description:
+ description: interface description
+ type: str
+ sample: Firewall network interface
+ groups:
+ description: list of security groups
+ type: list
+ elements: dict
+ sample: [ { "sg-f8a8a9da": "default" } ]
+ id:
+ description: network interface id
+ type: str
+ sample: "eni-1d889198"
+ mac_address:
+ description: interface's physical address
+ type: str
+ sample: "00:00:5E:00:53:23"
+ owner_id:
+ description: aws account id
+ type: str
+ sample: 812381371
+ private_ip_address:
+ description: primary ip address of this interface
+ type: str
+ sample: 10.20.30.40
+ private_ip_addresses:
+ description: list of all private ip addresses associated to this interface
+ type: list
+ elements: dict
+ sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
+ source_dest_check:
+ description: value of source/dest check flag
+ type: bool
+ sample: True
+ status:
+ description: network interface status
+ type: str
+ sample: "pending"
+ subnet_id:
+ description: which vpc subnet the interface is bound to
+ type: str
+ sample: subnet-b0a0393c
+ vpc_id:
+ description: which vpc this network interface is bound to
+ type: str
+ sample: vpc-9a9a9da
+
+'''
+
+import time
+import re
+
+try:
+ import boto.ec2
+ import boto.vpc
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
+ ec2_argument_spec, get_aws_connection_info,
+ get_ec2_security_group_ids_from_names)
+
+
+def get_eni_info(interface):
+
+ # Private addresses
+ private_addresses = []
+ for ip in interface.private_ip_addresses:
+ private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
+
+ interface_info = {'id': interface.id,
+ 'subnet_id': interface.subnet_id,
+ 'vpc_id': interface.vpc_id,
+ 'description': interface.description,
+ 'owner_id': interface.owner_id,
+ 'status': interface.status,
+ 'mac_address': interface.mac_address,
+ 'private_ip_address': interface.private_ip_address,
+ 'source_dest_check': interface.source_dest_check,
+ 'groups': dict((group.id, group.name) for group in interface.groups),
+ 'private_ip_addresses': private_addresses
+ }
+
+ if interface.attachment is not None:
+ interface_info['attachment'] = {'attachment_id': interface.attachment.id,
+ 'instance_id': interface.attachment.instance_id,
+ 'device_index': interface.attachment.device_index,
+ 'status': interface.attachment.status,
+ 'attach_time': interface.attachment.attach_time,
+ 'delete_on_termination': interface.attachment.delete_on_termination,
+ }
+
+ return interface_info
+
+
+def wait_for_eni(eni, status):
+
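+ # poll every few seconds until the attachment reaches the requested
+ # status; note there is no explicit timeout in this loop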
+ while True:
+ time.sleep(3)
+ eni.update()
+ # If the status is 'detached' we just need the attachment to disappear
+ if eni.attachment is None:
+ if status == "detached":
+ break
+ else:
+ if status == "attached" and eni.attachment.status == "attached":
+ break
+
+
+def create_eni(connection, vpc_id, module):
+
+ instance_id = module.params.get("instance_id")
+ attached = module.params.get("attached")
+ if instance_id == 'None':
+ instance_id = None
+ device_index = module.params.get("device_index")
+ subnet_id = module.params.get('subnet_id')
+ private_ip_address = module.params.get('private_ip_address')
+ description = module.params.get('description')
+ security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection, vpc_id=vpc_id, boto3=False)
+ secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
+ secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
+ changed = False
+
+ try:
+ eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
+ if attached and instance_id is not None:
+ try:
+ eni.attach(instance_id, device_index)
+ except BotoServerError:
+ eni.delete()
+ raise
+ # Wait to allow creation / attachment to finish
+ wait_for_eni(eni, "attached")
+ eni.update()
+
+ if secondary_private_ip_address_count is not None:
+ try:
+ connection.assign_private_ip_addresses(network_interface_id=eni.id, secondary_private_ip_address_count=secondary_private_ip_address_count)
+ except BotoServerError:
+ eni.delete()
+ raise
+
+ if secondary_private_ip_addresses is not None:
+ try:
+ connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses)
+ except BotoServerError:
+ eni.delete()
+ raise
+
+ changed = True
+
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
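+# Reconcile an existing ENI with the module parameters (description, security
+# groups, source/dest check, delete_on_termination, secondary IPs and
+# attachment state), reporting changed only when something was modified.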
+def modify_eni(connection, vpc_id, module, eni):
+
+ instance_id = module.params.get("instance_id")
+ attached = module.params.get("attached")
+ do_detach = module.params.get('state') == 'detached'
+ device_index = module.params.get("device_index")
+ description = module.params.get('description')
+ security_groups = module.params.get('security_groups')
+ force_detach = module.params.get("force_detach")
+ source_dest_check = module.params.get("source_dest_check")
+ delete_on_termination = module.params.get("delete_on_termination")
+ secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
+ purge_secondary_private_ip_addresses = module.params.get("purge_secondary_private_ip_addresses")
+ secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
+ allow_reassignment = module.params.get("allow_reassignment")
+ changed = False
+
+ try:
+ if description is not None:
+ if eni.description != description:
+ connection.modify_network_interface_attribute(eni.id, "description", description)
+ changed = True
+ if len(security_groups) > 0:
+ groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False)
+ if sorted(get_sec_group_list(eni.groups)) != sorted(groups):
+ connection.modify_network_interface_attribute(eni.id, "groupSet", groups)
+ changed = True
+ if source_dest_check is not None:
+ if eni.source_dest_check != source_dest_check:
+ connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
+ changed = True
+ if delete_on_termination is not None and eni.attachment is not None:
+ if eni.attachment.delete_on_termination is not delete_on_termination:
+ connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
+ changed = True
+
+ current_secondary_addresses = [i.private_ip_address for i in eni.private_ip_addresses if not i.primary]
+ if secondary_private_ip_addresses is not None:
+ secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))
+ if secondary_addresses_to_remove and purge_secondary_private_ip_addresses:
+ connection.unassign_private_ip_addresses(network_interface_id=eni.id,
+ private_ip_addresses=list(set(current_secondary_addresses) -
+ set(secondary_private_ip_addresses)),
+ dry_run=False)
+ changed = True
+
+ secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses))
+ if secondary_addresses_to_add:
+ connection.assign_private_ip_addresses(network_interface_id=eni.id,
+ private_ip_addresses=secondary_addresses_to_add,
+ secondary_private_ip_address_count=None,
+ allow_reassignment=allow_reassignment, dry_run=False)
+ changed = True
+ if secondary_private_ip_address_count is not None:
+ current_secondary_address_count = len(current_secondary_addresses)
+
+ if secondary_private_ip_address_count > current_secondary_address_count:
+ connection.assign_private_ip_addresses(network_interface_id=eni.id,
+ private_ip_addresses=None,
+ secondary_private_ip_address_count=(secondary_private_ip_address_count -
+ current_secondary_address_count),
+ allow_reassignment=allow_reassignment, dry_run=False)
+ changed = True
+ elif secondary_private_ip_address_count < current_secondary_address_count:
+ # How many of these addresses do we want to remove
+ secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
+ connection.unassign_private_ip_addresses(network_interface_id=eni.id,
+ private_ip_addresses=current_secondary_addresses[:secondary_addresses_to_remove_count],
+ dry_run=False)
+ changed = True
+
+ if attached is True:
+ if eni.attachment and eni.attachment.instance_id != instance_id:
+ detach_eni(eni, module)
+ eni.attach(instance_id, device_index)
+ wait_for_eni(eni, "attached")
+ changed = True
+ if eni.attachment is None:
+ eni.attach(instance_id, device_index)
+ wait_for_eni(eni, "attached")
+ changed = True
+ elif attached is False:
+ detach_eni(eni, module)
+
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ eni.update()
+ module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
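+# Delete the ENI, optionally force-detaching it first; a "does not exist"
+# error from AWS is treated as an already-deleted, unchanged result.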
+def delete_eni(connection, module):
+
+ eni_id = module.params.get("eni_id")
+ force_detach = module.params.get("force_detach")
+
+ try:
+ eni_result_set = connection.get_all_network_interfaces(eni_id)
+ eni = eni_result_set[0]
+
+ if force_detach is True:
+ if eni.attachment is not None:
+ eni.detach(force_detach)
+ # Wait to allow detachment to finish
+ wait_for_eni(eni, "detached")
+ eni.update()
+ eni.delete()
+ changed = True
+ else:
+ eni.delete()
+ changed = True
+
+ module.exit_json(changed=changed)
+ except BotoServerError as e:
+ regex = re.compile('The networkInterface ID \'.*\' does not exist')
+ if regex.search(e.message) is not None:
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(msg=e.message)
+
+
+def detach_eni(eni, module):
+
+ attached = module.params.get("attached")
+
+ force_detach = module.params.get("force_detach")
+ if eni.attachment is not None:
+ eni.detach(force_detach)
+ wait_for_eni(eni, "detached")
+ if attached:
+ return
+ eni.update()
+ module.exit_json(changed=True, interface=get_eni_info(eni))
+ else:
+ module.exit_json(changed=False, interface=get_eni_info(eni))
+
+
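+# Try to locate exactly one ENI from the module parameters (eni_id, or
+# private_ip_address + subnet_id, or attachment instance_id + device_index);
+# returns None unless a single interface matches.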
+def uniquely_find_eni(connection, module):
+
+ eni_id = module.params.get("eni_id")
+ private_ip_address = module.params.get('private_ip_address')
+ subnet_id = module.params.get('subnet_id')
+ instance_id = module.params.get('instance_id')
+ device_index = module.params.get('device_index')
+ attached = module.params.get('attached')
+
+ try:
+ filters = {}
+
+ # proceed only if the parameters unambiguously identify a single ENI
+ if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None):
+ return None
+
+ if private_ip_address and subnet_id:
+ filters['private-ip-address'] = private_ip_address
+ filters['subnet-id'] = subnet_id
+
+ if not attached and instance_id and device_index:
+ filters['attachment.instance-id'] = instance_id
+ filters['attachment.device-index'] = device_index
+
+ if eni_id is None and len(filters) == 0:
+ return None
+
+ eni_result = connection.get_all_network_interfaces(eni_id, filters=filters)
+ if len(eni_result) == 1:
+ return eni_result[0]
+ else:
+ return None
+
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+ return None
+
+
+def get_sec_group_list(groups):
+
+ # Build list of remote security groups
+ remote_security_groups = []
+ for group in groups:
+ remote_security_groups.append(group.id.encode())
+
+ return remote_security_groups
+
+
+def _get_vpc_id(connection, module, subnet_id):
+
+ try:
+ return connection.get_all_subnets(subnet_ids=[subnet_id])[0].vpc_id
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ eni_id=dict(default=None, type='str'),
+ instance_id=dict(default=None, type='str'),
+ private_ip_address=dict(type='str'),
+ subnet_id=dict(type='str'),
+ description=dict(type='str'),
+ security_groups=dict(default=[], type='list'),
+ device_index=dict(default=0, type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ force_detach=dict(default='no', type='bool'),
+ source_dest_check=dict(default=None, type='bool'),
+ delete_on_termination=dict(default=None, type='bool'),
+ secondary_private_ip_addresses=dict(default=None, type='list'),
+ purge_secondary_private_ip_addresses=dict(default=False, type='bool'),
+ secondary_private_ip_address_count=dict(default=None, type='int'),
+ allow_reassignment=dict(default=False, type='bool'),
+ attached=dict(default=None, type='bool')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
+ ],
+ required_if=([
+ ('state', 'absent', ['eni_id']),
+ ('attached', True, ['instance_id']),
+ ('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses'])
+ ])
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
+ vpc_connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ eni = uniquely_find_eni(connection, module)
+ if eni is None:
+ subnet_id = module.params.get("subnet_id")
+ if subnet_id is None:
+ module.fail_json(msg="subnet_id is required when creating a new ENI")
+
+ vpc_id = _get_vpc_id(vpc_connection, module, subnet_id)
+ create_eni(connection, vpc_id, module)
+ else:
+ vpc_id = eni.vpc_id
+ modify_eni(connection, vpc_id, module, eni)
+
+ elif state == 'absent':
+ delete_eni(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_eni_info.py b/test/support/integration/plugins/modules/ec2_eni_info.py
new file mode 100644
index 0000000000..99922a84d1
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_eni_info.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_eni_info
+short_description: Gather information about ec2 ENI interfaces in AWS
+description:
+ - Gather information about ec2 ENI interfaces in AWS.
+ - This module was called C(ec2_eni_facts) before Ansible 2.9. The usage did not change.
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+requirements: [ boto3 ]
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
+ type: dict
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all ENIs
+- ec2_eni_info:
+
+# Gather information about a particular ENI
+- ec2_eni_info:
+ filters:
+ network-interface-id: eni-xxxxxxx
+
+'''
+
+RETURN = '''
+network_interfaces:
+ description: List of matching elastic network interfaces
+ returned: always
+ type: complex
+ contains:
+ association:
+ description: Info of associated elastic IP (EIP)
+ returned: always, empty dict if no association exists
+ type: dict
+ sample: {
+ allocation_id: "eipalloc-5sdf123",
+ association_id: "eipassoc-8sdf123",
+ ip_owner_id: "4415120123456",
+ public_dns_name: "ec2-52-1-0-63.compute-1.amazonaws.com",
+ public_ip: "52.1.0.63"
+ }
+ attachment:
+ description: Info about attached ec2 instance
+ returned: always, empty dict if ENI is not attached
+ type: dict
+ sample: {
+ attach_time: "2017-08-05T15:25:47+00:00",
+ attachment_id: "eni-attach-149d21234",
+ delete_on_termination: false,
+ device_index: 1,
+ instance_id: "i-15b8d3cadbafa1234",
+ instance_owner_id: "4415120123456",
+ status: "attached"
+ }
+ availability_zone:
+ description: Availability zone of ENI
+ returned: always
+ type: str
+ sample: "us-east-1b"
+ description:
+ description: Description text for ENI
+ returned: always
+ type: str
+ sample: "My favourite network interface"
+ groups:
+ description: List of attached security groups
+ returned: always
+ type: list
+ sample: [
+ {
+ group_id: "sg-26d0f1234",
+ group_name: "my_ec2_security_group"
+ }
+ ]
+ id:
+ description: The id of the ENI (alias for network_interface_id)
+ returned: always
+ type: str
+ sample: "eni-392fsdf"
+ interface_type:
+ description: Type of the network interface
+ returned: always
+ type: str
+ sample: "interface"
+ ipv6_addresses:
+ description: List of IPv6 addresses for this interface
+ returned: always
+ type: list
+ sample: []
+ mac_address:
+ description: MAC address of the network interface
+ returned: always
+ type: str
+ sample: "0a:f8:10:2f:ab:a1"
+ network_interface_id:
+ description: The id of the ENI
+ returned: always
+ type: str
+ sample: "eni-392fsdf"
+ owner_id:
+ description: AWS account id of the owner of the ENI
+ returned: always
+ type: str
+ sample: "4415120123456"
+ private_dns_name:
+ description: Private DNS name for the ENI
+ returned: always
+ type: str
+ sample: "ip-172-16-1-180.ec2.internal"
+ private_ip_address:
+ description: Private IP address for the ENI
+ returned: always
+ type: str
+ sample: "172.16.1.180"
+ private_ip_addresses:
+ description: List of private IP addresses attached to the ENI
+ returned: always
+ type: list
+ sample: []
+ requester_id:
+ description: The ID of the entity that launched the ENI
+ returned: always
+ type: str
+ sample: "AIDAIONYVJQNIAZFT3ABC"
+ requester_managed:
+ description: Indicates whether the network interface is being managed by an AWS service.
+ returned: always
+ type: bool
+ sample: false
+ source_dest_check:
+ description: Indicates whether the network interface performs source/destination checking.
+ returned: always
+ type: bool
+ sample: false
+ status:
+ description: Indicates if the network interface is attached to an instance or not
+ returned: always
+ type: str
+ sample: "in-use"
+ subnet_id:
+ description: Subnet ID the ENI is in
+ returned: always
+ type: str
+ sample: "subnet-7bbf01234"
+ tag_set:
+ description: Dictionary of tags added to the ENI
+ returned: always
+ type: dict
+ sample: {}
+ vpc_id:
+ description: ID of the VPC the network interface is part of
+ returned: always
+ type: str
+ sample: "vpc-b3f1f123"
+'''
+
+try:
+ from botocore.exceptions import ClientError, NoCredentialsError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_conn
+from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
+from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
+
+
+def list_eni(connection, module):
+
+ if module.params.get("filters") is None:
+ filters = []
+ else:
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ try:
+ network_interfaces_result = connection.describe_network_interfaces(Filters=filters)['NetworkInterfaces']
+ except (ClientError, NoCredentialsError) as e:
+ module.fail_json(msg=e.message)
+
+ # Convert the boto3 tag list into an Ansible-friendly dict, then snake_case the keys
+ camel_network_interfaces = []
+ for network_interface in network_interfaces_result:
+ network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet'])
+ # Added id to interface info to be compatible with return values of ec2_eni module:
+ network_interface['Id'] = network_interface['NetworkInterfaceId']
+ camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface))
+
+ module.exit_json(network_interfaces=camel_network_interfaces)
+
+
+def get_eni_info(interface):
+
+ # Private addresses
+ private_addresses = []
+ for ip in interface.private_ip_addresses:
+ private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
+
+ interface_info = {'id': interface.id,
+ 'subnet_id': interface.subnet_id,
+ 'vpc_id': interface.vpc_id,
+ 'description': interface.description,
+ 'owner_id': interface.owner_id,
+ 'status': interface.status,
+ 'mac_address': interface.mac_address,
+ 'private_ip_address': interface.private_ip_address,
+ 'source_dest_check': interface.source_dest_check,
+ 'groups': dict((group.id, group.name) for group in interface.groups),
+ 'private_ip_addresses': private_addresses
+ }
+
+ if hasattr(interface, 'publicDnsName'):
+ interface_info['association'] = {'public_ip_address': interface.publicIp,
+ 'public_dns_name': interface.publicDnsName,
+ 'ip_owner_id': interface.ipOwnerId
+ }
+
+ if interface.attachment is not None:
+ interface_info['attachment'] = {'attachment_id': interface.attachment.id,
+ 'instance_id': interface.attachment.instance_id,
+ 'device_index': interface.attachment.device_index,
+ 'status': interface.attachment.status,
+ 'attach_time': interface.attachment.attach_time,
+ 'delete_on_termination': interface.attachment.delete_on_termination,
+ }
+
+ return interface_info
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ filters=dict(default=None, type='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+ if module._name == 'ec2_eni_facts':
+ module.deprecate("The 'ec2_eni_facts' module has been renamed to 'ec2_eni_info'", version='2.13')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+
+ connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+
+ list_eni(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_group.py b/test/support/integration/plugins/modules/ec2_group.py
new file mode 100644
index 0000000000..bc416f66b5
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_group.py
@@ -0,0 +1,1345 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = '''
+---
+module: ec2_group
+author: "Andrew de Quincey (@adq)"
+version_added: "1.3"
+requirements: [ boto3 ]
+short_description: maintain an ec2 VPC security group.
+description:
+ - Maintains ec2 security groups. This module has a dependency on boto3.
+options:
+ name:
+ description:
+ - Name of the security group.
+ - One of and only one of I(name) or I(group_id) is required.
+ - Required if I(state=present).
+ required: false
+ type: str
+ group_id:
+ description:
+ - Id of group to delete (works only with absent).
+ - One of and only one of I(name) or I(group_id) is required.
+ required: false
+ version_added: "2.4"
+ type: str
+ description:
+ description:
+ - Description of the security group. Required when C(state) is C(present).
+ required: false
+ type: str
+ vpc_id:
+ description:
+ - ID of the VPC to create the group in.
+ required: false
+ type: str
+ rules:
+ description:
+ - List of firewall inbound rules to enforce in this group (see example). If none are supplied,
+ no inbound rules will be enabled. The rules list may include the group's own name in `group_name`.
+ This allows idempotent loopback additions (e.g. allow group to access itself).
+ Rule sources list support was added in version 2.4. This allows defining multiple sources per
+ source type as well as multiple source types per rule. Prior to 2.4 only a single source per rule was allowed.
+ In version 2.5 support for rule descriptions was added.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ cidr_ipv6:
+ type: str
+ description:
+ - The IPv6 CIDR range traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ ip_prefix:
+ type: str
+ description:
+ - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
+ that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_id:
+ type: str
+ description:
+ - The ID of the Security Group that traffic is coming from.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_name:
+ type: str
+ description:
+ - Name of the Security Group that traffic is coming from.
+ - If the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_desc:
+ type: str
+ description:
+ - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ proto:
+ type: str
+ description:
+ - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
+ from_port:
+ type: int
+ description: The start of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
+ to_port:
+ type: int
+ description: The end of the range of ports that traffic is coming from. A value of C(-1) indicates all ports.
+ rule_desc:
+ type: str
+ description: A description for the rule.
+ rules_egress:
+ description:
+ - List of firewall outbound rules to enforce in this group (see example). If none are supplied,
+ a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
+ Rule Egress sources list support was added in version 2.4. In version 2.5 support for rule descriptions
+ was added.
+ required: false
+ version_added: "1.6"
+ type: list
+ elements: dict
+ suboptions:
+ cidr_ip:
+ type: str
+ description:
+ - The IPv4 CIDR range traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ cidr_ipv6:
+ type: str
+ description:
+ - The IPv6 CIDR range traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ ip_prefix:
+ type: str
+ description:
+ - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html)
+ that traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_id:
+ type: str
+ description:
+ - The ID of the Security Group that traffic is going to.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_name:
+ type: str
+ description:
+ - Name of the Security Group that traffic is going to.
+ - If the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id)
+ and I(group_name).
+ group_desc:
+ type: str
+ description:
+ - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be
+ created with I(group_desc) as the description.
+ proto:
+ type: str
+ description:
+ - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers))
+ from_port:
+ type: int
+ description: The start of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
+ to_port:
+ type: int
+ description: The end of the range of ports that traffic is going to. A value of C(-1) indicates all ports.
+ rule_desc:
+ type: str
+ description: A description for the rule.
+ state:
+ version_added: "1.4"
+ description:
+ - Create or delete a security group.
+ required: false
+ default: 'present'
+ choices: [ "present", "absent" ]
+ aliases: []
+ type: str
+ purge_rules:
+ version_added: "1.8"
+ description:
+ - Purge existing rules on security group that are not found in rules.
+ required: false
+ default: 'true'
+ aliases: []
+ type: bool
+ purge_rules_egress:
+ version_added: "1.8"
+ description:
+ - Purge existing rules_egress on security group that are not found in rules_egress.
+ required: false
+ default: 'true'
+ aliases: []
+ type: bool
+ tags:
+ version_added: "2.4"
+ description:
+ - A dictionary of one or more tags to assign to the security group.
+ required: false
+ type: dict
+ aliases: ['resource_tags']
+ purge_tags:
+ version_added: "2.4"
+ description:
+ - If yes, existing tags will be purged from the resource to match exactly what is defined by the I(tags) parameter. If the I(tags) parameter is not set then
+ tags will not be modified.
+ required: false
+ default: yes
+ type: bool
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+
+notes:
+ - If a rule declares a group_name and that group doesn't exist, it will be
+ automatically created. In that case, group_desc should be provided as well.
+ The module will refuse to create a depended-on group without a description.
+ - Preview diff mode support is added in version 2.7.
+'''
+
+EXAMPLES = '''
+- name: example using security group rule descriptions
+ ec2_group:
+ name: "{{ name }}"
+ description: sg with rule descriptions
+ vpc_id: vpc-xxxxxxxx
+ profile: "{{ aws_profile }}"
+ region: us-east-1
+ rules:
+ - proto: tcp
+ ports:
+ - 80
+ cidr_ip: 0.0.0.0/0
+ rule_desc: allow all on port 80
+
+- name: example ec2 group
+ ec2_group:
+ name: example
+ description: an example EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ aws_secret_key: SECRET
+ aws_access_key: ACCESS
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 10.0.0.0/8
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ # this should only be needed for EC2 Classic security group rules
+ # because in a VPC an ELB will use a user-account security group
+ group_id: amazon-elb/sg-87654321/amazon-elb-sg
+ - proto: tcp
+ from_port: 3306
+ to_port: 3306
+ group_id: 123412341234/sg-87654321/exact-name-of-sg
+ - proto: udp
+ from_port: 10050
+ to_port: 10050
+ cidr_ip: 10.0.0.0/8
+ - proto: udp
+ from_port: 10051
+ to_port: 10051
+ group_id: sg-12345678
+ - proto: icmp
+ from_port: 8 # icmp type, -1 = any type
+ to_port: -1 # icmp subtype, -1 = any subtype
+ cidr_ip: 10.0.0.0/8
+ - proto: all
+ # the containing group name may be specified here
+ group_name: example
+ - proto: all
+ # in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6),
+ # traffic on all ports is allowed, regardless of any ports you specify
+ from_port: 10050 # this value is ignored
+ to_port: 10050 # this value is ignored
+ cidr_ip: 10.0.0.0/8
+
+ rules_egress:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ cidr_ipv6: 64:ff9b::/96
+ group_name: example-other
+ # description to use if example-other needs to be created
+ group_desc: other example EC2 group
+
+- name: example2 ec2 group
+ ec2_group:
+ name: example2
+ description: an example2 EC2 group
+ vpc_id: 12345
+ region: eu-west-1
+ rules:
+ # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port).
+ - proto: tcp
+ ports: 22
+ group_name: example-vpn
+ - proto: tcp
+ ports:
+ - 80
+ - 443
+ - 8080-8099
+ cidr_ip: 0.0.0.0/0
+ # Rule sources list support was added in version 2.4. This allows defining multiple sources per source type as well as multiple source types per rule.
+ - proto: tcp
+ ports:
+ - 6379
+ - 26379
+ group_name:
+ - example-vpn
+ - example-redis
+ - proto: tcp
+ ports: 5665
+ group_name: example-vpn
+ cidr_ip:
+ - 172.16.1.0/24
+ - 172.16.17.0/24
+ cidr_ipv6:
+ - 2607:F8B0::/32
+ - 64:ff9b::/96
+ group_id:
+ - sg-edcd9784
+ diff: True
+
+- name: "Delete group by its id"
+ ec2_group:
+ region: eu-west-1
+ group_id: sg-33b4ee5b
+ state: absent
+'''
+
+RETURN = '''
+group_name:
+ description: Security group name
+ sample: My Security Group
+ type: str
+ returned: on create/update
+group_id:
+ description: Security group id
+ sample: sg-abcd1234
+ type: str
+ returned: on create/update
+description:
+ description: Description of security group
+ sample: My Security Group
+ type: str
+ returned: on create/update
+tags:
+ description: Tags associated with the security group
+ sample:
+ Name: My Security Group
+ Purpose: protecting stuff
+ type: dict
+ returned: on create/update
+vpc_id:
+ description: ID of VPC to which the security group belongs
+ sample: vpc-abcd1234
+ type: str
+ returned: on create/update
+ip_permissions:
+ description: Inbound rules associated with the security group.
+ sample:
+ - from_port: 8182
+ ip_protocol: tcp
+ ip_ranges:
+ - cidr_ip: "1.1.1.1/32"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ to_port: 8182
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+ip_permissions_egress:
+ description: Outbound rules associated with the security group.
+ sample:
+ - ip_protocol: -1
+ ip_ranges:
+ - cidr_ip: "0.0.0.0/0"
+ ipv6_ranges: []
+ prefix_list_ids: []
+ user_id_group_pairs: []
+ type: list
+ returned: on create/update
+owner_id:
+ description: AWS Account ID of the security group
+ sample: 123456789012
+ type: int
+ returned: on create/update
+'''
+
+import json
+import re
+import itertools
+from copy import deepcopy
+from time import sleep
+from collections import namedtuple
+from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
+from ansible.module_utils.aws.iam import get_aws_account_id
+from ansible.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
+from ansible.module_utils.common.network import to_ipv6_subnet, to_subnet
+from ansible.module_utils.compat.ipaddress import ip_network, IPv6Network
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import string_types
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description'])
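+# e.g. Rule(port_range=(80, 80), protocol='tcp', target='203.0.113.0/24',
+#          target_type='ipv4', description=None)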
+valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix'])
+current_account_id = None
+
+
+def rule_cmp(a, b):
+ """Compare rules without descriptions"""
+ for prop in ['port_range', 'protocol', 'target', 'target_type']:
+ if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol):
+ # equal protocols can interchange `(-1, -1)` and `(None, None)`
+ if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)):
+ continue
+ elif getattr(a, prop) != getattr(b, prop):
+ return False
+ elif getattr(a, prop) != getattr(b, prop):
+ return False
+ return True
+
+
+def rules_to_permissions(rules):
+ return [to_permission(rule) for rule in rules]
+
+
+def to_permission(rule):
+ # take a Rule, output the serialized grant
+ perm = {
+ 'IpProtocol': rule.protocol,
+ }
+ perm['FromPort'], perm['ToPort'] = rule.port_range
+ if rule.target_type == 'ipv4':
+ perm['IpRanges'] = [{
+ 'CidrIp': rule.target,
+ }]
+ if rule.description:
+ perm['IpRanges'][0]['Description'] = rule.description
+ elif rule.target_type == 'ipv6':
+ perm['Ipv6Ranges'] = [{
+ 'CidrIpv6': rule.target,
+ }]
+ if rule.description:
+ perm['Ipv6Ranges'][0]['Description'] = rule.description
+ elif rule.target_type == 'group':
+ if isinstance(rule.target, tuple):
+ pair = {}
+ if rule.target[0]:
+ pair['UserId'] = rule.target[0]
+ # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
+ if rule.target[1]:
+ pair['GroupId'] = rule.target[1]
+ elif rule.target[2]:
+ pair['GroupName'] = rule.target[2]
+ perm['UserIdGroupPairs'] = [pair]
+ else:
+ perm['UserIdGroupPairs'] = [{
+ 'GroupId': rule.target
+ }]
+ if rule.description:
+ perm['UserIdGroupPairs'][0]['Description'] = rule.description
+ elif rule.target_type == 'ip_prefix':
+ perm['PrefixListIds'] = [{
+ 'PrefixListId': rule.target,
+ }]
+ if rule.description:
+ perm['PrefixListIds'][0]['Description'] = rule.description
+ elif rule.target_type not in valid_targets:
+ raise ValueError('Invalid target type for rule {0}'.format(rule))
+ return fix_port_and_protocol(perm)
+
+
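+# Expand a single AWS IpPermission dict into one Rule per source
+# (IPv4 CIDR, IPv6 CIDR, prefix list id or security-group pair).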
+def rule_from_group_permission(perm):
+ def ports_from_permission(p):
+ if 'FromPort' not in p and 'ToPort' not in p:
+ return (None, None)
+ return (int(perm['FromPort']), int(perm['ToPort']))
+
+ # outputs a rule tuple
+ for target_key, target_subkey, target_type in [
+ ('IpRanges', 'CidrIp', 'ipv4'),
+ ('Ipv6Ranges', 'CidrIpv6', 'ipv6'),
+ ('PrefixListIds', 'PrefixListId', 'ip_prefix'),
+ ]:
+ if target_key not in perm:
+ continue
+ for r in perm[target_key]:
+ # there may be several IP ranges here, which is ok
+ yield Rule(
+ ports_from_permission(perm),
+ to_text(perm['IpProtocol']),
+ r[target_subkey],
+ target_type,
+ r.get('Description')
+ )
+ if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']:
+ for pair in perm['UserIdGroupPairs']:
+ target = (
+ pair.get('UserId', None),
+ pair.get('GroupId', None),
+ pair.get('GroupName', None),
+ )
+ if pair.get('UserId', '').startswith('amazon-'):
+ # amazon-elb and amazon-prefix rules don't need
+ # group-id specified, so remove it when querying
+ # from permission
+ target = (
+ target[0],
+ None,
+ target[2],
+ )
+ elif 'VpcPeeringConnectionId' in pair or pair['UserId'] != current_account_id:
+ target = (
+ pair.get('UserId', None),
+ pair.get('GroupId', None),
+ pair.get('GroupName', None),
+ )
+
+ yield Rule(
+ ports_from_permission(perm),
+ to_text(perm['IpProtocol']),
+ target,
+ 'group',
+ pair.get('Description')
+ )
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['InvalidGroup.NotFound'])
+def get_security_groups_with_backoff(connection, **kwargs):
+ return connection.describe_security_groups(**kwargs)
+
+
+@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
+def sg_exists_with_backoff(connection, **kwargs):
+ try:
+ return connection.describe_security_groups(**kwargs)
+ except is_boto3_error_code('InvalidGroup.NotFound'):
+ return {'SecurityGroups': []}
+
+
+def deduplicate_rules_args(rules):
+ """Returns unique rules"""
+ if rules is None:
+ return None
+ return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values())
+
+
+def validate_rule(module, rule):
+ VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', 'ip_prefix',
+ 'group_id', 'group_name', 'group_desc',
+ 'proto', 'from_port', 'to_port', 'rule_desc')
+ if not isinstance(rule, dict):
+ module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))
+ for k in rule:
+ if k not in VALID_PARAMS:
+ module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule))
+
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_id OR cidr_ip, not both')
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg='Specify group_name OR cidr_ip, not both')
+ elif 'group_id' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ipv6, not both")
+ elif 'group_name' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ipv6, not both")
+ elif 'cidr_ip' in rule and 'cidr_ipv6' in rule:
+ module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg='Specify group_id OR group_name, not both')
+
+
+def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
+ """
+ Returns tuple of (target_type, target, group_created) after validating rule params.
+
+ rule: Dict describing a rule.
+ name: Name of the security group being managed.
+ groups: Dict of all available security groups.
+
+ AWS accepts an ip range or a security group as target of a rule. This
+ function validates the rule specification and returns either a non-None
+ group_id or a non-None ip range.
+ """
+ FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
+ group_id = None
+ group_name = None
+ target_group_created = False
+
+ validate_rule(module, rule)
+ if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
+ # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
+ owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
+ group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
+ groups[group_id] = group_instance
+ groups[group_name] = group_instance
+ # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
+ if group_id and group_name:
+ group_name = None
+ return 'group', (owner_id, group_id, group_name), False
+ elif 'group_id' in rule:
+ return 'group', rule['group_id'], False
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name == name:
+ group_id = group['GroupId']
+ groups[group_id] = group
+ groups[group_name] = group
+ elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
+ # both are VPC groups, this is ok
+ group_id = groups[group_name]['GroupId']
+ elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
+ # both are EC2 classic, this is ok
+ group_id = groups[group_name]['GroupId']
+ else:
+ auto_group = None
+ filters = {'group-name': group_name}
+ if vpc_id:
+ filters['vpc-id'] = vpc_id
+ # if we got here, either the target group does not exist, or there
+ # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
+ # is bad, so we have to create a new SG because no compatible group
+ # exists
+ if not rule.get('group_desc', '').strip():
+ # retry describing the group once
+ try:
+ auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
+ except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError):
+ module.fail_json(msg="group %s will be automatically created by rule %s but "
+ "no description was provided" % (group_name, rule))
+ except ClientError as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e)
+ elif not module.check_mode:
+ params = dict(GroupName=group_name, Description=rule['group_desc'])
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ try:
+ auto_group = client.create_security_group(**params)
+ get_waiter(
+ client, 'security_group_exists',
+ ).wait(
+ GroupIds=[auto_group['GroupId']],
+ )
+ except is_boto3_error_code('InvalidGroup.Duplicate'):
+ # The group exists, but didn't show up in any of our describe-security-groups calls
+ # Try searching on a filter for the name, and allow a retry window for AWS to update
+ # the model on their end.
+ try:
+ auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
+ except IndexError as e:
+ module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
+ except ClientError as e:
+ module.fail_json_aws(
+ e,
+ msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
+ if auto_group is not None:
+ group_id = auto_group['GroupId']
+ groups[group_id] = auto_group
+ groups[group_name] = auto_group
+ target_group_created = True
+ return 'group', group_id, target_group_created
+ elif 'cidr_ip' in rule:
+ return 'ipv4', validate_ip(module, rule['cidr_ip']), False
+ elif 'cidr_ipv6' in rule:
+ return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False
+ elif 'ip_prefix' in rule:
+ return 'ip_prefix', rule['ip_prefix'], False
+
+ module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule)
+
+
+def ports_expand(ports):
+ # takes a list of ports and returns a list of (port_from, port_to)
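+ # e.g. ports_expand([22, '8080-8099']) -> [(22, 22), (8080, 8099)]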
+ ports_expanded = []
+ for port in ports:
+ if not isinstance(port, string_types):
+ ports_expanded.append((port,) * 2)
+ elif '-' in port:
+ ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1)))
+ else:
+ ports_expanded.append((int(port.strip()),) * 2)
+
+ return ports_expanded
+
+
+def rule_expand_ports(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
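+ # e.g. {'proto': 'tcp', 'ports': [22, '8080-8099'], 'cidr_ip': '10.0.0.0/8'}
+ # becomes two rules, one with from_port/to_port 22 and one with 8080/8099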
+ if 'ports' not in rule:
+ if isinstance(rule.get('from_port'), string_types):
+ rule['from_port'] = int(rule.get('from_port'))
+ if isinstance(rule.get('to_port'), string_types):
+ rule['to_port'] = int(rule.get('to_port'))
+ return [rule]
+
+ ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']]
+
+ rule_expanded = []
+ for from_to in ports_expand(ports):
+ temp_rule = rule.copy()
+ del temp_rule['ports']
+ temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to)
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rules_expand_ports(rules):
+ # takes a list of rules and expands it based on 'ports'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_ports(rule_complex)]
+
+
+def rule_expand_source(rule, source_type):
+ # takes a rule dict and returns a list of expanded rule dicts for specified source_type
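+ # e.g. with source_type 'cidr_ip', {'proto': 'tcp', 'cidr_ip': ['10.0.0.0/8', '172.16.0.0/12']}
+ # becomes one rule per CIDR, each carrying a single 'cidr_ip' value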
+ sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]]
+ source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix')
+
+ rule_expanded = []
+ for source in sources:
+ temp_rule = rule.copy()
+ for s in source_types_all:
+ temp_rule.pop(s, None)
+ temp_rule[source_type] = source
+ rule_expanded.append(temp_rule)
+
+ return rule_expanded
+
+
+def rule_expand_sources(rule):
+ # takes a rule dict and returns a list of expanded rule dicts
+ source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule)
+
+ return [r for stype in source_types
+ for r in rule_expand_source(rule, stype)]
+
+
+def rules_expand_sources(rules):
+ # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name'
+ if not rules:
+ return rules
+
+ return [rule for rule_complex in rules
+ for rule in rule_expand_sources(rule_complex)]
+
+
+def update_rules_description(module, client, rule_type, group_id, ip_permissions):
+ if module.check_mode:
+ return
+ try:
+ if rule_type == "in":
+ client.update_security_group_rule_descriptions_ingress(GroupId=group_id, IpPermissions=ip_permissions)
+ if rule_type == "out":
+ client.update_security_group_rule_descriptions_egress(GroupId=group_id, IpPermissions=ip_permissions)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id)
+
+
+def fix_port_and_protocol(permission):
+ for key in ('FromPort', 'ToPort'):
+ if key in permission:
+ if permission[key] is None:
+ del permission[key]
+ else:
+ permission[key] = int(permission[key])
+
+ permission['IpProtocol'] = to_text(permission['IpProtocol'])
+
+ return permission
+
+
+def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id):
+ if revoke_ingress:
+ revoke(client, module, revoke_ingress, group_id, 'in')
+ if revoke_egress:
+ revoke(client, module, revoke_egress, group_id, 'out')
+ return bool(revoke_ingress or revoke_egress)
+
+
+def revoke(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.revoke_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.revoke_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions))
+
+
+def add_new_permissions(client, module, new_ingress, new_egress, group_id):
+ if new_ingress:
+ authorize(client, module, new_ingress, group_id, 'in')
+ if new_egress:
+ authorize(client, module, new_egress, group_id, 'out')
+ return bool(new_ingress or new_egress)
+
+
+def authorize(client, module, ip_permissions, group_id, rule_type):
+ if not module.check_mode:
+ try:
+ if rule_type == 'in':
+ client.authorize_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions)
+ elif rule_type == 'out':
+ client.authorize_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions)
+ except (BotoCoreError, ClientError) as e:
+ rules = 'ingress rules' if rule_type == 'in' else 'egress rules'
+ module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions))
+
+
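+# Normalise a CIDR target and warn when host bits are set,
+# e.g. "10.0.1.5/24" is returned as "10.0.1.0/24".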
+def validate_ip(module, cidr_ip):
+ split_addr = cidr_ip.split('/')
+ if len(split_addr) == 2:
+ # cidr_ip is an IPv4 or IPv6 CIDR that may or may not have host bits set
+ # Get the network bits if IPv4, and validate if IPv6.
+ try:
+ ip = to_subnet(split_addr[0], split_addr[1])
+ if ip != cidr_ip:
+ module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(
+ cidr_ip, ip))
+ except ValueError:
+ # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here
+ try:
+ isinstance(ip_network(to_text(cidr_ip)), IPv6Network)
+ ip = cidr_ip
+ except ValueError:
+ # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError
+ # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits
+ ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1]
+ if ip6 != cidr_ip:
+ module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6))
+ return ip6
+ return ip
+ return cidr_ip
+
+
+def update_tags(client, module, group_id, current_tags, tags, purge_tags):
+ tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
+
+ if not module.check_mode:
+ if tags_to_delete:
+ try:
+ client.delete_tags(Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete))
+
+ # Add/update tags
+ if tags_need_modify:
+ try:
+ client.create_tags(Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json(e, msg="Unable to add tags {0}".format(tags_need_modify))
+
+ return bool(tags_need_modify or tags_to_delete)
+
+
+def update_rule_descriptions(module, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list):
+ changed = False
+ client = module.client('ec2')
+ ingress_needs_desc_update = []
+ egress_needs_desc_update = []
+
+ for present_rule in present_egress:
+ needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_egress_list.remove(r)
+ egress_needs_desc_update.extend(needs_update)
+ for present_rule in present_ingress:
+ needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description]
+ for r in needs_update:
+ named_tuple_ingress_list.remove(r)
+ ingress_needs_desc_update.extend(needs_update)
+
+ if ingress_needs_desc_update:
+ update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update))
+ changed |= True
+ if egress_needs_desc_update:
+ update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update))
+ changed |= True
+ return changed
+
+
+def create_security_group(client, module, name, description, vpc_id):
+ if not module.check_mode:
+ params = dict(GroupName=name, Description=description)
+ if vpc_id:
+ params['VpcId'] = vpc_id
+ try:
+ group = client.create_security_group(**params)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to create security group")
+ # When a group is created, an egress_rule ALLOW ALL
+ # to 0.0.0.0/0 is added automatically but it's not
+ # reflected in the object returned by the AWS API
+ # call. We re-read the group for getting an updated object
+ # amazon sometimes takes a couple seconds to update the security group so wait till it exists
+ while True:
+ sleep(3)
+ group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ if group.get('VpcId') and not group.get('IpPermissionsEgress'):
+ pass
+ else:
+ break
+ return group
+ return None
+
+
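+# Re-describe the group until the desired ingress/egress rules are visible
+# (or the retry budget runs out), since rule changes propagate asynchronously
+# on the AWS side.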
+def wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_ingress, purge_egress):
+ group_id = group['GroupId']
+ tries = 6
+
+ def await_rules(group, desired_rules, purge, rule_key):
+ for i in range(tries):
+ current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], []))
+ if purge and len(current_rules ^ set(desired_rules)) == 0:
+ return group
+ elif purge:
+ conflicts = current_rules ^ set(desired_rules)
+ # For cases where set comparison is equivalent, but invalid port/proto exist
+ for a, b in itertools.combinations(conflicts, 2):
+ if rule_cmp(a, b):
+ conflicts.discard(a)
+ conflicts.discard(b)
+ if not len(conflicts):
+ return group
+ elif current_rules.issuperset(desired_rules) and not purge:
+ return group
+ sleep(10)
+ group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
+ module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules))
+ return group
+
+ group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0]
+ if 'VpcId' in group and module.params.get('rules_egress') is not None:
+ group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress')
+ return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions')
+
+
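+# Look up the target group by id and/or name, optionally scoped to a VPC.
+# Returns (matching group or None, dict of all groups keyed by both GroupId
+# and GroupName).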
+def group_exists(client, module, vpc_id, group_id, name):
+ params = {'Filters': []}
+ if group_id:
+ params['GroupIds'] = [group_id]
+ if name:
+ # Add name to filters rather than params['GroupNames']
+ # because params['GroupNames'] only checks the default vpc if no vpc is provided
+ params['Filters'].append({'Name': 'group-name', 'Values': [name]})
+ if vpc_id:
+ params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]})
+ # Don't filter by description to maintain backwards compatibility
+
+ try:
+ security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', [])
+ all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', [])
+ except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
+ module.fail_json_aws(e, msg="Error in describe_security_groups")
+
+ if security_groups:
+ groups = dict((group['GroupId'], group) for group in all_groups)
+ groups.update(dict((group['GroupName'], group) for group in all_groups))
+ if vpc_id:
+ vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id)
+ groups.update(vpc_wins)
+ # maintain backwards compatibility by using the last matching group
+ return security_groups[-1], groups
+ return None, {}
+
+
+def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress):
+ if not hasattr(client, "update_security_group_rule_descriptions_egress"):
+ all_rules = (rules if rules else []) + (rules_egress if rules_egress else [])
+ if any('rule_desc' in rule for rule in all_rules):
+ module.fail_json(msg="Using rule descriptions requires botocore version >= 1.7.2.")
+
+
+def get_diff_final_resource(client, module, security_group):
+ def get_account_id(security_group, module):
+ try:
+ owner_id = security_group.get('owner_id', module.client('sts').get_caller_identity()['Account'])
+ except (BotoCoreError, ClientError) as e:
+ owner_id = "Unable to determine owner_id: {0}".format(to_text(e))
+ return owner_id
+
+ def get_final_tags(security_group_tags, specified_tags, purge_tags):
+ if specified_tags is None:
+ return security_group_tags
+ tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags)
+ end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete)
+ end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete))
+ end_result_tags.update(tags_need_modify)
+ return end_result_tags
+
+ def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules):
+ if specified_rules is None:
+ return security_group_rules
+ if purge_rules:
+ final_rules = []
+ else:
+ final_rules = list(security_group_rules)
+ specified_rules = flatten_nested_targets(module, deepcopy(specified_rules))
+ for rule in specified_rules:
+ format_rule = {
+ 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'),
+ 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': []
+ }
+ if rule.get('proto', 'tcp') in ('all', '-1', -1):
+ format_rule['ip_protocol'] = '-1'
+ format_rule.pop('from_port')
+ format_rule.pop('to_port')
+ elif rule.get('ports'):
+ if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)):
+ rule['ports'] = [rule['ports']]
+ for port in rule.get('ports'):
+ if isinstance(port, string_types) and '-' in port:
+ format_rule['from_port'], format_rule['to_port'] = port.split('-')
+ else:
+ format_rule['from_port'] = format_rule['to_port'] = port
+ elif rule.get('from_port') or rule.get('to_port'):
+ format_rule['from_port'] = rule.get('from_port', rule.get('to_port'))
+ format_rule['to_port'] = rule.get('to_port', rule.get('from_port'))
+ for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'):
+ if rule.get(source_type):
+ rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type)
+ if rule.get('rule_desc'):
+ format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}]
+ else:
+ if not isinstance(rule[source_type], list):
+ rule[source_type] = [rule[source_type]]
+ format_rule[rule_key] = [{source_type: target} for target in rule[source_type]]
+ if rule.get('group_id') or rule.get('group_name'):
+ rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0])
+ format_rule['user_id_group_pairs'] = [{
+ 'description': rule_sg.get('description', rule_sg.get('group_desc')),
+ 'group_id': rule_sg.get('group_id', rule.get('group_id')),
+ 'group_name': rule_sg.get('group_name', rule.get('group_name')),
+ 'peering_status': rule_sg.get('peering_status'),
+ 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)),
+ 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']),
+ 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id')
+ }]
+ for k, v in list(format_rule['user_id_group_pairs'][0].items()):
+ if v is None:
+ format_rule['user_id_group_pairs'][0].pop(k)
+ final_rules.append(format_rule)
+ # Order final rules consistently
+ final_rules.sort(key=get_ip_permissions_sort_key)
+ return final_rules
+ security_group_ingress = security_group.get('ip_permissions', [])
+ specified_ingress = module.params['rules']
+ purge_ingress = module.params['purge_rules']
+ security_group_egress = security_group.get('ip_permissions_egress', [])
+ specified_egress = module.params['rules_egress']
+ purge_egress = module.params['purge_rules_egress']
+ return {
+ 'description': module.params['description'],
+ 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'),
+ 'group_name': security_group.get('group_name', module.params['name']),
+ 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress),
+ 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress),
+ 'owner_id': get_account_id(security_group, module),
+ 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']),
+ 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])}
+
+
+def flatten_nested_targets(module, rules):
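+ """Flatten nested lists in each rule's cidr_ip / cidr_ipv6 targets into a flat list of strings."""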
+ def _flatten(targets):
+ for target in targets:
+ if isinstance(target, list):
+ for t in _flatten(target):
+ yield t
+ elif isinstance(target, string_types):
+ yield target
+
+ if rules is not None:
+ for rule in rules:
+ target_list_type = None
+ if isinstance(rule.get('cidr_ip'), list):
+ target_list_type = 'cidr_ip'
+ elif isinstance(rule.get('cidr_ipv6'), list):
+ target_list_type = 'cidr_ipv6'
+ if target_list_type is not None:
+ rule[target_list_type] = list(_flatten(rule[target_list_type]))
+ return rules
+
+
+def get_rule_sort_key(dicts):
+ if dicts.get('cidr_ip'):
+ return dicts.get('cidr_ip')
+ elif dicts.get('cidr_ipv6'):
+ return dicts.get('cidr_ipv6')
+ elif dicts.get('prefix_list_id'):
+ return dicts.get('prefix_list_id')
+ elif dicts.get('group_id'):
+ return dicts.get('group_id')
+ return None
+
+
+def get_ip_permissions_sort_key(rule):
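+ """Sort the rule's target list in place and return its first identifier so ip_permissions entries order consistently."""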
+ if rule.get('ip_ranges'):
+ rule.get('ip_ranges').sort(key=get_rule_sort_key)
+ return rule.get('ip_ranges')[0]['cidr_ip']
+ elif rule.get('ipv6_ranges'):
+ rule.get('ipv6_ranges').sort(key=get_rule_sort_key)
+ return rule.get('ipv6_ranges')[0]['cidr_ipv6']
+ elif rule.get('prefix_list_ids'):
+ rule.get('prefix_list_ids').sort(key=get_rule_sort_key)
+ return rule.get('prefix_list_ids')[0]['prefix_list_id']
+ elif rule.get('user_id_group_pairs'):
+ rule.get('user_id_group_pairs').sort(key=get_rule_sort_key)
+ return rule.get('user_id_group_pairs')[0]['group_id']
+ return None
+
+
+def main():
+ argument_spec = dict(
+ name=dict(),
+ group_id=dict(),
+ description=dict(),
+ vpc_id=dict(),
+ rules=dict(type='list'),
+ rules_egress=dict(type='list'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ purge_rules=dict(default=True, required=False, type='bool'),
+ purge_rules_egress=dict(default=True, required=False, type='bool'),
+ tags=dict(required=False, type='dict', aliases=['resource_tags']),
+ purge_tags=dict(default=True, required=False, type='bool')
+ )
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[['name', 'group_id']],
+ required_if=[['state', 'present', ['name']]],
+ )
+
+ name = module.params['name']
+ group_id = module.params['group_id']
+ description = module.params['description']
+ vpc_id = module.params['vpc_id']
+ rules = flatten_nested_targets(module, deepcopy(module.params['rules']))
+ rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress']))
+ rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules)))
+ rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress)))
+ state = module.params.get('state')
+ purge_rules = module.params['purge_rules']
+ purge_rules_egress = module.params['purge_rules_egress']
+ tags = module.params['tags']
+ purge_tags = module.params['purge_tags']
+
+ if state == 'present' and not description:
+ module.fail_json(msg='Must provide description when state is present.')
+
+ changed = False
+ client = module.client('ec2')
+
+ verify_rules_with_descriptions_permitted(client, module, rules, rules_egress)
+ group, groups = group_exists(client, module, vpc_id, group_id, name)
+ group_created_new = not bool(group)
+
+ global current_account_id
+ current_account_id = get_aws_account_id(module)
+
+ before = {}
+ after = {}
+
+ # Ensure requested group is absent
+ if state == 'absent':
+ if group:
+ # found a match, delete it
+ before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
+ before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
+ try:
+ if not module.check_mode:
+ client.delete_security_group(GroupId=group['GroupId'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group)
+ else:
+ group = None
+ changed = True
+ else:
+ # no match found, no changes required
+ pass
+
+ # Ensure requested group is present
+ elif state == 'present':
+ if group:
+ # existing group
+ before = camel_dict_to_snake_dict(group, ignore_list=['Tags'])
+ before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', []))
+ if group['Description'] != description:
+ module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting "
+ "and re-creating the security group. Try using state=absent to delete, then rerunning this task.")
+ else:
+ # no match found, create it
+ group = create_security_group(client, module, name, description, vpc_id)
+ changed = True
+
+ if tags is not None and group is not None:
+ current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
+ changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags)
+
+ if group:
+ named_tuple_ingress_list = []
+ named_tuple_egress_list = []
+ current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], [])
+ current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], [])
+
+ for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list),
+ (rules_egress, 'out', named_tuple_egress_list)]:
+ if new_rules is None:
+ continue
+ for rule in new_rules:
+ target_type, target, target_group_created = get_target_from_rule(
+ module, client, rule, name, group, groups, vpc_id)
+ changed |= target_group_created
+
+ if rule.get('proto', 'tcp') in ('all', '-1', -1):
+ rule['proto'] = '-1'
+ rule['from_port'] = None
+ rule['to_port'] = None
+ try:
+ int(rule.get('proto', 'tcp'))
+ rule['proto'] = to_text(rule.get('proto', 'tcp'))
+ rule['from_port'] = None
+ rule['to_port'] = None
+ except ValueError:
+ # rule does not use numeric protocol spec
+ pass
+
+ named_tuple_rule_list.append(
+ Rule(
+ port_range=(rule['from_port'], rule['to_port']),
+ protocol=to_text(rule.get('proto', 'tcp')),
+ target=target, target_type=target_type,
+ description=rule.get('rule_desc'),
+ )
+ )
+
+ # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
+ new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))]
+ new_egress_permissions = [to_permission(r) for r in (set(named_tuple_egress_list) - set(current_egress))]
+
+ if module.params.get('rules_egress') is None and 'VpcId' in group:
+ # when no egress rules are specified and we're in a VPC,
+ # we add in a default allow all out rule, which was the
+ # default behavior before egress rules were added
+ rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ if rule in current_egress:
+ named_tuple_egress_list.append(rule)
+ if rule not in current_egress:
+ current_egress.append(rule)
+
+ # List comprehensions for rules to add, rules to modify, and rule ids to determine purging
+ present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress)))
+ present_egress = list(set(named_tuple_egress_list).union(set(current_egress)))
+
+ if purge_rules:
+ revoke_ingress = []
+ for p in present_ingress:
+ if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]):
+ revoke_ingress.append(to_permission(p))
+ else:
+ revoke_ingress = []
+ if purge_rules_egress and module.params.get('rules_egress') is not None:
+ if module.params.get('rules_egress') == []:
+ revoke_egress = [
+ to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list)
+ if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None)
+ ]
+ else:
+ revoke_egress = []
+ for p in present_egress:
+ if not any([rule_cmp(p, b) for b in named_tuple_egress_list]):
+ revoke_egress.append(to_permission(p))
+ else:
+ revoke_egress = []
+
+ # named_tuple_ingress_list and named_tuple_egress_list got updated by
+ # method update_rule_descriptions, deep copy these two lists to new
+ # variables for the record of the 'desired' ingress and egress sg permissions
+ desired_ingress = deepcopy(named_tuple_ingress_list)
+ desired_egress = deepcopy(named_tuple_egress_list)
+
+ changed |= update_rule_descriptions(module, group['GroupId'], present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list)
+
+ # Revoke old rules
+ changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId'])
+ rule_msg = 'Revoking {0}, and egress {1}'.format(revoke_ingress, revoke_egress)
+
+ new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress))
+ new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress))
+ # Authorize new rules
+ changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId'])
+
+ if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None:
+ # A new group with no rules provided is already being awaited.
+ # When it is created we wait for the default egress rule to be added by AWS
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ elif changed and not module.check_mode:
+ # keep pulling until current security group rules match the desired ingress and egress rules
+ security_group = wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress)
+ else:
+ security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
+ security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags'])
+ security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []))
+
+ else:
+ security_group = {'group_id': None}
+
+ if module._diff:
+ if module.params['state'] == 'present':
+ after = get_diff_final_resource(client, module, security_group)
+ if before.get('ip_permissions'):
+ before['ip_permissions'].sort(key=get_ip_permissions_sort_key)
+
+ security_group['diff'] = [{'before': before, 'after': after}]
+
+ module.exit_json(changed=changed, **security_group)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_instance.py b/test/support/integration/plugins/modules/ec2_instance.py
new file mode 100644
index 0000000000..7a587fb941
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_instance.py
@@ -0,0 +1,1805 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ec2_instance
+short_description: Create & manage EC2 instances
+description:
+ - Create and manage AWS EC2 instances.
+ - >
+ Note: This module does not support creating
+ L(EC2 Spot instances,https://aws.amazon.com/ec2/spot/). The M(ec2) module
+ can create and manage spot instances.
+version_added: "2.5"
+author:
+ - Ryan Scott Brown (@ryansb)
+requirements: [ "boto3", "botocore" ]
+options:
+ instance_ids:
+ description:
+ - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
+ type: list
+ state:
+ description:
+ - Goal state for the instances.
+ choices: [present, terminated, running, started, stopped, restarted, rebooted, absent]
+ default: present
+ type: str
+ wait:
+ description:
+ - Whether or not to wait for the desired state (use wait_timeout to customize this).
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - How long to wait (in seconds) for the instance to finish booting/terminating.
+ default: 600
+ type: int
+ instance_type:
+ description:
+ - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
+ Only required when the instance is not already present.
+ default: t2.micro
+ type: str
+ user_data:
+ description:
+ - Opaque blob of data which is made available to the ec2 instance
+ type: str
+ tower_callback:
+ description:
+ - Preconfigured user-data to enable an instance to perform a Tower callback (Linux only).
+ - Mutually exclusive with I(user_data).
+ - For Windows instances, to enable remote access via Ansible set I(tower_callback.windows) to true, and optionally set an admin password.
+ - If using 'windows' and 'set_password', callback to Tower will not be performed but the instance will be ready to receive winrm connections from Ansible.
+ type: dict
+ suboptions:
+ tower_address:
+ description:
+ - IP address or DNS name of Tower server. Must be accessible via this address from the VPC that this instance will be launched in.
+ type: str
+ job_template_id:
+ description:
+ - Either the integer ID of the Tower Job Template, or the name (name supported only for Tower 3.2+).
+ type: str
+ host_config_key:
+ description:
+ - Host configuration secret key generated by the Tower job template.
+ type: str
+ tags:
+ description:
+ - A hash/dictionary of tags to add to the new instance or to add/remove from an existing one.
+ type: dict
+ purge_tags:
+ description:
+ - Delete any tags not specified in the task that are on the instance.
+ This means you have to specify all the desired tags on each task affecting an instance.
+ default: false
+ type: bool
+ image:
+ description:
+ - An image to use for the instance. The M(ec2_ami_info) module may be used to retrieve images.
+ One of I(image) or I(image_id) is required when the instance is not already present.
+ type: dict
+ suboptions:
+ id:
+ description:
+ - The AMI ID.
+ type: str
+ ramdisk:
+ description:
+ - Overrides the AMI's default ramdisk ID.
+ type: str
+ kernel:
+ description:
+ - a string AKI to override the AMI kernel.
+ image_id:
+ description:
+ - I(ami) ID to use for the instance. One of I(image) or I(image_id) is required when the instance is not already present.
+ - This is an alias for I(image.id).
+ type: str
+ security_groups:
+ description:
+ - A list of security group IDs or names (strings). Mutually exclusive with I(security_group).
+ type: list
+ security_group:
+ description:
+ - A security group ID or name. Mutually exclusive with I(security_groups).
+ type: str
+ name:
+ description:
+ - The Name tag for the instance.
+ type: str
+ vpc_subnet_id:
+ description:
+ - The subnet ID in which to launch the instance (VPC).
+ If none is provided, ec2_instance will choose the default zone of the default VPC.
+ aliases: ['subnet_id']
+ type: str
+ network:
+ description:
+ - Either a dictionary containing the key 'interfaces' corresponding to a list of network interface IDs or
+ containing specifications for a single network interface.
+ - Use the ec2_eni module to create ENIs with special settings.
+ type: dict
+ suboptions:
+ interfaces:
+ description:
+ - a list of ENI IDs (strings) or a list of objects containing the key I(id).
+ type: list
+ assign_public_ip:
+ description:
+ - when true assigns a public IP address to the interface
+ type: bool
+ private_ip_address:
+ description:
+ - an IPv4 address to assign to the interface
+ type: str
+ ipv6_addresses:
+ description:
+ - a list of IPv6 addresses to assign to the network interface
+ type: list
+ source_dest_check:
+ description:
+ - controls whether source/destination checking is enabled on the interface
+ type: bool
+ description:
+ description:
+ - a description for the network interface
+ type: str
+ private_ip_addresses:
+ description:
+ - a list of IPv4 addresses to assign to the network interface
+ type: list
+ subnet_id:
+ description:
+ - the subnet to connect the network interface to
+ type: str
+ delete_on_termination:
+ description:
+ - Delete the interface when the instance it is attached to is
+ terminated.
+ type: bool
+ device_index:
+ description:
+ - The index of the interface to modify
+ type: int
+ groups:
+ description:
+ - a list of security group IDs to attach to the interface
+ type: list
+ volumes:
+ description:
+ - A list of block device mappings. By default this will always use the AMI root device, so the volumes option is primarily for adding more storage.
+ - A mapping contains the (optional) keys device_name, virtual_name, ebs.volume_type, ebs.volume_size, ebs.kms_key_id,
+ ebs.iops, and ebs.delete_on_termination.
+ - For more information about each parameter, see U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html).
+ type: list
+ launch_template:
+ description:
+ - The EC2 launch template to base instance configuration on.
+ type: dict
+ suboptions:
+ id:
+ description:
+ - the ID of the launch template (optional if name is specified).
+ type: str
+ name:
+ description:
+ - the pretty name of the launch template (optional if id is specified).
+ type: str
+ version:
+ description:
+ - the specific version of the launch template to use. If unspecified, the template default is chosen.
+ key_name:
+ description:
+ - Name of the SSH access key to assign to the instance - must exist in the region the instance is created.
+ type: str
+ availability_zone:
+ description:
+ - Specify an availability zone to use the default subnet in it. Useful if not specifying the I(vpc_subnet_id) parameter.
+ - If no subnet, ENI, or availability zone is provided, the default subnet in the default VPC will be used in the first AZ (alphabetically sorted).
+ type: str
+ instance_initiated_shutdown_behavior:
+ description:
+ - Whether to stop or terminate an instance upon shutdown.
+ choices: ['stop', 'terminate']
+ type: str
+ tenancy:
+ description:
+ - What type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
+ choices: ['dedicated', 'default']
+ type: str
+ termination_protection:
+ description:
+ - Whether to enable termination protection.
+ This module will not terminate an instance with termination protection active, it must be turned off first.
+ type: bool
+ cpu_credit_specification:
+ description:
+ - For T series instances, choose whether to allow increased charges to buy CPU credits if the default pool is depleted.
+ - Choose I(unlimited) to enable buying additional CPU credits.
+ choices: ['unlimited', 'standard']
+ type: str
+ cpu_options:
+ description:
+ - Reduce the number of vCPU exposed to the instance.
+ - Those parameters can only be set at instance launch. The two suboptions threads_per_core and core_count are mandatory.
+ - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for combinations available.
+ - Requires botocore >= 1.10.16
+ version_added: 2.7
+ type: dict
+ suboptions:
+ threads_per_core:
+ description:
+ - Select the number of threads per core to enable. Disable or Enable Intel HT.
+ choices: [1, 2]
+ required: true
+ type: int
+ core_count:
+ description:
+ - Set the number of cores to enable.
+ required: true
+ type: int
+ detailed_monitoring:
+ description:
+ - Whether to allow detailed cloudwatch metrics to be collected, enabling more detailed alerting.
+ type: bool
+ ebs_optimized:
+ description:
+ - Whether the instance should use optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
+ type: bool
+ filters:
+ description:
+ - A dict of filters to apply when deciding whether existing instances match and should be altered. Each dict item
+ consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
+ for possible filters. Filter names and values are case sensitive.
+ - By default, instances are filtered for counting by their "Name" tag, base AMI, state (running, by default), and
+ subnet ID. Any queryable filter can be used. Good candidates are specific tags, SSH keys, or security groups.
+ type: dict
+ instance_role:
+ description:
+ - The ARN or name of an EC2-enabled instance role to be used. If a name is not provided in arn format
+ then the ListInstanceProfiles permission must also be granted.
+ U(https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListInstanceProfiles.html) If no full ARN is provided,
+ the role with a matching name will be used from the active AWS account.
+ type: str
+ placement_group:
+ description:
+ - The placement group that needs to be assigned to the instance
+ version_added: 2.8
+ type: str
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Terminate every running instance in a region. Use with EXTREME caution.
+- ec2_instance:
+ state: absent
+ filters:
+ instance-state-name: running
+
+# restart a particular instance by its ID
+- ec2_instance:
+ state: restarted
+ instance_ids:
+ - i-12345678
+
+# start an instance with a public IP address
+- ec2_instance:
+ name: "public-compute-instance"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ instance_type: c5.large
+ security_group: default
+ network:
+ assign_public_ip: true
+ image_id: ami-123456
+ tags:
+ Environment: Testing
+
+# start an instance and Add EBS
+- ec2_instance:
+ name: "public-withebs-instance"
+ vpc_subnet_id: subnet-5ca1ab1e
+ instance_type: t2.micro
+ key_name: "prod-ssh-key"
+ security_group: default
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ volume_size: 16
+ delete_on_termination: true
+
+# start an instance with a cpu_options
+- ec2_instance:
+ name: "public-cpuoption-instance"
+ vpc_subnet_id: subnet-5ca1ab1e
+ tags:
+ Environment: Testing
+ instance_type: c4.large
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ cpu_options:
+ core_count: 1
+ threads_per_core: 1
+
+# start an instance and have it begin a Tower callback on boot
+- ec2_instance:
+ name: "tower-callback-test"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ security_group: default
+ tower_callback:
+ # IP or hostname of tower server
+ tower_address: 1.2.3.4
+ job_template_id: 876
+ host_config_key: '[secret config key goes here]'
+ network:
+ assign_public_ip: true
+ image_id: ami-123456
+ cpu_credit_specification: unlimited
+ tags:
+ SomeThing: "A value"
+
+# start an instance with ENI (An existing ENI ID is required)
+- ec2_instance:
+ name: "public-eni-instance"
+ key_name: "prod-ssh-key"
+ vpc_subnet_id: subnet-5ca1ab1e
+ network:
+ interfaces:
+ - id: "eni-12345"
+ tags:
+ Env: "eni_on"
+ volumes:
+ - device_name: /dev/sda1
+ ebs:
+ delete_on_termination: true
+ instance_type: t2.micro
+ image_id: ami-123456
+
+# add second ENI interface
+- ec2_instance:
+ name: "public-eni-instance"
+ network:
+ interfaces:
+ - id: "eni-12345"
+ - id: "eni-67890"
+ image_id: ami-123456
+ tags:
+ Env: "eni_on"
+ instance_type: t2.micro
+'''
+
+RETURN = '''
+instances:
+ description: a list of ec2 instances
+ returned: when wait == true
+ type: complex
+ contains:
+ ami_launch_index:
+ description: The AMI launch index, which can be used to find this instance in the launch group.
+ returned: always
+ type: int
+ sample: 0
+ architecture:
+ description: The architecture of the image
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ returned: always
+ type: str
+ sample: /dev/sdh
+ ebs:
+ description: Parameters used to automatically set up EBS volumes when the instance is launched.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ volume_id:
+ description: The ID of the EBS volume
+ returned: always
+ type: str
+ sample: vol-12345678
+ client_token:
+ description: The idempotency token you provided when you launched the instance, if applicable.
+ returned: always
+ type: str
+ sample: mytoken
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ hypervisor:
+ description: The hypervisor type of the instance.
+ returned: always
+ type: str
+ sample: xen
+ iam_instance_profile:
+ description: The IAM instance profile associated with the instance, if applicable.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The Amazon Resource Name (ARN) of the instance profile.
+ returned: always
+ type: str
+ sample: "arn:aws:iam::000012345678:instance-profile/myprofile"
+ id:
+ description: The ID of the instance profile
+ returned: always
+ type: str
+ sample: JFJ397FDG400FG9FD1N
+ image_id:
+ description: The ID of the AMI used to launch the instance.
+ returned: always
+ type: str
+ sample: ami-0011223344
+ instance_id:
+ description: The ID of the instance.
+ returned: always
+ type: str
+ sample: i-012345678
+ instance_type:
+ description: The instance type size of the running instance.
+ returned: always
+ type: str
+ sample: t2.micro
+ key_name:
+ description: The name of the key pair, if this instance was launched with an associated key pair.
+ returned: always
+ type: str
+ sample: my-key
+ launch_time:
+ description: The time the instance was launched.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ monitoring:
+ description: The monitoring for the instance.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+ returned: always
+ type: str
+ sample: disabled
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ association:
+ description: The association information for an Elastic IPv4 associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ attachment:
+ description: The network interface attachment.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ attachment_id:
+ description: The ID of the network interface attachment.
+ returned: always
+ type: str
+ sample: eni-attach-3aff3f
+ delete_on_termination:
+ description: Indicates whether the network interface is deleted when the instance is terminated.
+ returned: always
+ type: bool
+ sample: true
+ device_index:
+ description: The index of the device on the instance for the network interface attachment.
+ returned: always
+ type: int
+ sample: 0
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ description:
+ description: The description.
+ returned: always
+ type: str
+ sample: My interface
+ groups:
+ description: One or more security groups.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-abcdef12
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: mygroup
+ ipv6_addresses:
+ description: One or more IPv6 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ipv6_address:
+ description: The IPv6 address.
+ returned: always
+ type: str
+ sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ owner_id:
+ description: The AWS account ID of the owner of the network interface.
+ returned: always
+ type: str
+ sample: 01234567890
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ private_ip_addresses:
+ description: The private IPv4 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ association:
+ description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ primary:
+ description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
+ returned: always
+ type: bool
+ sample: true
+ private_ip_address:
+ description: The private IPv4 address of the network interface.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The status of the network interface.
+ returned: always
+ type: str
+ sample: in-use
+ subnet_id:
+ description: The ID of the subnet for the network interface.
+ returned: always
+ type: str
+ sample: subnet-0123456
+ vpc_id:
+ description: The ID of the VPC for the network interface.
+ returned: always
+ type: str
+ sample: vpc-0123456
+ placement:
+ description: The location where the instance launched, if applicable.
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The Availability Zone of the instance.
+ returned: always
+ type: str
+ sample: ap-southeast-2a
+ group_name:
+ description: The name of the placement group the instance is in (for cluster compute instances).
+ returned: always
+ type: str
+ sample: ""
+ tenancy:
+ description: The tenancy of the instance (if the instance is running in a VPC).
+ returned: always
+ type: str
+ sample: default
+ private_dns_name:
+ description: The private DNS name.
+ returned: always
+ type: str
+ sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ product_codes:
+ description: One or more product codes.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ product_code_id:
+ description: The product code.
+ returned: always
+ type: str
+ sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
+ product_code_type:
+ description: The type of product code.
+ returned: always
+ type: str
+ sample: marketplace
+ public_dns_name:
+ description: The public DNS name assigned to the instance.
+ returned: always
+ type: str
+ sample:
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance
+ returned: always
+ type: str
+ sample: 52.0.0.1
+ root_device_name:
+ description: The device name of the root device
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ network.source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ state:
+ description: The current state of the instance.
+ returned: always
+ type: complex
+ contains:
+ code:
+ description: The low byte represents the state.
+ returned: always
+ type: int
+ sample: 16
+ name:
+ description: The name of the state.
+ returned: always
+ type: str
+ sample: running
+ state_transition_reason:
+ description: The reason for the most recent state transition.
+ returned: always
+ type: str
+ sample:
+ subnet_id:
+ description: The ID of the subnet in which the instance is running.
+ returned: always
+ type: str
+ sample: subnet-00abcdef
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+'''
+
+import re
+import uuid
+import string
+import textwrap
+import time
+from collections import namedtuple
+
+try:
+ import boto3
+ import botocore.exceptions
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.six import text_type, string_types
+from ansible.module_utils.six.moves.urllib import parse as urlparse
+from ansible.module_utils._text import to_bytes, to_native
+import ansible.module_utils.ec2 as ec2_utils
+from ansible.module_utils.ec2 import (AWSRetry,
+ ansible_dict_to_boto3_filter_list,
+ compare_aws_tags,
+ boto3_tag_list_to_ansible_dict,
+ ansible_dict_to_boto3_tag_list,
+ camel_dict_to_snake_dict)
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+
+module = None
+
+
+def tower_callback_script(tower_conf, windows=False, passwd=None):
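+ """Render user-data for an Ansible Tower callback: a PowerShell remoting bootstrap
+ on Windows, otherwise a bash loop that POSTs host_config_key to the job template
+ callback URL, falling back to the v1 API for older Tower versions."""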
+ script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'
+ if windows and passwd is not None:
+ script_tpl = """<powershell>
+ $admin = [adsi]("WinNT://./administrator, user")
+ $admin.PSBase.Invoke("SetPassword", "{PASS}")
+ Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
+ </powershell>
+ """
+ return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url))
+ elif windows and passwd is None:
+ script_tpl = """<powershell>
+ $admin = [adsi]("WinNT://./administrator, user")
+ Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
+ </powershell>
+ """
+ return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url))
+ elif not windows:
+ for p in ['tower_address', 'job_template_id', 'host_config_key']:
+ if p not in tower_conf:
+ module.fail_json(msg="Incomplete tower_callback configuration. tower_callback.{0} not set.".format(p))
+
+ if isinstance(tower_conf['job_template_id'], string_types):
+ tower_conf['job_template_id'] = urlparse.quote(tower_conf['job_template_id'])
+ tpl = string.Template(textwrap.dedent("""#!/bin/bash
+ set -x
+
+ retry_attempts=10
+ attempt=0
+ while [[ $attempt -lt $retry_attempts ]]
+ do
+ status_code=`curl --max-time 10 -v -k -s -i \
+ --data "host_config_key=${host_config_key}" \
+ 'https://${tower_address}/api/v2/job_templates/${template_id}/callback/' \
+ | head -n 1 \
+ | awk '{print $2}'`
+ if [[ $status_code == 404 ]]
+ then
+ status_code=`curl --max-time 10 -v -k -s -i \
+ --data "host_config_key=${host_config_key}" \
+ 'https://${tower_address}/api/v1/job_templates/${template_id}/callback/' \
+ | head -n 1 \
+ | awk '{print $2}'`
+ # fall back to using V1 API for Tower 3.1 and below, since v2 API will always 404
+ fi
+ if [[ $status_code == 201 ]]
+ then
+ exit 0
+ fi
+ attempt=$(( attempt + 1 ))
+ echo "$${status_code} received... retrying in 1 minute. (Attempt $${attempt})"
+ sleep 60
+ done
+ exit 1
+ """))
+ return tpl.safe_substitute(tower_address=tower_conf['tower_address'],
+ template_id=tower_conf['job_template_id'],
+ host_config_key=tower_conf['host_config_key'])
+ raise NotImplementedError("Only windows with remote-prep or non-windows with tower job callback supported so far.")
+
+
+@AWSRetry.jittered_backoff()
+def manage_tags(match, new_tags, purge_tags, ec2):
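+ """Reconcile the instance's existing tags with new_tags, creating and deleting tags as needed; returns True if anything changed."""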
+ changed = False
+ old_tags = boto3_tag_list_to_ansible_dict(match['Tags'])
+ tags_to_set, tags_to_delete = compare_aws_tags(
+ old_tags, new_tags,
+ purge_tags=purge_tags,
+ )
+ if tags_to_set:
+ ec2.create_tags(
+ Resources=[match['InstanceId']],
+ Tags=ansible_dict_to_boto3_tag_list(tags_to_set))
+ changed |= True
+ if tags_to_delete:
+ delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete)
+ ec2.delete_tags(
+ Resources=[match['InstanceId']],
+ Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values))
+ changed |= True
+ return changed
+
+
+def build_volume_spec(params):
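+ """Convert the module's volumes parameter into boto3 BlockDeviceMappings, coercing volume_size and iops to integers."""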
+ volumes = params.get('volumes') or []
+ for volume in volumes:
+ if 'ebs' in volume:
+ for int_value in ['volume_size', 'iops']:
+ if int_value in volume['ebs']:
+ volume['ebs'][int_value] = int(volume['ebs'][int_value])
+ return [ec2_utils.snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes]
+
+
+def add_or_update_instance_profile(instance, desired_profile_name):
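+ """Ensure the instance is associated with the requested IAM instance profile,
+ replacing or creating the association as needed; returns True when a change was made."""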
+ instance_profile_setting = instance.get('IamInstanceProfile')
+ if instance_profile_setting and desired_profile_name:
+ if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')):
+ # great, the profile we asked for is what's there
+ return False
+ else:
+ desired_arn = determine_iam_role(desired_profile_name)
+ if instance_profile_setting.get('Arn') == desired_arn:
+ return False
+ # update association
+ ec2 = module.client('ec2')
+ try:
+ association = ec2.describe_iam_instance_profile_associations(Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}])
+ except botocore.exceptions.ClientError as e:
+ # check for InvalidAssociationID.NotFound
+ module.fail_json_aws(e, "Could not find instance profile association")
+ try:
+ resp = ec2.replace_iam_instance_profile_association(
+ AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'],
+ IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)}
+ )
+ return True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, "Could not associate instance profile")
+
+ if not instance_profile_setting and desired_profile_name:
+ # create association
+ ec2 = module.client('ec2')
+ try:
+ resp = ec2.associate_iam_instance_profile(
+ IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)},
+ InstanceId=instance['InstanceId']
+ )
+ return True
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, "Could not associate new instance profile")
+
+ return False
+
+
+def build_network_spec(params, ec2=None):
+ """
+ Returns list of interfaces [complex]
+ Interface type: {
+ 'AssociatePublicIpAddress': True|False,
+ 'DeleteOnTermination': True|False,
+ 'Description': 'string',
+ 'DeviceIndex': 123,
+ 'Groups': [
+ 'string',
+ ],
+ 'Ipv6AddressCount': 123,
+ 'Ipv6Addresses': [
+ {
+ 'Ipv6Address': 'string'
+ },
+ ],
+ 'NetworkInterfaceId': 'string',
+ 'PrivateIpAddress': 'string',
+ 'PrivateIpAddresses': [
+ {
+ 'Primary': True|False,
+ 'PrivateIpAddress': 'string'
+ },
+ ],
+ 'SecondaryPrivateIpAddressCount': 123,
+ 'SubnetId': 'string'
+ },
+ """
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ interfaces = []
+ network = params.get('network') or {}
+ if not network.get('interfaces'):
+ # they only specified one interface
+ spec = {
+ 'DeviceIndex': 0,
+ }
+ if network.get('assign_public_ip') is not None:
+ spec['AssociatePublicIpAddress'] = network['assign_public_ip']
+
+ if params.get('vpc_subnet_id'):
+ spec['SubnetId'] = params['vpc_subnet_id']
+ else:
+ default_vpc = get_default_vpc(ec2)
+ if default_vpc is None:
+ raise module.fail_json(
+ msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance")
+ else:
+ sub = get_default_subnet(ec2, default_vpc)
+ spec['SubnetId'] = sub['SubnetId']
+
+ if network.get('private_ip_address'):
+ spec['PrivateIpAddress'] = network['private_ip_address']
+
+ if params.get('security_group') or params.get('security_groups'):
+ groups = discover_security_groups(
+ group=params.get('security_group'),
+ groups=params.get('security_groups'),
+ subnet_id=spec['SubnetId'],
+ ec2=ec2
+ )
+ spec['Groups'] = [g['GroupId'] for g in groups]
+ if network.get('description') is not None:
+ spec['Description'] = network['description']
+ # TODO more special snowflake network things
+
+ return [spec]
+
+ # handle list of `network.interfaces` options
+ for idx, interface_params in enumerate(network.get('interfaces', [])):
+ spec = {
+ 'DeviceIndex': idx,
+ }
+
+ if isinstance(interface_params, string_types):
+ # naive case where user gave
+ # network_interfaces: [eni-1234, eni-4567, ....]
+ # put into normal data structure so we don't dupe code
+ interface_params = {'id': interface_params}
+
+ if interface_params.get('id') is not None:
+ # if an ID is provided, we don't want to set any other parameters.
+ spec['NetworkInterfaceId'] = interface_params['id']
+ interfaces.append(spec)
+ continue
+
+ spec['DeleteOnTermination'] = interface_params.get('delete_on_termination', True)
+
+ if interface_params.get('ipv6_addresses'):
+ spec['Ipv6Addresses'] = [{'Ipv6Address': a} for a in interface_params.get('ipv6_addresses', [])]
+
+ if interface_params.get('private_ip_address'):
+ spec['PrivateIpAddress'] = interface_params.get('private_ip_address')
+
+ if interface_params.get('description'):
+ spec['Description'] = interface_params.get('description')
+
+ if interface_params.get('subnet_id', params.get('vpc_subnet_id')):
+ spec['SubnetId'] = interface_params.get('subnet_id', params.get('vpc_subnet_id'))
+ elif not spec.get('SubnetId') and not interface_params['id']:
+ # TODO grab a subnet from default VPC
+ raise ValueError('Failed to assign subnet to interface {0}'.format(interface_params))
+
+ interfaces.append(spec)
+ return interfaces
+
+
+def warn_if_public_ip_assignment_changed(instance):
+ # This is a non-modifiable attribute.
+ assign_public_ip = (module.params.get('network') or {}).get('assign_public_ip')
+ if assign_public_ip is None:
+ return
+
+ # Check that public ip assignment is the same and warn if not
+ public_dns_name = instance.get('PublicDnsName')
+ if (public_dns_name and not assign_public_ip) or (assign_public_ip and not public_dns_name):
+ module.warn(
+ "Unable to modify public ip assignment to {0} for instance {1}. "
+ "Whether or not to assign a public IP is determined during instance creation.".format(
+ assign_public_ip, instance['InstanceId']))
+
+
+def warn_if_cpu_options_changed(instance):
+ # This is a non-modifiable attribute.
+ cpu_options = module.params.get('cpu_options')
+ if cpu_options is None:
+ return
+
+ # Check that the CpuOptions set are the same and warn if not
+ core_count_curr = instance['CpuOptions'].get('CoreCount')
+ core_count = cpu_options.get('core_count')
+ threads_per_core_curr = instance['CpuOptions'].get('ThreadsPerCore')
+ threads_per_core = cpu_options.get('threads_per_core')
+ if core_count_curr != core_count:
+ module.warn(
+ "Unable to modify core_count from {0} to {1}. "
+ "Assigning a number of core is determinted during instance creation".format(
+ core_count_curr, core_count))
+
+ if threads_per_core_curr != threads_per_core:
+ module.warn(
+ "Unable to modify threads_per_core from {0} to {1}. "
+ "Assigning a number of threads per core is determined during instance creation.".format(
+ threads_per_core_curr, threads_per_core))
+
+
+def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, ec2=None):
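+ """Resolve security group names and IDs to group descriptions within the subnet's (or given) parent VPC."""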
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ if subnet_id is not None:
+ try:
+ sub = ec2.describe_subnets(SubnetIds=[subnet_id])
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidGroup.NotFound':
+ module.fail_json(
+ "Could not find subnet {0} to associate security groups. Please check the vpc_subnet_id and security_groups parameters.".format(
+ subnet_id
+ )
+ )
+ module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
+ except botocore.exceptions.BotoCoreError as e:
+ module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
+ parent_vpc_id = sub['Subnets'][0]['VpcId']
+
+ vpc = {
+ 'Name': 'vpc-id',
+ 'Values': [parent_vpc_id]
+ }
+
+ # because filter lists are AND in the security groups API,
+ # make two separate requests for groups by ID and by name
+ id_filters = [vpc]
+ name_filters = [vpc]
+
+ if group:
+ name_filters.append(
+ dict(
+ Name='group-name',
+ Values=[group]
+ )
+ )
+ if group.startswith('sg-'):
+ id_filters.append(
+ dict(
+ Name='group-id',
+ Values=[group]
+ )
+ )
+ if groups:
+ name_filters.append(
+ dict(
+ Name='group-name',
+ Values=groups
+ )
+ )
+ if [g for g in groups if g.startswith('sg-')]:
+ id_filters.append(
+ dict(
+ Name='group-id',
+ Values=[g for g in groups if g.startswith('sg-')]
+ )
+ )
+
+ found_groups = []
+ for f_set in (id_filters, name_filters):
+ if len(f_set) > 1:
+ found_groups.extend(ec2.get_paginator(
+ 'describe_security_groups'
+ ).paginate(
+ Filters=f_set
+ ).search('SecurityGroups[]'))
+ return list(dict((g['GroupId'], g) for g in found_groups).values())
+
+
+def build_top_level_options(params):
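+ """Translate scalar module parameters (image, key name, user data or Tower callback,
+ launch template, monitoring, placement, CPU options and the like) into RunInstances arguments."""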
+ spec = {}
+ if params.get('image_id'):
+ spec['ImageId'] = params['image_id']
+ elif isinstance(params.get('image'), dict):
+ image = params.get('image', {})
+ spec['ImageId'] = image.get('id')
+ if 'ramdisk' in image:
+ spec['RamdiskId'] = image['ramdisk']
+ if 'kernel' in image:
+ spec['KernelId'] = image['kernel']
+ if not spec.get('ImageId') and not params.get('launch_template'):
+ module.fail_json(msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template.")
+
+ if params.get('key_name') is not None:
+ spec['KeyName'] = params.get('key_name')
+ if params.get('user_data') is not None:
+ spec['UserData'] = to_native(params.get('user_data'))
+ elif params.get('tower_callback') is not None:
+ spec['UserData'] = tower_callback_script(
+ tower_conf=params.get('tower_callback'),
+ windows=params.get('tower_callback').get('windows', False),
+ passwd=params.get('tower_callback').get('set_password'),
+ )
+
+ if params.get('launch_template') is not None:
+ spec['LaunchTemplate'] = {}
+ if not (params.get('launch_template').get('id') or params.get('launch_template').get('name')):
+ module.fail_json(msg="Could not create instance with launch template. Either launch_template.name or launch_template.id parameters are required")
+
+ if params.get('launch_template').get('id') is not None:
+ spec['LaunchTemplate']['LaunchTemplateId'] = params.get('launch_template').get('id')
+ if params.get('launch_template').get('name') is not None:
+ spec['LaunchTemplate']['LaunchTemplateName'] = params.get('launch_template').get('name')
+ if params.get('launch_template').get('version') is not None:
+ spec['LaunchTemplate']['Version'] = to_native(params.get('launch_template').get('version'))
+
+ if params.get('detailed_monitoring', False):
+ spec['Monitoring'] = {'Enabled': True}
+ if params.get('cpu_credit_specification') is not None:
+ spec['CreditSpecification'] = {'CpuCredits': params.get('cpu_credit_specification')}
+ if params.get('tenancy') is not None:
+ spec['Placement'] = {'Tenancy': params.get('tenancy')}
+ if params.get('placement_group'):
+ if 'Placement' in spec:
+ spec['Placement']['GroupName'] = str(params.get('placement_group'))
+ else:
+ spec.setdefault('Placement', {'GroupName': str(params.get('placement_group'))})
+ if params.get('ebs_optimized') is not None:
+ spec['EbsOptimized'] = params.get('ebs_optimized')
+ if params.get('instance_initiated_shutdown_behavior'):
+ spec['InstanceInitiatedShutdownBehavior'] = params.get('instance_initiated_shutdown_behavior')
+ if params.get('termination_protection') is not None:
+ spec['DisableApiTermination'] = params.get('termination_protection')
+ if params.get('cpu_options') is not None:
+ spec['CpuOptions'] = {}
+ spec['CpuOptions']['ThreadsPerCore'] = params.get('cpu_options').get('threads_per_core')
+ spec['CpuOptions']['CoreCount'] = params.get('cpu_options').get('core_count')
+ return spec
+
+
+def build_instance_tags(params, propagate_tags_to_volumes=True):
+ tags = params.get('tags', {})
+ if params.get('name') is not None:
+ if tags is None:
+ tags = {}
+ tags['Name'] = params.get('name')
+ return [
+ {
+ 'ResourceType': 'volume',
+ 'Tags': ansible_dict_to_boto3_tag_list(tags),
+ },
+ {
+ 'ResourceType': 'instance',
+ 'Tags': ansible_dict_to_boto3_tag_list(tags),
+ },
+ ]
+
+
+def build_run_instance_spec(params, ec2=None):
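+ """Assemble the full RunInstances request: client token, network interfaces,
+ block device mappings, top-level options, tag specifications, IAM profile and instance type."""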
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ spec = dict(
+ ClientToken=uuid.uuid4().hex,
+ MaxCount=1,
+ MinCount=1,
+ )
+ # network parameters
+ spec['NetworkInterfaces'] = build_network_spec(params, ec2)
+ spec['BlockDeviceMappings'] = build_volume_spec(params)
+ spec.update(**build_top_level_options(params))
+ spec['TagSpecifications'] = build_instance_tags(params)
+
+ # IAM profile
+ if params.get('instance_role'):
+ spec['IamInstanceProfile'] = dict(Arn=determine_iam_role(params.get('instance_role')))
+
+ spec['InstanceType'] = params['instance_type']
+ return spec
+
+
+def await_instances(ids, state='OK'):
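+ """Wait for the given instance IDs to reach the requested waiter state, unless wait is disabled or running in check mode."""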
+ if not module.params.get('wait', True):
+ # the user asked not to wait for anything
+ return
+
+ if module.check_mode:
+ # In check mode, there is no change even if you wait.
+ return
+
+ state_opts = {
+ 'OK': 'instance_status_ok',
+ 'STOPPED': 'instance_stopped',
+ 'TERMINATED': 'instance_terminated',
+ 'EXISTS': 'instance_exists',
+ 'RUNNING': 'instance_running',
+ }
+ if state not in state_opts:
+ module.fail_json(msg="Cannot wait for state {0}, invalid state".format(state))
+ waiter = module.client('ec2').get_waiter(state_opts[state])
+ try:
+ waiter.wait(
+ InstanceIds=ids,
+ WaiterConfig={
+ 'Delay': 15,
+ 'MaxAttempts': module.params.get('wait_timeout', 600) // 15,
+ }
+ )
+ except botocore.exceptions.WaiterConfigError as e:
+ module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format(
+ to_native(e), ', '.join(ids), state))
+ except botocore.exceptions.WaiterError as e:
+ module.warn("Instances {0} took too long to reach state {1}. {2}".format(
+ ', '.join(ids), state, to_native(e)))
+
+
+def diff_instance_and_params(instance, params, ec2=None, skip=None):
+ """boto3 instance obj, module params"""
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ if skip is None:
+ skip = []
+
+ changes_to_apply = []
+ id_ = instance['InstanceId']
+
+ ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value'])
+
+ def value_wrapper(v):
+ return {'Value': v}
+
+ param_mappings = [
+ ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper),
+ ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper),
+ # user data is an immutable property
+ # ParamMapper('user_data', 'UserData', 'userData', value_wrapper),
+ ]
+
+ for mapping in param_mappings:
+ if params.get(mapping.param_key) is not None and mapping.instance_key not in skip:
+ value = AWSRetry.jittered_backoff()(ec2.describe_instance_attribute)(Attribute=mapping.attribute_name, InstanceId=id_)
+ if params.get(mapping.param_key) is not None and value[mapping.instance_key]['Value'] != params.get(mapping.param_key):
+ arguments = dict(
+ InstanceId=instance['InstanceId'],
+ # Attribute=mapping.attribute_name,
+ )
+ arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key))
+ changes_to_apply.append(arguments)
+
+ if (params.get('network') or {}).get('source_dest_check') is not None:
+ # network.source_dest_check is nested, so needs to be treated separately
+ check = bool(params.get('network').get('source_dest_check'))
+ if instance['SourceDestCheck'] != check:
+ changes_to_apply.append(dict(
+ InstanceId=instance['InstanceId'],
+ SourceDestCheck={'Value': check},
+ ))
+
+ return changes_to_apply
+
+
+def change_network_attachments(instance, params, ec2):
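+ """Attach any ENIs listed in network.interfaces that are not already attached; returns True if new interfaces were attached."""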
+ if (params.get('network') or {}).get('interfaces') is not None:
+ new_ids = []
+ for inty in params.get('network').get('interfaces'):
+ if isinstance(inty, dict) and 'id' in inty:
+ new_ids.append(inty['id'])
+ elif isinstance(inty, string_types):
+ new_ids.append(inty)
+ # network.interfaces can create the need to attach new interfaces
+ old_ids = [inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']]
+ to_attach = set(new_ids) - set(old_ids)
+ for eni_id in to_attach:
+ ec2.attach_network_interface(
+ DeviceIndex=new_ids.index(eni_id),
+ InstanceId=instance['InstanceId'],
+ NetworkInterfaceId=eni_id,
+ )
+ return bool(len(to_attach))
+ return False
+
+
+def find_instances(ec2, ids=None, filters=None):
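+ """Describe instances by explicit IDs or by filters (snake_case filter keys are converted to EC2 filter names)."""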
+ paginator = ec2.get_paginator('describe_instances')
+ if ids:
+ return list(paginator.paginate(
+ InstanceIds=ids,
+ ).search('Reservations[].Instances[]'))
+ elif filters is None:
+ module.fail_json(msg="No filters provided when they were required")
+ elif filters is not None:
+ for key in list(filters.keys()):
+ if not key.startswith("tag:"):
+ filters[key.replace("_", "-")] = filters.pop(key)
+ return list(paginator.paginate(
+ Filters=ansible_dict_to_boto3_filter_list(filters)
+ ).search('Reservations[].Instances[]'))
+ return []
+
+
+@AWSRetry.jittered_backoff()
+def get_default_vpc(ec2):
+ vpcs = ec2.describe_vpcs(Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'}))
+ if len(vpcs.get('Vpcs', [])):
+ return vpcs.get('Vpcs')[0]
+ return None
+
+
+@AWSRetry.jittered_backoff()
+def get_default_subnet(ec2, vpc, availability_zone=None):
+ subnets = ec2.describe_subnets(
+ Filters=ansible_dict_to_boto3_filter_list({
+ 'vpc-id': vpc['VpcId'],
+ 'state': 'available',
+ 'default-for-az': 'true',
+ })
+ )
+ if len(subnets.get('Subnets', [])):
+ if availability_zone is not None:
+ subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets'))
+ if availability_zone in subs_by_az:
+ return subs_by_az[availability_zone]
+
+ # to have a deterministic sorting order, we sort by AZ so we'll always pick the `a` subnet first
+ # there can only be one default-for-az subnet per AZ, so the AZ key is always unique in this list
+ by_az = sorted(subnets.get('Subnets'), key=lambda s: s['AvailabilityZone'])
+ return by_az[0]
+ return None
+
+
+def ensure_instance_state(state, ec2=None):
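+ """Move the instances selected by the filters parameter to the requested state and exit the module with the result."""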
+ if ec2 is None:
+ ec2 = module.client('ec2')
+ if state in ('running', 'started'):
+ changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING')
+
+ if failed:
+ module.fail_json(
+ msg="Unable to start instances: {0}".format(failure_reason),
+ reboot_success=list(changed),
+ reboot_failed=failed)
+
+ module.exit_json(
+ msg='Instances started',
+ reboot_success=list(changed),
+ changed=bool(len(changed)),
+ reboot_failed=[],
+ instances=[pretty_instance(i) for i in instances],
+ )
+ elif state in ('restarted', 'rebooted'):
+ changed, failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_state='STOPPED')
+ changed, failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_state='RUNNING')
+
+ if failed:
+ module.fail_json(
+ msg="Unable to restart instances: {0}".format(failure_reason),
+ reboot_success=list(changed),
+ reboot_failed=failed)
+
+ module.exit_json(
+ msg='Instances restarted',
+ reboot_success=list(changed),
+ changed=bool(len(changed)),
+ reboot_failed=[],
+ instances=[pretty_instance(i) for i in instances],
+ )
+ elif state in ('stopped',):
+ changed, failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_state='STOPPED')
+
+ if failed:
+ module.fail_json(
+ msg="Unable to stop instances: {0}".format(failure_reason),
+ stop_success=list(changed),
+ stop_failed=failed)
+
+ module.exit_json(
+ msg='Instances stopped',
+ stop_success=list(changed),
+ changed=bool(len(changed)),
+ stop_failed=[],
+ instances=[pretty_instance(i) for i in instances],
+ )
+ elif state in ('absent', 'terminated'):
+ terminated, terminate_failed, instances, failure_reason = change_instance_state(
+ filters=module.params.get('filters'),
+ desired_state='TERMINATED')
+
+ if terminate_failed:
+ module.fail_json(
+ msg="Unable to terminate instances: {0}".format(failure_reason),
+ terminate_success=list(terminated),
+ terminate_failed=terminate_failed)
+ module.exit_json(
+ msg='Instances terminated',
+ terminate_success=list(terminated),
+ changed=bool(len(terminated)),
+ terminate_failed=[],
+ instances=[pretty_instance(i) for i in instances],
+ )
+
+
+@AWSRetry.jittered_backoff()
+def change_instance_state(filters, desired_state, ec2=None):
+    """Change the matching instances to the desired state (STOPPED, RUNNING, or TERMINATED)."""
+ if ec2 is None:
+ ec2 = module.client('ec2')
+
+ changed = set()
+ instances = find_instances(ec2, filters=filters)
+ to_change = set(i['InstanceId'] for i in instances if i['State']['Name'].upper() != desired_state)
+ unchanged = set()
+ failure_reason = ""
+
+ for inst in instances:
+ try:
+ if desired_state == 'TERMINATED':
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+
+ # TODO use a client-token to prevent double-sends of these start/stop/terminate commands
+ # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html
+ resp = ec2.terminate_instances(InstanceIds=[inst['InstanceId']])
+ [changed.add(i['InstanceId']) for i in resp['TerminatingInstances']]
+ if desired_state == 'STOPPED':
+ if inst['State']['Name'] in ('stopping', 'stopped'):
+ unchanged.add(inst['InstanceId'])
+ continue
+
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+
+ resp = ec2.stop_instances(InstanceIds=[inst['InstanceId']])
+ [changed.add(i['InstanceId']) for i in resp['StoppingInstances']]
+ if desired_state == 'RUNNING':
+ if module.check_mode:
+ changed.add(inst['InstanceId'])
+ continue
+
+ resp = ec2.start_instances(InstanceIds=[inst['InstanceId']])
+ [changed.add(i['InstanceId']) for i in resp['StartingInstances']]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ try:
+ failure_reason = to_native(e.message)
+ except AttributeError:
+ failure_reason = to_native(e)
+
+ if changed:
+ await_instances(ids=list(changed) + list(unchanged), state=desired_state)
+
+ change_failed = list(to_change - changed)
+ instances = find_instances(ec2, ids=list(i['InstanceId'] for i in instances))
+ return changed, change_failed, instances, failure_reason
+
+
+def pretty_instance(i):
+ instance = camel_dict_to_snake_dict(i, ignore_list=['Tags'])
+ instance['tags'] = boto3_tag_list_to_ansible_dict(i['Tags'])
+ return instance
+
+
+def determine_iam_role(name_or_arn):
+ if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
+ return name_or_arn
+ iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+ try:
+ role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
+ return role['InstanceProfile']['Arn']
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
+ module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
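+
+# Illustrative inputs (account ID and profile name are made up): a full ARN such as
+# 'arn:aws:iam::123456789012:instance-profile/my-profile' is returned as-is, while a bare
+# name like 'my-profile' is resolved to its ARN via iam.get_instance_profile().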
+
+
+def handle_existing(existing_matches, changed, ec2, state):
+ if state in ('running', 'started') and [i for i in existing_matches if i['State']['Name'] != 'running']:
+ ins_changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING')
+ if failed:
+ module.fail_json(msg="Couldn't start instances: {0}. Failure reason: {1}".format(instances, failure_reason))
+ module.exit_json(
+ changed=bool(len(ins_changed)) or changed,
+ instances=[pretty_instance(i) for i in instances],
+ instance_ids=[i['InstanceId'] for i in instances],
+ )
+ changes = diff_instance_and_params(existing_matches[0], module.params)
+ for c in changes:
+ AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c)
+ changed |= bool(changes)
+ changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role'))
+ changed |= change_network_attachments(existing_matches[0], module.params, ec2)
+ altered = find_instances(ec2, ids=[i['InstanceId'] for i in existing_matches])
+ module.exit_json(
+ changed=bool(len(changes)) or changed,
+ instances=[pretty_instance(i) for i in altered],
+ instance_ids=[i['InstanceId'] for i in altered],
+ changes=changes,
+ )
+
+
+def ensure_present(existing_matches, changed, ec2, state):
+ if len(existing_matches):
+ try:
+ handle_existing(existing_matches, changed, ec2, state)
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(
+ e, msg="Failed to handle existing instances {0}".format(', '.join([i['InstanceId'] for i in existing_matches])),
+ # instances=[pretty_instance(i) for i in existing_matches],
+ # instance_ids=[i['InstanceId'] for i in existing_matches],
+ )
+ try:
+ instance_spec = build_run_instance_spec(module.params)
+        # If check mode is enabled, stop here and report the spec that would have been used.
+ if module.check_mode:
+ module.exit_json(
+ changed=True,
+ spec=instance_spec,
+ )
+ instance_response = run_instances(ec2, **instance_spec)
+ instances = instance_response['Instances']
+ instance_ids = [i['InstanceId'] for i in instances]
+
+ for ins in instances:
+ changes = diff_instance_and_params(ins, module.params, skip=['UserData', 'EbsOptimized'])
+ for c in changes:
+ try:
+ AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c)))
+
+ if not module.params.get('wait'):
+ module.exit_json(
+ changed=True,
+ instance_ids=instance_ids,
+ spec=instance_spec,
+ )
+ await_instances(instance_ids)
+ instances = ec2.get_paginator('describe_instances').paginate(
+ InstanceIds=instance_ids
+ ).search('Reservations[].Instances[]')
+
+ module.exit_json(
+ changed=True,
+ instances=[pretty_instance(i) for i in instances],
+ instance_ids=instance_ids,
+ spec=instance_spec,
+ )
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to create new EC2 instance")
+
+
+@AWSRetry.jittered_backoff()
+def run_instances(ec2, **instance_spec):
+ try:
+ return ec2.run_instances(**instance_spec)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidParameterValue' and "Invalid IAM Instance Profile ARN" in e.response['Error']['Message']:
+            # If the instance profile has just been created, it can take some time to become
+            # visible to EC2, so wait 10 seconds and retry run_instances once.
+ time.sleep(10)
+ return ec2.run_instances(**instance_spec)
+ else:
+ raise e
+
+
+def main():
+ global module
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']),
+ wait=dict(default=True, type='bool'),
+ wait_timeout=dict(default=600, type='int'),
+ # count=dict(default=1, type='int'),
+ image=dict(type='dict'),
+ image_id=dict(type='str'),
+ instance_type=dict(default='t2.micro', type='str'),
+ user_data=dict(type='str'),
+ tower_callback=dict(type='dict'),
+ ebs_optimized=dict(type='bool'),
+ vpc_subnet_id=dict(type='str', aliases=['subnet_id']),
+ availability_zone=dict(type='str'),
+ security_groups=dict(default=[], type='list'),
+ security_group=dict(type='str'),
+ instance_role=dict(type='str'),
+ name=dict(type='str'),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=False),
+ filters=dict(type='dict', default=None),
+ launch_template=dict(type='dict'),
+ key_name=dict(type='str'),
+ cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']),
+ cpu_options=dict(type='dict', options=dict(
+ core_count=dict(type='int', required=True),
+ threads_per_core=dict(type='int', choices=[1, 2], required=True)
+ )),
+ tenancy=dict(type='str', choices=['dedicated', 'default']),
+ placement_group=dict(type='str'),
+ instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']),
+ termination_protection=dict(type='bool'),
+ detailed_monitoring=dict(type='bool'),
+ instance_ids=dict(default=[], type='list'),
+ network=dict(default=None, type='dict'),
+ volumes=dict(default=None, type='list'),
+ )
+ # running/present are synonyms
+ # as are terminated/absent
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['security_groups', 'security_group'],
+ ['availability_zone', 'vpc_subnet_id'],
+ ['tower_callback', 'user_data'],
+ ['image_id', 'image'],
+ ],
+ supports_check_mode=True
+ )
+
+ if module.params.get('network'):
+ if module.params.get('network').get('interfaces'):
+ if module.params.get('security_group'):
+ module.fail_json(msg="Parameter network.interfaces can't be used with security_group")
+ if module.params.get('security_groups'):
+ module.fail_json(msg="Parameter network.interfaces can't be used with security_groups")
+
+ state = module.params.get('state')
+ ec2 = module.client('ec2')
+ if module.params.get('filters') is None:
+ filters = {
+ # all states except shutting-down and terminated
+ 'instance-state-name': ['pending', 'running', 'stopping', 'stopped']
+ }
+ if state == 'stopped':
+ # only need to change instances that aren't already stopped
+ filters['instance-state-name'] = ['stopping', 'pending', 'running']
+
+ if isinstance(module.params.get('instance_ids'), string_types):
+ filters['instance-id'] = [module.params.get('instance_ids')]
+ elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')):
+ filters['instance-id'] = module.params.get('instance_ids')
+ else:
+ if not module.params.get('vpc_subnet_id'):
+ if module.params.get('network'):
+ # grab AZ from one of the ENIs
+ ints = module.params.get('network').get('interfaces')
+ if ints:
+ filters['network-interface.network-interface-id'] = []
+ for i in ints:
+ if isinstance(i, dict):
+ i = i['id']
+ filters['network-interface.network-interface-id'].append(i)
+ else:
+ sub = get_default_subnet(ec2, get_default_vpc(ec2), availability_zone=module.params.get('availability_zone'))
+ filters['subnet-id'] = sub['SubnetId']
+ else:
+ filters['subnet-id'] = [module.params.get('vpc_subnet_id')]
+
+ if module.params.get('name'):
+ filters['tag:Name'] = [module.params.get('name')]
+
+ if module.params.get('image_id'):
+ filters['image-id'] = [module.params.get('image_id')]
+ elif (module.params.get('image') or {}).get('id'):
+ filters['image-id'] = [module.params.get('image', {}).get('id')]
+
+ module.params['filters'] = filters
+
+ if module.params.get('cpu_options') and not module.botocore_at_least('1.10.16'):
+ module.fail_json(msg="cpu_options is only supported with botocore >= 1.10.16")
+
+ existing_matches = find_instances(ec2, filters=module.params.get('filters'))
+ changed = False
+
+ if state not in ('terminated', 'absent') and existing_matches:
+ for match in existing_matches:
+ warn_if_public_ip_assignment_changed(match)
+ warn_if_cpu_options_changed(match)
+ tags = module.params.get('tags') or {}
+ name = module.params.get('name')
+ if name:
+ tags['Name'] = name
+ changed |= manage_tags(match, tags, module.params.get('purge_tags', False), ec2)
+
+ if state in ('present', 'running', 'started'):
+ ensure_present(existing_matches=existing_matches, changed=changed, ec2=ec2, state=state)
+ elif state in ('restarted', 'rebooted', 'stopped', 'absent', 'terminated'):
+ if existing_matches:
+ ensure_instance_state(state, ec2)
+ else:
+ module.exit_json(
+ msg='No matching instances found',
+ changed=False,
+ instances=[],
+ )
+ else:
+ module.fail_json(msg="We don't handle the state {0}".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_instance_info.py b/test/support/integration/plugins/modules/ec2_instance_info.py
new file mode 100644
index 0000000000..7615b958d3
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_instance_info.py
@@ -0,0 +1,571 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: ec2_instance_info
+short_description: Gather information about ec2 instances in AWS
+description:
+ - Gather information about ec2 instances in AWS
+ - This module was called C(ec2_instance_facts) before Ansible 2.9. The usage did not change.
+version_added: "2.4"
+author:
+ - Michael Schuett (@michaeljs1990)
+ - Rob White (@wimnat)
+requirements: [ "boto3", "botocore" ]
+options:
+ instance_ids:
+ description:
+ - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
+ required: false
+ version_added: 2.4
+ type: list
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters. Filter
+ names and values are case sensitive.
+ required: false
+ default: {}
+ type: dict
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all instances
+- ec2_instance_info:
+
+# Gather information about all instances in AZ ap-southeast-2a
+- ec2_instance_info:
+ filters:
+ availability-zone: ap-southeast-2a
+
+# Gather information about a particular instance using ID
+- ec2_instance_info:
+ instance_ids:
+ - i-12345678
+
+# Gather information about any instance with a tag key Name and value Example
+- ec2_instance_info:
+ filters:
+ "tag:Name": Example
+
+# Gather information about any instance in states "shutting-down", "stopping", "stopped"
+- ec2_instance_info:
+ filters:
+ instance-state-name: [ "shutting-down", "stopping", "stopped" ]
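+
+# Combine a tag filter with a state filter (illustrative values; any filters documented
+# for DescribeInstances can be mixed in the same way)
+- ec2_instance_info:
+    filters:
+      "tag:Name": Example
+      instance-state-name: [ "running" ]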
+
+'''
+
+RETURN = '''
+instances:
+ description: a list of ec2 instances
+ returned: always
+ type: complex
+ contains:
+ ami_launch_index:
+ description: The AMI launch index, which can be used to find this instance in the launch group.
+ returned: always
+ type: int
+ sample: 0
+ architecture:
+ description: The architecture of the image
+ returned: always
+ type: str
+ sample: x86_64
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
+ returned: always
+ type: str
+ sample: /dev/sdh
+ ebs:
+ description: Parameters used to automatically set up EBS volumes when the instance is launched.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ volume_id:
+ description: The ID of the EBS volume
+ returned: always
+ type: str
+ sample: vol-12345678
+ cpu_options:
+ description: The CPU options set for the instance.
+ returned: always if botocore version >= 1.10.16
+ type: complex
+ contains:
+ core_count:
+ description: The number of CPU cores for the instance.
+ returned: always
+ type: int
+ sample: 1
+ threads_per_core:
+                    description: The number of threads per CPU core. On supported instances, a value of 1 means Intel Hyper-Threading Technology is disabled.
+ returned: always
+ type: int
+ sample: 1
+ client_token:
+ description: The idempotency token you provided when you launched the instance, if applicable.
+ returned: always
+ type: str
+ sample: mytoken
+ ebs_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ hypervisor:
+ description: The hypervisor type of the instance.
+ returned: always
+ type: str
+ sample: xen
+ iam_instance_profile:
+ description: The IAM instance profile associated with the instance, if applicable.
+ returned: always
+ type: complex
+ contains:
+ arn:
+ description: The Amazon Resource Name (ARN) of the instance profile.
+ returned: always
+ type: str
+ sample: "arn:aws:iam::000012345678:instance-profile/myprofile"
+ id:
+ description: The ID of the instance profile
+ returned: always
+ type: str
+ sample: JFJ397FDG400FG9FD1N
+ image_id:
+ description: The ID of the AMI used to launch the instance.
+ returned: always
+ type: str
+ sample: ami-0011223344
+ instance_id:
+ description: The ID of the instance.
+ returned: always
+ type: str
+ sample: i-012345678
+ instance_type:
+ description: The instance type size of the running instance.
+ returned: always
+ type: str
+ sample: t2.micro
+ key_name:
+ description: The name of the key pair, if this instance was launched with an associated key pair.
+ returned: always
+ type: str
+ sample: my-key
+ launch_time:
+ description: The time the instance was launched.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ monitoring:
+ description: The monitoring for the instance.
+ returned: always
+ type: complex
+ contains:
+ state:
+ description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
+ returned: always
+ type: str
+ sample: disabled
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ association:
+                description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ attachment:
+ description: The network interface attachment.
+ returned: always
+ type: complex
+ contains:
+ attach_time:
+ description: The time stamp when the attachment initiated.
+ returned: always
+ type: str
+ sample: "2017-03-23T22:51:24+00:00"
+ attachment_id:
+ description: The ID of the network interface attachment.
+ returned: always
+ type: str
+ sample: eni-attach-3aff3f
+ delete_on_termination:
+ description: Indicates whether the network interface is deleted when the instance is terminated.
+ returned: always
+ type: bool
+ sample: true
+ device_index:
+ description: The index of the device on the instance for the network interface attachment.
+ returned: always
+ type: int
+ sample: 0
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: attached
+ description:
+ description: The description.
+ returned: always
+ type: str
+ sample: My interface
+ groups:
+ description: One or more security groups.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-abcdef12
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: mygroup
+ ipv6_addresses:
+ description: One or more IPv6 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ ipv6_address:
+ description: The IPv6 address.
+ returned: always
+ type: str
+ sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ owner_id:
+ description: The AWS account ID of the owner of the network interface.
+ returned: always
+ type: str
+ sample: 01234567890
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ private_ip_addresses:
+ description: The private IPv4 addresses associated with the network interface.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ association:
+ description: The association information for an Elastic IP address (IPv4) associated with the network interface.
+ returned: always
+ type: complex
+ contains:
+ ip_owner_id:
+ description: The ID of the owner of the Elastic IP address.
+ returned: always
+ type: str
+ sample: amazon
+ public_dns_name:
+ description: The public DNS name.
+ returned: always
+ type: str
+ sample: ""
+ public_ip:
+ description: The public IP address or Elastic IP address bound to the network interface.
+ returned: always
+ type: str
+ sample: 1.2.3.4
+ primary:
+ description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
+ returned: always
+ type: bool
+ sample: true
+ private_ip_address:
+ description: The private IPv4 address of the network interface.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The status of the network interface.
+ returned: always
+ type: str
+ sample: in-use
+ subnet_id:
+ description: The ID of the subnet for the network interface.
+ returned: always
+ type: str
+ sample: subnet-0123456
+ vpc_id:
+ description: The ID of the VPC for the network interface.
+ returned: always
+ type: str
+ sample: vpc-0123456
+ placement:
+ description: The location where the instance launched, if applicable.
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The Availability Zone of the instance.
+ returned: always
+ type: str
+ sample: ap-southeast-2a
+ group_name:
+ description: The name of the placement group the instance is in (for cluster compute instances).
+ returned: always
+ type: str
+ sample: ""
+ tenancy:
+ description: The tenancy of the instance (if the instance is running in a VPC).
+ returned: always
+ type: str
+ sample: default
+ private_dns_name:
+ description: The private DNS name.
+ returned: always
+ type: str
+ sample: ip-10-0-0-1.ap-southeast-2.compute.internal
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ product_codes:
+ description: One or more product codes.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ product_code_id:
+ description: The product code.
+ returned: always
+ type: str
+ sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
+ product_code_type:
+ description: The type of product code.
+ returned: always
+ type: str
+ sample: marketplace
+ public_dns_name:
+ description: The public DNS name assigned to the instance.
+ returned: always
+ type: str
+ sample:
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance
+ returned: always
+ type: str
+ sample: 52.0.0.1
+ root_device_name:
+ description: The device name of the root device
+ returned: always
+ type: str
+ sample: /dev/sda1
+ root_device_type:
+ description: The type of root device used by the AMI.
+ returned: always
+ type: str
+ sample: ebs
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ source_dest_check:
+ description: Indicates whether source/destination checking is enabled.
+ returned: always
+ type: bool
+ sample: true
+ state:
+ description: The current state of the instance.
+ returned: always
+ type: complex
+ contains:
+ code:
+ description: The low byte represents the state.
+ returned: always
+ type: int
+ sample: 16
+ name:
+ description: The name of the state.
+ returned: always
+ type: str
+ sample: running
+ state_transition_reason:
+ description: The reason for the most recent state transition.
+ returned: always
+ type: str
+ sample:
+ subnet_id:
+ description: The ID of the subnet in which the instance is running.
+ returned: always
+ type: str
+ sample: subnet-00abcdef
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ virtualization_type:
+ description: The type of virtualization of the AMI.
+ returned: always
+ type: str
+ sample: hvm
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: dict
+ sample: vpc-0011223344
+'''
+
+import traceback
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
+ ec2_argument_spec, get_aws_connection_info)
+
+
+def list_ec2_instances(connection, module):
+
+ instance_ids = module.params.get("instance_ids")
+ filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
+
+ try:
+ reservations_paginator = connection.get_paginator('describe_instances')
+ reservations = reservations_paginator.paginate(InstanceIds=instance_ids, Filters=filters).build_full_result()
+ except ClientError as e:
+ module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
+
+ # Get instances from reservations
+ instances = []
+ for reservation in reservations['Reservations']:
+ instances = instances + reservation['Instances']
+
+ # Turn the boto3 result in to ansible_friendly_snaked_names
+ snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances]
+
+ # Turn the boto3 result in to ansible friendly tag dictionary
+ for instance in snaked_instances:
+ instance['tags'] = boto3_tag_list_to_ansible_dict(instance.get('tags', []), 'key', 'value')
+
+ module.exit_json(instances=snaked_instances)
+
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ instance_ids=dict(default=[], type='list'),
+ filters=dict(default={}, type='dict')
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['instance_ids', 'filters']
+ ],
+ supports_check_mode=True
+ )
+ if module._name == 'ec2_instance_facts':
+ module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'", version='2.13')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+
+ if region:
+ connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+ else:
+ module.fail_json(msg="region must be specified")
+
+ list_ec2_instances(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_key.py b/test/support/integration/plugins/modules/ec2_key.py
new file mode 100644
index 0000000000..de67af8bc0
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_key.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_key
+version_added: "1.5"
+short_description: create or delete an ec2 key pair
+description:
+ - create or delete an ec2 key pair.
+options:
+ name:
+ description:
+ - Name of the key pair.
+ required: true
+ type: str
+ key_material:
+ description:
+ - Public key material.
+ required: false
+ type: str
+ force:
+ description:
+ - Force overwrite of already existing key pair if key has changed.
+ required: false
+ default: true
+ type: bool
+ version_added: "2.3"
+ state:
+ description:
+ - create or delete keypair
+ required: false
+ choices: [ present, absent ]
+ default: 'present'
+ type: str
+ wait:
+ description:
+ - This option has no effect since version 2.5 and will be removed in 2.14.
+ version_added: "1.6"
+ type: bool
+ wait_timeout:
+ description:
+ - This option has no effect since version 2.5 and will be removed in 2.14.
+ version_added: "1.6"
+ type: int
+ required: false
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements: [ boto3 ]
+author:
+ - "Vincent Viallet (@zbal)"
+ - "Prasad Katti (@prasadkatti)"
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: create a new ec2 key pair, returns generated private key
+ ec2_key:
+ name: my_keypair
+
+- name: create key pair using provided key_material
+ ec2_key:
+ name: my_keypair
+ key_material: 'ssh-rsa AAAAxyz...== me@example.com'
+
+- name: create key pair using key_material obtained using 'file' lookup plugin
+ ec2_key:
+ name: my_keypair
+ key_material: "{{ lookup('file', '/path/to/public_key/id_rsa.pub') }}"
+
+# try creating a key pair with the name of an already existing keypair
+# but don't overwrite it even if the key is different (force=false)
+- name: try creating a key pair with name of an already existing keypair
+ ec2_key:
+ name: my_existing_keypair
+ key_material: 'ssh-rsa AAAAxyz...== me@example.com'
+ force: false
+
+- name: remove key pair by name
+ ec2_key:
+ name: my_keypair
+ state: absent
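+
+# Hypothetical follow-up (names are illustrative): register the result to inspect the
+# fingerprint documented in the RETURN section below
+- name: create a key pair and report its fingerprint
+  ec2_key:
+    name: my_keypair
+  register: my_keypair_result
+
+- debug:
+    msg: "{{ my_keypair_result.key.fingerprint }}"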
+'''
+
+RETURN = '''
+changed:
+ description: whether a keypair was created/deleted
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: short message describing the action taken
+ returned: always
+ type: str
+ sample: key pair created
+key:
+ description: details of the keypair (this is set to null when state is absent)
+ returned: always
+ type: complex
+ contains:
+ fingerprint:
+ description: fingerprint of the key
+ returned: when state is present
+ type: str
+ sample: 'b0:22:49:61:d9:44:9d:0c:7e:ac:8a:32:93:21:6c:e8:fb:59:62:43'
+ name:
+ description: name of the keypair
+ returned: when state is present
+ type: str
+ sample: my_keypair
+ private_key:
+ description: private key of a newly created keypair
+ returned: when a new keypair is created by AWS (key_material is not provided)
+ type: str
+ sample: '-----BEGIN RSA PRIVATE KEY-----
+ MIIEowIBAAKC...
+ -----END RSA PRIVATE KEY-----'
+'''
+
+import uuid
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils._text import to_bytes
+
+try:
+ from botocore.exceptions import ClientError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def extract_key_data(key):
+
+ data = {
+ 'name': key['KeyName'],
+ 'fingerprint': key['KeyFingerprint']
+ }
+ if 'KeyMaterial' in key:
+ data['private_key'] = key['KeyMaterial']
+ return data
+
+
+def get_key_fingerprint(module, ec2_client, key_material):
+ '''
+ EC2's fingerprints are non-trivial to generate, so push this key
+ to a temporary name and make ec2 calculate the fingerprint for us.
+ http://blog.jbrowne.com/?p=23
+ https://forums.aws.amazon.com/thread.jspa?messageID=352828
+ '''
+
+ # find an unused name
+ name_in_use = True
+ while name_in_use:
+ random_name = "ansible-" + str(uuid.uuid4())
+ name_in_use = find_key_pair(module, ec2_client, random_name)
+
+ temp_key = import_key_pair(module, ec2_client, random_name, key_material)
+ delete_key_pair(module, ec2_client, random_name, finish_task=False)
+ return temp_key['KeyFingerprint']
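+
+# Rough illustration (the fingerprint below is made up): importing the caller's public key
+# under a throwaway 'ansible-<uuid>' name makes EC2 report a fingerprint such as
+# 'b0:22:49:61:d9:44:9d:0c:7e:ac:8a:32:93:21:6c:e8:fb:59:62:43', which create_key_pair()
+# can then compare against the existing key's KeyFingerprint.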
+
+
+def find_key_pair(module, ec2_client, name):
+
+ try:
+ key = ec2_client.describe_key_pairs(KeyNames=[name])['KeyPairs'][0]
+ except ClientError as err:
+ if err.response['Error']['Code'] == "InvalidKeyPair.NotFound":
+ return None
+ module.fail_json_aws(err, msg="error finding keypair")
+ except IndexError:
+ key = None
+ return key
+
+
+def create_key_pair(module, ec2_client, name, key_material, force):
+
+ key = find_key_pair(module, ec2_client, name)
+ if key:
+ if key_material and force:
+ if not module.check_mode:
+ new_fingerprint = get_key_fingerprint(module, ec2_client, key_material)
+ if key['KeyFingerprint'] != new_fingerprint:
+ delete_key_pair(module, ec2_client, name, finish_task=False)
+ key = import_key_pair(module, ec2_client, name, key_material)
+ key_data = extract_key_data(key)
+ module.exit_json(changed=True, key=key_data, msg="key pair updated")
+ else:
+ # Assume a change will be made in check mode since a comparison can't be done
+ module.exit_json(changed=True, key=extract_key_data(key), msg="key pair updated")
+ key_data = extract_key_data(key)
+ module.exit_json(changed=False, key=key_data, msg="key pair already exists")
+ else:
+ # key doesn't exist, create it now
+ key_data = None
+ if not module.check_mode:
+ if key_material:
+ key = import_key_pair(module, ec2_client, name, key_material)
+ else:
+ try:
+ key = ec2_client.create_key_pair(KeyName=name)
+ except ClientError as err:
+ module.fail_json_aws(err, msg="error creating key")
+ key_data = extract_key_data(key)
+ module.exit_json(changed=True, key=key_data, msg="key pair created")
+
+
+def import_key_pair(module, ec2_client, name, key_material):
+
+ try:
+ key = ec2_client.import_key_pair(KeyName=name, PublicKeyMaterial=to_bytes(key_material))
+ except ClientError as err:
+ module.fail_json_aws(err, msg="error importing key")
+ return key
+
+
+def delete_key_pair(module, ec2_client, name, finish_task=True):
+
+ key = find_key_pair(module, ec2_client, name)
+ if key:
+ if not module.check_mode:
+ try:
+ ec2_client.delete_key_pair(KeyName=name)
+ except ClientError as err:
+ module.fail_json_aws(err, msg="error deleting key")
+ if not finish_task:
+ return
+ module.exit_json(changed=True, key=None, msg="key deleted")
+ module.exit_json(key=None, msg="key did not exist")
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(required=True),
+ key_material=dict(),
+ force=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', removed_in_version='2.14'),
+ wait_timeout=dict(type='int', removed_in_version='2.14')
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ ec2_client = module.client('ec2')
+
+ name = module.params['name']
+ state = module.params.get('state')
+ key_material = module.params.get('key_material')
+ force = module.params.get('force')
+
+ if state == 'absent':
+ delete_key_pair(module, ec2_client, name)
+ elif state == 'present':
+ create_key_pair(module, ec2_client, name, key_material, force)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_igw.py b/test/support/integration/plugins/modules/ec2_vpc_igw.py
new file mode 100644
index 0000000000..5198527af7
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_igw.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_igw
+short_description: Manage an AWS VPC Internet gateway
+description:
+ - Manage an AWS VPC Internet gateway
+version_added: "2.0"
+author: Robert Estelle (@erydo)
+options:
+ vpc_id:
+ description:
+ - The VPC ID for the VPC in which to manage the Internet Gateway.
+ required: true
+ type: str
+ tags:
+ description:
+ - "A dict of tags to apply to the internet gateway. Any tags currently applied to the internet gateway and not present here will be removed."
+ aliases: [ 'resource_tags' ]
+ version_added: "2.4"
+ type: dict
+ state:
+ description:
+ - Create or terminate the IGW
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements:
+ - botocore
+ - boto3
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Internet Gateway.
+# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
+ec2_vpc_igw:
+ vpc_id: vpc-abcdefgh
+ state: present
+register: igw
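+
+# Remove the Internet Gateway again (illustrative; reuses the vpc_id from the task above).
+ec2_vpc_igw:
+  vpc_id: vpc-abcdefgh
+  state: absent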
+
+'''
+
+RETURN = '''
+changed:
+ description: If any changes have been made to the Internet Gateway.
+ type: bool
+ returned: always
+ sample:
+ changed: false
+gateway_id:
+ description: The unique identifier for the Internet Gateway.
+ type: str
+ returned: I(state=present)
+ sample:
+ gateway_id: "igw-XXXXXXXX"
+tags:
+  description: The tags associated with the Internet Gateway.
+ type: dict
+ returned: I(state=present)
+ sample:
+ tags:
+ "Ansible": "Test"
+vpc_id:
+ description: The VPC ID associated with the Internet Gateway.
+ type: str
+ returned: I(state=present)
+ sample:
+ vpc_id: "vpc-XXXXXXXX"
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.ec2 import (
+ AWSRetry,
+ camel_dict_to_snake_dict,
+ boto3_tag_list_to_ansible_dict,
+ ansible_dict_to_boto3_filter_list,
+ ansible_dict_to_boto3_tag_list,
+ compare_aws_tags
+)
+from ansible.module_utils.six import string_types
+
+
+class AnsibleEc2Igw(object):
+
+ def __init__(self, module, results):
+ self._module = module
+ self._results = results
+ self._connection = self._module.client('ec2')
+ self._check_mode = self._module.check_mode
+
+ def process(self):
+ vpc_id = self._module.params.get('vpc_id')
+ state = self._module.params.get('state', 'present')
+ tags = self._module.params.get('tags')
+
+ if state == 'present':
+ self.ensure_igw_present(vpc_id, tags)
+ elif state == 'absent':
+ self.ensure_igw_absent(vpc_id)
+
+ def get_matching_igw(self, vpc_id):
+ filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
+ igws = []
+ try:
+ response = self._connection.describe_internet_gateways(Filters=filters)
+ igws = response.get('InternetGateways', [])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e)
+
+ igw = None
+ if len(igws) > 1:
+ self._module.fail_json(
+ msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting'.format(vpc_id))
+ elif igws:
+ igw = camel_dict_to_snake_dict(igws[0])
+
+ return igw
+
+ def check_input_tags(self, tags):
+ nonstring_tags = [k for k, v in tags.items() if not isinstance(v, string_types)]
+ if nonstring_tags:
+ self._module.fail_json(msg='One or more tags contain non-string values: {0}'.format(nonstring_tags))
+
+ def ensure_tags(self, igw_id, tags, add_only):
+ final_tags = []
+
+ filters = ansible_dict_to_boto3_filter_list({'resource-id': igw_id, 'resource-type': 'internet-gateway'})
+ cur_tags = None
+ try:
+ cur_tags = self._connection.describe_tags(Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Couldn't describe tags")
+
+ purge_tags = bool(not add_only)
+ to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
+ final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags'))
+
+ if to_update:
+ try:
+ if self._check_mode:
+ # update tags
+ final_tags.update(to_update)
+ else:
+ AWSRetry.exponential_backoff()(self._connection.create_tags)(
+ Resources=[igw_id],
+ Tags=ansible_dict_to_boto3_tag_list(to_update)
+ )
+
+ self._results['changed'] = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Couldn't create tags")
+
+ if to_delete:
+ try:
+ if self._check_mode:
+ # update tags
+ for key in to_delete:
+ del final_tags[key]
+ else:
+ tags_list = []
+ for key in to_delete:
+ tags_list.append({'Key': key})
+
+ AWSRetry.exponential_backoff()(self._connection.delete_tags)(Resources=[igw_id], Tags=tags_list)
+
+ self._results['changed'] = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Couldn't delete tags")
+
+ if not self._check_mode and (to_update or to_delete):
+ try:
+ response = self._connection.describe_tags(Filters=filters)
+ final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags'))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Couldn't describe tags")
+
+ return final_tags
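+
+    # Hedged sketch of the compare_aws_tags() call above (example tags): comparing current
+    # {'Name': 'x', 'Stale': 'y'} against desired {'Name': 'x', 'New': 'z'} with
+    # purge_tags=True is expected to yield to_update={'New': 'z'} and to_delete=['Stale'].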
+
+ @staticmethod
+ def get_igw_info(igw):
+ return {
+ 'gateway_id': igw['internet_gateway_id'],
+ 'tags': igw['tags'],
+ 'vpc_id': igw['vpc_id']
+ }
+
+ def ensure_igw_absent(self, vpc_id):
+ igw = self.get_matching_igw(vpc_id)
+ if igw is None:
+ return self._results
+
+ if self._check_mode:
+ self._results['changed'] = True
+ return self._results
+
+ try:
+ self._results['changed'] = True
+ self._connection.detach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id)
+ self._connection.delete_internet_gateway(InternetGatewayId=igw['internet_gateway_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway")
+
+ return self._results
+
+ def ensure_igw_present(self, vpc_id, tags):
+ self.check_input_tags(tags)
+
+ igw = self.get_matching_igw(vpc_id)
+
+ if igw is None:
+ if self._check_mode:
+ self._results['changed'] = True
+ self._results['gateway_id'] = None
+ return self._results
+
+ try:
+ response = self._connection.create_internet_gateway()
+
+ # Ensure the gateway exists before trying to attach it or add tags
+ waiter = get_waiter(self._connection, 'internet_gateway_exists')
+ waiter.wait(InternetGatewayIds=[response['InternetGateway']['InternetGatewayId']])
+
+ igw = camel_dict_to_snake_dict(response['InternetGateway'])
+ self._connection.attach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id)
+ self._results['changed'] = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ self._module.fail_json_aws(e, msg='Unable to create Internet Gateway')
+
+ igw['vpc_id'] = vpc_id
+
+ igw['tags'] = self.ensure_tags(igw_id=igw['internet_gateway_id'], tags=tags, add_only=False)
+
+ igw_info = self.get_igw_info(igw)
+ self._results.update(igw_info)
+
+ return self._results
+
+
+def main():
+ argument_spec = dict(
+ vpc_id=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(default=dict(), required=False, type='dict', aliases=['resource_tags'])
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ results = dict(
+ changed=False
+ )
+ igw_manager = AnsibleEc2Igw(module=module, results=results)
+ igw_manager.process()
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_net.py b/test/support/integration/plugins/modules/ec2_vpc_net.py
new file mode 100644
index 0000000000..30e4b1e94c
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_net.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net
+short_description: Configure AWS virtual private clouds
+description:
+ - Create, modify, and terminate AWS virtual private clouds.
+version_added: "2.0"
+author:
+ - Jonathan Davila (@defionscode)
+ - Sloane Hertel (@s-hertel)
+options:
+ name:
+ description:
+ - The name to give your VPC. This is used in combination with C(cidr_block) to determine if a VPC already exists.
+ required: yes
+ type: str
+ cidr_block:
+ description:
+ - The primary CIDR of the VPC. After 2.5 a list of CIDRs can be provided. The first in the list will be used as the primary CIDR
+ and is used in conjunction with the C(name) to ensure idempotence.
+ required: yes
+ type: list
+ elements: str
+ ipv6_cidr:
+ description:
+ - Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses,
+ or the size of the CIDR block.
+ default: False
+ type: bool
+ version_added: '2.10'
+ purge_cidrs:
+ description:
+ - Remove CIDRs that are associated with the VPC and are not specified in C(cidr_block).
+ default: no
+ type: bool
+ version_added: '2.5'
+ tenancy:
+ description:
+ - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
+ default: default
+ choices: [ 'default', 'dedicated' ]
+ type: str
+ dns_support:
+ description:
+ - Whether to enable AWS DNS support.
+ default: yes
+ type: bool
+ dns_hostnames:
+ description:
+ - Whether to enable AWS hostname support.
+ default: yes
+ type: bool
+ dhcp_opts_id:
+ description:
+ - The id of the DHCP options to use for this VPC.
+ type: str
+ tags:
+ description:
+      - The tags you want attached to the VPC. This is independent of the name value; note that if you pass a 'Name' key it would override the
+        Name of the VPC if it is different.
+ aliases: [ 'resource_tags' ]
+ type: dict
+ state:
+ description:
+ - The state of the VPC. Either absent or present.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ multi_ok:
+ description:
+ - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want
+ duplicate VPCs created.
+ type: bool
+ default: false
+requirements:
+ - boto3
+ - botocore
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: create a VPC with dedicated tenancy and a couple of tags
+ ec2_vpc_net:
+ name: Module_dev2
+ cidr_block: 10.10.0.0/16
+ region: us-east-1
+ tags:
+ module: ec2_vpc_net
+ this: works
+ tenancy: dedicated
+
+- name: create a VPC with dedicated tenancy and request an IPv6 CIDR
+ ec2_vpc_net:
+ name: Module_dev2
+ cidr_block: 10.10.0.0/16
+ ipv6_cidr: True
+ region: us-east-1
+ tenancy: dedicated
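+
+# A hedged variation (illustrative CIDRs): cidr_block also accepts a list, and purge_cidrs
+# removes any associations that are not named in it
+- name: ensure the VPC has exactly these two CIDR blocks
+  ec2_vpc_net:
+    name: Module_dev2
+    cidr_block:
+      - 10.10.0.0/16
+      - 10.20.0.0/16
+    purge_cidrs: yes
+    region: us-east-1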
+'''
+
+RETURN = '''
+vpc:
+ description: info about the VPC that was created or deleted
+ returned: always
+ type: complex
+ contains:
+ cidr_block:
+ description: The CIDR of the VPC
+ returned: always
+ type: str
+ sample: 10.0.0.0/16
+ cidr_block_association_set:
+ description: IPv4 CIDR blocks associated with the VPC
+ returned: success
+ type: list
+ sample:
+ "cidr_block_association_set": [
+ {
+ "association_id": "vpc-cidr-assoc-97aeeefd",
+ "cidr_block": "20.0.0.0/24",
+ "cidr_block_state": {
+ "state": "associated"
+ }
+ }
+ ]
+ classic_link_enabled:
+ description: indicates whether ClassicLink is enabled
+ returned: always
+ type: bool
+ sample: false
+ dhcp_options_id:
+ description: the id of the DHCP options associated with this VPC
+ returned: always
+ type: str
+ sample: dopt-0fb8bd6b
+ id:
+ description: VPC resource id
+ returned: always
+ type: str
+ sample: vpc-c2e00da5
+ instance_tenancy:
+ description: indicates whether VPC uses default or dedicated tenancy
+ returned: always
+ type: str
+ sample: default
+ ipv6_cidr_block_association_set:
+ description: IPv6 CIDR blocks associated with the VPC
+ returned: success
+ type: list
+ sample:
+ "ipv6_cidr_block_association_set": [
+ {
+ "association_id": "vpc-cidr-assoc-97aeeefd",
+ "ipv6_cidr_block": "2001:db8::/56",
+ "ipv6_cidr_block_state": {
+ "state": "associated"
+ }
+ }
+ ]
+ is_default:
+ description: indicates whether this is the default VPC
+ returned: always
+ type: bool
+ sample: false
+ state:
+ description: state of the VPC
+ returned: always
+ type: str
+ sample: available
+ tags:
+ description: tags attached to the VPC, includes name
+ returned: always
+ type: complex
+ contains:
+ Name:
+ description: name tag for the VPC
+ returned: always
+ type: str
+ sample: pk_vpc4
+'''
+
+try:
+ import botocore
+except ImportError:
+ pass # Handled by AnsibleAWSModule
+
+from time import sleep, time
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict, compare_aws_tags,
+ ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict)
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native
+from ansible.module_utils.network.common.utils import to_subnet
+
+
+def vpc_exists(module, vpc, name, cidr_block, multi):
+ """Returns None or a vpc object depending on the existence of a VPC. When supplied
+ with a CIDR, it will check for matching tags to determine if it is a match
+ otherwise it will assume the VPC does not exist and thus return None.
+ """
+ try:
+ matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': cidr_block}])['Vpcs']
+        # If an exact match using a list of CIDRs isn't found, check for a match with the first CIDR as documented for C(cidr_block)
+ if not matching_vpcs:
+ matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': [cidr_block[0]]}])['Vpcs']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+ if multi:
+ return None
+ elif len(matching_vpcs) == 1:
+ return matching_vpcs[0]['VpcId']
+ elif len(matching_vpcs) > 1:
+ module.fail_json(msg='Currently there are %d VPCs that have the same name and '
+ 'CIDR block you specified. If you would like to create '
+ 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
+ return None
+
+
+@AWSRetry.backoff(delay=3, tries=8, catch_extra_error_codes=['InvalidVpcID.NotFound'])
+def get_classic_link_with_backoff(connection, vpc_id):
+ try:
+ return connection.describe_vpc_classic_link(VpcIds=[vpc_id])['Vpcs'][0].get('ClassicLinkEnabled')
+ except botocore.exceptions.ClientError as e:
+ if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
+ return False
+ else:
+ raise
+
+
+def get_vpc(module, connection, vpc_id):
+ # wait for vpc to be available
+ try:
+ connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id))
+
+ try:
+ vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0]
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+ try:
+ vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to describe VPCs")
+
+ return vpc_obj
+
+
+def update_vpc_tags(connection, module, vpc_id, tags, name):
+ if tags is None:
+ tags = dict()
+
+ tags.update({'Name': name})
+ tags = dict((k, to_native(v)) for k, v in tags.items())
+ try:
+ current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}])['Tags'])
+ tags_to_update, dummy = compare_aws_tags(current_tags, tags, False)
+ if tags_to_update:
+ if not module.check_mode:
+ tags = ansible_dict_to_boto3_tag_list(tags_to_update)
+ vpc_obj = connection.create_tags(Resources=[vpc_id], Tags=tags, aws_retry=True)
+
+ # Wait for tags to be updated
+ expected_tags = boto3_tag_list_to_ansible_dict(tags)
+ filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()]
+ connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters)
+
+ return True
+ else:
+ return False
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update tags")
+
+
+def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
+ if vpc_obj['DhcpOptionsId'] != dhcp_id:
+ if not module.check_mode:
+ try:
+ connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id))
+
+ try:
+ # Wait for DhcpOptionsId to be updated
+ filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}]
+ connection.get_waiter('vpc_available').wait(VpcIds=[vpc_obj['VpcId']], Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json(msg="Failed to wait for DhcpOptionsId to be updated")
+
+ return True
+ else:
+ return False
+
+
+def create_vpc(connection, module, cidr_block, tenancy):
+ try:
+ if not module.check_mode:
+ vpc_obj = connection.create_vpc(CidrBlock=cidr_block, InstanceTenancy=tenancy)
+ else:
+ module.exit_json(changed=True)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to create the VPC")
+
+ # wait for vpc to exist
+ try:
+ connection.get_waiter('vpc_exists').wait(VpcIds=[vpc_obj['Vpc']['VpcId']])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be created.".format(vpc_obj['Vpc']['VpcId']))
+
+ return vpc_obj['Vpc']['VpcId']
+
+
+def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
+ start_time = time()
+ updated = False
+ while time() < start_time + 300:
+ current_value = connection.describe_vpc_attribute(
+ Attribute=attribute,
+ VpcId=vpc_id
+ )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value']
+ if current_value != expected_value:
+ sleep(3)
+ else:
+ updated = True
+ break
+ if not updated:
+ module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
+
+
+def get_cidr_network_bits(module, cidr_block):
+ fixed_cidrs = []
+ for cidr in cidr_block:
+ split_addr = cidr.split('/')
+ if len(split_addr) == 2:
+            # cidr is an IPv4 CIDR that may or may not have host bits set
+ # Get the network bits.
+ valid_cidr = to_subnet(split_addr[0], split_addr[1])
+ if cidr != valid_cidr:
+ module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
+ "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr))
+ fixed_cidrs.append(valid_cidr)
+ else:
+ # let AWS handle invalid CIDRs
+ fixed_cidrs.append(cidr)
+ return fixed_cidrs
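+
+# Small worked example of the normalisation above (addresses are made up): passing
+# ['10.10.5.12/16', '192.168.1.0/24'] warns about the first entry and returns
+# ['10.10.0.0/16', '192.168.1.0/24'], since to_subnet() keeps only the network bits.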
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ cidr_block=dict(type='list', required=True),
+ ipv6_cidr=dict(type='bool', default=False),
+ tenancy=dict(choices=['default', 'dedicated'], default='default'),
+ dns_support=dict(type='bool', default=True),
+ dns_hostnames=dict(type='bool', default=True),
+ dhcp_opts_id=dict(),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ state=dict(choices=['present', 'absent'], default='present'),
+ multi_ok=dict(type='bool', default=False),
+ purge_cidrs=dict(type='bool', default=False),
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params.get('name')
+ cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
+ ipv6_cidr = module.params.get('ipv6_cidr')
+ purge_cidrs = module.params.get('purge_cidrs')
+ tenancy = module.params.get('tenancy')
+ dns_support = module.params.get('dns_support')
+ dns_hostnames = module.params.get('dns_hostnames')
+ dhcp_id = module.params.get('dhcp_opts_id')
+ tags = module.params.get('tags')
+ state = module.params.get('state')
+ multi = module.params.get('multi_ok')
+
+ changed = False
+
+ connection = module.client(
+ 'ec2',
+ retry_decorator=AWSRetry.jittered_backoff(
+ retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
+ )
+ )
+
+ if dns_hostnames and not dns_support:
+ module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
+
+ if state == 'present':
+
+ # Check if VPC exists
+ vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
+
+ if vpc_id is None:
+ vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
+ changed = True
+
+ vpc_obj = get_vpc(module, connection, vpc_id)
+
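+ # Reconcile the requested CIDR blocks with those already associated: work out which
+ # blocks still need to be attached, which association IDs may be removed when
+ # purge_cidrs is set, and the final set the waiter should expect to see.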
+ associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
+ if cidr['CidrBlockState']['State'] != 'disassociated')
+ to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
+ to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in cidr_block]
+ expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add
+
+ if len(cidr_block) > 1:
+ for cidr in to_add:
+ changed = True
+ try:
+ connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
+ if ipv6_cidr:
+ if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
+ module.warn("Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}".format(
+ vpc_id,
+ vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
+ else:
+ try:
+ connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
+
+ if purge_cidrs:
+ for association_id in to_remove:
+ changed = True
+ try:
+ connection.disassociate_vpc_cidr_block(AssociationId=association_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
+ "are associated with the CIDR block before you can disassociate it.".format(association_id))
+
+ if dhcp_id is not None:
+ try:
+ if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update DHCP options")
+
+ if tags is not None or name is not None:
+ try:
+ if update_vpc_tags(connection, module, vpc_id, tags, name):
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to update tags")
+
+ current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
+ current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
+ if current_dns_enabled != dns_support:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update enabled dns support attribute")
+ if current_dns_hostnames != dns_hostnames:
+ changed = True
+ if not module.check_mode:
+ try:
+ connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")
+
+ # wait for associated cidrs to match
+ if to_add or to_remove:
+ try:
+ connection.get_waiter('vpc_available').wait(
+ VpcIds=[vpc_id],
+ Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Failed to wait for CIDRs to update")
+
+ # try to wait for enableDnsSupport and enableDnsHostnames to match
+ wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
+ wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)
+
+ final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
+ final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', []))
+ final_state['id'] = final_state.pop('vpc_id')
+
+ module.exit_json(changed=changed, vpc=final_state)
+
+ elif state == 'absent':
+
+ # Check if VPC exists
+ vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
+
+ if vpc_id is not None:
+ try:
+ if not module.check_mode:
+ connection.delete_vpc(VpcId=vpc_id)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
+ "and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id))
+
+ module.exit_json(changed=changed, vpc={})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_route_table.py b/test/support/integration/plugins/modules/ec2_vpc_route_table.py
new file mode 100644
index 0000000000..96c9b2d04d
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_route_table.py
@@ -0,0 +1,750 @@
+#!/usr/bin/python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_route_table
+short_description: Manage route tables for AWS virtual private clouds
+description:
+ - Manage route tables for AWS virtual private clouds
+version_added: "2.0"
+author:
+- Robert Estelle (@erydo)
+- Rob White (@wimnat)
+- Will Thames (@willthames)
+options:
+ lookup:
+ description: Look up route table by either tags or by route table ID. Non-unique tag lookup will fail.
+ If no tags are specified then no lookup for an existing route table is performed and a new
+ route table will be created. To change tags of a route table you must look up by id.
+ default: tag
+ choices: [ 'tag', 'id' ]
+ type: str
+ propagating_vgw_ids:
+ description: Enable route propagation from virtual gateways specified by ID.
+ type: list
+ elements: str
+ purge_routes:
+ version_added: "2.3"
+ description: Purge existing routes that are not found in routes.
+ type: bool
+ default: 'yes'
+ purge_subnets:
+ version_added: "2.3"
+ description: Purge existing subnets that are not found in subnets. Ignored unless the subnets option is supplied.
+ default: 'true'
+ type: bool
+ purge_tags:
+ version_added: "2.5"
+ description: Purge existing tags that are not found in I(tags).
+ type: bool
+ default: 'no'
+ route_table_id:
+ description:
+ - The ID of the route table to update or delete.
+ - Required when I(lookup=id).
+ type: str
+ routes:
+ description: List of routes in the route table.
+ Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id',
+ 'instance_id', 'network_interface_id', or 'vpc_peering_connection_id'.
+ If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'.
+ Routes are required for present states.
+ type: list
+ elements: dict
+ state:
+ description: Create or destroy the VPC route table.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ subnets:
+ description: An array of subnets to add to this route table. Subnets may be specified
+ by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'.
+ type: list
+ elements: str
+ tags:
+ description: >
+ A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 }). Tags are
+ used to uniquely identify route tables within a VPC when the route_table_id is not supplied.
+ aliases: [ "resource_tags" ]
+ type: dict
+ vpc_id:
+ description:
+ - VPC ID of the VPC in which to create the route table.
+ - Required when I(state=present) or I(lookup=tag).
+ type: str
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic creation example:
+- name: Set up public subnet route table
+ ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Public
+ subnets:
+ - "{{ jumpbox_subnet.subnet.id }}"
+ - "{{ frontend_subnet.subnet.id }}"
+ - "{{ vpn_subnet.subnet_id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ register: public_route_table
+
+- name: Set up NAT-protected route table
+ ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Internal
+ subnets:
+ - "{{ application_subnet.subnet.id }}"
+ - 'Database Subnet'
+ - '10.0.0.0/8'
+ routes:
+ - dest: 0.0.0.0/0
+ instance_id: "{{ nat.instance_id }}"
+ register: nat_route_table
+
+- name: delete route table
+ ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ route_table_id: "{{ route_table.id }}"
+ lookup: id
+ state: absent
+'''
+
+RETURN = '''
+route_table:
+ description: Route Table result
+ returned: always
+ type: complex
+ contains:
+ associations:
+ description: List of subnets associated with the route table
+ returned: always
+ type: complex
+ contains:
+ main:
+ description: Whether this is the main route table
+ returned: always
+ type: bool
+ sample: false
+ route_table_association_id:
+ description: ID of association between route table and subnet
+ returned: always
+ type: str
+ sample: rtbassoc-ab47cfc3
+ route_table_id:
+ description: ID of the route table
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ subnet_id:
+ description: ID of the subnet
+ returned: always
+ type: str
+ sample: subnet-82055af9
+ id:
+ description: ID of the route table (same as route_table_id for backwards compatibility)
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ propagating_vgws:
+ description: List of Virtual Private Gateways propagating routes
+ returned: always
+ type: list
+ sample: []
+ route_table_id:
+ description: ID of the route table
+ returned: always
+ type: str
+ sample: rtb-bf779ed7
+ routes:
+ description: List of routes in the route table
+ returned: always
+ type: complex
+ contains:
+ destination_cidr_block:
+ description: CIDR block of destination
+ returned: always
+ type: str
+ sample: 10.228.228.0/22
+ gateway_id:
+ description: ID of the gateway
+ returned: when gateway is local or internet gateway
+ type: str
+ sample: local
+ instance_id:
+ description: ID of a NAT instance
+ returned: when the route is via an EC2 instance
+ type: str
+ sample: i-abcd123456789
+ instance_owner_id:
+ description: AWS account owning the NAT instance
+ returned: when the route is via an EC2 instance
+ type: str
+ sample: 123456789012
+ nat_gateway_id:
+ description: ID of the NAT gateway
+ returned: when the route is via a NAT gateway
+ type: str
+ sample: nat-0123456789abcdef0
+ origin:
+ description: mechanism through which the route is in the table
+ returned: always
+ type: str
+ sample: CreateRouteTable
+ state:
+ description: state of the route
+ returned: always
+ type: str
+ sample: active
+ tags:
+ description: Tags applied to the route table
+ returned: always
+ type: dict
+ sample:
+ Name: Public route table
+ Public: 'true'
+ vpc_id:
+ description: ID for the VPC in which the route lives
+ returned: always
+ type: str
+ sample: vpc-6e2d2407
+'''
+
+import re
+from time import sleep
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
+from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
+from ansible.module_utils.ec2 import compare_aws_tags, AWSRetry
+
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
+SUBNET_RE = re.compile(r'^subnet-[A-Za-z0-9]+$')
+ROUTE_TABLE_RE = re.compile(r'^rtb-[A-Za-z0-9]+$')
+
+
+@AWSRetry.exponential_backoff()
+def describe_subnets_with_backoff(connection, **params):
+ return connection.describe_subnets(**params)['Subnets']
+
+
+def find_subnets(connection, module, vpc_id, identified_subnets):
+ """
+ Finds a list of subnets, each identified either by a raw ID, a unique
+ 'Name' tag, or a CIDR such as 10.0.0.0/8.
+
+ Note that this function is duplicated in other ec2 modules, and should
+ potentially be moved into a shared module_utils
+ """
+ subnet_ids = []
+ subnet_names = []
+ subnet_cidrs = []
+ for subnet in (identified_subnets or []):
+ if re.match(SUBNET_RE, subnet):
+ subnet_ids.append(subnet)
+ elif re.match(CIDR_RE, subnet):
+ subnet_cidrs.append(subnet)
+ else:
+ subnet_names.append(subnet)
+
+ subnets_by_id = []
+ if subnet_ids:
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
+ try:
+ subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids)
+
+ subnets_by_cidr = []
+ if subnet_cidrs:
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr': subnet_cidrs})
+ try:
+ subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs)
+
+ subnets_by_name = []
+ if subnet_names:
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'tag:Name': subnet_names})
+ try:
+ subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names)
+
+ for name in subnet_names:
+ matching_count = len([1 for s in subnets_by_name for t in s.get('Tags', []) if t['Key'] == 'Name' and t['Value'] == name])
+ if matching_count == 0:
+ module.fail_json(msg='Subnet named "{0}" does not exist'.format(name))
+ elif matching_count > 1:
+ module.fail_json(msg='Multiple subnets named "{0}"'.format(name))
+
+ return subnets_by_id + subnets_by_cidr + subnets_by_name
+
+
+def find_igw(connection, module, vpc_id):
+ """
+ Finds the Internet gateway for the given VPC ID.
+ """
+ filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
+ try:
+ igw = connection.describe_internet_gateways(Filters=filters)['InternetGateways']
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='No IGW found for VPC {0}'.format(vpc_id))
+ if len(igw) == 1:
+ return igw[0]['InternetGatewayId']
+ elif len(igw) == 0:
+ module.fail_json(msg='No IGWs found for VPC {0}'.format(vpc_id))
+ else:
+ module.fail_json(msg='Multiple IGWs found for VPC {0}'.format(vpc_id))
+
+
+@AWSRetry.exponential_backoff()
+def describe_tags_with_backoff(connection, resource_id):
+ filters = ansible_dict_to_boto3_filter_list({'resource-id': resource_id})
+ paginator = connection.get_paginator('describe_tags')
+ tags = paginator.paginate(Filters=filters).build_full_result()['Tags']
+ return boto3_tag_list_to_ansible_dict(tags)
+
+
+def tags_match(match_tags, candidate_tags):
+ return all((k in candidate_tags and candidate_tags[k] == v
+ for k, v in match_tags.items()))
+
+
+def ensure_tags(connection=None, module=None, resource_id=None, tags=None, purge_tags=None, check_mode=None):
+ try:
+ cur_tags = describe_tags_with_backoff(connection, resource_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to list tags for VPC')
+
+ to_add, to_delete = compare_aws_tags(cur_tags, tags, purge_tags)
+
+ if not to_add and not to_delete:
+ return {'changed': False, 'tags': cur_tags}
+ if check_mode:
+ if not purge_tags:
+ cur_tags.update(tags)
+ tags = cur_tags
+ return {'changed': True, 'tags': tags}
+
+ if to_delete:
+ try:
+ connection.delete_tags(Resources=[resource_id], Tags=[{'Key': k} for k in to_delete])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete tags")
+ if to_add:
+ try:
+ connection.create_tags(Resources=[resource_id], Tags=ansible_dict_to_boto3_tag_list(to_add))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create tags")
+
+ try:
+ latest_tags = describe_tags_with_backoff(connection, resource_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to list tags for VPC')
+ return {'changed': True, 'tags': latest_tags}
+
+
+@AWSRetry.exponential_backoff()
+def describe_route_tables_with_backoff(connection, **params):
+ try:
+ return connection.describe_route_tables(**params)['RouteTables']
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'InvalidRouteTableID.NotFound':
+ return None
+ else:
+ raise
+
+
+def get_route_table_by_id(connection, module, route_table_id):
+
+ route_table = None
+ try:
+ route_tables = describe_route_tables_with_backoff(connection, RouteTableIds=[route_table_id])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get route table")
+ if route_tables:
+ route_table = route_tables[0]
+
+ return route_table
+
+
+def get_route_table_by_tags(connection, module, vpc_id, tags):
+ count = 0
+ route_table = None
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
+ try:
+ route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get route table")
+ for table in route_tables:
+ this_tags = describe_tags_with_backoff(connection, table['RouteTableId'])
+ if tags_match(tags, this_tags):
+ route_table = table
+ count += 1
+
+ if count > 1:
+ module.fail_json(msg="Tags provided do not identify a unique route table")
+ else:
+ return route_table
+
+
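+# Normalise NAT gateway and prefix-list style destinations in the requested spec,
+# then treat the spec as matching when it is a subset of the existing route's fields.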
+def route_spec_matches_route(route_spec, route):
+ if route_spec.get('GatewayId') and 'nat-' in route_spec['GatewayId']:
+ route_spec['NatGatewayId'] = route_spec.pop('GatewayId')
+ if route_spec.get('GatewayId') and 'vpce-' in route_spec['GatewayId']:
+ if route_spec.get('DestinationCidrBlock', '').startswith('pl-'):
+ route_spec['DestinationPrefixListId'] = route_spec.pop('DestinationCidrBlock')
+
+ return set(route_spec.items()).issubset(route.items())
+
+
+def route_spec_matches_route_cidr(route_spec, route):
+ return route_spec['DestinationCidrBlock'] == route.get('DestinationCidrBlock')
+
+
+def rename_key(d, old_key, new_key):
+ d[new_key] = d.pop(old_key)
+
+
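+# Classify the spec against the existing routes: an exact match, a destination-CIDR-only
+# match that needs replacing, or no match at all (returns None implicitly).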
+def index_of_matching_route(route_spec, routes_to_match):
+ for i, route in enumerate(routes_to_match):
+ if route_spec_matches_route(route_spec, route):
+ return "exact", i
+ elif 'Origin' in route_spec and route_spec['Origin'] != 'EnableVgwRoutePropagation':
+ if route_spec_matches_route_cidr(route_spec, route):
+ return "replace", i
+
+
+def ensure_routes(connection=None, module=None, route_table=None, route_specs=None,
+ propagating_vgw_ids=None, check_mode=None, purge_routes=None):
+ routes_to_match = [route for route in route_table['Routes']]
+ route_specs_to_create = []
+ route_specs_to_recreate = []
+ for route_spec in route_specs:
+ match = index_of_matching_route(route_spec, routes_to_match)
+ if match is None:
+ if route_spec.get('DestinationCidrBlock'):
+ route_specs_to_create.append(route_spec)
+ else:
+ module.warn("Skipping creating {0} because it has no destination cidr block. "
+ "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec))
+ else:
+ if match[0] == "replace":
+ if route_spec.get('DestinationCidrBlock'):
+ route_specs_to_recreate.append(route_spec)
+ else:
+ module.warn("Skipping recreating route {0} because it has no destination cidr block.".format(route_spec))
+ del routes_to_match[match[1]]
+
+ routes_to_delete = []
+ if purge_routes:
+ for r in routes_to_match:
+ if not r.get('DestinationCidrBlock'):
+ module.warn("Skipping purging route {0} because it has no destination cidr block. "
+ "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(r))
+ continue
+ if r['Origin'] == 'CreateRoute':
+ routes_to_delete.append(r)
+
+ changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate)
+ if changed and not check_mode:
+ for route in routes_to_delete:
+ try:
+ connection.delete_route(RouteTableId=route_table['RouteTableId'], DestinationCidrBlock=route['DestinationCidrBlock'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete route")
+
+ for route_spec in route_specs_to_recreate:
+ try:
+ connection.replace_route(RouteTableId=route_table['RouteTableId'],
+ **route_spec)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't recreate route")
+
+ for route_spec in route_specs_to_create:
+ try:
+ connection.create_route(RouteTableId=route_table['RouteTableId'],
+ **route_spec)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create route")
+
+ return {'changed': bool(changed)}
+
+
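+# Ensure the subnet is associated with the given route table: keep an existing matching
+# association, otherwise disassociate the subnet from any other table and create the
+# new association (only reported as changed in check mode).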
+def ensure_subnet_association(connection=None, module=None, vpc_id=None, route_table_id=None, subnet_id=None,
+ check_mode=None):
+ filters = ansible_dict_to_boto3_filter_list({'association.subnet-id': subnet_id, 'vpc-id': vpc_id})
+ try:
+ route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get route tables")
+ for route_table in route_tables:
+ if route_table['RouteTableId'] is None:
+ continue
+ for a in route_table['Associations']:
+ if a['Main']:
+ continue
+ if a['SubnetId'] == subnet_id:
+ if route_table['RouteTableId'] == route_table_id:
+ return {'changed': False, 'association_id': a['RouteTableAssociationId']}
+ else:
+ if check_mode:
+ return {'changed': True}
+ try:
+ connection.disassociate_route_table(AssociationId=a['RouteTableAssociationId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")
+
+ try:
+ association_id = connection.associate_route_table(RouteTableId=route_table_id, SubnetId=subnet_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't associate subnet with route table")
+ return {'changed': True, 'association_id': association_id}
+
+
+def ensure_subnet_associations(connection=None, module=None, route_table=None, subnets=None,
+ check_mode=None, purge_subnets=None):
+ current_association_ids = [a['RouteTableAssociationId'] for a in route_table['Associations'] if not a['Main']]
+ new_association_ids = []
+ changed = False
+ for subnet in subnets:
+ result = ensure_subnet_association(connection=connection, module=module, vpc_id=route_table['VpcId'],
+ route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId'], check_mode=check_mode)
+ changed = changed or result['changed']
+ if changed and check_mode:
+ return {'changed': True}
+ new_association_ids.append(result['association_id'])
+
+ if purge_subnets:
+ to_delete = [a_id for a_id in current_association_ids
+ if a_id not in new_association_ids]
+
+ for a_id in to_delete:
+ changed = True
+ if not check_mode:
+ try:
+ connection.disassociate_route_table(AssociationId=a_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")
+
+ return {'changed': changed}
+
+
+def ensure_propagation(connection=None, module=None, route_table=None, propagating_vgw_ids=None,
+ check_mode=None):
+ changed = False
+ gateways = [gateway['GatewayId'] for gateway in route_table['PropagatingVgws']]
+ to_add = set(propagating_vgw_ids) - set(gateways)
+ if to_add:
+ changed = True
+ if not check_mode:
+ for vgw_id in to_add:
+ try:
+ connection.enable_vgw_route_propagation(RouteTableId=route_table['RouteTableId'],
+ GatewayId=vgw_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't enable route propagation")
+
+ return {'changed': changed}
+
+
+def ensure_route_table_absent(connection, module):
+
+ lookup = module.params.get('lookup')
+ route_table_id = module.params.get('route_table_id')
+ tags = module.params.get('tags')
+ vpc_id = module.params.get('vpc_id')
+ purge_subnets = module.params.get('purge_subnets')
+
+ if lookup == 'tag':
+ if tags is not None:
+ route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
+ else:
+ route_table = None
+ elif lookup == 'id':
+ route_table = get_route_table_by_id(connection, module, route_table_id)
+
+ if route_table is None:
+ return {'changed': False}
+
+ # disassociate subnets before deleting route table
+ if not module.check_mode:
+ ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
+ subnets=[], check_mode=False, purge_subnets=purge_subnets)
+ try:
+ connection.delete_route_table(RouteTableId=route_table['RouteTableId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error deleting route table")
+
+ return {'changed': True}
+
+
+def get_route_table_info(connection, module, route_table):
+ result = get_route_table_by_id(connection, module, route_table['RouteTableId'])
+ try:
+ result['Tags'] = describe_tags_with_backoff(connection, route_table['RouteTableId'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get tags for route table")
+ result = camel_dict_to_snake_dict(result, ignore_list=['Tags'])
+ # backwards compatibility
+ result['id'] = result['route_table_id']
+ return result
+
+
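+# Convert the user-supplied route dicts into boto3 CamelCase parameters, resolving the
+# special 'igw' gateway value to the VPC's internet gateway and mapping 'nat-' prefixed
+# gateway ids to NatGatewayId.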
+def create_route_spec(connection, module, vpc_id):
+ routes = module.params.get('routes')
+
+ for route_spec in routes:
+ rename_key(route_spec, 'dest', 'destination_cidr_block')
+
+ if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw':
+ igw = find_igw(connection, module, vpc_id)
+ route_spec['gateway_id'] = igw
+ if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'):
+ rename_key(route_spec, 'gateway_id', 'nat_gateway_id')
+
+ return snake_dict_to_camel_dict(routes, capitalize_first=True)
+
+
+def ensure_route_table_present(connection, module):
+
+ lookup = module.params.get('lookup')
+ propagating_vgw_ids = module.params.get('propagating_vgw_ids')
+ purge_routes = module.params.get('purge_routes')
+ purge_subnets = module.params.get('purge_subnets')
+ purge_tags = module.params.get('purge_tags')
+ route_table_id = module.params.get('route_table_id')
+ subnets = module.params.get('subnets')
+ tags = module.params.get('tags')
+ vpc_id = module.params.get('vpc_id')
+ routes = create_route_spec(connection, module, vpc_id)
+
+ changed = False
+ tags_valid = False
+
+ if lookup == 'tag':
+ if tags is not None:
+ try:
+ route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error finding route table with lookup 'tag'")
+ else:
+ route_table = None
+ elif lookup == 'id':
+ try:
+ route_table = get_route_table_by_id(connection, module, route_table_id)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error finding route table with lookup 'id'")
+
+ # If no route table returned then create new route table
+ if route_table is None:
+ changed = True
+ if not module.check_mode:
+ try:
+ route_table = connection.create_route_table(VpcId=vpc_id)['RouteTable']
+ # try to wait for route table to be present before moving on
+ get_waiter(
+ connection, 'route_table_exists'
+ ).wait(
+ RouteTableIds=[route_table['RouteTableId']],
+ )
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Error creating route table")
+ else:
+ route_table = {"id": "rtb-xxxxxxxx", "route_table_id": "rtb-xxxxxxxx", "vpc_id": vpc_id}
+ module.exit_json(changed=changed, route_table=route_table)
+
+ if routes is not None:
+ result = ensure_routes(connection=connection, module=module, route_table=route_table,
+ route_specs=routes, propagating_vgw_ids=propagating_vgw_ids,
+ check_mode=module.check_mode, purge_routes=purge_routes)
+ changed = changed or result['changed']
+
+ if propagating_vgw_ids is not None:
+ result = ensure_propagation(connection=connection, module=module, route_table=route_table,
+ propagating_vgw_ids=propagating_vgw_ids, check_mode=module.check_mode)
+ changed = changed or result['changed']
+
+ if not tags_valid and tags is not None:
+ result = ensure_tags(connection=connection, module=module, resource_id=route_table['RouteTableId'], tags=tags,
+ purge_tags=purge_tags, check_mode=module.check_mode)
+ route_table['Tags'] = result['tags']
+ changed = changed or result['changed']
+
+ if subnets is not None:
+ associated_subnets = find_subnets(connection, module, vpc_id, subnets)
+
+ result = ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
+ subnets=associated_subnets, check_mode=module.check_mode,
+ purge_subnets=purge_subnets)
+ changed = changed or result['changed']
+
+ if changed:
+ # pause to allow route table routes/subnets/associations to be updated before exiting with final state
+ sleep(5)
+ module.exit_json(changed=changed, route_table=get_route_table_info(connection, module, route_table))
+
+
+def main():
+ argument_spec = dict(
+ lookup=dict(default='tag', choices=['tag', 'id']),
+ propagating_vgw_ids=dict(type='list'),
+ purge_routes=dict(default=True, type='bool'),
+ purge_subnets=dict(default=True, type='bool'),
+ purge_tags=dict(default=False, type='bool'),
+ route_table_id=dict(),
+ routes=dict(default=[], type='list'),
+ state=dict(default='present', choices=['present', 'absent']),
+ subnets=dict(type='list'),
+ tags=dict(type='dict', aliases=['resource_tags']),
+ vpc_id=dict()
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[['lookup', 'id', ['route_table_id']],
+ ['lookup', 'tag', ['vpc_id']],
+ ['state', 'present', ['vpc_id']]],
+ supports_check_mode=True)
+
+ connection = module.client('ec2')
+
+ state = module.params.get('state')
+
+ if state == 'present':
+ result = ensure_route_table_present(connection, module)
+ elif state == 'absent':
+ result = ensure_route_table_absent(connection, module)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/ec2_vpc_subnet.py b/test/support/integration/plugins/modules/ec2_vpc_subnet.py
new file mode 100644
index 0000000000..5085e99b79
--- /dev/null
+++ b/test/support/integration/plugins/modules/ec2_vpc_subnet.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet
+short_description: Manage subnets in AWS virtual private clouds
+description:
+ - Manage subnets in AWS virtual private clouds.
+version_added: "2.0"
+author:
+- Robert Estelle (@erydo)
+- Brad Davidson (@brandond)
+requirements: [ boto3 ]
+options:
+ az:
+ description:
+ - "The availability zone for the subnet."
+ type: str
+ cidr:
+ description:
+ - "The CIDR block for the subnet. E.g. 192.0.2.0/24."
+ type: str
+ required: true
+ ipv6_cidr:
+ description:
+ - "The IPv6 CIDR block for the subnet. The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range."
+ - "Required if I(assign_instances_ipv6=true)"
+ version_added: "2.5"
+ type: str
+ tags:
+ description:
+ - "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
+ aliases: [ 'resource_tags' ]
+ type: dict
+ state:
+ description:
+ - "Create or remove the subnet."
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ vpc_id:
+ description:
+ - "VPC ID of the VPC in which to create or delete the subnet."
+ required: true
+ type: str
+ map_public:
+ description:
+ - "Specify C(yes) to indicate that instances launched into the subnet should be assigned public IP address by default."
+ type: bool
+ default: 'no'
+ version_added: "2.4"
+ assign_instances_ipv6:
+ description:
+ - "Specify C(yes) to indicate that instances launched into the subnet should be automatically assigned an IPv6 address."
+ type: bool
+ default: false
+ version_added: "2.5"
+ wait:
+ description:
+ - "When I(wait=true) and I(state=present), module will wait for subnet to be in available state before continuing."
+ type: bool
+ default: true
+ version_added: "2.5"
+ wait_timeout:
+ description:
+ - "Number of seconds to wait for subnet to become available I(wait=True)."
+ default: 300
+ version_added: "2.5"
+ type: int
+ purge_tags:
+ description:
+ - Whether or not to remove tags that do not appear in the I(tags) list.
+ type: bool
+ default: true
+ version_added: "2.5"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create subnet for database servers
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+ tags:
+ Name: Database Subnet
+ register: database_subnet
+
+- name: Remove subnet for database servers
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+
+- name: Create subnet with IPv6 block assigned
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.1.100.0/24
+ ipv6_cidr: 2001:db8:0:102::/64
+
+- name: Remove IPv6 block assigned to subnet
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.1.100.0/24
+ ipv6_cidr: ''
+'''
+
+RETURN = '''
+subnet:
+ description: Dictionary of subnet values
+ returned: I(state=present)
+ type: complex
+ contains:
+ id:
+ description: Subnet resource id
+ returned: I(state=present)
+ type: str
+ sample: subnet-b883b2c4
+ cidr_block:
+ description: The IPv4 CIDR of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: "10.0.0.0/16"
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block actively associated with the Subnet
+ returned: I(state=present)
+ type: str
+ sample: "2001:db8:0:102::/64"
+ availability_zone:
+ description: Availability zone of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: us-east-1a
+ state:
+ description: state of the Subnet
+ returned: I(state=present)
+ type: str
+ sample: available
+ tags:
+ description: tags attached to the Subnet, includes name
+ returned: I(state=present)
+ type: dict
+ sample: {"Name": "My Subnet", "env": "staging"}
+ map_public_ip_on_launch:
+ description: whether public IP is auto-assigned to new instances
+ returned: I(state=present)
+ type: bool
+ sample: false
+ assign_ipv6_address_on_creation:
+ description: whether IPv6 address is auto-assigned to new instances
+ returned: I(state=present)
+ type: bool
+ sample: false
+ vpc_id:
+ description: the id of the VPC where this Subnet exists
+ returned: I(state=present)
+ type: str
+ sample: vpc-67236184
+ available_ip_address_count:
+ description: number of available IPv4 addresses
+ returned: I(state=present)
+ type: str
+ sample: 251
+ default_for_az:
+ description: indicates whether this is the default Subnet for this Availability Zone
+ returned: I(state=present)
+ type: bool
+ sample: false
+ ipv6_association_id:
+ description: The IPv6 association ID for the currently associated CIDR
+ returned: I(state=present)
+ type: str
+ sample: subnet-cidr-assoc-b85c74d2
+ ipv6_cidr_block_association_set:
+ description: An array of IPv6 cidr block association set information.
+ returned: I(state=present)
+ type: complex
+ contains:
+ association_id:
+ description: The association ID
+ returned: always
+ type: str
+ ipv6_cidr_block:
+ description: The IPv6 CIDR block that is associated with the subnet.
+ returned: always
+ type: str
+ ipv6_cidr_block_state:
+ description: A hash/dict that contains a single item. The state of the cidr block association.
+ returned: always
+ type: dict
+ contains:
+ state:
+ description: The CIDR block association state.
+ returned: always
+ type: str
+'''
+
+
+import time
+
+try:
+ import botocore
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.aws.waiters import get_waiter
+from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, ansible_dict_to_boto3_tag_list,
+ camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags, AWSRetry)
+
+
+def get_subnet_info(subnet):
+ if 'Subnets' in subnet:
+ return [get_subnet_info(s) for s in subnet['Subnets']]
+ elif 'Subnet' in subnet:
+ subnet = camel_dict_to_snake_dict(subnet['Subnet'])
+ else:
+ subnet = camel_dict_to_snake_dict(subnet)
+
+ if 'tags' in subnet:
+ subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags'])
+ else:
+ subnet['tags'] = dict()
+
+ if 'subnet_id' in subnet:
+ subnet['id'] = subnet['subnet_id']
+ del subnet['subnet_id']
+
+ subnet['ipv6_cidr_block'] = ''
+ subnet['ipv6_association_id'] = ''
+ ipv6set = subnet.get('ipv6_cidr_block_association_set')
+ if ipv6set:
+ for item in ipv6set:
+ if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'):
+ subnet['ipv6_cidr_block'] = item['ipv6_cidr_block']
+ subnet['ipv6_association_id'] = item['association_id']
+
+ return subnet
+
+
+@AWSRetry.exponential_backoff()
+def describe_subnets_with_backoff(client, **params):
+ return client.describe_subnets(**params)
+
+
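+# On botocore releases older than 1.7.0, translate the remaining wait_timeout into an
+# explicit WaiterConfig (5 second delay, MaxAttempts derived from the time left).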
+def waiter_params(module, params, start_time):
+ if not module.botocore_at_least("1.7.0"):
+ remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time())
+ params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5}
+ return params
+
+
+def handle_waiter(conn, module, waiter_name, params, start_time):
+ try:
+ get_waiter(conn, waiter_name).wait(
+ **waiter_params(module, params, start_time)
+ )
+ except botocore.exceptions.WaiterError as e:
+ module.fail_json_aws(e, "Failed to wait for updates to complete")
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "An exception happened while trying to wait for updates")
+
+
+def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, az=None, start_time=None):
+ wait = module.params['wait']
+ wait_timeout = module.params['wait_timeout']
+
+ params = dict(VpcId=vpc_id,
+ CidrBlock=cidr)
+
+ if ipv6_cidr:
+ params['Ipv6CidrBlock'] = ipv6_cidr
+
+ if az:
+ params['AvailabilityZone'] = az
+
+ try:
+ subnet = get_subnet_info(conn.create_subnet(**params))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create subnet")
+
+ # Sometimes AWS takes its time to create a subnet, so using the
+ # new subnet's id to do things like create tags results in an
+ # exception.
+ if wait and subnet.get('state') != 'available':
+ handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
+ try:
+ conn.get_waiter('subnet_available').wait(
+ **waiter_params(module, {'SubnetIds': [subnet['id']]}, start_time)
+ )
+ subnet['state'] = 'available'
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, "Create subnet action timed out waiting for subnet to become available")
+
+ return subnet
+
+
+def ensure_tags(conn, module, subnet, tags, purge_tags, start_time):
+ changed = False
+
+ filters = ansible_dict_to_boto3_filter_list({'resource-id': subnet['id'], 'resource-type': 'subnet'})
+ try:
+ cur_tags = conn.describe_tags(Filters=filters)
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't describe tags")
+
+ to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
+
+ if to_update:
+ try:
+ if not module.check_mode:
+ AWSRetry.exponential_backoff(
+ catch_extra_error_codes=['InvalidSubnetID.NotFound']
+ )(conn.create_tags)(
+ Resources=[subnet['id']],
+ Tags=ansible_dict_to_boto3_tag_list(to_update)
+ )
+
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't create tags")
+
+ if to_delete:
+ try:
+ if not module.check_mode:
+ tags_list = []
+ for key in to_delete:
+ tags_list.append({'Key': key})
+
+ AWSRetry.exponential_backoff(
+ catch_extra_error_codes=['InvalidSubnetID.NotFound']
+ )(conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list)
+
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete tags")
+
+ if module.params['wait'] and not module.check_mode:
+ # Wait for tags to be updated
+ filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()]
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+ return changed
+
+
+def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time):
+ if check_mode:
+ return
+ try:
+ conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time):
+ if check_mode:
+ return
+ try:
+ conn.modify_subnet_attribute(SubnetId=subnet['id'], AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6})
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
+
+
+def disassociate_ipv6_cidr(conn, module, subnet, start_time):
+ if subnet.get('assign_ipv6_address_on_creation'):
+ ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time)
+
+ try:
+ conn.disassociate_subnet_cidr_block(AssociationId=subnet['ipv6_association_id'])
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}"
+ .format(subnet['ipv6_association_id'], subnet['id']))
+
+ # Wait for cidr block to be disassociated
+ if module.params['wait']:
+ filters = ansible_dict_to_boto3_filter_list(
+ {'ipv6-cidr-block-association.state': ['disassociated'],
+ 'vpc-id': subnet['vpc_id']}
+ )
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+
+def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time):
+ wait = module.params['wait']
+ changed = False
+
+ if subnet['ipv6_association_id'] and not ipv6_cidr:
+ if not check_mode:
+ disassociate_ipv6_cidr(conn, module, subnet, start_time)
+ changed = True
+
+ if ipv6_cidr:
+ filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr,
+ 'vpc-id': subnet['vpc_id']})
+
+ try:
+ check_subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get subnet info")
+
+ if check_subnets and check_subnets[0]['ipv6_cidr_block']:
+ module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr))
+
+ if subnet['ipv6_association_id']:
+ if not check_mode:
+ disassociate_ipv6_cidr(conn, module, subnet, start_time)
+ changed = True
+
+ try:
+ if not check_mode:
+ associate_resp = conn.associate_subnet_cidr_block(SubnetId=subnet['id'], Ipv6CidrBlock=ipv6_cidr)
+ changed = True
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id']))
+ else:
+ if not check_mode and wait:
+ filters = ansible_dict_to_boto3_filter_list(
+ {'ipv6-cidr-block-association.state': ['associated'],
+ 'vpc-id': subnet['vpc_id']}
+ )
+ handle_waiter(conn, module, 'subnet_exists',
+ {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
+
+ if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'):
+ subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId']
+ subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
+ if subnet['ipv6_cidr_block_association_set']:
+ subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])
+ else:
+ subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']))
+
+ return changed
+
+
+def get_matching_subnet(conn, module, vpc_id, cidr):
+ filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr})
+ try:
+ subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't get matching subnet")
+
+ if subnets:
+ return subnets[0]
+
+ return None
+
+
+def ensure_subnet_present(conn, module):
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ changed = False
+
+ # Initialize start so max time does not exceed the specified wait_timeout for multiple operations
+ start_time = time.time()
+
+ if subnet is None:
+ if not module.check_mode:
+ subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'],
+ ipv6_cidr=module.params['ipv6_cidr'], az=module.params['az'], start_time=start_time)
+ changed = True
+ # Subnet will be None when check_mode is true
+ if subnet is None:
+ return {
+ 'changed': changed,
+ 'subnet': {}
+ }
+ if module.params['wait']:
+ handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'):
+ if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time):
+ changed = True
+
+ if module.params['map_public'] != subnet['map_public_ip_on_launch']:
+ ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time)
+ changed = True
+
+ if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'):
+ ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time)
+ changed = True
+
+ if module.params['tags'] != subnet['tags']:
+ stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items())
+ if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time):
+ changed = True
+
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ if not module.check_mode and module.params['wait']:
+ # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation
+ # so we only wait for those if necessary just before returning the subnet
+ subnet = ensure_final_subnet(conn, module, subnet, start_time)
+
+ return {
+ 'changed': changed,
+ 'subnet': subnet
+ }
+
+
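+# Re-read the subnet until map_public_ip_on_launch and assign_ipv6_address_on_creation
+# reflect the requested values, retrying up to 30 times with 5 second pauses, since
+# these attributes are not immediately consistent after modification.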
+def ensure_final_subnet(conn, module, subnet, start_time):
+ for rewait in range(0, 30):
+ map_public_correct = False
+ assign_ipv6_correct = False
+
+ if module.params['map_public'] == subnet['map_public_ip_on_launch']:
+ map_public_correct = True
+ else:
+ if module.params['map_public']:
+ handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time)
+ else:
+ handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'):
+ assign_ipv6_correct = True
+ else:
+ if module.params['assign_instances_ipv6']:
+ handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
+ else:
+ handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
+
+ if map_public_correct and assign_ipv6_correct:
+ break
+
+ time.sleep(5)
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+
+ return subnet
+
+
+def ensure_subnet_absent(conn, module):
+ subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
+ if subnet is None:
+ return {'changed': False}
+
+ try:
+ if not module.check_mode:
+ conn.delete_subnet(SubnetId=subnet['id'])
+ if module.params['wait']:
+ handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time())
+ return {'changed': True}
+ except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Couldn't delete subnet")
+
+
+def main():
+ argument_spec = dict(
+ az=dict(default=None, required=False),
+ cidr=dict(required=True),
+ ipv6_cidr=dict(default='', required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
+ vpc_id=dict(required=True),
+ map_public=dict(default=False, required=False, type='bool'),
+ assign_instances_ipv6=dict(default=False, required=False, type='bool'),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300, required=False),
+ purge_tags=dict(default=True, type='bool')
+ )
+
+ required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])]
+
+ module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+
+ if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'):
+ module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string")
+
+ if not module.botocore_at_least("1.7.0"):
+ module.warn("botocore >= 1.7.0 is required to use wait_timeout for custom wait times")
+
+ connection = module.client('ec2')
+
+ state = module.params.get('state')
+
+ try:
+ if state == 'present':
+ result = ensure_subnet_present(connection, module)
+ elif state == 'absent':
+ result = ensure_subnet_absent(connection, module)
+ except botocore.exceptions.ClientError as e:
+ module.fail_json_aws(e)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/hcloud_server.py b/test/support/integration/plugins/modules/hcloud_server.py
new file mode 100644
index 0000000000..791c890a29
--- /dev/null
+++ b/test/support/integration/plugins/modules/hcloud_server.py
@@ -0,0 +1,555 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ "metadata_version": "1.1",
+ "status": ["preview"],
+ "supported_by": "community",
+}
+
+DOCUMENTATION = """
+---
+module: hcloud_server
+
+short_description: Create and manage cloud servers on the Hetzner Cloud.
+
+version_added: "2.8"
+
+description:
+ - Create, update and manage cloud servers on the Hetzner Cloud.
+
+author:
+ - Lukas Kaemmerling (@LKaemmerling)
+
+options:
+ id:
+ description:
+ - The ID of the Hetzner Cloud server to manage.
+ - Only required if no server I(name) is given
+ type: int
+ name:
+ description:
+ - The Name of the Hetzner Cloud server to manage.
+ - Only required if no server I(id) is given or the server does not exist.
+ type: str
+ server_type:
+ description:
+ - The Server Type of the Hetzner Cloud server to manage.
+ - Required if the server does not exist.
+ type: str
+ ssh_keys:
+ description:
+ - List of SSH key names
+ - The key names correspond to the SSH keys configured for your
+ Hetzner Cloud account access.
+ type: list
+ volumes:
+ description:
+ - List of volume IDs that should be attached to the server on server creation.
+ type: list
+ image:
+ description:
+ - Image the server should be created from.
+ - Required if the server does not exist.
+ type: str
+ location:
+ description:
+ - Location of Server.
+ - Required if no I(datacenter) is given and the server does not exist.
+ type: str
+ datacenter:
+ description:
+ - Datacenter of Server.
+ - Required if no I(location) is given and the server does not exist.
+ type: str
+ backups:
+ description:
+ - Enable or disable Backups for the given Server.
+ type: bool
+ default: no
+ upgrade_disk:
+ description:
+ - Resize the disk size, when resizing a server.
+ - If you want to downgrade the server later, this value should be False.
+ type: bool
+ default: no
+ force_upgrade:
+ description:
+ - Force the upgrade of the server.
+ - Power off the server during the upgrade if it is running.
+ type: bool
+ default: no
+ user_data:
+ description:
+ - User Data to be passed to the server on creation.
+ - Only used if the server does not exist.
+ type: str
+ rescue_mode:
+ description:
+ - Add the Hetzner rescue system type you want the server to be booted into.
+ type: str
+ version_added: 2.9
+ labels:
+ description:
+ - User-defined labels (key-value pairs).
+ type: dict
+ delete_protection:
+ description:
+ - Protect the server from deletion.
+ - Needs to be the same as I(rebuild_protection).
+ type: bool
+ version_added: "2.10"
+ rebuild_protection:
+ description:
+ - Protect the server from rebuild.
+ - Needs to be the same as I(delete_protection).
+ type: bool
+ version_added: "2.10"
+ state:
+ description:
+ - State of the server.
+ default: present
+ choices: [ absent, present, restarted, started, stopped, rebuild ]
+ type: str
+extends_documentation_fragment: hcloud
+"""
+
+EXAMPLES = """
+- name: Create a basic server
+ hcloud_server:
+ name: my-server
+ server_type: cx11
+ image: ubuntu-18.04
+ state: present
+
+- name: Create a basic server with ssh key
+ hcloud_server:
+ name: my-server
+ server_type: cx11
+ image: ubuntu-18.04
+ location: fsn1
+ ssh_keys:
+ - me@myorganisation
+ state: present
+
+- name: Resize an existing server
+ hcloud_server:
+ name: my-server
+ server_type: cx21
+ upgrade_disk: yes
+ state: present
+
+- name: Ensure the server is absent (remove if needed)
+ hcloud_server:
+ name: my-server
+ state: absent
+
+- name: Ensure the server is started
+ hcloud_server:
+ name: my-server
+ state: started
+
+- name: Ensure the server is stopped
+ hcloud_server:
+ name: my-server
+ state: stopped
+
+- name: Ensure the server is restarted
+ hcloud_server:
+ name: my-server
+ state: restarted
+
+- name: Ensure the server will be booted in rescue mode and therefore restarted
+ hcloud_server:
+ name: my-server
+ rescue_mode: linux64
+ state: restarted
+
+- name: Ensure the server is rebuilt
+ hcloud_server:
+ name: my-server
+ image: ubuntu-18.04
+ state: rebuild
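+
+# Additional illustrative example: the two protection flags must be given together.
+- name: Ensure the server is protected against deletion and rebuild
+  hcloud_server:
+    name: my-server
+    delete_protection: yes
+    rebuild_protection: yes
+    state: present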
+"""
+
+RETURN = """
+hcloud_server:
+ description: The server instance
+ returned: Always
+ type: complex
+ contains:
+ id:
+ description: Numeric identifier of the server
+ returned: always
+ type: int
+ sample: 1937415
+ name:
+ description: Name of the server
+ returned: always
+ type: str
+ sample: my-server
+ status:
+ description: Status of the server
+ returned: always
+ type: str
+ sample: running
+ server_type:
+ description: Name of the server type of the server
+ returned: always
+ type: str
+ sample: cx11
+ ipv4_address:
+ description: Public IPv4 address of the server
+ returned: always
+ type: str
+ sample: 116.203.104.109
+ ipv6:
+ description: IPv6 network of the server
+ returned: always
+ type: str
+ sample: 2a01:4f8:1c1c:c140::/64
+ location:
+ description: Name of the location of the server
+ returned: always
+ type: str
+ sample: fsn1
+ datacenter:
+ description: Name of the datacenter of the server
+ returned: always
+ type: str
+ sample: fsn1-dc14
+ rescue_enabled:
+            description: True if rescue mode is enabled; the server will then boot into the rescue system on the next reboot
+ returned: always
+ type: bool
+ sample: false
+ backup_window:
+ description: Time window (UTC) in which the backup will run, or null if the backups are not enabled
+ returned: always
+            type: str
+ sample: 22-02
+ labels:
+ description: User-defined labels (key-value pairs)
+ returned: always
+ type: dict
+ delete_protection:
+            description: True if the server is protected from deletion
+ type: bool
+ returned: always
+ sample: false
+ version_added: "2.10"
+ rebuild_protection:
+            description: True if the server is protected from rebuild
+ type: bool
+ returned: always
+ sample: false
+ version_added: "2.10"
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.hcloud import Hcloud
+
+try:
+ from hcloud.volumes.domain import Volume
+ from hcloud.ssh_keys.domain import SSHKey
+ from hcloud.servers.domain import Server
+ from hcloud import APIException
+except ImportError:
+ pass
+
+
+class AnsibleHcloudServer(Hcloud):
+ def __init__(self, module):
+ Hcloud.__init__(self, module, "hcloud_server")
+ self.hcloud_server = None
+
+ def _prepare_result(self):
+ image = None if self.hcloud_server.image is None else to_native(self.hcloud_server.image.name)
+ return {
+ "id": to_native(self.hcloud_server.id),
+ "name": to_native(self.hcloud_server.name),
+ "ipv4_address": to_native(self.hcloud_server.public_net.ipv4.ip),
+ "ipv6": to_native(self.hcloud_server.public_net.ipv6.ip),
+ "image": image,
+ "server_type": to_native(self.hcloud_server.server_type.name),
+ "datacenter": to_native(self.hcloud_server.datacenter.name),
+ "location": to_native(self.hcloud_server.datacenter.location.name),
+ "rescue_enabled": self.hcloud_server.rescue_enabled,
+ "backup_window": to_native(self.hcloud_server.backup_window),
+ "labels": self.hcloud_server.labels,
+ "delete_protection": self.hcloud_server.protection["delete"],
+ "rebuild_protection": self.hcloud_server.protection["rebuild"],
+ "status": to_native(self.hcloud_server.status),
+ }
+
+ def _get_server(self):
+ try:
+ if self.module.params.get("id") is not None:
+ self.hcloud_server = self.client.servers.get_by_id(
+ self.module.params.get("id")
+ )
+ else:
+ self.hcloud_server = self.client.servers.get_by_name(
+ self.module.params.get("name")
+ )
+ except APIException as e:
+ self.module.fail_json(msg=e.message)
+
+ def _create_server(self):
+
+ self.module.fail_on_missing_params(
+ required_params=["name", "server_type", "image"]
+ )
+ params = {
+ "name": self.module.params.get("name"),
+ "server_type": self.client.server_types.get_by_name(
+ self.module.params.get("server_type")
+ ),
+ "user_data": self.module.params.get("user_data"),
+ "labels": self.module.params.get("labels"),
+ }
+        if self.client.images.get_by_name(self.module.params.get("image")) is not None:
+            params["image"] = self.client.images.get_by_name(self.module.params.get("image"))
+        else:
+            # No image with that name exists, so fall back to looking it up by ID.
+            params["image"] = self.client.images.get_by_id(self.module.params.get("image"))
+
+ if self.module.params.get("ssh_keys") is not None:
+ params["ssh_keys"] = [
+ SSHKey(name=ssh_key_name)
+ for ssh_key_name in self.module.params.get("ssh_keys")
+ ]
+
+ if self.module.params.get("volumes") is not None:
+ params["volumes"] = [
+ Volume(id=volume_id) for volume_id in self.module.params.get("volumes")
+ ]
+
+ if self.module.params.get("location") is None and self.module.params.get("datacenter") is None:
+ # When not given, the API will choose the location.
+ params["location"] = None
+ params["datacenter"] = None
+ elif self.module.params.get("location") is not None and self.module.params.get("datacenter") is None:
+ params["location"] = self.client.locations.get_by_name(
+ self.module.params.get("location")
+ )
+ elif self.module.params.get("location") is None and self.module.params.get("datacenter") is not None:
+ params["datacenter"] = self.client.datacenters.get_by_name(
+ self.module.params.get("datacenter")
+ )
+
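+        # Create the server; the response includes the generated root password, which is surfaced in the module result.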
+ if not self.module.check_mode:
+ resp = self.client.servers.create(**params)
+ self.result["root_password"] = resp.root_password
+ resp.action.wait_until_finished(max_retries=1000)
+ [action.wait_until_finished() for action in resp.next_actions]
+
+ rescue_mode = self.module.params.get("rescue_mode")
+ if rescue_mode:
+ self._get_server()
+ self._set_rescue_mode(rescue_mode)
+
+ self._mark_as_changed()
+ self._get_server()
+
+ def _update_server(self):
+ try:
+ rescue_mode = self.module.params.get("rescue_mode")
+ if rescue_mode and self.hcloud_server.rescue_enabled is False:
+ if not self.module.check_mode:
+ self._set_rescue_mode(rescue_mode)
+ self._mark_as_changed()
+ elif not rescue_mode and self.hcloud_server.rescue_enabled is True:
+ if not self.module.check_mode:
+ self.hcloud_server.disable_rescue().wait_until_finished()
+ self._mark_as_changed()
+
+ if self.module.params.get("backups") and self.hcloud_server.backup_window is None:
+ if not self.module.check_mode:
+ self.hcloud_server.enable_backup().wait_until_finished()
+ self._mark_as_changed()
+ elif not self.module.params.get("backups") and self.hcloud_server.backup_window is not None:
+ if not self.module.check_mode:
+ self.hcloud_server.disable_backup().wait_until_finished()
+ self._mark_as_changed()
+
+ labels = self.module.params.get("labels")
+ if labels is not None and labels != self.hcloud_server.labels:
+ if not self.module.check_mode:
+ self.hcloud_server.update(labels=labels)
+ self._mark_as_changed()
+
+ server_type = self.module.params.get("server_type")
+ if server_type is not None and self.hcloud_server.server_type.name != server_type:
+ previous_server_status = self.hcloud_server.status
+ state = self.module.params.get("state")
+ if previous_server_status == Server.STATUS_RUNNING:
+ if not self.module.check_mode:
+ if self.module.params.get("force_upgrade") or state == "stopped":
+ self.stop_server() # Only stopped server can be upgraded
+ else:
+                            self.module.warn(
+                                "You cannot upgrade the running instance %s. Stop the instance first or use force_upgrade=yes."
+                                % self.hcloud_server.name
+                            )
+                timeout = 100
+                if self.module.params.get("upgrade_disk"):
+                    # Upgrading the disk as well makes the resize take considerably longer.
+                    timeout = 1000
+ if not self.module.check_mode:
+ self.hcloud_server.change_type(
+ server_type=self.client.server_types.get_by_name(server_type),
+ upgrade_disk=self.module.params.get("upgrade_disk"),
+ ).wait_until_finished(timeout)
+ if state == "present" and previous_server_status == Server.STATUS_RUNNING or state == "started":
+ self.start_server()
+
+ self._mark_as_changed()
+
+ delete_protection = self.module.params.get("delete_protection")
+ rebuild_protection = self.module.params.get("rebuild_protection")
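+            # Both flags are required together (see the argument spec), so only act when both are given and the requested protection differs from the current state.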
+ if (delete_protection is not None and rebuild_protection is not None) and (
+ delete_protection != self.hcloud_server.protection["delete"] or rebuild_protection !=
+ self.hcloud_server.protection["rebuild"]):
+ if not self.module.check_mode:
+ self.hcloud_server.change_protection(delete=delete_protection,
+ rebuild=rebuild_protection).wait_until_finished()
+ self._mark_as_changed()
+ self._get_server()
+ except APIException as e:
+ self.module.fail_json(msg=e.message)
+
+ def _set_rescue_mode(self, rescue_mode):
+ if self.module.params.get("ssh_keys"):
+ resp = self.hcloud_server.enable_rescue(type=rescue_mode,
+ ssh_keys=[self.client.ssh_keys.get_by_name(ssh_key_name).id
+ for
+ ssh_key_name in
+ self.module.params.get("ssh_keys")])
+ else:
+ resp = self.hcloud_server.enable_rescue(type=rescue_mode)
+ resp.action.wait_until_finished()
+ self.result["root_password"] = resp.root_password
+
+ def start_server(self):
+ try:
+ if self.hcloud_server.status != Server.STATUS_RUNNING:
+ if not self.module.check_mode:
+ self.client.servers.power_on(self.hcloud_server).wait_until_finished()
+ self._mark_as_changed()
+ self._get_server()
+ except APIException as e:
+ self.module.fail_json(msg=e.message)
+
+ def stop_server(self):
+ try:
+ if self.hcloud_server.status != Server.STATUS_OFF:
+ if not self.module.check_mode:
+ self.client.servers.power_off(self.hcloud_server).wait_until_finished()
+ self._mark_as_changed()
+ self._get_server()
+ except APIException as e:
+ self.module.fail_json(msg=e.message)
+
+ def rebuild_server(self):
+ self.module.fail_on_missing_params(
+ required_params=["image"]
+ )
+ try:
+ if not self.module.check_mode:
+ self.client.servers.rebuild(self.hcloud_server, self.client.images.get_by_name(
+ self.module.params.get("image"))).wait_until_finished()
+ self._mark_as_changed()
+
+ self._get_server()
+ except APIException as e:
+ self.module.fail_json(msg=e.message)
+
+ def present_server(self):
+ self._get_server()
+ if self.hcloud_server is None:
+ self._create_server()
+ else:
+ self._update_server()
+
+ def delete_server(self):
+ try:
+ self._get_server()
+ if self.hcloud_server is not None:
+ if not self.module.check_mode:
+ self.client.servers.delete(self.hcloud_server).wait_until_finished()
+ self._mark_as_changed()
+ self.hcloud_server = None
+ except APIException as e:
+ self.module.fail_json(msg=e.message)
+
+ @staticmethod
+ def define_module():
+ return AnsibleModule(
+ argument_spec=dict(
+ id={"type": "int"},
+ name={"type": "str"},
+ image={"type": "str"},
+ server_type={"type": "str"},
+ location={"type": "str"},
+ datacenter={"type": "str"},
+ user_data={"type": "str"},
+ ssh_keys={"type": "list"},
+ volumes={"type": "list"},
+ labels={"type": "dict"},
+ backups={"type": "bool", "default": False},
+ upgrade_disk={"type": "bool", "default": False},
+ force_upgrade={"type": "bool", "default": False},
+ rescue_mode={"type": "str"},
+ delete_protection={"type": "bool"},
+ rebuild_protection={"type": "bool"},
+ state={
+ "choices": ["absent", "present", "restarted", "started", "stopped", "rebuild"],
+ "default": "present",
+ },
+ **Hcloud.base_module_arguments()
+ ),
+ required_one_of=[['id', 'name']],
+ mutually_exclusive=[["location", "datacenter"]],
+ required_together=[["delete_protection", "rebuild_protection"]],
+ supports_check_mode=True,
+ )
+
+
+def main():
+ module = AnsibleHcloudServer.define_module()
+
+ hcloud = AnsibleHcloudServer(module)
+ state = module.params.get("state")
+ if state == "absent":
+ hcloud.delete_server()
+ elif state == "present":
+ hcloud.present_server()
+ elif state == "started":
+ hcloud.present_server()
+ hcloud.start_server()
+ elif state == "stopped":
+ hcloud.present_server()
+ hcloud.stop_server()
+ elif state == "restarted":
+ hcloud.present_server()
+ hcloud.stop_server()
+ hcloud.start_server()
+ elif state == "rebuild":
+ hcloud.present_server()
+ hcloud.rebuild_server()
+
+ module.exit_json(**hcloud.get_result())
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/support/integration/plugins/modules/iam_role.py b/test/support/integration/plugins/modules/iam_role.py
new file mode 100644
index 0000000000..71a5b0377e
--- /dev/null
+++ b/test/support/integration/plugins/modules/iam_role.py
@@ -0,0 +1,673 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: iam_role
+short_description: Manage AWS IAM roles
+description:
+ - Manage AWS IAM roles.
+version_added: "2.3"
+author: "Rob White (@wimnat)"
+options:
+ path:
+ description:
+ - The path to the role. For more information about paths, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
+ default: "/"
+ type: str
+ name:
+ description:
+ - The name of the role to create.
+ required: true
+ type: str
+ description:
+ description:
+ - Provides a description of the role.
+ version_added: "2.5"
+ type: str
+ boundary:
+ description:
+ - The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates.
+ - Boundaries cannot be set on Instance Profiles, as such if this option is specified then I(create_instance_profile) must be C(false).
+ - This is intended for roles/users that have permissions to create new IAM objects.
+ - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
+ - Requires botocore 1.10.57 or above.
+ aliases: [boundary_policy_arn]
+ version_added: "2.7"
+ type: str
+ assume_role_policy_document:
+ description:
+ - The trust relationship policy document that grants an entity permission to assume the role.
+ - This parameter is required when I(state=present).
+ type: json
+ managed_policies:
+ description:
+ - A list of managed policy ARNs or, since Ansible 2.4, a list of either managed policy ARNs or friendly names.
+      - To remove all policies set I(purge_policies=true) and I(managed_policies=[None]).
+ - To embed an inline policy, use M(iam_policy).
+ aliases: ['managed_policy']
+ type: list
+ max_session_duration:
+ description:
+ - The maximum duration (in seconds) of a session when assuming the role.
+ - Valid values are between 1 and 12 hours (3600 and 43200 seconds).
+ version_added: "2.10"
+ type: int
+ purge_policies:
+ description:
+      - When I(purge_policies=true), any managed policies not listed in I(managed_policies) will be detached.
+ - By default I(purge_policies=true). In Ansible 2.14 this will be changed to I(purge_policies=false).
+ version_added: "2.5"
+ type: bool
+ aliases: ['purge_policy', 'purge_managed_policies']
+ state:
+ description:
+ - Create or remove the IAM role.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ create_instance_profile:
+ description:
+ - Creates an IAM instance profile along with the role.
+ default: true
+ version_added: "2.5"
+ type: bool
+ delete_instance_profile:
+ description:
+ - When I(delete_instance_profile=true) and I(state=absent) deleting a role will also delete the instance
+ profile created with the same I(name) as the role.
+ - Only applies when I(state=absent).
+ default: false
+ version_added: "2.10"
+ type: bool
+ tags:
+ description:
+      - Tag dict to apply to the role.
+ - Requires botocore 1.12.46 or above.
+ version_added: "2.10"
+ type: dict
+ purge_tags:
+ description:
+ - Remove tags not listed in I(tags) when tags is specified.
+ default: true
+ version_added: "2.10"
+ type: bool
+requirements: [ botocore, boto3 ]
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create a role with description and tags
+ iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ description: This is My New Role
+ tags:
+ env: dev
+
+- name: "Create a role and attach a managed policy called 'PowerUserAccess'"
+ iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ managed_policies:
+ - arn:aws:iam::aws:policy/PowerUserAccess
+
+- name: Keep the role created above but remove all managed policies
+ iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file','policy.json') }}"
+ managed_policies: []
+
+- name: Delete the role
+ iam_role:
+ name: mynewrole
+ assume_role_policy_document: "{{ lookup('file', 'policy.json') }}"
+ state: absent
+
+'''
+RETURN = '''
+iam_role:
+ description: dictionary containing the IAM Role data
+ returned: success
+ type: complex
+ contains:
+ path:
+ description: the path to the role
+ type: str
+ returned: always
+ sample: /
+ role_name:
+ description: the friendly name that identifies the role
+ type: str
+ returned: always
+ sample: myrole
+ role_id:
+ description: the stable and unique string identifying the role
+ type: str
+ returned: always
+ sample: ABCDEFF4EZ4ABCDEFV4ZC
+ arn:
+ description: the Amazon Resource Name (ARN) specifying the role
+ type: str
+ returned: always
+ sample: "arn:aws:iam::1234567890:role/mynewrole"
+ create_date:
+ description: the date and time, in ISO 8601 date-time format, when the role was created
+ type: str
+ returned: always
+ sample: "2016-08-14T04:36:28+00:00"
+ assume_role_policy_document:
+ description: the policy that grants an entity permission to assume the role
+ type: str
+ returned: always
+ sample: {
+ 'statement': [
+ {
+ 'action': 'sts:AssumeRole',
+ 'effect': 'Allow',
+ 'principal': {
+ 'service': 'ec2.amazonaws.com'
+ },
+ 'sid': ''
+ }
+ ],
+ 'version': '2012-10-17'
+ }
+ attached_policies:
+ description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
+ type: list
+ returned: always
+ sample: [
+ {
+ 'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess',
+ 'policy_name': 'PowerUserAccess'
+ }
+ ]
+ tags:
+ description: role tags
+ type: dict
+ returned: always
+ sample: '{"Env": "Prod"}'
+'''
+
+import json
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
+from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, compare_aws_tags
+
+try:
+ from botocore.exceptions import ClientError, BotoCoreError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc):
+    return not compare_policies(current_policy_doc, json.loads(new_policy_doc))
+
+
+@AWSRetry.jittered_backoff()
+def _list_policies(connection):
+ paginator = connection.get_paginator('list_policies')
+ return paginator.paginate().build_full_result()['Policies']
+
+
+def convert_friendly_names_to_arns(connection, module, policy_names):
+    if all(policy.startswith('arn:') for policy in policy_names):
+        return policy_names
+ allpolicies = {}
+ policies = _list_policies(connection)
+
+ for policy in policies:
+ allpolicies[policy['PolicyName']] = policy['Arn']
+ allpolicies[policy['Arn']] = policy['Arn']
+ try:
+ return [allpolicies[policy] for policy in policy_names]
+ except KeyError as e:
+ module.fail_json_aws(e, msg="Couldn't find policy")
+
+
+def attach_policies(connection, module, policies_to_attach, params):
+ changed = False
+ for policy_arn in policies_to_attach:
+ try:
+ if not module.check_mode:
+ connection.attach_role_policy(RoleName=params['RoleName'], PolicyArn=policy_arn, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, params['RoleName']))
+ changed = True
+ return changed
+
+
+def remove_policies(connection, module, policies_to_remove, params):
+ changed = False
+ for policy in policies_to_remove:
+ try:
+ if not module.check_mode:
+ connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, params['RoleName']))
+ changed = True
+ return changed
+
+
+def generate_create_params(module):
+ params = dict()
+ params['Path'] = module.params.get('path')
+ params['RoleName'] = module.params.get('name')
+ params['AssumeRolePolicyDocument'] = module.params.get('assume_role_policy_document')
+ if module.params.get('description') is not None:
+ params['Description'] = module.params.get('description')
+ if module.params.get('max_session_duration') is not None:
+ params['MaxSessionDuration'] = module.params.get('max_session_duration')
+ if module.params.get('boundary') is not None:
+ params['PermissionsBoundary'] = module.params.get('boundary')
+ if module.params.get('tags') is not None:
+ params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags'))
+
+ return params
+
+
+def create_basic_role(connection, module, params):
+ """
+ Perform the Role creation.
+ Assumes tests for the role existing have already been performed.
+ """
+
+ try:
+ if not module.check_mode:
+ role = connection.create_role(aws_retry=True, **params)
+            # 'Description' is documented as a key of the role returned by create_role,
+            # but it is not actually included in the response (the AWS CLI omits it as well),
+            # so fetch the role again after creating it.
+ role = get_role_with_backoff(connection, module, params['RoleName'])
+ else:
+ role = {'MadeInCheckMode': True}
+ role['AssumeRolePolicyDocument'] = json.loads(params['AssumeRolePolicyDocument'])
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to create role")
+
+ return role
+
+
+def update_role_assumed_policy(connection, module, params, role):
+ # Check Assumed Policy document
+ if compare_assume_role_policy_doc(role['AssumeRolePolicyDocument'], params['AssumeRolePolicyDocument']):
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ connection.update_assume_role_policy(
+ RoleName=params['RoleName'],
+ PolicyDocument=json.dumps(json.loads(params['AssumeRolePolicyDocument'])),
+ aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(params['RoleName']))
+ return True
+
+
+def update_role_description(connection, module, params, role):
+ # Check Description update
+ if params.get('Description') is None:
+ return False
+ if role.get('Description') == params['Description']:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ connection.update_role_description(RoleName=params['RoleName'], Description=params['Description'], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update description for role {0}".format(params['RoleName']))
+ return True
+
+
+def update_role_max_session_duration(connection, module, params, role):
+ # Check MaxSessionDuration update
+ if params.get('MaxSessionDuration') is None:
+ return False
+ if role.get('MaxSessionDuration') == params['MaxSessionDuration']:
+ return False
+
+ if module.check_mode:
+ return True
+
+ try:
+ connection.update_role(RoleName=params['RoleName'], MaxSessionDuration=params['MaxSessionDuration'], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(params['RoleName']))
+ return True
+
+
+def update_role_permissions_boundary(connection, module, params, role):
+ # Check PermissionsBoundary
+ if params.get('PermissionsBoundary') is None:
+ return False
+ if params.get('PermissionsBoundary') == role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', ''):
+ return False
+
+ if module.check_mode:
+ return True
+
+ if params.get('PermissionsBoundary') == '':
+ try:
+ connection.delete_role_permissions_boundary(RoleName=params['RoleName'], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(params['RoleName']))
+ else:
+ try:
+ connection.put_role_permissions_boundary(RoleName=params['RoleName'], PermissionsBoundary=params['PermissionsBoundary'], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(params['RoleName']))
+ return True
+
+
+def update_managed_policies(connection, module, params, role, managed_policies, purge_policies):
+ # Check Managed Policies
+ if managed_policies is None:
+ return False
+
+ # If we're manipulating a fake role
+ if role.get('MadeInCheckMode', False):
+ role['AttachedPolicies'] = list(map(lambda x: {'PolicyArn': x, 'PolicyName': x.split(':')[5]}, managed_policies))
+ return True
+
+ # Get list of current attached managed policies
+ current_attached_policies = get_attached_policy_list(connection, module, params['RoleName'])
+ current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies]
+
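+    # managed_policies=[None] is the documented way of requesting that every attached managed policy be removed.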
+ if len(managed_policies) == 1 and managed_policies[0] is None:
+ managed_policies = []
+
+ policies_to_remove = set(current_attached_policies_arn_list) - set(managed_policies)
+ policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list)
+
+ changed = False
+
+ if purge_policies:
+ changed |= remove_policies(connection, module, policies_to_remove, params)
+
+ changed |= attach_policies(connection, module, policies_to_attach, params)
+
+ return changed
+
+
+def create_or_update_role(connection, module):
+
+ params = generate_create_params(module)
+ role_name = params['RoleName']
+ create_instance_profile = module.params.get('create_instance_profile')
+ purge_policies = module.params.get('purge_policies')
+ if purge_policies is None:
+ purge_policies = True
+ managed_policies = module.params.get('managed_policies')
+ if managed_policies:
+ # Attempt to list the policies early so we don't leave things behind if we can't find them.
+ managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
+
+ changed = False
+
+ # Get role
+ role = get_role(connection, module, role_name)
+
+ # If role is None, create it
+ if role is None:
+ role = create_basic_role(connection, module, params)
+ changed = True
+ else:
+ changed |= update_role_tags(connection, module, params, role)
+ changed |= update_role_assumed_policy(connection, module, params, role)
+ changed |= update_role_description(connection, module, params, role)
+ changed |= update_role_max_session_duration(connection, module, params, role)
+ changed |= update_role_permissions_boundary(connection, module, params, role)
+
+ if create_instance_profile:
+ changed |= create_instance_profiles(connection, module, params, role)
+
+ changed |= update_managed_policies(connection, module, params, role, managed_policies, purge_policies)
+
+ # Get the role again
+ if not role.get('MadeInCheckMode', False):
+ role = get_role(connection, module, params['RoleName'])
+ role['AttachedPolicies'] = get_attached_policy_list(connection, module, params['RoleName'])
+ role['tags'] = get_role_tags(connection, module)
+
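+    # The role facts are returned both nested under iam_role and flattened into the top-level result.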
+ module.exit_json(
+ changed=changed, iam_role=camel_dict_to_snake_dict(role, ignore_list=['tags']),
+ **camel_dict_to_snake_dict(role, ignore_list=['tags']))
+
+
+def create_instance_profiles(connection, module, params, role):
+
+ if role.get('MadeInCheckMode', False):
+ return False
+
+ # Fetch existing Profiles
+ try:
+ instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'], aws_retry=True)['InstanceProfiles']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName']))
+
+ # Profile already exists
+ if any(p['InstanceProfileName'] == params['RoleName'] for p in instance_profiles):
+ return False
+
+ if module.check_mode:
+ return True
+
+ # Make sure an instance profile is created
+ try:
+ connection.create_instance_profile(InstanceProfileName=params['RoleName'], Path=params['Path'], aws_retry=True)
+ except ClientError as e:
+ # If the profile already exists, no problem, move on.
+ # Implies someone's changing things at the same time...
+ if e.response['Error']['Code'] == 'EntityAlreadyExists':
+ return False
+ else:
+ module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName']))
+ except BotoCoreError as e:
+ module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName']))
+
+ # And attach the role to the profile
+ try:
+ connection.add_role_to_instance_profile(InstanceProfileName=params['RoleName'], RoleName=params['RoleName'], aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(params['RoleName']))
+
+ return True
+
+
+def remove_instance_profiles(connection, module, role_params, role):
+ role_name = module.params.get('name')
+ delete_profiles = module.params.get("delete_instance_profile")
+
+ try:
+ instance_profiles = connection.list_instance_profiles_for_role(aws_retry=True, **role_params)['InstanceProfiles']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name))
+
+ # Remove the role from the instance profile(s)
+ for profile in instance_profiles:
+ profile_name = profile['InstanceProfileName']
+ try:
+ if not module.check_mode:
+ connection.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, **role_params)
+ if profile_name == role_name:
+ if delete_profiles:
+ try:
+ connection.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name))
+
+
+def destroy_role(connection, module):
+
+ role_name = module.params.get('name')
+ role = get_role(connection, module, role_name)
+ role_params = dict()
+ role_params['RoleName'] = role_name
+ boundary_params = dict(role_params)
+ boundary_params['PermissionsBoundary'] = ''
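+    # An empty PermissionsBoundary makes update_role_permissions_boundary() delete any existing boundary.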
+
+ if role is None:
+ module.exit_json(changed=False)
+
+ # Before we try to delete the role we need to remove any
+ # - attached instance profiles
+ # - attached managed policies
+ # - permissions boundary
+ remove_instance_profiles(connection, module, role_params, role)
+ update_managed_policies(connection, module, role_params, role, [], True)
+ update_role_permissions_boundary(connection, module, boundary_params, role)
+
+ try:
+ if not module.check_mode:
+ connection.delete_role(aws_retry=True, **role_params)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to delete role")
+
+ module.exit_json(changed=True)
+
+
+def get_role_with_backoff(connection, module, name):
+ try:
+ return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(connection.get_role)(RoleName=name)['Role']
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
+
+
+def get_role(connection, module, name):
+ try:
+ return connection.get_role(RoleName=name, aws_retry=True)['Role']
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ return None
+ else:
+ module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
+ except BotoCoreError as e:
+ module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
+
+
+def get_attached_policy_list(connection, module, name):
+ try:
+ return connection.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies']
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name))
+
+
+def get_role_tags(connection, module):
+ role_name = module.params.get('name')
+ if not hasattr(connection, 'list_role_tags'):
+ return {}
+ try:
+ return boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name))
+
+
+def update_role_tags(connection, module, params, role):
+ new_tags = params.get('Tags')
+ if new_tags is None:
+ return False
+ new_tags = boto3_tag_list_to_ansible_dict(new_tags)
+
+ role_name = module.params.get('name')
+ purge_tags = module.params.get('purge_tags')
+
+ try:
+ existing_tags = boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
+ except (ClientError, KeyError):
+ existing_tags = {}
+
+ tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+
+ if not module.check_mode:
+ try:
+ if tags_to_remove:
+ connection.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True)
+ if tags_to_add:
+ connection.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name)
+
+ changed = bool(tags_to_add) or bool(tags_to_remove)
+ return changed
+
+
+def main():
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ path=dict(type='str', default="/"),
+ assume_role_policy_document=dict(type='json'),
+ managed_policies=dict(type='list', aliases=['managed_policy']),
+ max_session_duration=dict(type='int'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ description=dict(type='str'),
+ boundary=dict(type='str', aliases=['boundary_policy_arn']),
+ create_instance_profile=dict(type='bool', default=True),
+ delete_instance_profile=dict(type='bool', default=False),
+ purge_policies=dict(type='bool', aliases=['purge_policy', 'purge_managed_policies']),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=True),
+ )
+ module = AnsibleAWSModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['assume_role_policy_document'])],
+ supports_check_mode=True)
+
+ if module.params.get('purge_policies') is None:
+        module.deprecate('In Ansible 2.14 the default value of purge_policies will change from true to false.'
+                         ' To maintain the existing behaviour explicitly set purge_policies=true', version='2.14')
+
+ if module.params.get('boundary'):
+ if module.params.get('create_instance_profile'):
+ module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
+ if not module.params.get('boundary').startswith('arn:aws:iam'):
+ module.fail_json(msg="Boundary policy must be an ARN")
+ if module.params.get('tags') is not None and not module.botocore_at_least('1.12.46'):
+ module.fail_json(msg="When managing tags botocore must be at least v1.12.46. "
+ "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
+ if module.params.get('boundary') is not None and not module.botocore_at_least('1.10.57'):
+ module.fail_json(msg="When using a boundary policy, botocore must be at least v1.10.57. "
+ "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
+ if module.params.get('max_session_duration'):
+ max_session_duration = module.params.get('max_session_duration')
+ if max_session_duration < 3600 or max_session_duration > 43200:
+ module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)")
+ if module.params.get('path'):
+ path = module.params.get('path')
+ if not path.endswith('/') or not path.startswith('/'):
+ module.fail_json(msg="path must begin and end with /")
+
+ connection = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
+
+ state = module.params.get("state")
+
+ if state == 'present':
+ create_or_update_role(connection, module)
+ else:
+ destroy_role(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/k8s.py b/test/support/integration/plugins/modules/k8s.py
new file mode 100644
index 0000000000..f3938bf39c
--- /dev/null
+++ b/test/support/integration/plugins/modules/k8s.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Chris Houseknecht <@chouseknecht>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+
+module: k8s
+
+short_description: Manage Kubernetes (K8s) objects
+
+version_added: "2.6"
+
+author:
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Fabian von Feilitzsch (@fabianvf)"
+
+description:
+ - Use the OpenShift Python client to perform CRUD operations on K8s objects.
+ - Pass the object definition from a source file or inline. See examples for reading
+ files and using Jinja templates or vault-encrypted files.
+ - Access to the full range of K8s APIs.
+  - Use the M(k8s_info) module to obtain a list of items about an object of type C(kind).
+ - Authenticate using either a config file, certificates, password or token.
+ - Supports check mode.
+
+extends_documentation_fragment:
+ - k8s_state_options
+ - k8s_name_options
+ - k8s_resource_options
+ - k8s_auth_options
+
+notes:
+ - If your OpenShift Python library is not 0.9.0 or newer and you are trying to
+ remove an item from an associative array/dictionary, for example a label or
+ an annotation, you will need to explicitly set the value of the item to be
+ removed to `null`. Simply deleting the entry in the dictionary will not
+ remove it from openshift or kubernetes.
+
+options:
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge will typically be used.
+ - For example, Custom Resource Definitions typically aren't updatable by the usual strategic merge. You may
+ want to use C(merge) if you see "strategic merge patch format is not supported"
+ - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
+ - Requires openshift >= 0.6.2
+ - If more than one merge_type is given, the merge_types will be tried in order
+ - If openshift >= 0.6.2, this defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+ on resource kinds that combine Custom Resources and built-in resources. For openshift < 0.6.2, the default
+ is simply C(strategic-merge).
+ - mutually exclusive with C(apply)
+ choices:
+ - json
+ - merge
+ - strategic-merge
+ type: list
+ version_added: "2.7"
+ wait:
+ description:
+ - Whether to wait for certain resource kinds to end up in the desired state. By default the module exits once Kubernetes has
+ received the request
+ - Implemented for C(state=present) for C(Deployment), C(DaemonSet) and C(Pod), and for C(state=absent) for all resource kinds.
+ - For resource kinds without an implementation, C(wait) returns immediately unless C(wait_condition) is set.
+ default: no
+ type: bool
+ version_added: "2.8"
+ wait_sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ default: 5
+ version_added: "2.9"
+ wait_timeout:
+ description:
+ - How long in seconds to wait for the resource to end up in the desired state. Ignored if C(wait) is not set.
+ default: 120
+ version_added: "2.8"
+ wait_condition:
+ description:
+ - Specifies a custom condition on the status to wait for. Ignored if C(wait) is not set or is set to False.
+ suboptions:
+ type:
+ description:
+ - The type of condition to wait for. For example, the C(Pod) resource will set the C(Ready) condition (among others)
+ - Required if you are specifying a C(wait_condition). If left empty, the C(wait_condition) field will be ignored.
+ - The possible types for a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
+ for a given resource to see possible choices.
+ status:
+ description:
+ - The value of the status field in your desired condition.
+ - For example, if a C(Deployment) is paused, the C(Progressing) C(type) will have the C(Unknown) status.
+ choices:
+ - True
+ - False
+ - Unknown
+ reason:
+ description:
+ - The value of the reason field in your desired condition
+ - For example, if a C(Deployment) is paused, The C(Progressing) C(type) will have the C(DeploymentPaused) reason.
+ - The possible reasons in a condition are specific to each resource type in Kubernetes. See the API documentation of the status field
+ for a given resource to see possible choices.
+ version_added: "2.8"
+ validate:
+ description:
+      - How (if at all) to validate the resource definition against the Kubernetes schema.
+        Requires the kubernetes-validate python module.
+ suboptions:
+ fail_on_error:
+        description: Whether to fail on validation errors.
+ required: yes
+ type: bool
+ version:
+        description: Version of Kubernetes to validate against. Defaults to the Kubernetes server version.
+ strict:
+        description: Whether to fail when passing unexpected properties.
+ default: no
+ type: bool
+ version_added: "2.8"
+ append_hash:
+ description:
+ - Whether to append a hash to a resource name for immutability purposes
+ - Applies only to ConfigMap and Secret resources
+ - The parameter will be silently ignored for other resource kinds
+ - The full definition of an object is needed to generate the hash - this means that deleting an object created with append_hash
+ will only work if the same object is passed with state=absent (alternatively, just use state=absent with the name including
+ the generated hash and append_hash=no)
+ type: bool
+ version_added: "2.8"
+ apply:
+ description:
+ - C(apply) compares the desired resource definition with the previously supplied resource definition,
+ ignoring properties that are automatically generated
+ - C(apply) works better with Services than 'force=yes'
+ - mutually exclusive with C(merge_type)
+ type: bool
+ version_added: "2.9"
+
+requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+- name: Create a k8s namespace
+ k8s:
+ name: testing
+ api_version: v1
+ kind: Namespace
+ state: present
+
+- name: Create a Service object from an inline definition
+ k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: web
+ namespace: testing
+ labels:
+ app: galaxy
+ service: web
+ spec:
+ selector:
+ app: galaxy
+ service: web
+ ports:
+ - protocol: TCP
+ targetPort: 8000
+ name: port-8000-tcp
+ port: 8000
+
+- name: Remove an existing Service object
+ k8s:
+ state: absent
+ api_version: v1
+ kind: Service
+ namespace: testing
+ name: web
+
+# Passing the object definition from a file
+
+- name: Create a Deployment by reading the definition from a local file
+ k8s:
+ state: present
+ src: /testing/deployment.yml
+
+- name: >-
+ Read definition file from the Ansible controller file system.
+ If the definition file has been encrypted with Ansible Vault it will automatically be decrypted.
+ k8s:
+ state: present
+ definition: "{{ lookup('file', '/testing/deployment.yml') | from_yaml }}"
+
+- name: Read definition file from the Ansible controller file system after Jinja templating
+ k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+
+- name: Fail on validation errors
+ k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+ validate:
+ fail_on_error: yes
+
+- name: Warn on validation errors, check for unexpected properties
+ k8s:
+ state: present
+ definition: "{{ lookup('template', '/testing/deployment.yml') | from_yaml }}"
+ validate:
+ fail_on_error: no
+ strict: yes
+'''
+
+RETURN = '''
+result:
+ description:
+ - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: complex
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+ items:
+ description: Returned only when multiple yaml documents are passed to src or resource_definition
+ returned: when resource_definition or src contains list of objects
+ type: list
+ duration:
+ description: elapsed time of task in seconds
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+'''
+
+from ansible.module_utils.k8s.raw import KubernetesRawModule
+
+
+def main():
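+    # All argument handling and CRUD logic lives in KubernetesRawModule (ansible.module_utils.k8s.raw).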
+ KubernetesRawModule().execute_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/k8s_info.py b/test/support/integration/plugins/modules/k8s_info.py
new file mode 100644
index 0000000000..99a8fd8cec
--- /dev/null
+++ b/test/support/integration/plugins/modules/k8s_info.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Will Thames <@willthames>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: k8s_info
+
+short_description: Describe Kubernetes (K8s) objects
+
+version_added: "2.7"
+
+author:
+ - "Will Thames (@willthames)"
+
+description:
+ - Use the OpenShift Python client to perform read operations on K8s objects.
+ - Access to the full range of K8s APIs.
+ - Authenticate using either a config file, certificates, password or token.
+ - Supports check mode.
+ - This module was called C(k8s_facts) before Ansible 2.9. The usage did not change.
+
+options:
+ api_version:
+ description:
+    - Use to specify the API version. Use in conjunction with I(kind), I(name), and I(namespace) to identify a
+ specific object.
+ default: v1
+ aliases:
+ - api
+ - version
+ kind:
+ description:
+ - Use to specify an object model. Use in conjunction with I(api_version), I(name), and I(namespace) to identify a
+ specific object.
+ required: yes
+ name:
+ description:
+ - Use to specify an object name. Use in conjunction with I(api_version), I(kind) and I(namespace) to identify a
+ specific object.
+ namespace:
+ description:
+ - Use to specify an object namespace. Use in conjunction with I(api_version), I(kind), and I(name)
+ to identify a specific object.
+ label_selectors:
+ description: List of label selectors to use to filter results
+ field_selectors:
+ description: List of field selectors to use to filter results
+
+extends_documentation_fragment:
+ - k8s_auth_options
+
+requirements:
+ - "python >= 2.7"
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+- name: Get an existing Service object
+ k8s_info:
+ api_version: v1
+ kind: Service
+ name: web
+ namespace: testing
+ register: web_service
+
+- name: Get a list of all service objects
+ k8s_info:
+ api_version: v1
+ kind: Service
+ namespace: testing
+ register: service_list
+
+- name: Get a list of all pods from any namespace
+ k8s_info:
+ kind: Pod
+ register: pod_list
+
+- name: Search for all Pods labelled app=web
+ k8s_info:
+ kind: Pod
+ label_selectors:
+ - app = web
+ - tier in (dev, test)
+
+- name: Search for all running pods
+ k8s_info:
+ kind: Pod
+ field_selectors:
+ - status.phase=Running
+'''
+
+RETURN = '''
+resources:
+ description:
+ - The object(s) that exists
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: dict
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: dict
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: dict
+'''
+
+
+from ansible.module_utils.k8s.common import KubernetesAnsibleModule, AUTH_ARG_SPEC
+import copy
+
+
+class KubernetesInfoModule(KubernetesAnsibleModule):
+
+ def __init__(self, *args, **kwargs):
+ KubernetesAnsibleModule.__init__(self, *args,
+ supports_check_mode=True,
+ **kwargs)
+ if self._name == 'k8s_facts':
+ self.deprecate("The 'k8s_facts' module has been renamed to 'k8s_info'", version='2.13')
+
+ def execute_module(self):
+ self.client = self.get_api_client()
+
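+        # kubernetes_facts() performs the lookup; its result (the matching resources) is unpacked straight into exit_json.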
+ self.exit_json(changed=False,
+ **self.kubernetes_facts(self.params['kind'],
+ self.params['api_version'],
+ self.params['name'],
+ self.params['namespace'],
+ self.params['label_selectors'],
+ self.params['field_selectors']))
+
+ @property
+ def argspec(self):
+ args = copy.deepcopy(AUTH_ARG_SPEC)
+ args.update(
+ dict(
+ kind=dict(required=True),
+ api_version=dict(default='v1', aliases=['api', 'version']),
+ name=dict(),
+ namespace=dict(),
+ label_selectors=dict(type='list', default=[]),
+ field_selectors=dict(type='list', default=[]),
+ )
+ )
+ return args
+
+
+def main():
+ KubernetesInfoModule().execute_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/nios_txt_record.py b/test/support/integration/plugins/modules/nios_txt_record.py
new file mode 100644
index 0000000000..b9e63dfc6e
--- /dev/null
+++ b/test/support/integration/plugins/modules/nios_txt_record.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_txt_record
+version_added: "2.7"
+author: "Corey Wanless (@coreywan)"
+short_description: Configure Infoblox NIOS txt records
+description:
+ - Adds and/or removes instances of txt record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:txt) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+extends_documentation_fragment: nios
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+      - Sets the DNS view to associate this txt record with. The DNS
+ view must already be configured on the system
+ required: true
+ default: default
+ aliases:
+ - dns_view
+ text:
+ description:
+ - Text associated with the record. It can contain up to 255 bytes
+ per substring, up to a total of 512 bytes. To enter leading,
+ trailing, or embedded spaces in the text, add quotes around the
+ text to preserve the spaces.
+ required: true
+ ttl:
+ description:
+      - Configures the TTL to be associated with this txt record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+ - name: Ensure a text Record Exists
+ nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: present
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+
+ - name: Ensure a text Record does not exist
+ nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: absent
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.net_tools.nios.api import WapiModule
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+ text=dict(ib_req=True),
+ ttl=dict(type='int'),
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
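+    # The final spec combines the record fields (ib_spec) with the WAPI provider and connection options.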
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run('record:txt', ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/nios_zone.py b/test/support/integration/plugins/modules/nios_zone.py
new file mode 100644
index 0000000000..0ffb2ff0a4
--- /dev/null
+++ b/test/support/integration/plugins/modules/nios_zone.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'certified'}
+
+
+DOCUMENTATION = '''
+---
+module: nios_zone
+version_added: "2.5"
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS DNS zones
+description:
+ - Adds and/or removes instances of DNS zone objects from
+ Infoblox NIOS servers. This module manages NIOS C(zone_auth) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment: nios
+options:
+ fqdn:
+ description:
+ - Specifies the qualified domain name to either add or remove from
+ the NIOS instance based on the configured C(state) value.
+ required: true
+ aliases:
+ - name
+ view:
+ description:
+ - Configures the DNS view name for the configured resource. The
+        specified DNS view must already exist on the running NIOS instance
+ prior to configuring zones.
+ required: true
+ default: default
+ aliases:
+ - dns_view
+ grid_primary:
+ description:
+ - Configures the grid primary servers for this zone.
+ suboptions:
+ name:
+ description:
+ - The name of the grid primary server
+ grid_secondaries:
+ description:
+ - Configures the grid secondary servers for this zone.
+ suboptions:
+ name:
+ description:
+ - The name of the grid secondary server
+ ns_group:
+ version_added: "2.6"
+ description:
+ - Configures the name server group for this zone. Name server group is
+ mutually exclusive with grid primary and grid secondaries.
+ restart_if_needed:
+ version_added: "2.6"
+ description:
+ - If set to true, causes the NIOS DNS service to restart and load the
+ new zone configuration
+ type: bool
+ zone_format:
+ version_added: "2.7"
+ description:
+ - Create an authoritative reverse-mapping zone, which is an area of network
+ space for which one or more name servers (primary and secondary) have the
+ responsibility to respond to address-to-name queries. It supports
+ reverse-mapping zones for both IPv4 and IPv6 addresses.
+ default: FORWARD
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: configure a zone on the system using grid primary and secondaries
+ nios_zone:
+ name: ansible.com
+ grid_primary:
+ - name: gridprimary.grid.com
+ grid_secondaries:
+ - name: gridsecondary1.grid.com
+ - name: gridsecondary2.grid.com
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: configure a zone on the system using a name server group
+ nios_zone:
+ name: ansible.com
+ ns_group: examplensg
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: configure a reverse mapping zone on the system using IPV4 zone format
+ nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: configure a reverse mapping zone on the system using IPV6 zone format
+ nios_zone:
+ name: 100::1/128
+ zone_format: IPV6
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: update the comment and ext attributes for an existing zone
+ nios_zone:
+ name: ansible.com
+ comment: this is an example comment
+ extattrs:
+ Site: west-dc
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: remove the dns zone
+ nios_zone:
+ name: ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: remove the reverse mapping dns zone from the system with IPV4 zone format
+ nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.net_tools.nios.api import WapiModule
+from ansible.module_utils.net_tools.nios.api import NIOS_ZONE
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ grid_spec = dict(
+ name=dict(required=True),
+ )
+
+ ib_spec = dict(
+ fqdn=dict(required=True, aliases=['name'], ib_req=True, update=False),
+ zone_format=dict(default='FORWARD', ib_req=False),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ grid_primary=dict(type='list', elements='dict', options=grid_spec),
+ grid_secondaries=dict(type='list', elements='dict', options=grid_spec),
+ ns_group=dict(),
+ restart_if_needed=dict(type='bool'),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['ns_group', 'grid_primary'],
+ ['ns_group', 'grid_secondaries']
+ ])
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_ZONE, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
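
The mutually_exclusive setting above is what prevents ns_group from being combined with grid_primary or grid_secondaries. A minimal standalone sketch of the same constraint, outside AnsibleModule and for illustration only:

def check_zone_params(params):
    # Same rule AnsibleModule enforces via mutually_exclusive above.
    if params.get('ns_group') and (params.get('grid_primary') or params.get('grid_secondaries')):
        raise ValueError('ns_group is mutually exclusive with grid_primary/grid_secondaries')
    return params

check_zone_params({'fqdn': 'ansible.com', 'ns_group': 'examplensg'})         # accepted
# check_zone_params({'ns_group': 'nsg', 'grid_primary': [{'name': 'gp'}]})   # would raise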
diff --git a/test/support/integration/plugins/modules/python_requirements_info.py b/test/support/integration/plugins/modules/python_requirements_info.py
new file mode 100644
index 0000000000..aa9e70ec86
--- /dev/null
+++ b/test/support/integration/plugins/modules/python_requirements_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = '''
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+ - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+ - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
+version_added: "2.7"
+options:
+ dependencies:
+ description: >
+ A list of version specifiers or bare module names to check for installation.
+ Supported operators: <, >, <=, >=, or ==. The bare module name like
+ I(ansible), the module with a specific version like I(boto3==1.6.1), or a
+ partial version like I(requests>2) are all valid specifications.
+author:
+- Will Thames (@willthames)
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = '''
+- name: show python lib/site paths
+ python_requirements_info:
+- name: check for modern boto3 and botocore versions
+ python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ - botocore<2
+'''
+
+RETURN = '''
+python:
+ description: path to python version used
+ returned: always
+ type: str
+ sample: /usr/local/opt/python@2/bin/python2.7
+python_version:
+ description: version of python
+ returned: always
+ type: str
+ sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
+python_system_path:
+ description: List of paths python is looking for modules in
+ returned: always
+ type: list
+ sample:
+ - /usr/local/opt/python@2/site-packages/
+ - /usr/lib/python/site-packages/
+valid:
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ returned: always
+ type: dict
+ sample:
+ boto3:
+ desired: null
+ installed: 1.7.60
+ botocore:
+ desired: botocore<2
+ installed: 1.10.60
+mismatched:
+ description: A dictionary of dependencies that did not satisfy the desired version
+ returned: always
+ type: dict
+ sample:
+ botocore:
+ desired: botocore>2
+ installed: 1.10.60
+not_found:
+ description: A list of packages that could not be imported at all, and are not installed
+ returned: always
+ type: list
+ sample:
+ - boto4
+ - requests
+'''
+
+import re
+import sys
+import operator
+
+HAS_DISTUTILS = False
+try:
+ import pkg_resources
+ from distutils.version import LooseVersion
+ HAS_DISTUTILS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+
+operations = {
+ '<=': operator.le,
+ '>=': operator.ge,
+ '<': operator.lt,
+ '>': operator.gt,
+ '==': operator.eq,
+}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dependencies=dict(type='list')
+ ),
+ supports_check_mode=True,
+ )
+ if module._name == 'python_requirements_facts':
+ module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'", version='2.13')
+ if not HAS_DISTUTILS:
+ module.fail_json(
+ msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ )
+ pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')
+
+ results = dict(
+ not_found=[],
+ mismatched={},
+ valid={},
+ )
+
+ for dep in (module.params.get('dependencies') or []):
+ match = pkg_dep_re.match(dep)
+ if match is None:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
+ pkg, op, version = match.groups()
+ if op is not None and op not in operations:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
+ try:
+ existing = pkg_resources.get_distribution(pkg).version
+ except pkg_resources.DistributionNotFound:
+ # not there
+ results['not_found'].append(pkg)
+ continue
+ if op is None and version is None:
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': None,
+ }
+ elif operations[op](LooseVersion(existing), LooseVersion(version)):
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+ else:
+ results['mismatched'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+
+ module.exit_json(
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ **results
+ )
+
+
+if __name__ == '__main__':
+ main()
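
To make the parsing logic above concrete, here is a minimal standalone sketch of how a dependency string such as 'boto3>1.6' is split by the regular expression and compared with LooseVersion. It reuses the module's own regex and operator table and assumes only the standard library plus setuptools.

import re
import operator
from distutils.version import LooseVersion

operations = {'<=': operator.le, '>=': operator.ge, '<': operator.lt, '>': operator.gt, '==': operator.eq}
pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')

def satisfied(spec, installed_version):
    # Return True if installed_version meets a spec like 'boto3>1.6'.
    pkg, op, version = pkg_dep_re.match(spec).groups()
    if op is None:
        return True  # bare module name: presence is enough
    return operations[op](LooseVersion(installed_version), LooseVersion(version))

print(satisfied('boto3>1.6', '1.7.60'))   # True
print(satisfied('botocore<2', '2.1.0'))   # False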
diff --git a/test/support/integration/plugins/modules/s3_bucket.py b/test/support/integration/plugins/modules/s3_bucket.py
new file mode 100644
index 0000000000..f35cf53b5e
--- /dev/null
+++ b/test/support/integration/plugins/modules/s3_bucket.py
@@ -0,0 +1,740 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+
+DOCUMENTATION = '''
+---
+module: s3_bucket
+short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
+description:
+ - Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
+version_added: "2.0"
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+ force:
+ description:
+ - When trying to delete a bucket, delete all keys (including versions and delete markers)
+ in the bucket first (an s3 bucket must be empty for a successful deletion)
+ type: bool
+ default: 'no'
+ name:
+ description:
+ - Name of the s3 bucket
+ required: true
+ type: str
+ policy:
+ description:
+ - The JSON policy as a string.
+ type: json
+ s3_url:
+ description:
+ - S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and fakes3 etc.
+ - Assumes AWS if not specified.
+ - For Walrus, use the FQDN of the endpoint without scheme or path.
+ aliases: [ S3_URL ]
+ type: str
+ ceph:
+ description:
+ - Enable API compatibility with Ceph. It takes into account the S3 API subset working
+ with Ceph in order to provide the same module behaviour where possible.
+ type: bool
+ version_added: "2.2"
+ requester_pays:
+ description:
+ - With Requester Pays buckets, the requester instead of the bucket owner pays the cost
+ of the request and the data download from the bucket.
+ type: bool
+ default: False
+ state:
+ description:
+ - Create or remove the s3 bucket
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ tags:
+ description:
+ - tags dict to apply to bucket
+ type: dict
+ purge_tags:
+ description:
+ - whether to remove tags that aren't present in the C(tags) parameter
+ type: bool
+ default: True
+ version_added: "2.9"
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+ type: bool
+ encryption:
+ description:
+ - Describes the default server-side encryption to apply to new objects in the bucket.
+ In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly.
+ choices: [ 'none', 'AES256', 'aws:kms' ]
+ version_added: "2.9"
+ type: str
+ encryption_key_id:
+ description: KMS master key ID to use for the default encryption. This parameter is allowed if encryption is aws:kms. If
+ not specified then it will default to the AWS provided KMS key.
+ version_added: "2.9"
+ type: str
+extends_documentation_fragment:
+ - aws
+ - ec2
+notes:
+ - If the endpoint does not implement the C(requestPayment), C(policy), C(tagging) or C(versioning)
+ API operations, the module does not fail as long as the related parameters are left at their
+ defaults, that is, I(requester_pays) is C(False) and I(policy), I(tags) and I(versioning) are C(None).
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a simple s3 bucket
+- s3_bucket:
+ name: mys3bucket
+ state: present
+
+# Create a simple s3 bucket on Ceph Rados Gateway
+- s3_bucket:
+ name: mys3bucket
+ s3_url: http://your-ceph-rados-gateway-server.xxx
+ ceph: true
+
+# Remove an s3 bucket and any keys it contains
+- s3_bucket:
+ name: mys3bucket
+ state: absent
+ force: yes
+
+# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
+- s3_bucket:
+ name: mys3bucket
+ policy: "{{ lookup('file','policy.json') }}"
+ requester_pays: yes
+ versioning: yes
+ tags:
+ example: tag1
+ another: tag2
+
+# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
+- s3_bucket:
+ name: mydobucket
+ s3_url: 'https://nyc3.digitaloceanspaces.com'
+
+# Create a bucket with AES256 encryption
+- s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "AES256"
+
+# Create a bucket with aws:kms encryption, KMS key
+- s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "aws:kms"
+ encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example"
+
+# Create a bucket with aws:kms encryption, default key
+- s3_bucket:
+ name: mys3bucket
+ state: present
+ encryption: "aws:kms"
+'''
+
+import json
+import os
+import time
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.six import string_types
+from ansible.module_utils.basic import to_text
+from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
+from ansible.module_utils.ec2 import compare_policies, ec2_argument_spec, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
+from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry
+
+try:
+ from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
+except ImportError:
+ pass # handled by AnsibleAWSModule
+
+
+def create_or_update_bucket(s3_client, module, location):
+
+ policy = module.params.get("policy")
+ name = module.params.get("name")
+ requester_pays = module.params.get("requester_pays")
+ tags = module.params.get("tags")
+ purge_tags = module.params.get("purge_tags")
+ versioning = module.params.get("versioning")
+ encryption = module.params.get("encryption")
+ encryption_key_id = module.params.get("encryption_key_id")
+ changed = False
+ result = {}
+
+ try:
+ bucket_is_present = bucket_exists(s3_client, name)
+ except EndpointConnectionError as e:
+ module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+ if not bucket_is_present:
+ try:
+ bucket_changed = create_bucket(s3_client, name, location)
+ s3_client.get_waiter('bucket_exists').wait(Bucket=name)
+ changed = changed or bucket_changed
+ except WaiterError as e:
+ module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while creating bucket")
+
+ # Versioning
+ try:
+ versioning_status = get_bucket_versioning(s3_client, name)
+ except BotoCoreError as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket versioning")
+ except ClientError as exp:
+ if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None:
+ module.fail_json_aws(exp, msg="Failed to get bucket versioning")
+ else:
+ if versioning is not None:
+ required_versioning = None
+ if versioning and versioning_status.get('Status') != "Enabled":
+ required_versioning = 'Enabled'
+ elif not versioning and versioning_status.get('Status') == "Enabled":
+ required_versioning = 'Suspended'
+
+ if required_versioning:
+ try:
+ put_bucket_versioning(s3_client, name, required_versioning)
+ changed = True
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket versioning")
+
+ versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
+
+ # This output format is there to ensure compatibility with previous versions of the module
+ result['versioning'] = {
+ 'Versioning': versioning_status.get('Status', 'Disabled'),
+ 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
+ }
+
+ # Requester pays
+ try:
+ requester_pays_status = get_bucket_request_payment(s3_client, name)
+ except BotoCoreError as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket request payment")
+ except ClientError as exp:
+ if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or requester_pays:
+ module.fail_json_aws(exp, msg="Failed to get bucket request payment")
+ else:
+ if requester_pays:
+ payer = 'Requester' if requester_pays else 'BucketOwner'
+ if requester_pays_status != payer:
+ put_bucket_request_payment(s3_client, name, payer)
+ requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
+ if requester_pays_status is None:
+ # The put request is quite often not applied immediately, so we retry one more time
+ put_bucket_request_payment(s3_client, name, payer)
+ requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
+ changed = True
+
+ result['requester_pays'] = requester_pays
+
+ # Policy
+ try:
+ current_policy = get_bucket_policy(s3_client, name)
+ except BotoCoreError as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket policy")
+ except ClientError as exp:
+ if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None:
+ module.fail_json_aws(exp, msg="Failed to get bucket policy")
+ else:
+ if policy is not None:
+ if isinstance(policy, string_types):
+ policy = json.loads(policy)
+
+ if not policy and current_policy:
+ try:
+ delete_bucket_policy(s3_client, name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket policy")
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy)
+ changed = True
+ elif compare_policies(current_policy, policy):
+ try:
+ put_bucket_policy(s3_client, name, policy)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket policy")
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
+ if current_policy is None:
+ # As with request payment, the put request is quite often not applied immediately, so we retry one more time
+ put_bucket_policy(s3_client, name, policy)
+ current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
+ changed = True
+
+ result['policy'] = current_policy
+
+ # Tags
+ try:
+ current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
+ except BotoCoreError as exp:
+ module.fail_json_aws(exp, msg="Failed to get bucket tags")
+ except ClientError as exp:
+ if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or tags is not None:
+ module.fail_json_aws(exp, msg="Failed to get bucket tags")
+ else:
+ if tags is not None:
+ # Tags are always returned as text
+ tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
+ if not purge_tags:
+ # Ensure existing tags that aren't updated by desired tags remain
+ current_copy = current_tags_dict.copy()
+ current_copy.update(tags)
+ tags = current_copy
+ if current_tags_dict != tags:
+ if tags:
+ try:
+ put_bucket_tagging(s3_client, name, tags)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to update bucket tags")
+ else:
+ if purge_tags:
+ try:
+ delete_bucket_tagging(s3_client, name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket tags")
+ current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
+ changed = True
+
+ result['tags'] = current_tags_dict
+
+ # Encryption
+ if hasattr(s3_client, "get_bucket_encryption"):
+ try:
+ current_encryption = get_bucket_encryption(s3_client, name)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket encryption")
+ elif encryption is not None:
+ module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")
+
+ if encryption is not None:
+ current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
+ current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
+ if encryption == 'none' and current_encryption_algorithm is not None:
+ try:
+ delete_bucket_encryption(s3_client, name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket encryption")
+ current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
+ changed = True
+ elif encryption != 'none' and ((encryption != current_encryption_algorithm) or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id)):
+ expected_encryption = {'SSEAlgorithm': encryption}
+ if encryption == 'aws:kms' and encryption_key_id is not None:
+ expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
+ try:
+ put_bucket_encryption(s3_client, name, expected_encryption)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to set bucket encryption")
+ current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption)
+ changed = True
+
+ result['encryption'] = current_encryption
+
+ module.exit_json(changed=changed, name=name, **result)
+
+
+def bucket_exists(s3_client, bucket_name):
+ # head_bucket appeared to be really inconsistent, so we use list_buckets instead,
+ # and loop over all the buckets, even if we know it's less performant :(
+ all_buckets = s3_client.list_buckets(Bucket=bucket_name)['Buckets']
+ return any(bucket['Name'] == bucket_name for bucket in all_buckets)
+
+
+@AWSRetry.exponential_backoff(max_delay=120)
+def create_bucket(s3_client, bucket_name, location):
+ try:
+ configuration = {}
+ if location not in ('us-east-1', None):
+ configuration['LocationConstraint'] = location
+ if len(configuration) > 0:
+ s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
+ else:
+ s3_client.create_bucket(Bucket=bucket_name)
+ return True
+ except ClientError as e:
+ error_code = e.response['Error']['Code']
+ if error_code == 'BucketAlreadyOwnedByYou':
+ # We should never get here since we check the bucket presence before calling the create_or_update_bucket
+ # method. However, the AWS API sometimes fails to report bucket presence, so we catch this exception
+ return False
+ else:
+ raise e
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_tagging(s3_client, bucket_name, tags):
+ s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_policy(s3_client, bucket_name, policy):
+ s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def delete_bucket_policy(s3_client, bucket_name):
+ s3_client.delete_bucket_policy(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_policy(s3_client, bucket_name):
+ try:
+ current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchBucketPolicy':
+ current_policy = None
+ else:
+ raise e
+ return current_policy
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_request_payment(s3_client, bucket_name, payer):
+ s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_request_payment(s3_client, bucket_name):
+ return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_versioning(s3_client, bucket_name):
+ return s3_client.get_bucket_versioning(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_versioning(s3_client, bucket_name, required_versioning):
+ s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def get_bucket_encryption(s3_client, bucket_name):
+ try:
+ result = s3_client.get_bucket_encryption(Bucket=bucket_name)
+ return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
+ return None
+ else:
+ raise e
+ except (IndexError, KeyError):
+ return None
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def put_bucket_encryption(s3_client, bucket_name, encryption):
+ server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
+ s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def delete_bucket_tagging(s3_client, bucket_name):
+ s3_client.delete_bucket_tagging(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
+def delete_bucket_encryption(s3_client, bucket_name):
+ s3_client.delete_bucket_encryption(Bucket=bucket_name)
+
+
+@AWSRetry.exponential_backoff(max_delay=120)
+def delete_bucket(s3_client, bucket_name):
+ try:
+ s3_client.delete_bucket(Bucket=bucket_name)
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchBucket':
+ # This means the bucket was probably in a deleting state when we checked its existence
+ # We just ignore the error
+ pass
+ else:
+ raise e
+
+
+def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
+ for dummy in range(0, 12):
+ try:
+ current_policy = get_bucket_policy(s3_client, bucket_name)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket policy")
+
+ if compare_policies(current_policy, expected_policy):
+ time.sleep(5)
+ else:
+ return current_policy
+ if should_fail:
+ module.fail_json(msg="Bucket policy failed to apply in the expected time")
+ else:
+ return None
+
+
+def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
+ for dummy in range(0, 12):
+ try:
+ requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket request payment")
+ if requester_pays_status != expected_payer:
+ time.sleep(5)
+ else:
+ return requester_pays_status
+ if should_fail:
+ module.fail_json(msg="Bucket request payment failed to apply in the expected time")
+ else:
+ return None
+
+
+def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption):
+ for dummy in range(0, 12):
+ try:
+ encryption = get_bucket_encryption(s3_client, bucket_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
+ if encryption != expected_encryption:
+ time.sleep(5)
+ else:
+ return encryption
+ module.fail_json(msg="Bucket encryption failed to apply in the expected time")
+
+
+def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
+ for dummy in range(0, 24):
+ try:
+ versioning_status = get_bucket_versioning(s3_client, bucket_name)
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
+ if versioning_status.get('Status') != required_versioning:
+ time.sleep(8)
+ else:
+ return versioning_status
+ module.fail_json(msg="Bucket versioning failed to apply in the expected time")
+
+
+def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
+ for dummy in range(0, 12):
+ try:
+ current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
+ except (ClientError, BotoCoreError) as e:
+ module.fail_json_aws(e, msg="Failed to get bucket policy")
+ if current_tags_dict != expected_tags_dict:
+ time.sleep(5)
+ else:
+ return current_tags_dict
+ module.fail_json(msg="Bucket tags failed to apply in the expected time")
+
+
+def get_current_bucket_tags_dict(s3_client, bucket_name):
+ try:
+ current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
+ except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchTagSet':
+ return {}
+ raise e
+
+ return boto3_tag_list_to_ansible_dict(current_tags)
+
+
+def paginated_list(s3_client, **pagination_params):
+ pg = s3_client.get_paginator('list_objects_v2')
+ for page in pg.paginate(**pagination_params):
+ yield [data['Key'] for data in page.get('Contents', [])]
+
+
+def paginated_versions_list(s3_client, **pagination_params):
+ try:
+ pg = s3_client.get_paginator('list_object_versions')
+ for page in pg.paginate(**pagination_params):
+ # We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion
+ yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
+ except is_boto3_error_code('NoSuchBucket'):
+ yield []
+
+
+def destroy_bucket(s3_client, module):
+
+ force = module.params.get("force")
+ name = module.params.get("name")
+ try:
+ bucket_is_present = bucket_exists(s3_client, name)
+ except EndpointConnectionError as e:
+ module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to check bucket presence")
+
+ if not bucket_is_present:
+ module.exit_json(changed=False)
+
+ if force:
+ # if there are contents then we need to delete them (including versions) before we can delete the bucket
+ try:
+ for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
+ formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
+ for fk in formatted_keys:
+ # remove VersionId from cases where they are `None` so that
+ # unversioned objects are deleted using `DeleteObject`
+ # rather than `DeleteObjectVersion`, improving backwards
+ # compatibility with older IAM policies.
+ if not fk.get('VersionId'):
+ fk.pop('VersionId')
+
+ if formatted_keys:
+ resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
+ if resp.get('Errors'):
+ module.fail_json(
+ msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
+ ', '.join([k['Key'] for k in resp['Errors']])
+ ),
+ errors=resp['Errors'], response=resp
+ )
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed while deleting bucket")
+
+ try:
+ delete_bucket(s3_client, name)
+ s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
+ except WaiterError as e:
+ module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
+ except (BotoCoreError, ClientError) as e:
+ module.fail_json_aws(e, msg="Failed to delete bucket")
+
+ module.exit_json(changed=True)
+
+
+def is_fakes3(s3_url):
+ """ Return True if s3_url has scheme fakes3:// """
+ if s3_url is not None:
+ return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+ else:
+ return False
+
+
+def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
+ if s3_url and ceph: # TODO - test this
+ ceph = urlparse(s3_url)
+ params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ elif is_fakes3(s3_url):
+ fakes3 = urlparse(s3_url)
+ port = fakes3.port
+ if fakes3.scheme == 'fakes3s':
+ protocol = "https"
+ if port is None:
+ port = 443
+ else:
+ protocol = "http"
+ if port is None:
+ port = 80
+ params = dict(module=module, conn_type='client', resource='s3', region=location,
+ endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
+ use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
+ else:
+ params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
+ return boto3_conn(**params)
+
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ force=dict(default=False, type='bool'),
+ policy=dict(type='json'),
+ name=dict(required=True),
+ requester_pays=dict(default=False, type='bool'),
+ s3_url=dict(aliases=['S3_URL']),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='dict'),
+ purge_tags=dict(type='bool', default=True),
+ versioning=dict(type='bool'),
+ ceph=dict(default=False, type='bool'),
+ encryption=dict(choices=['none', 'AES256', 'aws:kms']),
+ encryption_key_id=dict()
+ )
+ )
+
+ module = AnsibleAWSModule(
+ argument_spec=argument_spec,
+ )
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+
+ if region in ('us-east-1', '', None):
+ # default to US Standard region
+ location = 'us-east-1'
+ else:
+ # Boto uses symbolic names for locations but region strings will
+ # actually work fine for everything except us-east-1 (US Standard)
+ location = region
+
+ s3_url = module.params.get('s3_url')
+ ceph = module.params.get('ceph')
+
+ # allow eucarc environment variables to be used if ansible vars aren't set
+ if not s3_url and 'S3_URL' in os.environ:
+ s3_url = os.environ['S3_URL']
+
+ if ceph and not s3_url:
+ module.fail_json(msg='ceph flavour requires s3_url')
+
+ # Look at s3_url and tweak connection settings
+ # if connecting to Ceph RGW, Walrus or fakes3
+ if s3_url:
+ for key in ['validate_certs', 'security_token', 'profile_name']:
+ aws_connect_kwargs.pop(key, None)
+ s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)
+
+ if s3_client is None: # this should never happen
+ module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')
+
+ state = module.params.get("state")
+ encryption = module.params.get("encryption")
+ encryption_key_id = module.params.get("encryption_key_id")
+
+ # Parameter validation
+ if encryption_key_id is not None and encryption is None:
+ module.fail_json(msg="You must specify encryption parameter along with encryption_key_id.")
+ elif encryption_key_id is not None and encryption != 'aws:kms':
+ module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")
+
+ if state == 'present':
+ create_or_update_bucket(s3_client, module, location)
+ elif state == 'absent':
+ destroy_bucket(s3_client, module)
+
+
+if __name__ == '__main__':
+ main()
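
The wait_* helpers above all follow the same write-then-poll pattern: apply a setting, then poll the corresponding get call until it reports the expected value or the attempts run out. A generic sketch of that pattern, where check is a hypothetical stand-in for the relevant boto3 getter:

import time

def wait_until(check, expected, attempts=12, delay=5):
    # Poll check() until it returns the expected value, mirroring
    # wait_policy_is_applied / wait_tags_are_applied above.
    for _ in range(attempts):
        current = check()
        if current == expected:
            return current
        time.sleep(delay)
    raise RuntimeError('setting failed to apply in the expected time')

# Example usage against a hypothetical getter:
# wait_until(lambda: get_bucket_request_payment(s3_client, name), 'Requester')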
diff --git a/test/support/integration/plugins/modules/sts_assume_role.py b/test/support/integration/plugins/modules/sts_assume_role.py
new file mode 100644
index 0000000000..cd82a549cb
--- /dev/null
+++ b/test/support/integration/plugins/modules/sts_assume_role.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: sts_assume_role
+short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
+description:
+ - Assume a role using AWS Security Token Service and obtain temporary credentials.
+version_added: "2.0"
+author:
+ - Boris Ekelchik (@bekelchik)
+ - Marek Piatek (@piontas)
+options:
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the role that the caller is
+ assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs).
+ required: true
+ type: str
+ role_session_name:
+ description:
+ - Name of the role's session - will be used by CloudTrail.
+ required: true
+ type: str
+ policy:
+ description:
+ - Supplemental policy to use in addition to assumed role's policies.
+ type: str
+ duration_seconds:
+ description:
+ - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours).
+ - The max depends on the IAM role's sessions duration setting.
+ - By default, the value is set to 3600 seconds.
+ type: int
+ external_id:
+ description:
+ - A unique identifier that is used by third parties to assume a role in their customers' accounts.
+ type: str
+ mfa_serial_number:
+ description:
+ - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
+ type: str
+ mfa_token:
+ description:
+ - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
+ type: str
+notes:
+ - In order to use the assumed role in a following playbook task you must pass the access_key, secret_key and session_token from the returned sts_creds.
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements:
+ - boto3
+ - botocore
+ - python >= 2.6
+'''
+
+RETURN = '''
+sts_creds:
+ description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token
+ returned: always
+ type: dict
+ sample:
+ access_key: XXXXXXXXXXXXXXXXXXXX
+ expiration: 2017-11-11T11:11:11+00:00
+ secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+sts_user:
+ description: The Amazon Resource Name (ARN) and the assumed role ID
+ returned: always
+ type: dict
+ sample:
+ assumed_role_id: ARO123EXAMPLE123:Bob
+ arn: arn:aws:sts::123456789012:assumed-role/demo/Bob
+changed:
+ description: True if obtaining the credentials succeeds
+ type: bool
+ returned: always
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
+- sts_assume_role:
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+ register: assumed_role
+
+# Use the assumed role above to tag an instance in account 123456789012
+- ec2_tag:
+ aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ assumed_role.sts_creds.session_token }}"
+ resource: i-xyzxyz01
+ state: present
+ tags:
+ MyNewTag: value
+
+'''
+
+from ansible.module_utils.aws.core import AnsibleAWSModule
+from ansible.module_utils.ec2 import camel_dict_to_snake_dict
+
+try:
+ from botocore.exceptions import ClientError, ParamValidationError
+except ImportError:
+ pass # caught by AnsibleAWSModule
+
+
+def _parse_response(response):
+ credentials = response.get('Credentials', {})
+ user = response.get('AssumedRoleUser', {})
+
+ sts_cred = {
+ 'access_key': credentials.get('AccessKeyId'),
+ 'secret_key': credentials.get('SecretAccessKey'),
+ 'session_token': credentials.get('SessionToken'),
+ 'expiration': credentials.get('Expiration')
+
+ }
+ sts_user = camel_dict_to_snake_dict(user)
+ return sts_cred, sts_user
+
+
+def assume_role_policy(connection, module):
+ params = {
+ 'RoleArn': module.params.get('role_arn'),
+ 'RoleSessionName': module.params.get('role_session_name'),
+ 'Policy': module.params.get('policy'),
+ 'DurationSeconds': module.params.get('duration_seconds'),
+ 'ExternalId': module.params.get('external_id'),
+ 'SerialNumber': module.params.get('mfa_serial_number'),
+ 'TokenCode': module.params.get('mfa_token')
+ }
+ changed = False
+
+ kwargs = dict((k, v) for k, v in params.items() if v is not None)
+
+ try:
+ response = connection.assume_role(**kwargs)
+ changed = True
+ except (ClientError, ParamValidationError) as e:
+ module.fail_json_aws(e)
+
+ sts_cred, sts_user = _parse_response(response)
+ module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user)
+
+
+def main():
+ argument_spec = dict(
+ role_arn=dict(required=True),
+ role_session_name=dict(required=True),
+ duration_seconds=dict(required=False, default=None, type='int'),
+ external_id=dict(required=False, default=None),
+ policy=dict(required=False, default=None),
+ mfa_serial_number=dict(required=False, default=None),
+ mfa_token=dict(required=False, default=None)
+ )
+
+ module = AnsibleAWSModule(argument_spec=argument_spec)
+
+ connection = module.client('sts')
+
+ assume_role_policy(connection, module)
+
+
+if __name__ == '__main__':
+ main()
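
The assume_role_policy function above builds the request by dropping every parameter that was not supplied. A minimal sketch of the same call made directly with boto3 (this assumes boto3 is installed and AWS credentials are configured in the environment; it is not how the module itself connects):

import boto3

def assume(role_arn, session_name, duration_seconds=None, external_id=None):
    params = {
        'RoleArn': role_arn,
        'RoleSessionName': session_name,
        'DurationSeconds': duration_seconds,
        'ExternalId': external_id,
    }
    # Drop unset parameters, as the module does, so boto3 only sees real values.
    kwargs = {k: v for k, v in params.items() if v is not None}
    return boto3.client('sts').assume_role(**kwargs)['Credentials']

# creds = assume('arn:aws:iam::123456789012:role/someRole', 'someRoleSession')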
diff --git a/test/support/integration/plugins/modules/tower_credential_type.py b/test/support/integration/plugins/modules/tower_credential_type.py
new file mode 100644
index 0000000000..831a35ad3f
--- /dev/null
+++ b/test/support/integration/plugins/modules/tower_credential_type.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+#
+# (c) 2018, Adrien Fleury <fleu42@gmail.com>
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'metadata_version': '1.1'}
+
+
+DOCUMENTATION = '''
+---
+module: tower_credential_type
+author: "Adrien Fleury (@fleu42)"
+version_added: "2.7"
+short_description: Create, update, or destroy custom Ansible Tower credential type.
+description:
+ - Create, update, or destroy Ansible Tower credential type. See
+ U(https://www.ansible.com/tower) for an overview.
+options:
+ name:
+ description:
+ - The name of the credential type.
+ required: True
+ description:
+ description:
+ - The description of the credential type to give more detail about it.
+ required: False
+ kind:
+ description:
+ - >-
+ The type of credential type being added. Note that only cloud and
+ net can be used for creating credential types. Refer to the Ansible
+ Tower documentation for more information.
+ choices: [ 'ssh', 'vault', 'net', 'scm', 'cloud', 'insights' ]
+ required: False
+ inputs:
+ description:
+ - >-
+ Enter inputs using either JSON or YAML syntax. Refer to the Ansible
+ Tower documentation for example syntax.
+ required: False
+ injectors:
+ description:
+ - >-
+ Enter injectors using either JSON or YAML syntax. Refer to the
+ Ansible Tower documentation for example syntax.
+ required: False
+ state:
+ description:
+ - Desired state of the resource.
+ required: False
+ default: "present"
+ choices: ["present", "absent"]
+ validate_certs:
+ description:
+ - Tower option to skip SSL certificate validation.
+ required: False
+ type: bool
+ aliases: [ tower_verify_ssl ]
+extends_documentation_fragment: tower
+'''
+
+
+EXAMPLES = '''
+- tower_credential_type:
+ name: Nexus
+ description: Credentials type for Nexus
+ kind: cloud
+ inputs: "{{ lookup('file', 'tower_credential_inputs_nexus.json') }}"
+ injectors: {'extra_vars': {'nexus_credential': 'test' }}
+ state: present
+ validate_certs: false
+
+- tower_credential_type:
+ name: Nexus
+ state: absent
+'''
+
+
+RETURN = ''' # '''
+
+
+from ansible.module_utils.ansible_tower import (
+ TowerModule,
+ tower_auth_config,
+ tower_check_mode
+)
+
+try:
+ import tower_cli
+ import tower_cli.exceptions as exc
+ from tower_cli.conf import settings
+except ImportError:
+ pass
+
+
+KIND_CHOICES = {
+ 'ssh': 'Machine',
+ 'vault': 'Ansible Vault',
+ 'net': 'Network',
+ 'scm': 'Source Control',
+ 'cloud': 'Lots of others',
+ 'insights': 'Insights'
+}
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(required=False),
+ kind=dict(required=False, choices=KIND_CHOICES.keys()),
+ inputs=dict(type='dict', required=False),
+ injectors=dict(type='dict', required=False),
+ state=dict(choices=['present', 'absent'], default='present'),
+ )
+
+ module = TowerModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False
+ )
+
+ name = module.params.get('name')
+ kind = module.params.get('kind')
+ state = module.params.get('state')
+
+ json_output = {'credential_type': name, 'state': state}
+
+ tower_auth = tower_auth_config(module)
+ with settings.runtime_values(**tower_auth):
+ tower_check_mode(module)
+ credential_type_res = tower_cli.get_resource('credential_type')
+
+ params = {}
+ params['name'] = name
+ params['kind'] = kind
+ params['managed_by_tower'] = False
+
+ if module.params.get('description'):
+ params['description'] = module.params.get('description')
+
+ if module.params.get('inputs'):
+ params['inputs'] = module.params.get('inputs')
+
+ if module.params.get('injectors'):
+ params['injectors'] = module.params.get('injectors')
+
+ try:
+ if state == 'present':
+ params['create_on_missing'] = True
+ result = credential_type_res.modify(**params)
+ json_output['id'] = result['id']
+ elif state == 'absent':
+ params['fail_on_missing'] = False
+ result = credential_type_res.delete(**params)
+
+ except (exc.ConnectionError, exc.BadRequest, exc.AuthError) as excinfo:
+ module.fail_json(
+ msg='Failed to update credential type: {0}'.format(excinfo),
+ changed=False
+ )
+
+ json_output['changed'] = result['changed']
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/tower_receive.py b/test/support/integration/plugins/modules/tower_receive.py
new file mode 100644
index 0000000000..57fdd16df4
--- /dev/null
+++ b/test/support/integration/plugins/modules/tower_receive.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2017, John Westcott IV <john.westcott.iv@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: tower_receive
+author: "John Westcott IV (@john-westcott-iv)"
+version_added: "2.8"
+short_description: Receive assets from Ansible Tower.
+description:
+ - Receive assets from Ansible Tower. See
+ U(https://www.ansible.com/tower) for an overview.
+options:
+ all:
+ description:
+ - Export all assets
+ type: bool
+ default: 'False'
+ organization:
+ description:
+ - List of organization names to export
+ default: []
+ user:
+ description:
+ - List of user names to export
+ default: []
+ team:
+ description:
+ - List of team names to export
+ default: []
+ credential_type:
+ description:
+ - List of credential type names to export
+ default: []
+ credential:
+ description:
+ - List of credential names to export
+ default: []
+ notification_template:
+ description:
+ - List of notification template names to export
+ default: []
+ inventory_script:
+ description:
+ - List of inventory script names to export
+ default: []
+ inventory:
+ description:
+ - List of inventory names to export
+ default: []
+ project:
+ description:
+ - List of project names to export
+ default: []
+ job_template:
+ description:
+ - List of job template names to export
+ default: []
+ workflow:
+ description:
+ - List of workflow names to export
+ default: []
+
+requirements:
+ - "ansible-tower-cli >= 3.3.0"
+
+notes:
+ - Specifying a name of "all" for any asset type will export all items of that asset type.
+
+extends_documentation_fragment: tower
+'''
+
+EXAMPLES = '''
+- name: Export all tower assets
+ tower_receive:
+ all: True
+ tower_config_file: "~/tower_cli.cfg"
+
+- name: Export all inventories
+ tower_receive:
+ inventory:
+ - all
+
+- name: Export a job template named "My Template" and all Credentials
+ tower_receive:
+ job_template:
+ - "My Template"
+ credential:
+ - all
+'''
+
+RETURN = '''
+assets:
+ description: The exported assets
+ returned: success
+ type: dict
+ sample: [ {}, {} ]
+'''
+
+from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, HAS_TOWER_CLI
+
+try:
+ from tower_cli.cli.transfer.receive import Receiver
+ from tower_cli.cli.transfer.common import SEND_ORDER
+ from tower_cli.utils.exceptions import TowerCLIError
+
+ from tower_cli.conf import settings
+ TOWER_CLI_HAS_EXPORT = True
+except ImportError:
+ TOWER_CLI_HAS_EXPORT = False
+
+
+def main():
+ argument_spec = dict(
+ all=dict(type='bool', default=False),
+ credential=dict(type='list', default=[]),
+ credential_type=dict(type='list', default=[]),
+ inventory=dict(type='list', default=[]),
+ inventory_script=dict(type='list', default=[]),
+ job_template=dict(type='list', default=[]),
+ notification_template=dict(type='list', default=[]),
+ organization=dict(type='list', default=[]),
+ project=dict(type='list', default=[]),
+ team=dict(type='list', default=[]),
+ user=dict(type='list', default=[]),
+ workflow=dict(type='list', default=[]),
+ )
+
+ module = TowerModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_TOWER_CLI:
+ module.fail_json(msg='ansible-tower-cli required for this module')
+
+ if not TOWER_CLI_HAS_EXPORT:
+ module.fail_json(msg='ansible-tower-cli version does not support export')
+
+ export_all = module.params.get('all')
+ assets_to_export = {}
+ for asset_type in SEND_ORDER:
+ assets_to_export[asset_type] = module.params.get(asset_type)
+
+ result = dict(
+ assets=None,
+ changed=False,
+ message='',
+ )
+
+ tower_auth = tower_auth_config(module)
+ with settings.runtime_values(**tower_auth):
+ try:
+ receiver = Receiver()
+ result['assets'] = receiver.export_assets(all=export_all, asset_input=assets_to_export)
+ module.exit_json(**result)
+ except TowerCLIError as e:
+ result['message'] = e.message
+ module.fail_json(msg='Receive Failed', **result)
+
+
+if __name__ == '__main__':
+ main()
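
The assets returned by tower_receive are plain Python structures, so a common follow-up is to persist them for a later import. A trivial illustrative sketch (the file name is arbitrary):

import json

def save_assets(assets, path='tower_assets.json'):
    # Dump the exported Tower objects so they can be re-imported later.
    with open(path, 'w') as f:
        json.dump(assets, f, indent=2)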
diff --git a/test/support/integration/plugins/modules/vmware_guest.py b/test/support/integration/plugins/modules/vmware_guest.py
new file mode 100644
index 0000000000..df9f695be5
--- /dev/null
+++ b/test/support/integration/plugins/modules/vmware_guest.py
@@ -0,0 +1,2914 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This module is also sponsored by E.T.A.I. (www.etai.fr)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = r'''
+---
+module: vmware_guest
+short_description: Manages virtual machines in vCenter
+description: >
+ This module can be used to create new virtual machines from templates or other virtual machines,
+ manage power state of virtual machine such as power on, power off, suspend, shutdown, reboot, restart etc.,
+ modify various virtual machine components like network, disk, customization etc.,
+ rename a virtual machine and remove a virtual machine with associated components.
+version_added: '2.2'
+author:
+- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+- Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
+- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
+requirements:
+- python >= 2.6
+- PyVmomi
+notes:
+ - Please make sure that the user used for M(vmware_guest) has the correct level of privileges.
+ - For example, following is the list of minimum privileges required by users to create virtual machines.
+ - " DataStore > Allocate Space"
+ - " Virtual Machine > Configuration > Add New Disk"
+ - " Virtual Machine > Configuration > Add or Remove Device"
+ - " Virtual Machine > Inventory > Create New"
+ - " Network > Assign Network"
+ - " Resource > Assign Virtual Machine to Resource Pool"
+ - "Module may require additional privileges as well, which may be required for gathering facts - e.g. ESXi configurations."
+ - Tested on vSphere 5.5, 6.0, 6.5 and 6.7.
+ - Use SCSI disks instead of IDE when you want to expand online disks by specifying a SCSI controller.
+ - Uses SysPrep for Windows VMs (when the 'guest_id' parameter matches 'win') with PyVmomi.
+ - In order to change the VM's parameters (e.g. number of CPUs), the VM must be powered off unless the hot-add
+ support is enabled and the C(state=present) must be used to apply the changes.
+ - "For additional information please visit Ansible VMware community wiki - U(https://github.com/ansible/community/wiki/VMware)."
+options:
+ state:
+ description:
+ - Specify the state the virtual machine should be in.
+ - 'If C(state) is set to C(present) and virtual machine exists, ensure the virtual machine
+ configurations conforms to task arguments.'
+ - 'If C(state) is set to C(absent) and virtual machine exists, then the specified virtual machine
+ is removed with its associated components.'
+ - 'If C(state) is set to one of the following C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
+ and the virtual machine does not exist, then the virtual machine is deployed with the given parameters.'
+ - 'If C(state) is set to C(poweredon) and virtual machine exists with powerstate other than powered on,
+ then the specified virtual machine is powered on.'
+ - 'If C(state) is set to C(poweredoff) and virtual machine exists with powerstate other than powered off,
+ then the specified virtual machine is powered off.'
+ - 'If C(state) is set to C(restarted) and virtual machine exists, then the virtual machine is restarted.'
+ - 'If C(state) is set to C(suspended) and virtual machine exists, then the virtual machine is set to suspended mode.'
+ - 'If C(state) is set to C(shutdownguest) and virtual machine exists, then the virtual machine is shutdown.'
+ - 'If C(state) is set to C(rebootguest) and virtual machine exists, then the virtual machine is rebooted.'
+ default: present
+ choices: [ present, absent, poweredon, poweredoff, restarted, suspended, shutdownguest, rebootguest ]
+ name:
+ description:
+ - Name of the virtual machine to work with.
+ - Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
+ - 'If multiple virtual machines with the same name exist, then C(folder) is a required parameter to
+ uniquely identify the virtual machine.'
+ - This parameter is required, if C(state) is set to C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
+ and the virtual machine does not exist.
+ - This parameter is case sensitive.
+ required: yes
+ name_match:
+ description:
+ - If multiple virtual machines match the name, use the first or last one found.
+ default: 'first'
+ choices: [ first, last ]
+ uuid:
+ description:
+ - UUID of the virtual machine to manage if known, this is VMware's unique identifier.
+ - This is required if C(name) is not supplied.
+ - If the virtual machine does not exist, then this parameter is ignored.
+ - Please note that a supplied UUID will be ignored on virtual machine creation, as VMware creates the UUID internally.
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: no
+ type: bool
+ version_added: '2.8'
+ template:
+ description:
+ - Template or existing virtual machine used to create the new virtual machine.
+ - If this value is not set, virtual machine is created without using a template.
+ - If the virtual machine already exists, this parameter will be ignored.
+ - This parameter is case sensitive.
+ - You can also specify a template or VM UUID to identify the source. version_added 2.8. Use C(hw_product_uuid) from M(vmware_guest_facts) as the UUID value.
+ - From version 2.8 onwards, absolute path to virtual machine or template can be used.
+ aliases: [ 'template_src' ]
+ is_template:
+ description:
+ - Flag the instance as a template.
+ - This will mark the given virtual machine as a template.
+ default: 'no'
+ type: bool
+ version_added: '2.3'
+ folder:
+ description:
+ - Destination folder, absolute path to find an existing guest or create the new guest.
+ - The folder should include the datacenter. ESX's datacenter is ha-datacenter.
+ - This parameter is case sensitive.
+ - This parameter is required while deploying a new virtual machine. version_added 2.5.
+ - 'If multiple machines are found with the same name, this parameter is used to uniquely
+ identify the virtual machine. version_added 2.5'
+ - 'Examples:'
+ - ' folder: /ha-datacenter/vm'
+ - ' folder: ha-datacenter/vm'
+ - ' folder: /datacenter1/vm'
+ - ' folder: datacenter1/vm'
+ - ' folder: /datacenter1/vm/folder1'
+ - ' folder: datacenter1/vm/folder1'
+ - ' folder: /folder1/datacenter1/vm'
+ - ' folder: folder1/datacenter1/vm'
+ - ' folder: /folder1/datacenter1/vm/folder2'
+ hardware:
+ description:
+ - Manage virtual machine's hardware attributes.
+ - All parameters are case sensitive.
+ - 'Valid attributes are:'
+ - ' - C(hotadd_cpu) (boolean): Allow virtual CPUs to be added while the virtual machine is running.'
+ - ' - C(hotremove_cpu) (boolean): Allow virtual CPUs to be removed while the virtual machine is running.
+ version_added: 2.5'
+ - ' - C(hotadd_memory) (boolean): Allow memory to be added while the virtual machine is running.'
+ - ' - C(memory_mb) (integer): Amount of memory in MB.'
+ - ' - C(nested_virt) (bool): Enable nested virtualization. version_added: 2.5'
+ - ' - C(num_cpus) (integer): Number of CPUs.'
+ - ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket.'
+ - " C(num_cpus) must be a multiple of C(num_cpu_cores_per_socket).
+ For example to create a VM with 2 sockets of 4 cores, specify C(num_cpus): 8 and C(num_cpu_cores_per_socket): 4"
+ - ' - C(scsi) (string): Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual) (default).'
+ - " - C(memory_reservation_lock) (boolean): If set true, memory resource reservation for the virtual machine
+ will always be equal to the virtual machine's memory size. version_added: 2.5"
+ - ' - C(max_connections) (integer): Maximum number of active remote display connections for the virtual machine.
+ version_added: 2.5.'
+ - ' - C(mem_limit) (integer): The memory utilization of a virtual machine will not exceed this limit. Unit is MB.
+ version_added: 2.5'
+ - ' - C(mem_reservation) (integer): The amount of memory resource that is guaranteed available to the virtual
+ machine. Unit is MB. C(memory_reservation) is alias to this. version_added: 2.5'
+ - ' - C(cpu_limit) (integer): The CPU utilization of a virtual machine will not exceed this limit. Unit is MHz.
+ version_added: 2.5'
+ - ' - C(cpu_reservation) (integer): The amount of CPU resource that is guaranteed available to the virtual machine.
+ Unit is MHz. version_added: 2.5'
+ - ' - C(version) (integer): The virtual machine hardware version. Default is 10 (ESXi 5.5 and onwards).
+ If value specified as C(latest), version is set to the most current virtual hardware supported on the host.
+ C(latest) is added in version 2.10.
+ Please check VMware documentation for correct virtual machine hardware version.
+ Incorrect hardware version may lead to failure in deployment. If hardware version is already equal to the given
+ version then no action is taken. version_added: 2.6'
+ - ' - C(boot_firmware) (string): Choose which firmware should be used to boot the virtual machine.
+ Allowed values are "bios" and "efi". version_added: 2.7'
+ - ' - C(virt_based_security) (bool): Enable Virtualization Based Security feature for Windows 10.
+ (Support from Virtual machine hardware version 14, Guest OS Windows 10 64 bit, Windows Server 2016)'
+
+ guest_id:
+ description:
+ - Set the guest ID.
+ - This parameter is case sensitive.
+ - 'Examples:'
+ - " virtual machine with RHEL7 64 bit, will be 'rhel7_64Guest'"
+ - " virtual machine with CentOS 64 bit, will be 'centos64Guest'"
+ - " virtual machine with Ubuntu 64 bit, will be 'ubuntu64Guest'"
+ - This field is required when creating a virtual machine, but not when creating it from a template.
+ - >
+ Valid values are referenced here:
+ U(https://code.vmware.com/apis/358/doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html)
+ version_added: '2.3'
+ disk:
+ description:
+ - A list of disks to add.
+ - This parameter is case sensitive.
+ - Shrinking disks is not supported.
+ - Removing existing disks of the virtual machine is not supported.
+ - 'Valid attributes are:'
+ - ' - C(size_[tb,gb,mb,kb]) (integer): Disk storage size in specified unit.'
+ - ' - C(type) (string): Valid values are:'
+ - ' - C(thin) thin disk'
+ - ' - C(eagerzeroedthick) eagerzeroedthick disk, added in version 2.5'
+ - ' Default: C(None) thick disk, no eagerzero.'
+ - ' - C(datastore) (string): The name of the datastore which will be used for the disk. If C(autoselect_datastore) is set to True,
+ then the least used datastore whose name contains this "disk.datastore" string will be selected.'
+ - ' - C(filename) (string): Existing disk image to be used. Filename must already exist on the datastore.'
+ - ' Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in version 2.8.'
+ - ' - C(autoselect_datastore) (bool): Select the least used datastore. "disk.datastore" and "disk.autoselect_datastore"
+ will not be used if C(datastore) is specified outside this C(disk) configuration.'
+ - ' - C(disk_mode) (string): Type of disk mode. Added in version 2.6'
+ - ' - Available options are :'
+ - ' - C(persistent): Changes are immediately and permanently written to the virtual disk. This is default.'
+ - ' - C(independent_persistent): Same as persistent, but not affected by snapshots.'
+ - ' - C(independent_nonpersistent): Changes to virtual disk are made to a redo log and discarded at power off, but not affected by snapshots.'
+ cdrom:
+ description:
+ - A CD-ROM configuration for the virtual machine.
+ - Or a list of CD-ROM configurations for the virtual machine. Added in version 2.9.
+ - 'Parameters C(controller_type), C(controller_number), C(unit_number), C(state) are added for a list of CD-ROMs
+ configuration support.'
+ - 'Valid attributes are:'
+ - ' - C(type) (string): The type of CD-ROM, valid options are C(none), C(client) or C(iso). With C(none) the CD-ROM
+ will be disconnected but present.'
+ - ' - C(iso_path) (string): The datastore path to the ISO file to use, in the form of C([datastore1] path/to/file.iso).
+ Required if type is set C(iso).'
+ - ' - C(controller_type) (string): Default value is C(ide). Only the C(ide) controller type is supported for CD-ROM for
+ now; SATA controller support will be added in the future.'
+ - ' - C(controller_number) (int): For C(ide) controller, valid value is 0 or 1.'
+ - ' - C(unit_number) (int): For a CD-ROM device attached to a C(ide) controller, valid values are 0 or 1.
+ C(controller_number) and C(unit_number) are mandatory attributes.'
+ - ' - C(state) (string): Valid value is C(present) or C(absent). Default is C(present). If set to C(absent), then
+ the specified CD-ROM will be removed. For C(ide) controller, hot-add or hot-remove CD-ROM is not supported.'
+ version_added: '2.5'
+ resource_pool:
+ description:
+ - Use the given resource pool for virtual machine operation.
+ - This parameter is case sensitive.
+ - The resource pool should be a child of the selected host's parent.
+ version_added: '2.3'
+ wait_for_ip_address:
+ description:
+ - Wait until vCenter detects an IP address for the virtual machine.
+ - This requires vmware-tools (vmtoolsd) to work properly after creation.
+ - "vmware-tools needs to be installed on the given virtual machine in order to work with this parameter."
+ default: 'no'
+ type: bool
+ wait_for_ip_address_timeout:
+ description:
+ - Define a timeout (in seconds) for the wait_for_ip_address parameter.
+ default: '300'
+ type: int
+ version_added: '2.10'
+ wait_for_customization_timeout:
+ description:
+ - Define a timeout (in seconds) for the wait_for_customization parameter.
+ - Be careful when setting this value since the time guest customization takes may differ among guest OSes.
+ default: '3600'
+ type: int
+ version_added: '2.10'
+ wait_for_customization:
+ description:
+ - Wait until vCenter detects all guest customizations as successfully completed.
+ - When enabled, the VM will automatically be powered on.
+ - "If vCenter does not detect guest customization start or succeed, failed events after time
+ C(wait_for_customization_timeout) parameter specified, warning message will be printed and task result is fail."
+ default: 'no'
+ type: bool
+ version_added: '2.8'
+ state_change_timeout:
+ description:
+ - If the C(state) is set to C(shutdownguest), by default the module will return immediately after sending the shutdown signal.
+ - If this argument is set to a positive integer, the module will instead wait for the virtual machine to reach the poweredoff state.
+ - The value sets a timeout in seconds for the module to wait for the state change.
+ default: 0
+ version_added: '2.6'
+ snapshot_src:
+ description:
+ - Name of the existing snapshot to use to create a clone of a virtual machine.
+ - This parameter is case sensitive.
+ - While creating linked clone using C(linked_clone) parameter, this parameter is required.
+ version_added: '2.4'
+ linked_clone:
+ description:
+ - Whether to create a linked clone from the snapshot specified.
+ - If specified, then C(snapshot_src) is a required parameter.
+ default: 'no'
+ type: bool
+ version_added: '2.4'
+ force:
+ description:
+ - Ignore warnings and complete the actions.
+ - This parameter is useful when removing a virtual machine which is in the powered on state.
+ - 'This module reflects the VMware vCenter API and UI workflow, as such, in some cases the `force` flag will
+ be mandatory to perform the action to ensure you are certain the action has to be taken, no matter what the consequence.
+ This is specifically the case for removing a powered-on virtual machine when C(state) is set to C(absent).'
+ default: 'no'
+ type: bool
+ delete_from_inventory:
+ description:
+ - Whether to delete the virtual machine from inventory (unregister it) instead of deleting it from disk.
+ default: False
+ type: bool
+ version_added: '2.10'
+ datacenter:
+ description:
+ - Destination datacenter for the deploy operation.
+ - This parameter is case sensitive.
+ default: ha-datacenter
+ cluster:
+ description:
+ - The cluster name where the virtual machine will run.
+ - This is a required parameter, if C(esxi_hostname) is not set.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ - This parameter is case sensitive.
+ version_added: '2.3'
+ esxi_hostname:
+ description:
+ - The ESXi hostname where the virtual machine will run.
+ - This is a required parameter, if C(cluster) is not set.
+ - C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
+ - This parameter is case sensitive.
+ annotation:
+ description:
+ - A note or annotation to include in the virtual machine.
+ version_added: '2.3'
+ customvalues:
+ description:
+ - Define a list of custom values to set on virtual machine.
+ - A custom value object takes two fields C(key) and C(value).
+ - Incorrect key and values will be ignored.
+ version_added: '2.3'
+ networks:
+ description:
+ - A list of networks (in the order of the NICs).
+ - Removing NICs is not allowed while reconfiguring the virtual machine.
+ - All parameters and VMware object names are case sensitive.
+ - 'One of the below parameters is required per entry:'
+ - ' - C(name) (string): Name of the portgroup or distributed virtual portgroup for this interface.
+ When specifying a distributed virtual portgroup, make sure the given C(esxi_hostname) or C(cluster) is associated with it.'
+ - ' - C(vlan) (integer): VLAN number for this interface.'
+ - 'Optional parameters per entry (used for virtual hardware):'
+ - ' - C(device_type) (string): Virtual network device (one of C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov)).'
+ - ' - C(mac) (string): Customize MAC address.'
+ - ' - C(dvswitch_name) (string): Name of the distributed vSwitch.
+ This value is required if multiple distributed portgroups exist with the same name. version_added 2.7'
+ - ' - C(start_connected) (bool): Specifies whether the virtual network adapter starts connected when the associated virtual machine powers on. version_added: 2.5'
+ - 'Optional parameters per entry (used for OS customization):'
+ - ' - C(type) (string): Type of IP assignment (either C(dhcp) or C(static)). C(dhcp) is default.'
+ - ' - C(ip) (string): Static IP address (implies C(type: static)).'
+ - ' - C(netmask) (string): Static netmask required for C(ip).'
+ - ' - C(gateway) (string): Static gateway.'
+ - ' - C(dns_servers) (string): DNS servers for this network interface (Windows).'
+ - ' - C(domain) (string): Domain name for this network interface (Windows).'
+ - ' - C(wake_on_lan) (bool): Indicates if wake-on-LAN is enabled on this virtual network adapter. version_added: 2.5'
+ - ' - C(allow_guest_control) (bool): Enables guest control over whether the connectable device is connected. version_added: 2.5'
+ version_added: '2.3'
+ customization:
+ description:
+ - Parameters for OS customization when cloning from the template or the virtual machine, or apply to the existing virtual machine directly.
+ - Not all operating systems are supported for customization by every vCenter version;
+ please check the VMware documentation for the respective OS customization.
+ - For supported customization operating system matrix, (see U(http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf))
+ - All parameters and VMware object names are case sensitive.
+ - Linux-based OSes require the Perl package to be installed for OS customization.
+ - 'Common parameters (Linux/Windows):'
+ - ' - C(existing_vm) (bool): If set to C(True), do OS customization on the specified virtual machine directly.
+ If set to C(False) or not specified, do OS customization when cloning from the template or the virtual machine. version_added: 2.8'
+ - ' - C(dns_servers) (list): List of DNS servers to configure.'
+ - ' - C(dns_suffix) (list): List of domain suffixes, also known as DNS search path (default: C(domain) parameter).'
+ - ' - C(domain) (string): DNS domain name to use.'
+ - ' - C(hostname) (string): Computer hostname (default: shortened C(name) parameter). Allowed characters are alphanumeric (uppercase and lowercase)
+ and minus; the rest of the characters are dropped as per RFC 952.'
+ - 'Parameters related to Linux customization:'
+ - ' - C(timezone) (string): Timezone (See List of supported time zones for different vSphere versions in Linux/Unix
+ systems (2145518) U(https://kb.vmware.com/s/article/2145518)). version_added: 2.9'
+ - ' - C(hwclockUTC) (bool): Specifies whether the hardware clock is in UTC or local time.
+ True when the hardware clock is in UTC, False when the hardware clock is in local time. version_added: 2.9'
+ - 'Parameters related to Windows customization:'
+ - ' - C(autologon) (bool): Auto logon after virtual machine customization (default: False).'
+ - ' - C(autologoncount) (int): Number of autologon after reboot (default: 1).'
+ - ' - C(domainadmin) (string): User used to join in AD domain (mandatory with C(joindomain)).'
+ - ' - C(domainadminpassword) (string): Password used to join in AD domain (mandatory with C(joindomain)).'
+ - ' - C(fullname) (string): Server owner name (default: Administrator).'
+ - ' - C(joindomain) (string): AD domain to join (Not compatible with C(joinworkgroup)).'
+ - ' - C(joinworkgroup) (string): Workgroup to join (Not compatible with C(joindomain), default: WORKGROUP).'
+ - ' - C(orgname) (string): Organisation name (default: ACME).'
+ - ' - C(password) (string): Local administrator password.'
+ - ' - C(productid) (string): Product ID.'
+ - ' - C(runonce) (list): List of commands to run at first user logon.'
+ - ' - C(timezone) (int): Timezone (See U(https://msdn.microsoft.com/en-us/library/ms912391.aspx)).'
+ version_added: '2.3'
+ vapp_properties:
+ description:
+ - A list of vApp properties.
+ - 'For full list of attributes and types refer to:'
+ - 'U(https://vdc-download.vmware.com/vmwb-repository/dcr-public/6b586ed2-655c-49d9-9029-bc416323cb22/
+ fa0b429a-a695-4c11-b7d2-2cbc284049dc/doc/vim.vApp.PropertyInfo.html)'
+ - 'Basic attributes are:'
+ - ' - C(id) (string): Property id - required.'
+ - ' - C(value) (string): Property value.'
+ - ' - C(type) (string): Value type, string type by default.'
+ - ' - C(operation): C(remove): This attribute is required only when removing properties.'
+ version_added: '2.6'
+ customization_spec:
+ description:
+ - Unique name identifying the requested customization specification.
+ - This parameter is case sensitive.
+ - If set, then overrides C(customization) parameter values.
+ version_added: '2.6'
+ datastore:
+ description:
+ - Specify the datastore or datastore cluster in which to provision the virtual machine.
+ - 'This parameter takes precedence over "disk.datastore" parameter.'
+ - 'This parameter can be used to override datastore or datastore cluster setting of the virtual machine when deployed
+ from the template.'
+ - Please see example for more usage.
+ version_added: '2.7'
+ convert:
+ description:
+ - Specify convert disk type while cloning template or virtual machine.
+ choices: [ thin, thick, eagerzeroedthick ]
+ version_added: '2.8'
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Create a virtual machine on given ESXi hostname
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ folder: /DC1/vm/
+ name: test_vm_0001
+ state: poweredon
+ guest_id: centos64Guest
+ # This is the hostname of the particular ESXi server on which the VM should be deployed
+ esxi_hostname: "{{ esxi_hostname }}"
+ disk:
+ - size_gb: 10
+ type: thin
+ datastore: datastore1
+ hardware:
+ memory_mb: 512
+ num_cpus: 4
+ scsi: paravirtual
+ networks:
+ - name: VM Network
+ mac: aa:bb:dd:aa:00:14
+ ip: 10.10.10.100
+ netmask: 255.255.255.0
+ device_type: vmxnet3
+ wait_for_ip_address: yes
+ wait_for_ip_address_timeout: 600
+ delegate_to: localhost
+ register: deploy_vm
+
+- name: Create a virtual machine from a template
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ folder: /testvms
+ name: testvm_2
+ state: poweredon
+ template: template_el7
+ disk:
+ - size_gb: 10
+ type: thin
+ datastore: g73_datastore
+ # Add another disk from an existing VMDK
+ - filename: "[datastore1] testvms/testvm_2_1/testvm_2_1.vmdk"
+ hardware:
+ memory_mb: 512
+ num_cpus: 6
+ num_cpu_cores_per_socket: 3
+ scsi: paravirtual
+ memory_reservation_lock: True
+ mem_limit: 8096
+ mem_reservation: 4096
+ cpu_limit: 8096
+ cpu_reservation: 4096
+ max_connections: 5
+ hotadd_cpu: True
+ hotremove_cpu: True
+ hotadd_memory: False
+ version: 12 # Hardware version of virtual machine
+ boot_firmware: "efi"
+ cdrom:
+ type: iso
+ iso_path: "[datastore1] livecd.iso"
+ networks:
+ - name: VM Network
+ mac: aa:bb:dd:aa:00:14
+ wait_for_ip_address: yes
+ delegate_to: localhost
+ register: deploy
+
+- name: Clone a virtual machine from Windows template and customize
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ datacenter: datacenter1
+ cluster: cluster
+ name: testvm-2
+ template: template_windows
+ networks:
+ - name: VM Network
+ ip: 192.168.1.100
+ netmask: 255.255.255.0
+ gateway: 192.168.1.1
+ mac: aa:bb:dd:aa:00:14
+ domain: my_domain
+ dns_servers:
+ - 192.168.1.1
+ - 192.168.1.2
+ - vlan: 1234
+ type: dhcp
+ customization:
+ autologon: yes
+ dns_servers:
+ - 192.168.1.1
+ - 192.168.1.2
+ domain: my_domain
+ password: new_vm_password
+ runonce:
+ - powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
+ delegate_to: localhost
+
+- name: Clone a virtual machine from Linux template and customize
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ datacenter: "{{ datacenter }}"
+ state: present
+ folder: /DC1/vm
+ template: "{{ template }}"
+ name: "{{ vm_name }}"
+ cluster: DC1_C1
+ networks:
+ - name: VM Network
+ ip: 192.168.10.11
+ netmask: 255.255.255.0
+ wait_for_ip_address: True
+ customization:
+ domain: "{{ guest_domain }}"
+ dns_servers:
+ - 8.9.9.9
+ - 7.8.8.9
+ dns_suffix:
+ - example.com
+ - example2.com
+ delegate_to: localhost
+
+- name: Rename a virtual machine (requires the virtual machine's uuid)
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ uuid: "{{ vm_uuid }}"
+ name: new_name
+ state: present
+ delegate_to: localhost
+
+- name: Remove a virtual machine by uuid
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ uuid: "{{ vm_uuid }}"
+ state: absent
+ delegate_to: localhost
+
+- name: Remove a virtual machine from inventory
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ name: vm_name
+ delete_from_inventory: True
+ state: absent
+ delegate_to: localhost
+
+- name: Manipulate vApp properties
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ name: vm_name
+ state: present
+ vapp_properties:
+ - id: remoteIP
+ category: Backup
+ label: Backup server IP
+ type: string
+ value: 10.10.10.1
+ - id: old_property
+ operation: remove
+ delegate_to: localhost
+
+- name: Set powerstate of a virtual machine to poweroff by using UUID
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ uuid: "{{ vm_uuid }}"
+ state: poweredoff
+ delegate_to: localhost
+
+- name: Deploy a virtual machine in a datastore different from the datastore of the template
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ name: "{{ vm_name }}"
+ state: present
+ template: "{{ template_name }}"
+ # Here the datastore can be different from the one which holds the template
+ datastore: "{{ virtual_machine_datastore }}"
+ hardware:
+ memory_mb: 512
+ num_cpus: 2
+ scsi: paravirtual
+ delegate_to: localhost
+
+- name: Create a diskless VM
+ vmware_guest:
+ validate_certs: False
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ datacenter: "{{ dc1 }}"
+ state: poweredoff
+ cluster: "{{ ccr1 }}"
+ name: diskless_vm
+ folder: /Asia-Datacenter1/vm
+ guest_id: centos64Guest
+ datastore: "{{ ds1 }}"
+ hardware:
+ memory_mb: 1024
+ num_cpus: 2
+ num_cpu_cores_per_socket: 1
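+
+# An illustrative sketch, not part of the original examples: convert an existing,
+# powered-off virtual machine into a template using the is_template flag.
+# The VM name used here is a placeholder.
+- name: Convert an existing virtual machine to a template
+ vmware_guest:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ validate_certs: no
+ name: testvm_to_template
+ is_template: yes
+ delegate_to: localhost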
+'''
+
+RETURN = r'''
+instance:
+ description: metadata about the new virtual machine
+ returned: always
+ type: dict
+ sample: None
+'''
+
+import re
+import time
+import string
+
+HAS_PYVMOMI = False
+try:
+ from pyVmomi import vim, vmodl, VmomiSupport
+ HAS_PYVMOMI = True
+except ImportError:
+ pass
+
+from random import randint
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+from ansible.module_utils._text import to_text, to_native
+from ansible.module_utils.vmware import (find_obj, gather_vm_facts, get_all_objs,
+ compile_folder_path_for_object, serialize_spec,
+ vmware_argument_spec, set_vm_power_state, PyVmomi,
+ find_dvs_by_name, find_dvspg_by_name, wait_for_vm_ip,
+ wait_for_task, TaskError, quote_obj_name)
+
+
+def list_or_dict(value):
+ if isinstance(value, list) or isinstance(value, dict):
+ return value
+ else:
+ raise ValueError("'%s' is not valid, valid type is 'list' or 'dict'." % value)
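+
+# Usage sketch (an assumption, the argument spec is not shown in this hunk): this
+# helper can be passed as a custom 'type' callable in the module argument spec so
+# that a parameter accepts either a dict or a list of dicts, for example:
+#
+#   cdrom=dict(type=list_or_dict, default=[])
+#
+# AnsibleModule then calls list_or_dict() on the raw value and reports the
+# ValueError message to the user when validation fails.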
+
+
+class PyVmomiDeviceHelper(object):
+ """ This class is a helper to create easily VMware Objects for PyVmomiHelper """
+
+ def __init__(self, module):
+ self.module = module
+ self.next_disk_unit_number = 0
+ self.scsi_device_type = {
+ 'lsilogic': vim.vm.device.VirtualLsiLogicController,
+ 'paravirtual': vim.vm.device.ParaVirtualSCSIController,
+ 'buslogic': vim.vm.device.VirtualBusLogicController,
+ 'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController,
+ }
+
+ def create_scsi_controller(self, scsi_type):
+ scsi_ctl = vim.vm.device.VirtualDeviceSpec()
+ scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ scsi_device = self.scsi_device_type.get(scsi_type, vim.vm.device.ParaVirtualSCSIController)
+ scsi_ctl.device = scsi_device()
+ scsi_ctl.device.busNumber = 0
+ # While creating a new SCSI controller, temporary key value
+ # should be unique negative integers
+ scsi_ctl.device.key = -randint(1000, 9999)
+ scsi_ctl.device.hotAddRemove = True
+ scsi_ctl.device.sharedBus = 'noSharing'
+ scsi_ctl.device.scsiCtlrUnitNumber = 7
+
+ return scsi_ctl
+
+ def is_scsi_controller(self, device):
+ return isinstance(device, tuple(self.scsi_device_type.values()))
+
+ @staticmethod
+ def create_ide_controller(bus_number=0):
+ ide_ctl = vim.vm.device.VirtualDeviceSpec()
+ ide_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ ide_ctl.device = vim.vm.device.VirtualIDEController()
+ ide_ctl.device.deviceInfo = vim.Description()
+ # While creating a new IDE controller, temporary key value
+ # should be unique negative integers
+ ide_ctl.device.key = -randint(200, 299)
+ ide_ctl.device.busNumber = bus_number
+
+ return ide_ctl
+
+ @staticmethod
+ def create_cdrom(ide_device, cdrom_type, iso_path=None, unit_number=0):
+ cdrom_spec = vim.vm.device.VirtualDeviceSpec()
+ cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ cdrom_spec.device = vim.vm.device.VirtualCdrom()
+ cdrom_spec.device.controllerKey = ide_device.key
+ cdrom_spec.device.key = -randint(3000, 3999)
+ cdrom_spec.device.unitNumber = unit_number
+ cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+ cdrom_spec.device.connectable.allowGuestControl = True
+ cdrom_spec.device.connectable.startConnected = (cdrom_type != "none")
+ if cdrom_type in ["none", "client"]:
+ cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
+ elif cdrom_type == "iso":
+ cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
+
+ return cdrom_spec
+
+ @staticmethod
+ def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path):
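+ # Returns True when the existing CD-ROM device already matches the requested
+ # type (none/client/iso), ISO path and connection settings, taking the current
+ # VM power state into account.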
+ if cdrom_type == "none":
+ return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
+ cdrom_device.connectable.allowGuestControl and
+ not cdrom_device.connectable.startConnected and
+ (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or not cdrom_device.connectable.connected))
+ elif cdrom_type == "client":
+ return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
+ cdrom_device.connectable.allowGuestControl and
+ cdrom_device.connectable.startConnected and
+ (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
+ elif cdrom_type == "iso":
+ return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo) and
+ cdrom_device.backing.fileName == iso_path and
+ cdrom_device.connectable.allowGuestControl and
+ cdrom_device.connectable.startConnected and
+ (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
+
+ @staticmethod
+ def update_cdrom_config(vm_obj, cdrom_spec, cdrom_device, iso_path=None):
+ # Updating an existing CD-ROM
+ if cdrom_spec["type"] in ["client", "none"]:
+ cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
+ elif cdrom_spec["type"] == "iso" and iso_path is not None:
+ cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
+ cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+ cdrom_device.connectable.allowGuestControl = True
+ cdrom_device.connectable.startConnected = (cdrom_spec["type"] != "none")
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+ cdrom_device.connectable.connected = (cdrom_spec["type"] != "none")
+
+ def remove_cdrom(self, cdrom_device):
+ cdrom_spec = vim.vm.device.VirtualDeviceSpec()
+ cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
+ cdrom_spec.device = cdrom_device
+
+ return cdrom_spec
+
+ def create_scsi_disk(self, scsi_ctl, disk_index=None):
+ diskspec = vim.vm.device.VirtualDeviceSpec()
+ diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ diskspec.device = vim.vm.device.VirtualDisk()
+ diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+ diskspec.device.controllerKey = scsi_ctl.device.key
+
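+ # SCSI unit number 7 is reserved for the controller itself, so a request that
+ # would land on unit 7 (either the computed next unit number or an explicit
+ # disk_index of 7) is treated as a programming error here.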
+ if self.next_disk_unit_number == 7:
+ raise AssertionError()
+ if disk_index == 7:
+ raise AssertionError()
+ """
+ Configure disk unit number.
+ """
+ if disk_index is not None:
+ diskspec.device.unitNumber = disk_index
+ self.next_disk_unit_number = disk_index + 1
+ else:
+ diskspec.device.unitNumber = self.next_disk_unit_number
+ self.next_disk_unit_number += 1
+
+ # unit number 7 is reserved to SCSI controller, increase next index
+ if self.next_disk_unit_number == 7:
+ self.next_disk_unit_number += 1
+
+ return diskspec
+
+ def get_device(self, device_type, name):
+ nic_dict = dict(pcnet32=vim.vm.device.VirtualPCNet32(),
+ vmxnet2=vim.vm.device.VirtualVmxnet2(),
+ vmxnet3=vim.vm.device.VirtualVmxnet3(),
+ e1000=vim.vm.device.VirtualE1000(),
+ e1000e=vim.vm.device.VirtualE1000e(),
+ sriov=vim.vm.device.VirtualSriovEthernetCard(),
+ )
+ if device_type in nic_dict:
+ return nic_dict[device_type]
+ else:
+ self.module.fail_json(msg='Invalid device_type "%s"'
+ ' for network "%s"' % (device_type, name))
+
+ def create_nic(self, device_type, device_label, device_infos):
+ nic = vim.vm.device.VirtualDeviceSpec()
+ nic.device = self.get_device(device_type, device_infos['name'])
+ nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True))
+ nic.device.deviceInfo = vim.Description()
+ nic.device.deviceInfo.label = device_label
+ nic.device.deviceInfo.summary = device_infos['name']
+ nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+ nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True))
+ nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True))
+ nic.device.connectable.connected = True
+ if 'mac' in device_infos and is_mac(device_infos['mac']):
+ nic.device.addressType = 'manual'
+ nic.device.macAddress = device_infos['mac']
+ else:
+ nic.device.addressType = 'generated'
+
+ return nic
+
+ def integer_value(self, input_value, name):
+ """
+ Function to return int value for given input, else return error
+ Args:
+ input_value: Input value to retrieve int value from
+ name: Name of the Input value (used to build error message)
+ Returns: (int) if an integer value can be obtained, otherwise fails the module with an error message.
+ """
+ if isinstance(input_value, int):
+ return input_value
+ elif isinstance(input_value, str) and input_value.isdigit():
+ return int(input_value)
+ else:
+ self.module.fail_json(msg='"%s" attribute should be an'
+ ' integer value.' % name)
+
+
+class PyVmomiCache(object):
+ """ This class caches references to objects which are requested multiples times but not modified """
+
+ def __init__(self, content, dc_name=None):
+ self.content = content
+ self.dc_name = dc_name
+ self.networks = {}
+ self.clusters = {}
+ self.esx_hosts = {}
+ self.parent_datacenters = {}
+
+ def find_obj(self, content, types, name, confine_to_datacenter=True):
+ """ Wrapper around find_obj to set datacenter context """
+ result = find_obj(content, types, name)
+ if result and confine_to_datacenter:
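+ # The object found by the global search may live in another datacenter; in
+ # that case discard it and fall back to a search confined to self.dc_name.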
+ if to_text(self.get_parent_datacenter(result).name) != to_text(self.dc_name):
+ result = None
+ objects = self.get_all_objs(content, types, confine_to_datacenter=True)
+ for obj in objects:
+ if name is None or to_text(obj.name) == to_text(name):
+ return obj
+ return result
+
+ def get_all_objs(self, content, types, confine_to_datacenter=True):
+ """ Wrapper around get_all_objs to set datacenter context """
+ objects = get_all_objs(content, types)
+ if confine_to_datacenter:
+ if hasattr(objects, 'items'):
+ # resource pools come back as a dictionary
+ # make a copy
+ for k, v in tuple(objects.items()):
+ parent_dc = self.get_parent_datacenter(k)
+ if parent_dc.name != self.dc_name:
+ del objects[k]
+ else:
+ # everything else should be a list
+ objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]
+
+ return objects
+
+ def get_network(self, network):
+ network = quote_obj_name(network)
+
+ if network not in self.networks:
+ self.networks[network] = self.find_obj(self.content, [vim.Network], network)
+
+ return self.networks[network]
+
+ def get_cluster(self, cluster):
+ if cluster not in self.clusters:
+ self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)
+
+ return self.clusters[cluster]
+
+ def get_esx_host(self, host):
+ if host not in self.esx_hosts:
+ self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)
+
+ return self.esx_hosts[host]
+
+ def get_parent_datacenter(self, obj):
+ """ Walk the parent tree to find the objects datacenter """
+ if isinstance(obj, vim.Datacenter):
+ return obj
+ if obj in self.parent_datacenters:
+ return self.parent_datacenters[obj]
+ datacenter = None
+ while True:
+ if not hasattr(obj, 'parent'):
+ break
+ obj = obj.parent
+ if isinstance(obj, vim.Datacenter):
+ datacenter = obj
+ break
+ self.parent_datacenters[obj] = datacenter
+ return datacenter
+
+
+class PyVmomiHelper(PyVmomi):
+ def __init__(self, module):
+ super(PyVmomiHelper, self).__init__(module)
+ self.device_helper = PyVmomiDeviceHelper(self.module)
+ self.configspec = None
+ self.relospec = None
+ self.change_detected = False # a change was detected and needs to be applied through reconfiguration
+ self.change_applied = False # a change was applied meaning at least one task succeeded
+ self.customspec = None
+ self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])
+
+ def gather_facts(self, vm):
+ return gather_vm_facts(self.content, vm)
+
+ def remove_vm(self, vm, delete_from_inventory=False):
+ # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
+ if vm.summary.runtime.powerState.lower() == 'poweredon':
+ self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, "
+ "please use 'force' parameter to remove or poweroff VM "
+ "and try removing VM again." % vm.name)
+ # Delete VM from Inventory
+ if delete_from_inventory:
+ try:
+ vm.UnregisterVM()
+ except (vim.fault.TaskInProgress,
+ vmodl.RuntimeFault) as e:
+ return {'changed': self.change_applied, 'failed': True, 'msg': e.msg, 'op': 'UnregisterVM'}
+ self.change_applied = True
+ return {'changed': self.change_applied, 'failed': False}
+ # Delete VM from Disk
+ task = vm.Destroy()
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'destroy'}
+ else:
+ return {'changed': self.change_applied, 'failed': False}
+
+ def configure_guestid(self, vm_obj, vm_creation=False):
+ # guest_id is not required when using templates
+ if self.params['template']:
+ return
+
+ # guest_id is only mandatory on VM creation
+ if vm_creation and self.params['guest_id'] is None:
+ self.module.fail_json(msg="guest_id attribute is mandatory for VM creation")
+
+ if self.params['guest_id'] and \
+ (vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()):
+ self.change_detected = True
+ self.configspec.guestId = self.params['guest_id']
+
+ def configure_resource_alloc_info(self, vm_obj):
+ """
+ Function to configure resource allocation information about virtual machine
+ :param vm_obj: VM object in case of reconfigure, None in case of deploy
+ :return: None
+ """
+ rai_change_detected = False
+ memory_allocation = vim.ResourceAllocationInfo()
+ cpu_allocation = vim.ResourceAllocationInfo()
+
+ if 'hardware' in self.params:
+ if 'mem_limit' in self.params['hardware']:
+ mem_limit = None
+ try:
+ mem_limit = int(self.params['hardware'].get('mem_limit'))
+ except ValueError:
+ self.module.fail_json(msg="hardware.mem_limit attribute should be an integer value.")
+ memory_allocation.limit = mem_limit
+ if vm_obj is None or memory_allocation.limit != vm_obj.config.memoryAllocation.limit:
+ rai_change_detected = True
+
+ if 'mem_reservation' in self.params['hardware'] or 'memory_reservation' in self.params['hardware']:
+ mem_reservation = self.params['hardware'].get('mem_reservation')
+ if mem_reservation is None:
+ mem_reservation = self.params['hardware'].get('memory_reservation')
+ try:
+ mem_reservation = int(mem_reservation)
+ except ValueError:
+ self.module.fail_json(msg="hardware.mem_reservation or hardware.memory_reservation should be an integer value.")
+
+ memory_allocation.reservation = mem_reservation
+ if vm_obj is None or \
+ memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation:
+ rai_change_detected = True
+
+ if 'cpu_limit' in self.params['hardware']:
+ cpu_limit = None
+ try:
+ cpu_limit = int(self.params['hardware'].get('cpu_limit'))
+ except ValueError:
+ self.module.fail_json(msg="hardware.cpu_limit attribute should be an integer value.")
+ cpu_allocation.limit = cpu_limit
+ if vm_obj is None or cpu_allocation.limit != vm_obj.config.cpuAllocation.limit:
+ rai_change_detected = True
+
+ if 'cpu_reservation' in self.params['hardware']:
+ cpu_reservation = None
+ try:
+ cpu_reservation = int(self.params['hardware'].get('cpu_reservation'))
+ except ValueError:
+ self.module.fail_json(msg="hardware.cpu_reservation should be an integer value.")
+ cpu_allocation.reservation = cpu_reservation
+ if vm_obj is None or \
+ cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation:
+ rai_change_detected = True
+
+ if rai_change_detected:
+ self.configspec.memoryAllocation = memory_allocation
+ self.configspec.cpuAllocation = cpu_allocation
+ self.change_detected = True
+
+ def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
+ # set cpu/memory/etc
+ if 'hardware' in self.params:
+ if 'num_cpus' in self.params['hardware']:
+ try:
+ num_cpus = int(self.params['hardware']['num_cpus'])
+ except ValueError:
+ self.module.fail_json(msg="hardware.num_cpus attribute should be an integer value.")
+ # check VM power state and cpu hot-add/hot-remove state before re-config VM
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+ if not vm_obj.config.cpuHotRemoveEnabled and num_cpus < vm_obj.config.hardware.numCPU:
+ self.module.fail_json(msg="Configured cpu number is less than the cpu number of the VM, "
+ "cpuHotRemove is not enabled")
+ if not vm_obj.config.cpuHotAddEnabled and num_cpus > vm_obj.config.hardware.numCPU:
+ self.module.fail_json(msg="Configured cpu number is more than the cpu number of the VM, "
+ "cpuHotAdd is not enabled")
+
+ if 'num_cpu_cores_per_socket' in self.params['hardware']:
+ try:
+ num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket'])
+ except ValueError:
+ self.module.fail_json(msg="hardware.num_cpu_cores_per_socket attribute "
+ "should be an integer value.")
+ if num_cpus % num_cpu_cores_per_socket != 0:
+ self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple "
+ "of hardware.num_cpu_cores_per_socket")
+ self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
+ if vm_obj is None or self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket:
+ self.change_detected = True
+ self.configspec.numCPUs = num_cpus
+ if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU:
+ self.change_detected = True
+ # num_cpu is mandatory for VM creation
+ elif vm_creation and not self.params['template']:
+ self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation")
+
+ if 'memory_mb' in self.params['hardware']:
+ try:
+ memory_mb = int(self.params['hardware']['memory_mb'])
+ except ValueError:
+ self.module.fail_json(msg="Failed to parse hardware.memory_mb value."
+ " Please refer the documentation and provide"
+ " correct value.")
+ # check VM power state and memory hotadd state before re-config VM
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+ if vm_obj.config.memoryHotAddEnabled and memory_mb < vm_obj.config.hardware.memoryMB:
+ self.module.fail_json(msg="Configured memory is less than memory size of the VM, "
+ "operation is not supported")
+ elif not vm_obj.config.memoryHotAddEnabled and memory_mb != vm_obj.config.hardware.memoryMB:
+ self.module.fail_json(msg="memoryHotAdd is not enabled")
+ self.configspec.memoryMB = memory_mb
+ if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB:
+ self.change_detected = True
+ # memory_mb is mandatory for VM creation
+ elif vm_creation and not self.params['template']:
+ self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation")
+
+ if 'hotadd_memory' in self.params['hardware']:
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
+ vm_obj.config.memoryHotAddEnabled != bool(self.params['hardware']['hotadd_memory']):
+ self.module.fail_json(msg="Configure hotadd memory operation is not supported when VM is power on")
+ self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory'])
+ if vm_obj is None or self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled:
+ self.change_detected = True
+
+ if 'hotadd_cpu' in self.params['hardware']:
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
+ vm_obj.config.cpuHotAddEnabled != bool(self.params['hardware']['hotadd_cpu']):
+ self.module.fail_json(msg="Configure hotadd cpu operation is not supported when VM is power on")
+ self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu'])
+ if vm_obj is None or self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled:
+ self.change_detected = True
+
+ if 'hotremove_cpu' in self.params['hardware']:
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
+ vm_obj.config.cpuHotRemoveEnabled != bool(self.params['hardware']['hotremove_cpu']):
+ self.module.fail_json(msg="Configure hotremove cpu operation is not supported when VM is power on")
+ self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu'])
+ if vm_obj is None or self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled:
+ self.change_detected = True
+
+ if 'memory_reservation_lock' in self.params['hardware']:
+ self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock'])
+ if vm_obj is None or self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax:
+ self.change_detected = True
+
+ if 'boot_firmware' in self.params['hardware']:
+ # boot firmware re-config can cause boot issue
+ if vm_obj is not None:
+ return
+ boot_firmware = self.params['hardware']['boot_firmware'].lower()
+ if boot_firmware not in ('bios', 'efi'):
+ self.module.fail_json(msg="hardware.boot_firmware value is invalid [%s]."
+ " Need one of ['bios', 'efi']." % boot_firmware)
+ self.configspec.firmware = boot_firmware
+ self.change_detected = True
+
+ def sanitize_cdrom_params(self):
+ # cdroms {'ide': [{num: 0, cdrom: []}, {}], 'sata': [{num: 0, cdrom: []}, {}, ...]}
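+ # Illustrative example (placeholder values): a cdrom parameter such as
+ #   [{'controller_type': 'ide', 'controller_number': 0, 'unit_number': 0,
+ #     'type': 'iso', 'iso_path': '[datastore1] livecd.iso'}]
+ # is validated and grouped per controller below, returning
+ #   {'ide': [{'num': 0, 'cdrom': [<that entry>]}], 'sata': []}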
+ cdroms = {'ide': [], 'sata': []}
+ expected_cdrom_spec = self.params.get('cdrom')
+ if expected_cdrom_spec:
+ for cdrom_spec in expected_cdrom_spec:
+ cdrom_spec['controller_type'] = cdrom_spec.get('controller_type', 'ide').lower()
+ if cdrom_spec['controller_type'] not in ['ide', 'sata']:
+ self.module.fail_json(msg="Invalid cdrom.controller_type: %s, valid value is 'ide' or 'sata'."
+ % cdrom_spec['controller_type'])
+
+ cdrom_spec['state'] = cdrom_spec.get('state', 'present').lower()
+ if cdrom_spec['state'] not in ['present', 'absent']:
+ self.module.fail_json(msg="Invalid cdrom.state: %s, valid value is 'present', 'absent'."
+ % cdrom_spec['state'])
+
+ if cdrom_spec['state'] == 'present':
+ if 'type' in cdrom_spec and cdrom_spec.get('type') not in ['none', 'client', 'iso']:
+ self.module.fail_json(msg="Invalid cdrom.type: %s, valid value is 'none', 'client' or 'iso'."
+ % cdrom_spec.get('type'))
+ if cdrom_spec.get('type') == 'iso' and not cdrom_spec.get('iso_path'):
+ self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")
+
+ if cdrom_spec['controller_type'] == 'ide' and \
+ (cdrom_spec.get('controller_number') not in [0, 1] or cdrom_spec.get('unit_number') not in [0, 1]):
+ self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s, valid"
+ " values are 0 or 1 for IDE controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number')))
+
+ if cdrom_spec['controller_type'] == 'sata' and \
+ (cdrom_spec.get('controller_number') not in range(0, 4) or cdrom_spec.get('unit_number') not in range(0, 30)):
+ self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s,"
+ " valid controller_number value is 0-3, valid unit_number is 0-29"
+ " for SATA controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number')))
+
+ ctl_exist = False
+ for exist_spec in cdroms.get(cdrom_spec['controller_type']):
+ if exist_spec['num'] == cdrom_spec['controller_number']:
+ ctl_exist = True
+ exist_spec['cdrom'].append(cdrom_spec)
+ break
+ if not ctl_exist:
+ cdroms.get(cdrom_spec['controller_type']).append({'num': cdrom_spec['controller_number'], 'cdrom': [cdrom_spec]})
+
+ return cdroms
+
+ def configure_cdrom(self, vm_obj):
+ # Configure the VM CD-ROM
+ if self.params.get('cdrom'):
+ if vm_obj and vm_obj.config.template:
+ # Changing CD-ROM settings on a template is not supported
+ return
+
+ if isinstance(self.params.get('cdrom'), dict):
+ self.configure_cdrom_dict(vm_obj)
+ elif isinstance(self.params.get('cdrom'), list):
+ self.configure_cdrom_list(vm_obj)
+
+ def configure_cdrom_dict(self, vm_obj):
+ if self.params["cdrom"].get('type') not in ['none', 'client', 'iso']:
+ self.module.fail_json(msg="cdrom.type is mandatory. Options are 'none', 'client', and 'iso'.")
+ if self.params["cdrom"]['type'] == 'iso' and not self.params["cdrom"].get('iso_path'):
+ self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.")
+
+ cdrom_spec = None
+ cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
+ iso_path = self.params["cdrom"].get("iso_path")
+ if len(cdrom_devices) == 0:
+ # Creating new CD-ROM
+ ide_devices = self.get_vm_ide_devices(vm=vm_obj)
+ if len(ide_devices) == 0:
+ # Creating new IDE device
+ ide_ctl = self.device_helper.create_ide_controller()
+ ide_device = ide_ctl.device
+ self.change_detected = True
+ self.configspec.deviceChange.append(ide_ctl)
+ else:
+ ide_device = ide_devices[0]
+ if len(ide_device.device) > 3:
+ self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4"
+ " IDE devices of which none are a cdrom")
+
+ cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=self.params["cdrom"]["type"],
+ iso_path=iso_path)
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+ cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none")
+
+ elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_devices[0],
+ cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path):
+ self.device_helper.update_cdrom_config(vm_obj, self.params["cdrom"], cdrom_devices[0], iso_path=iso_path)
+ cdrom_spec = vim.vm.device.VirtualDeviceSpec()
+ cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+ cdrom_spec.device = cdrom_devices[0]
+
+ if cdrom_spec:
+ self.change_detected = True
+ self.configspec.deviceChange.append(cdrom_spec)
+
+ def configure_cdrom_list(self, vm_obj):
+ configured_cdroms = self.sanitize_cdrom_params()
+ cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj)
+ # configure IDE CD-ROMs
+ if configured_cdroms['ide']:
+ ide_devices = self.get_vm_ide_devices(vm=vm_obj)
+ for expected_cdrom_spec in configured_cdroms['ide']:
+ ide_device = None
+ for device in ide_devices:
+ if device.busNumber == expected_cdrom_spec['num']:
+ ide_device = device
+ break
+ # if no matching IDE controller is found, or no IDE controller exists, create a new one
+ if not ide_device:
+ ide_ctl = self.device_helper.create_ide_controller(bus_number=expected_cdrom_spec['num'])
+ ide_device = ide_ctl.device
+ self.change_detected = True
+ self.configspec.deviceChange.append(ide_ctl)
+
+ for cdrom in expected_cdrom_spec['cdrom']:
+ cdrom_device = None
+ iso_path = cdrom.get('iso_path')
+ unit_number = cdrom.get('unit_number')
+ for target_cdrom in cdrom_devices:
+ if target_cdrom.controllerKey == ide_device.key and target_cdrom.unitNumber == unit_number:
+ cdrom_device = target_cdrom
+ break
+ # create new CD-ROM
+ if not cdrom_device and cdrom.get('state') != 'absent':
+ if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+ self.module.fail_json(msg='Attaching a CD-ROM to an IDE controller does not support hot-add.')
+ if len(ide_device.device) == 2:
+ self.module.fail_json(msg='Maximum number of CD-ROMs attached to IDE controller is 2.')
+ cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=cdrom['type'],
+ iso_path=iso_path, unit_number=unit_number)
+ self.change_detected = True
+ self.configspec.deviceChange.append(cdrom_spec)
+ # re-configure CD-ROM
+ elif cdrom_device and cdrom.get('state') != 'absent' and \
+ not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device,
+ cdrom_type=cdrom['type'], iso_path=iso_path):
+ self.device_helper.update_cdrom_config(vm_obj, cdrom, cdrom_device, iso_path=iso_path)
+ cdrom_spec = vim.vm.device.VirtualDeviceSpec()
+ cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+ cdrom_spec.device = cdrom_device
+ self.change_detected = True
+ self.configspec.deviceChange.append(cdrom_spec)
+ # delete CD-ROM
+ elif cdrom_device and cdrom.get('state') == 'absent':
+ if vm_obj and vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
+ self.module.fail_json(msg='Attaching a CD-ROM to an IDE controller does not support hot-remove.')
+ cdrom_spec = self.device_helper.remove_cdrom(cdrom_device)
+ self.change_detected = True
+ self.configspec.deviceChange.append(cdrom_spec)
+ # configure SATA CD-ROMs is not supported yet
+ if configured_cdroms['sata']:
+ pass
+
+ def configure_hardware_params(self, vm_obj):
+ """
+ Function to configure hardware related configuration of virtual machine
+ Args:
+ vm_obj: virtual machine object
+ """
+ if 'hardware' in self.params:
+ if 'max_connections' in self.params['hardware']:
+ # maxMksConnections == max_connections
+ self.configspec.maxMksConnections = int(self.params['hardware']['max_connections'])
+ if vm_obj is None or self.configspec.maxMksConnections != vm_obj.config.maxMksConnections:
+ self.change_detected = True
+
+ if 'nested_virt' in self.params['hardware']:
+ self.configspec.nestedHVEnabled = bool(self.params['hardware']['nested_virt'])
+ if vm_obj is None or self.configspec.nestedHVEnabled != bool(vm_obj.config.nestedHVEnabled):
+ self.change_detected = True
+
+ if 'version' in self.params['hardware']:
+ hw_version_check_failed = False
+ temp_version = self.params['hardware'].get('version', 10)
+ if isinstance(temp_version, str) and temp_version.lower() == 'latest':
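+ # 'latest' upgrades an existing (non-template) VM to the newest hardware
+ # version supported by the host: UpgradeVM_Task() is called without an
+ # explicit version argument.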
+ # Check is to make sure vm_obj is not of type template
+ if vm_obj and not vm_obj.config.template:
+ try:
+ task = vm_obj.UpgradeVM_Task()
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
+ except vim.fault.AlreadyUpgraded:
+ # Don't fail if VM is already upgraded.
+ pass
+ else:
+ try:
+ temp_version = int(temp_version)
+ except ValueError:
+ hw_version_check_failed = True
+
+ if temp_version not in range(3, 16):
+ hw_version_check_failed = True
+
+ if hw_version_check_failed:
+ self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid"
+ " values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." % temp_version)
+ # Hardware version is denoted as "vmx-10"
+ version = "vmx-%02d" % temp_version
+ self.configspec.version = version
+ if vm_obj is None or self.configspec.version != vm_obj.config.version:
+ self.change_detected = True
+ # Check is to make sure vm_obj is not of type template
+ if vm_obj and not vm_obj.config.template:
+ # VM exists and we need to update the hardware version
+ current_version = vm_obj.config.version
+ # current_version = "vmx-10"
+ version_digit = int(current_version.split("-", 1)[-1])
+ if temp_version < version_digit:
+ self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified"
+ " version '%d'. Downgrading hardware version is"
+ " not supported. Please specify version greater"
+ " than the current version." % (version_digit,
+ temp_version))
+ new_version = "vmx-%02d" % temp_version
+ try:
+ task = vm_obj.UpgradeVM_Task(new_version)
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
+ except vim.fault.AlreadyUpgraded:
+ # Don't fail if VM is already upgraded.
+ pass
+
+ if 'virt_based_security' in self.params['hardware']:
+ host_version = self.select_host().summary.config.product.version
+ if int(host_version.split('.')[0]) < 6 or (int(host_version.split('.')[0]) == 6 and int(host_version.split('.')[1]) < 7):
+ self.module.fail_json(msg="ESXi version %s not support VBS." % host_version)
+ guest_ids = ['windows9_64Guest', 'windows9Server64Guest']
+ if vm_obj is None:
+ guestid = self.configspec.guestId
+ else:
+ guestid = vm_obj.summary.config.guestId
+ if guestid not in guest_ids:
+ self.module.fail_json(msg="Guest '%s' not support VBS." % guestid)
+ if (vm_obj is None and int(self.configspec.version.split('-')[1]) >= 14) or \
+ (vm_obj and int(vm_obj.config.version.split('-')[1]) >= 14 and (vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff)):
+ self.configspec.flags = vim.vm.FlagInfo()
+ self.configspec.flags.vbsEnabled = bool(self.params['hardware']['virt_based_security'])
+ if bool(self.params['hardware']['virt_based_security']):
+ self.configspec.flags.vvtdEnabled = True
+ self.configspec.nestedHVEnabled = True
+ if (vm_obj is None and self.configspec.firmware == 'efi') or \
+ (vm_obj and vm_obj.config.firmware == 'efi'):
+ self.configspec.bootOptions = vim.vm.BootOptions()
+ self.configspec.bootOptions.efiSecureBootEnabled = True
+ else:
+ self.module.fail_json(msg="Not support VBS when firmware is BIOS.")
+ if vm_obj is None or self.configspec.flags.vbsEnabled != vm_obj.config.flags.vbsEnabled:
+ self.change_detected = True
+
+ def get_device_by_type(self, vm=None, type=None):
+ device_list = []
+ if vm is None or type is None:
+ return device_list
+ for device in vm.config.hardware.device:
+ if isinstance(device, type):
+ device_list.append(device)
+
+ return device_list
+
+ def get_vm_cdrom_devices(self, vm=None):
+ return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualCdrom)
+
+ def get_vm_ide_devices(self, vm=None):
+ return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualIDEController)
+
+ def get_vm_network_interfaces(self, vm=None):
+ device_list = []
+ if vm is None:
+ return device_list
+
+ nw_device_types = (vim.vm.device.VirtualPCNet32, vim.vm.device.VirtualVmxnet2,
+ vim.vm.device.VirtualVmxnet3, vim.vm.device.VirtualE1000,
+ vim.vm.device.VirtualE1000e, vim.vm.device.VirtualSriovEthernetCard)
+ for device in vm.config.hardware.device:
+ if isinstance(device, nw_device_types):
+ device_list.append(device)
+
+ return device_list
+
+ def sanitize_network_params(self):
+ """
+ Sanitize user-provided network params
+
+ Returns: A sanitized list of network params, else fails
+
+ """
+ network_devices = list()
+ # Clean up user data here
+ for network in self.params['networks']:
+ if 'name' not in network and 'vlan' not in network:
+ self.module.fail_json(msg="Please specify at least a network name or"
+ " a VLAN name under VM network list.")
+
+ if 'name' in network and self.cache.get_network(network['name']) is None:
+ self.module.fail_json(msg="Network '%(name)s' does not exist." % network)
+ elif 'vlan' in network:
+ dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
+ for dvp in dvps:
+ if hasattr(dvp.config.defaultPortConfig, 'vlan') and \
+ isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \
+ str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']):
+ network['name'] = dvp.config.name
+ break
+ if 'dvswitch_name' in network and \
+ dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \
+ dvp.config.name == network['vlan']:
+ network['name'] = dvp.config.name
+ break
+
+ if dvp.config.name == network['vlan']:
+ network['name'] = dvp.config.name
+ break
+ else:
+ self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network)
+
+ if 'type' in network:
+ if network['type'] not in ['dhcp', 'static']:
+ self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter."
+ " Valid parameters are ['dhcp', 'static']." % network)
+ if network['type'] != 'static' and ('ip' in network or 'netmask' in network):
+ self.module.fail_json(msg='Static IP information provided for network "%(name)s",'
+ ' but "type" is set to "%(type)s".' % network)
+ else:
+ # 'type' is an optional parameter; if the user provided an IP or netmask,
+ # assume the network type is 'static'
+ if 'ip' in network or 'netmask' in network:
+ network['type'] = 'static'
+ else:
+ # User wants network type as 'dhcp'
+ network['type'] = 'dhcp'
+
+ if network.get('type') == 'static':
+ if 'ip' in network and 'netmask' not in network:
+ self.module.fail_json(msg="'netmask' is required if 'ip' is"
+ " specified under VM network list.")
+ if 'ip' not in network and 'netmask' in network:
+ self.module.fail_json(msg="'ip' is required if 'netmask' is"
+ " specified under VM network list.")
+
+ validate_device_types = ['pcnet32', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e', 'sriov']
+ if 'device_type' in network and network['device_type'] not in validate_device_types:
+ self.module.fail_json(msg="Device type specified '%s' is not valid."
+ " Please specify correct device"
+ " type from ['%s']." % (network['device_type'],
+ "', '".join(validate_device_types)))
+
+ if 'mac' in network and not is_mac(network['mac']):
+ self.module.fail_json(msg="Device MAC address '%s' is invalid."
+ " Please provide correct MAC address." % network['mac'])
+
+ network_devices.append(network)
+
+ return network_devices
+
+ def configure_network(self, vm_obj):
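+ """Build NIC device specs for the requested networks and append them to the config/relocation spec."""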
+ # Ignore an empty network list; this allows keeping the existing networks when deploying from a template or cloning a VM
+ if len(self.params['networks']) == 0:
+ return
+
+ network_devices = self.sanitize_network_params()
+
+ # List current device for Clone or Idempotency
+ current_net_devices = self.get_vm_network_interfaces(vm=vm_obj)
+ if len(network_devices) < len(current_net_devices):
+ self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). "
+ "Removing interfaces is not allowed"
+ % (len(network_devices), len(current_net_devices)))
+
+ for key in range(0, len(network_devices)):
+ nic_change_detected = False
+ network_name = network_devices[key]['name']
+ if key < len(current_net_devices) and (vm_obj or self.params['template']):
+ # We are editing existing network devices; this is the case when
+ # cloning from a VM or a template
+ nic = vim.vm.device.VirtualDeviceSpec()
+ nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+
+ nic.device = current_net_devices[key]
+ if ('wake_on_lan' in network_devices[key] and
+ nic.device.wakeOnLanEnabled != network_devices[key].get('wake_on_lan')):
+ nic.device.wakeOnLanEnabled = network_devices[key].get('wake_on_lan')
+ nic_change_detected = True
+ if ('start_connected' in network_devices[key] and
+ nic.device.connectable.startConnected != network_devices[key].get('start_connected')):
+ nic.device.connectable.startConnected = network_devices[key].get('start_connected')
+ nic_change_detected = True
+ if ('allow_guest_control' in network_devices[key] and
+ nic.device.connectable.allowGuestControl != network_devices[key].get('allow_guest_control')):
+ nic.device.connectable.allowGuestControl = network_devices[key].get('allow_guest_control')
+ nic_change_detected = True
+
+ if nic.device.deviceInfo.summary != network_name:
+ nic.device.deviceInfo.summary = network_name
+ nic_change_detected = True
+ if 'device_type' in network_devices[key]:
+ device = self.device_helper.get_device(network_devices[key]['device_type'], network_name)
+ device_class = type(device)
+ if not isinstance(nic.device, device_class):
+ self.module.fail_json(msg="Changing the device type is not possible when interface is already present. "
+ "The failing device type is %s" % network_devices[key]['device_type'])
+ # Changing mac address has no effect when editing interface
+ if 'mac' in network_devices[key] and nic.device.macAddress != network_devices[key]['mac']:
+ self.module.fail_json(msg="Changing the MAC address has no effect when the interface is already present. "
+ "The failing new MAC address is %s" % network_devices[key]['mac'])
+
+ else:
+ # Default device type is vmxnet3, VMware best practice
+ device_type = network_devices[key].get('device_type', 'vmxnet3')
+ nic = self.device_helper.create_nic(device_type,
+ 'Network Adapter %s' % (key + 1),
+ network_devices[key])
+ nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ nic_change_detected = True
+
+ if hasattr(self.cache.get_network(network_name), 'portKeys'):
+ # VDS switch
+
+ pg_obj = None
+ if 'dvswitch_name' in network_devices[key]:
+ dvs_name = network_devices[key]['dvswitch_name']
+ dvs_obj = find_dvs_by_name(self.content, dvs_name)
+ if dvs_obj is None:
+ self.module.fail_json(msg="Unable to find distributed virtual switch %s" % dvs_name)
+ pg_obj = find_dvspg_by_name(dvs_obj, network_name)
+ if pg_obj is None:
+ self.module.fail_json(msg="Unable to find distributed port group %s" % network_name)
+ else:
+ pg_obj = self.cache.find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name)
+
+ # TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup
+ # For now, check if we are able to find distributed virtual switch
+ if not pg_obj.config.distributedVirtualSwitch:
+ self.module.fail_json(msg="Failed to find distributed virtual switch which is associated with"
+ " distributed virtual portgroup '%s'. Make sure hostsystem is associated with"
+ " the given distributed virtual portgroup. Also, check if user has correct"
+ " permission to access distributed virtual switch in the given portgroup." % pg_obj.name)
+ if (nic.device.backing and
+ (not hasattr(nic.device.backing, 'port') or
+ (nic.device.backing.port.portgroupKey != pg_obj.key or
+ nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid))):
+ nic_change_detected = True
+
+ dvs_port_connection = vim.dvs.PortConnection()
+ dvs_port_connection.portgroupKey = pg_obj.key
+ # If the user specifies a distributed port group without associating it to the host system on which
+ # the virtual machine is going to be deployed, we get an error. We can infer that there is no
+ # association between the given distributed port group and the host system.
+ host_system = self.params.get('esxi_hostname')
+ if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]:
+ self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed"
+ " virtual portgroup '%s'. Please make sure host system is associated"
+ " with given distributed virtual portgroup" % (host_system, pg_obj.name))
+ dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
+ nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
+ nic.device.backing.port = dvs_port_connection
+
+ elif isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
+ # NSX-T Logical Switch
+ nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
+ network_id = self.cache.get_network(network_name).summary.opaqueNetworkId
+ nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
+ nic.device.backing.opaqueNetworkId = network_id
+ nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
+ nic_change_detected = True
+ else:
+ # vSwitch
+ if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
+ nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
+ nic_change_detected = True
+
+ net_obj = self.cache.get_network(network_name)
+ if nic.device.backing.network != net_obj:
+ nic.device.backing.network = net_obj
+ nic_change_detected = True
+
+ if nic.device.backing.deviceName != network_name:
+ nic.device.backing.deviceName = network_name
+ nic_change_detected = True
+
+ if nic_change_detected:
+ # Change to fix the issue found while configuring opaque network
+ # VMs cloned from a template with opaque network will get disconnected
+ # Replacing deprecated config parameter with relocation Spec
+ if isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
+ self.relospec.deviceChange.append(nic)
+ else:
+ self.configspec.deviceChange.append(nic)
+ self.change_detected = True
+
+ def configure_vapp_properties(self, vm_obj):
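+ """Translate the 'vapp_properties' parameter into a vApp property spec (add/edit/remove) on configspec."""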
+ if len(self.params['vapp_properties']) == 0:
+ return
+
+ for x in self.params['vapp_properties']:
+ if not x.get('id'):
+ self.module.fail_json(msg="id is required to set vApp property")
+
+ new_vmconfig_spec = vim.vApp.VmConfigSpec()
+
+ if vm_obj:
+ # VM exists
+ # This is primarily for vcsim/integration tests, unset vAppConfig was not seen on my deployments
+ orig_spec = vm_obj.config.vAppConfig if vm_obj.config.vAppConfig else new_vmconfig_spec
+
+ vapp_properties_current = dict((x.id, x) for x in orig_spec.property)
+ vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
+
+ # each property must have a unique key
+ # init key counter with max value + 1
+ all_keys = [x.key for x in orig_spec.property]
+ new_property_index = max(all_keys) + 1 if all_keys else 0
+
+ for property_id, property_spec in vapp_properties_to_change.items():
+ is_property_changed = False
+ new_vapp_property_spec = vim.vApp.PropertySpec()
+
+ if property_id in vapp_properties_current:
+ if property_spec.get('operation') == 'remove':
+ new_vapp_property_spec.operation = 'remove'
+ new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key
+ is_property_changed = True
+ else:
+ # this is 'edit' branch
+ new_vapp_property_spec.operation = 'edit'
+ new_vapp_property_spec.info = vapp_properties_current[property_id]
+ try:
+ for property_name, property_value in property_spec.items():
+
+ if property_name == 'operation':
+ # operation is not an info object property
+ # if set to anything other than 'remove' we don't fail
+ continue
+
+ # Updating attributes only if needed
+ if getattr(new_vapp_property_spec.info, property_name) != property_value:
+ setattr(new_vapp_property_spec.info, property_name, property_value)
+ is_property_changed = True
+
+ except Exception as e:
+ msg = "Failed to set vApp property field='%s' and value='%s'. Error: %s" % (property_name, property_value, to_text(e))
+ self.module.fail_json(msg=msg)
+ else:
+ if property_spec.get('operation') == 'remove':
+ # attempt to delete non-existent property
+ continue
+
+ # this is add new property branch
+ new_vapp_property_spec.operation = 'add'
+
+ property_info = vim.vApp.PropertyInfo()
+ property_info.classId = property_spec.get('classId')
+ property_info.instanceId = property_spec.get('instanceId')
+ property_info.id = property_spec.get('id')
+ property_info.category = property_spec.get('category')
+ property_info.label = property_spec.get('label')
+ property_info.type = property_spec.get('type', 'string')
+ property_info.userConfigurable = property_spec.get('userConfigurable', True)
+ property_info.defaultValue = property_spec.get('defaultValue')
+ property_info.value = property_spec.get('value', '')
+ property_info.description = property_spec.get('description')
+
+ new_vapp_property_spec.info = property_info
+ new_vapp_property_spec.info.key = new_property_index
+ new_property_index += 1
+ is_property_changed = True
+
+ if is_property_changed:
+ new_vmconfig_spec.property.append(new_vapp_property_spec)
+ else:
+ # New VM
+ all_keys = [x.key for x in new_vmconfig_spec.property]
+ new_property_index = max(all_keys) + 1 if all_keys else 0
+ vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
+ is_property_changed = False
+
+ for property_id, property_spec in vapp_properties_to_change.items():
+ new_vapp_property_spec = vim.vApp.PropertySpec()
+ # this is add new property branch
+ new_vapp_property_spec.operation = 'add'
+
+ property_info = vim.vApp.PropertyInfo()
+ property_info.classId = property_spec.get('classId')
+ property_info.instanceId = property_spec.get('instanceId')
+ property_info.id = property_spec.get('id')
+ property_info.category = property_spec.get('category')
+ property_info.label = property_spec.get('label')
+ property_info.type = property_spec.get('type', 'string')
+ property_info.userConfigurable = property_spec.get('userConfigurable', True)
+ property_info.defaultValue = property_spec.get('defaultValue')
+ property_info.value = property_spec.get('value', '')
+ property_info.description = property_spec.get('description')
+
+ new_vapp_property_spec.info = property_info
+ new_vapp_property_spec.info.key = new_property_index
+ new_property_index += 1
+ is_property_changed = True
+
+ if is_property_changed:
+ new_vmconfig_spec.property.append(new_vapp_property_spec)
+
+ if new_vmconfig_spec.property:
+ self.configspec.vAppConfig = new_vmconfig_spec
+ self.change_detected = True
+
+ def customize_customvalues(self, vm_obj, config_spec):
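+ """Translate the 'customvalues' parameter into extraConfig options on the given config spec."""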
+ if len(self.params['customvalues']) == 0:
+ return
+
+ vm_custom_spec = config_spec
+ vm_custom_spec.extraConfig = []
+
+ changed = False
+ facts = self.gather_facts(vm_obj)
+ for kv in self.params['customvalues']:
+ if 'key' not in kv or 'value' not in kv:
+ self.module.exit_json(msg="customvalues items required both 'key' and 'value' fields.")
+
+ # If the key/value pair differs from the current facts, apply the change
+ if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']:
+ option = vim.option.OptionValue()
+ option.key = kv['key']
+ option.value = kv['value']
+
+ vm_custom_spec.extraConfig.append(option)
+ changed = True
+
+ if changed:
+ self.change_detected = True
+
+ def customize_vm(self, vm_obj):
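+ """Build self.customspec for guest OS customization, either from a named customization spec or from the 'customization' and 'networks' parameters."""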
+
+ # User specified customization specification
+ custom_spec_name = self.params.get('customization_spec')
+ if custom_spec_name:
+ cc_mgr = self.content.customizationSpecManager
+ if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name):
+ temp_spec = cc_mgr.GetCustomizationSpec(name=custom_spec_name)
+ self.customspec = temp_spec.spec
+ return
+ else:
+ self.module.fail_json(msg="Unable to find customization specification"
+ " '%s' in given configuration." % custom_spec_name)
+
+ # Network settings
+ adaptermaps = []
+ for network in self.params['networks']:
+
+ guest_map = vim.vm.customization.AdapterMapping()
+ guest_map.adapter = vim.vm.customization.IPSettings()
+
+ if 'ip' in network and 'netmask' in network:
+ guest_map.adapter.ip = vim.vm.customization.FixedIp()
+ guest_map.adapter.ip.ipAddress = str(network['ip'])
+ guest_map.adapter.subnetMask = str(network['netmask'])
+ elif 'type' in network and network['type'] == 'dhcp':
+ guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()
+
+ if 'gateway' in network:
+ guest_map.adapter.gateway = network['gateway']
+
+ # On Windows, DNS domain and DNS servers can be set by network interface
+ # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html
+ if 'domain' in network:
+ guest_map.adapter.dnsDomain = network['domain']
+ elif 'domain' in self.params['customization']:
+ guest_map.adapter.dnsDomain = self.params['customization']['domain']
+
+ if 'dns_servers' in network:
+ guest_map.adapter.dnsServerList = network['dns_servers']
+ elif 'dns_servers' in self.params['customization']:
+ guest_map.adapter.dnsServerList = self.params['customization']['dns_servers']
+
+ adaptermaps.append(guest_map)
+
+ # Global DNS settings
+ globalip = vim.vm.customization.GlobalIPSettings()
+ if 'dns_servers' in self.params['customization']:
+ globalip.dnsServerList = self.params['customization']['dns_servers']
+
+ # TODO: Maybe list the different domains from the interfaces here by default ?
+ if 'dns_suffix' in self.params['customization']:
+ dns_suffix = self.params['customization']['dns_suffix']
+ if isinstance(dns_suffix, list):
+ globalip.dnsSuffixList = " ".join(dns_suffix)
+ else:
+ globalip.dnsSuffixList = dns_suffix
+ elif 'domain' in self.params['customization']:
+ globalip.dnsSuffixList = self.params['customization']['domain']
+
+ if self.params['guest_id']:
+ guest_id = self.params['guest_id']
+ else:
+ guest_id = vm_obj.summary.config.guestId
+
+ # For windows guest OS, use SysPrep
+ # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail
+ if 'win' in guest_id:
+ ident = vim.vm.customization.Sysprep()
+
+ ident.userData = vim.vm.customization.UserData()
+
+ # Setting hostName, orgName and fullName is mandatory, so we set some default when missing
+ ident.userData.computerName = vim.vm.customization.FixedName()
+ # computer name will be truncated to 15 characters if using VM name
+ default_name = self.params['name'].replace(' ', '')
+ punctuation = string.punctuation.replace('-', '')
+ default_name = ''.join([c for c in default_name if c not in punctuation])
+ ident.userData.computerName.name = str(self.params['customization'].get('hostname', default_name[0:15]))
+ ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator'))
+ ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME'))
+
+ if 'productid' in self.params['customization']:
+ ident.userData.productId = str(self.params['customization']['productid'])
+
+ ident.guiUnattended = vim.vm.customization.GuiUnattended()
+
+ if 'autologon' in self.params['customization']:
+ ident.guiUnattended.autoLogon = self.params['customization']['autologon']
+ ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1)
+
+ if 'timezone' in self.params['customization']:
+ # Check if timezone value is a int before proceeding.
+ ident.guiUnattended.timeZone = self.device_helper.integer_value(
+ self.params['customization']['timezone'],
+ 'customization.timezone')
+
+ ident.identification = vim.vm.customization.Identification()
+
+ if self.params['customization'].get('password', '') != '':
+ ident.guiUnattended.password = vim.vm.customization.Password()
+ ident.guiUnattended.password.value = str(self.params['customization']['password'])
+ ident.guiUnattended.password.plainText = True
+
+ if 'joindomain' in self.params['customization']:
+ if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
+ self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
+ "joindomain feature")
+
+ ident.identification.domainAdmin = str(self.params['customization']['domainadmin'])
+ ident.identification.joinDomain = str(self.params['customization']['joindomain'])
+ ident.identification.domainAdminPassword = vim.vm.customization.Password()
+ ident.identification.domainAdminPassword.value = str(self.params['customization']['domainadminpassword'])
+ ident.identification.domainAdminPassword.plainText = True
+
+ elif 'joinworkgroup' in self.params['customization']:
+ ident.identification.joinWorkgroup = str(self.params['customization']['joinworkgroup'])
+
+ if 'runonce' in self.params['customization']:
+ ident.guiRunOnce = vim.vm.customization.GuiRunOnce()
+ ident.guiRunOnce.commandList = self.params['customization']['runonce']
+
+ else:
+ # FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail!
+
+ # For Linux guest OS, use LinuxPrep
+ # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html
+ ident = vim.vm.customization.LinuxPrep()
+
+ # TODO: Maybe add domain from interface if missing ?
+ if 'domain' in self.params['customization']:
+ ident.domain = str(self.params['customization']['domain'])
+
+ ident.hostName = vim.vm.customization.FixedName()
+ hostname = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0]))
+ # Remove all characters except alphanumeric and minus which is allowed by RFC 952
+ valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname)
+ ident.hostName.name = valid_hostname
+
+ # List of supported time zones for different vSphere versions in Linux/Unix systems
+ # https://kb.vmware.com/s/article/2145518
+ if 'timezone' in self.params['customization']:
+ ident.timeZone = str(self.params['customization']['timezone'])
+ if 'hwclockUTC' in self.params['customization']:
+ ident.hwClockUTC = self.params['customization']['hwclockUTC']
+
+ self.customspec = vim.vm.customization.Specification()
+ self.customspec.nicSettingMap = adaptermaps
+ self.customspec.globalIPSettings = globalip
+ self.customspec.identity = ident
+
+ def get_vm_scsi_controller(self, vm_obj):
+ # If vm_obj doesn't exist there is no SCSI controller to find
+ if vm_obj is None:
+ return None
+
+ for device in vm_obj.config.hardware.device:
+ if self.device_helper.is_scsi_controller(device):
+ scsi_ctl = vim.vm.device.VirtualDeviceSpec()
+ scsi_ctl.device = device
+ return scsi_ctl
+
+ return None
+
+ def get_configured_disk_size(self, expected_disk_spec):
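+ """Return the requested disk size in KiB, parsed from the 'size' or 'size_*' disk attributes."""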
+ # what size is it?
+ if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']:
+ # size, size_tb, size_gb, size_mb, size_kb
+ if 'size' in expected_disk_spec:
+ size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
+ disk_size_m = size_regex.match(expected_disk_spec['size'])
+ try:
+ if disk_size_m:
+ expected = disk_size_m.group(1)
+ unit = disk_size_m.group(2)
+ else:
+ raise ValueError
+
+ if re.match(r'\d+\.\d+', expected):
+ # We found float value in string, let's typecast it
+ expected = float(expected)
+ else:
+ # We found int value in string, let's typecast it
+ expected = int(expected)
+
+ if not expected or not unit:
+ raise ValueError
+
+ except (TypeError, ValueError, NameError):
+ # Common failure
+ self.module.fail_json(msg="Failed to parse disk size please review value"
+ " provided using documentation.")
+ else:
+ param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0]
+ unit = param.split('_')[-1].lower()
+ expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0]
+ expected = int(expected)
+
+ disk_units = dict(tb=3, gb=2, mb=1, kb=0)
+ unit = unit.lower()
+ if unit in disk_units:
+ return expected * (1024 ** disk_units[unit])
+ else:
+ self.module.fail_json(msg="%s is not a supported unit for disk size."
+ " Supported units are ['%s']." % (unit,
+ "', '".join(disk_units.keys())))
+
+ # No size found but disk, fail
+ self.module.fail_json(
+ msg="No size, size_kb, size_mb, size_gb or size_tb attribute found in the disk configuration")
+
+ def add_existing_vmdk(self, vm_obj, expected_disk_spec, diskspec, scsi_ctl):
+ """
+ Adds vmdk file described by expected_disk_spec['filename'], retrieves the file
+ information and adds the correct spec to self.configspec.deviceChange.
+ """
+ filename = expected_disk_spec['filename']
+ # If this is a new disk, or the disk file names are different
+ if (vm_obj and diskspec.device.backing.fileName != filename) or vm_obj is None:
+ diskspec.device.backing.fileName = filename
+ diskspec.device.key = -1
+ self.change_detected = True
+ self.configspec.deviceChange.append(diskspec)
+
+ def configure_disks(self, vm_obj):
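+ """Create or edit virtual disk specs (controller, mode, type, size) and append them to configspec."""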
+ # Ignore an empty disk list; this allows keeping the existing disks when deploying from a template or cloning a VM
+ if len(self.params['disk']) == 0:
+ return
+
+ scsi_ctl = self.get_vm_scsi_controller(vm_obj)
+
+ # Create scsi controller only if we are deploying a new VM, not a template or reconfiguring
+ if vm_obj is None or scsi_ctl is None:
+ scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type())
+ self.change_detected = True
+ self.configspec.deviceChange.append(scsi_ctl)
+
+ disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \
+ if vm_obj is not None else None
+
+ if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks):
+ self.module.fail_json(msg="Provided disks configuration has less disks than "
+ "the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks)))
+
+ disk_index = 0
+ for expected_disk_spec in self.params.get('disk'):
+ disk_modified = False
+ # If we are manipulating an existing object which has disks and disk_index is within the existing disks
+ if vm_obj is not None and disks is not None and disk_index < len(disks):
+ diskspec = vim.vm.device.VirtualDeviceSpec()
+ # set the operation to edit so that it knows to keep other settings
+ diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+ diskspec.device = disks[disk_index]
+ else:
+ diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index)
+ disk_modified = True
+
+ # increment index for next disk search
+ disk_index += 1
+ # index 7 is reserved for the SCSI controller
+ if disk_index == 7:
+ disk_index += 1
+
+ if 'disk_mode' in expected_disk_spec:
+ disk_mode = expected_disk_spec.get('disk_mode', 'persistent').lower()
+ valid_disk_mode = ['persistent', 'independent_persistent', 'independent_nonpersistent']
+ if disk_mode not in valid_disk_mode:
+ self.module.fail_json(msg="disk_mode specified is not valid."
+ " Should be one of ['%s']" % "', '".join(valid_disk_mode))
+
+ if (vm_obj and diskspec.device.backing.diskMode != disk_mode) or (vm_obj is None):
+ diskspec.device.backing.diskMode = disk_mode
+ disk_modified = True
+ else:
+ diskspec.device.backing.diskMode = "persistent"
+
+ # is it thin?
+ if 'type' in expected_disk_spec:
+ disk_type = expected_disk_spec.get('type', '').lower()
+ if disk_type == 'thin':
+ diskspec.device.backing.thinProvisioned = True
+ elif disk_type == 'eagerzeroedthick':
+ diskspec.device.backing.eagerlyScrub = True
+
+ if 'filename' in expected_disk_spec and expected_disk_spec['filename'] is not None:
+ self.add_existing_vmdk(vm_obj, expected_disk_spec, diskspec, scsi_ctl)
+ continue
+ elif vm_obj is None or self.params['template']:
+ # We are creating a new VM or cloning from a template
+ # Only create virtual device if not backed by vmdk in original template
+ if diskspec.device.backing.fileName == '':
+ diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
+
+ # which datastore?
+ if expected_disk_spec.get('datastore'):
+ # TODO: This is already handled by the relocation spec,
+ # but it needs to eventually be handled for all the
+ # other disks defined
+ pass
+
+ kb = self.get_configured_disk_size(expected_disk_spec)
+ # VMware doesn't allow reducing disk sizes
+ if kb < diskspec.device.capacityInKB:
+ self.module.fail_json(
+ msg="Given disk size is smaller than found (%d < %d). Reducing disks is not allowed." %
+ (kb, diskspec.device.capacityInKB))
+
+ if kb != diskspec.device.capacityInKB or disk_modified:
+ diskspec.device.capacityInKB = kb
+ self.configspec.deviceChange.append(diskspec)
+
+ self.change_detected = True
+
+ def select_host(self):
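+ """Return the ESXi host matching 'esxi_hostname', failing if it is missing, disconnected or in maintenance mode."""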
+ hostsystem = self.cache.get_esx_host(self.params['esxi_hostname'])
+ if not hostsystem:
+ self.module.fail_json(msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params)
+ if hostsystem.runtime.connectionState != 'connected' or hostsystem.runtime.inMaintenanceMode:
+ self.module.fail_json(msg='ESXi "%(esxi_hostname)s" is in an invalid state or in maintenance mode.' % self.params)
+ return hostsystem
+
+ def autoselect_datastore(self):
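+ """Pick the valid datastore with the most free space."""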
+ datastore = None
+ datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
+
+ if datastores is None or len(datastores) == 0:
+ self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
+
+ datastore_freespace = 0
+ for ds in datastores:
+ if not self.is_datastore_valid(datastore_obj=ds):
+ continue
+
+ if ds.summary.freeSpace > datastore_freespace:
+ datastore = ds
+ datastore_freespace = ds.summary.freeSpace
+
+ return datastore
+
+ def get_recommended_datastore(self, datastore_cluster_obj=None):
+ """
+ Function to return Storage DRS recommended datastore from datastore cluster
+ Args:
+ datastore_cluster_obj: datastore cluster managed object
+
+ Returns: Name of recommended datastore from the given datastore cluster
+
+ """
+ if datastore_cluster_obj is None:
+ return None
+ # Check if Datastore Cluster provided by user is SDRS ready
+ sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
+ if sdrs_status:
+ # We can get storage recommendation only if SDRS is enabled on given datastorage cluster
+ pod_sel_spec = vim.storageDrs.PodSelectionSpec()
+ pod_sel_spec.storagePod = datastore_cluster_obj
+ storage_spec = vim.storageDrs.StoragePlacementSpec()
+ storage_spec.podSelectionSpec = pod_sel_spec
+ storage_spec.type = 'create'
+
+ try:
+ rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
+ rec_action = rec.recommendations[0].action[0]
+ return rec_action.destination.name
+ except Exception:
+ # There is some error so we fall back to general workflow
+ pass
+ datastore = None
+ datastore_freespace = 0
+ for ds in datastore_cluster_obj.childEntity:
+ if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
+ # If datastore field is provided, filter destination datastores
+ if not self.is_datastore_valid(datastore_obj=ds):
+ continue
+
+ datastore = ds
+ datastore_freespace = ds.summary.freeSpace
+ if datastore:
+ return datastore.name
+ return None
+
+ def select_datastore(self, vm_obj=None):
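+ """Select the destination datastore based on 'autoselect_datastore', an explicit datastore or datastore cluster, or the template's existing datastore."""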
+ datastore = None
+ datastore_name = None
+
+ if len(self.params['disk']) != 0:
+ # TODO: really use the datastore for newly created disks
+ if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']:
+ datastores = []
+
+ if self.params['cluster']:
+ cluster = self.find_cluster_by_name(self.params['cluster'], self.content)
+
+ for host in cluster.host:
+ for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo:
+ if mi.volume.type == "VMFS":
+ datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name))
+ elif self.params['esxi_hostname']:
+ host = self.find_hostsystem_by_name(self.params['esxi_hostname'])
+
+ for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo:
+ if mi.volume.type == "VMFS":
+ datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name))
+ else:
+ datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
+ datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']]
+
+ datastore_freespace = 0
+ for ds in datastores:
+ if not self.is_datastore_valid(datastore_obj=ds):
+ continue
+
+ if (ds.summary.freeSpace > datastore_freespace) or (ds.summary.freeSpace == datastore_freespace and not datastore):
+ # If datastore field is provided, filter destination datastores
+ if 'datastore' in self.params['disk'][0] and \
+ isinstance(self.params['disk'][0]['datastore'], str) and \
+ ds.name.find(self.params['disk'][0]['datastore']) < 0:
+ continue
+
+ datastore = ds
+ datastore_name = datastore.name
+ datastore_freespace = ds.summary.freeSpace
+
+ elif 'datastore' in self.params['disk'][0]:
+ datastore_name = self.params['disk'][0]['datastore']
+ # Check if user has provided datastore cluster first
+ datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
+ if datastore_cluster:
+ # If the user specified a datastore cluster, get the recommended datastore from it
+ datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
+ # Check if get_recommended_datastore or user specified datastore exists or not
+ datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
+ else:
+ self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore")
+
+ if not datastore and self.params['template']:
+ # use the template's existing DS
+ disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
+ if disks:
+ datastore = disks[0].backing.datastore
+ datastore_name = datastore.name
+ # validation
+ if datastore:
+ dc = self.cache.get_parent_datacenter(datastore)
+ if dc.name != self.params['datacenter']:
+ datastore = self.autoselect_datastore()
+ datastore_name = datastore.name
+
+ if not datastore:
+ if len(self.params['disk']) != 0 or self.params['template'] is None:
+ self.module.fail_json(msg="Unable to find the datastore with given parameters."
+ " This could mean, %s is a non-existent virtual machine and module tried to"
+ " deploy it as new virtual machine with no disk. Please specify disks parameter"
+ " or specify template to clone from." % self.params['name'])
+ self.module.fail_json(msg="Failed to find a matching datastore")
+
+ return datastore, datastore_name
+
+ def obj_has_parent(self, obj, parent):
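+ """Walk up the inventory tree from obj and return True if parent is one of its ancestors."""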
+ if obj is None and parent is None:
+ raise AssertionError()
+ current_parent = obj
+
+ while True:
+ if current_parent.name == parent.name:
+ return True
+
+ # Check if we have reached till root folder
+ moid = current_parent._moId
+ if moid in ['group-d1', 'ha-folder-root']:
+ return False
+
+ current_parent = current_parent.parent
+ if current_parent is None:
+ return False
+
+ def get_scsi_type(self):
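+ """Return the requested SCSI controller type from hardware.scsi, defaulting to 'paravirtual'."""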
+ disk_controller_type = "paravirtual"
+ # set cpu/memory/etc
+ if 'hardware' in self.params:
+ if 'scsi' in self.params['hardware']:
+ if self.params['hardware']['scsi'] in ['buslogic', 'paravirtual', 'lsilogic', 'lsilogicsas']:
+ disk_controller_type = self.params['hardware']['scsi']
+ else:
+ self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'")
+ return disk_controller_type
+
+ def find_folder(self, searchpath):
+ """ Walk inventory objects one position of the searchpath at a time """
+
+ # split the searchpath so we can iterate through it
+ paths = [x.replace('/', '') for x in searchpath.split('/')]
+ paths_total = len(paths) - 1
+ position = 0
+
+ # recursive walk while looking for next element in searchpath
+ root = self.content.rootFolder
+ while root and position <= paths_total:
+ change = False
+ if hasattr(root, 'childEntity'):
+ for child in root.childEntity:
+ if child.name == paths[position]:
+ root = child
+ position += 1
+ change = True
+ break
+ elif isinstance(root, vim.Datacenter):
+ if hasattr(root, 'vmFolder'):
+ if root.vmFolder.name == paths[position]:
+ root = root.vmFolder
+ position += 1
+ change = True
+ else:
+ root = None
+
+ if not change:
+ root = None
+
+ return root
+
+ def get_resource_pool(self, cluster=None, host=None, resource_pool=None):
+ """ Get a resource pool, filter on cluster, esxi_hostname or resource_pool if given """
+
+ cluster_name = cluster or self.params.get('cluster', None)
+ host_name = host or self.params.get('esxi_hostname', None)
+ resource_pool_name = resource_pool or self.params.get('resource_pool', None)
+
+ # get the datacenter object
+ datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
+ if not datacenter:
+ self.module.fail_json(msg='Unable to find datacenter "%s"' % self.params['datacenter'])
+
+ # if cluster is given, get the cluster object
+ if cluster_name:
+ cluster = find_obj(self.content, [vim.ComputeResource], cluster_name, folder=datacenter)
+ if not cluster:
+ self.module.fail_json(msg='Unable to find cluster "%s"' % cluster_name)
+ # if host is given, get the cluster object using the host
+ elif host_name:
+ host = find_obj(self.content, [vim.HostSystem], host_name, folder=datacenter)
+ if not host:
+ self.module.fail_json(msg='Unable to find host "%s"' % host_name)
+ cluster = host.parent
+ else:
+ cluster = None
+
+ # get resource pools limiting search to cluster or datacenter
+ resource_pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name, folder=cluster or datacenter)
+ if not resource_pool:
+ if resource_pool_name:
+ self.module.fail_json(msg='Unable to find resource_pool "%s"' % resource_pool_name)
+ else:
+ self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster')
+ return resource_pool
+
+ def deploy_vm(self):
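+ """Create a new virtual machine, either by cloning 'template' with a CloneSpec or from scratch with CreateVM_Task."""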
+ # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
+ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
+ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
+ # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
+
+ # FIXME:
+ # - static IPs
+
+ self.folder = self.params.get('folder', None)
+ if self.folder is None:
+ self.module.fail_json(msg="Folder is required parameter while deploying new virtual machine")
+
+ # Prepend / if it was missing from the folder path, also strip trailing slashes
+ if not self.folder.startswith('/'):
+ self.folder = '/%(folder)s' % self.params
+ self.folder = self.folder.rstrip('/')
+
+ datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
+ if datacenter is None:
+ self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)
+
+ dcpath = compile_folder_path_for_object(datacenter)
+
+ # Nested folder does not have trailing /
+ if not dcpath.endswith('/'):
+ dcpath += '/'
+
+ # Check for full path first in case it was already supplied
+ if (self.folder.startswith(dcpath + self.params['datacenter'] + '/vm') or
+ self.folder.startswith(dcpath + '/' + self.params['datacenter'] + '/vm')):
+ fullpath = self.folder
+ elif self.folder.startswith('/vm/') or self.folder == '/vm':
+ fullpath = "%s%s%s" % (dcpath, self.params['datacenter'], self.folder)
+ elif self.folder.startswith('/'):
+ fullpath = "%s%s/vm%s" % (dcpath, self.params['datacenter'], self.folder)
+ else:
+ fullpath = "%s%s/vm/%s" % (dcpath, self.params['datacenter'], self.folder)
+
+ f_obj = self.content.searchIndex.FindByInventoryPath(fullpath)
+
+ # abort if no strategy was successful
+ if f_obj is None:
+ # Add some debugging values in failure.
+ details = {
+ 'datacenter': datacenter.name,
+ 'datacenter_path': dcpath,
+ 'folder': self.folder,
+ 'full_search_path': fullpath,
+ }
+ self.module.fail_json(msg='No folder %s matched in the search path : %s' % (self.folder, fullpath),
+ details=details)
+
+ destfolder = f_obj
+
+ if self.params['template']:
+ vm_obj = self.get_vm_or_template(template_name=self.params['template'])
+ if vm_obj is None:
+ self.module.fail_json(msg="Could not find a template named %(template)s" % self.params)
+ else:
+ vm_obj = None
+
+ # always get a resource_pool
+ resource_pool = self.get_resource_pool()
+
+ # set the destination datastore for VM & disks
+ if self.params['datastore']:
+ # Give precedence to datastore value provided by user
+ # User may want to deploy VM to specific datastore.
+ datastore_name = self.params['datastore']
+ # Check if user has provided datastore cluster first
+ datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
+ if datastore_cluster:
+ # If the user specified a datastore cluster, get the recommended datastore from it
+ datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
+ # Check if get_recommended_datastore or user specified datastore exists or not
+ datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
+ else:
+ (datastore, datastore_name) = self.select_datastore(vm_obj)
+
+ self.configspec = vim.vm.ConfigSpec()
+ self.configspec.deviceChange = []
+ # create the relocation spec
+ self.relospec = vim.vm.RelocateSpec()
+ self.relospec.deviceChange = []
+ self.configure_guestid(vm_obj=vm_obj, vm_creation=True)
+ self.configure_cpu_and_memory(vm_obj=vm_obj, vm_creation=True)
+ self.configure_hardware_params(vm_obj=vm_obj)
+ self.configure_resource_alloc_info(vm_obj=vm_obj)
+ self.configure_vapp_properties(vm_obj=vm_obj)
+ self.configure_disks(vm_obj=vm_obj)
+ self.configure_network(vm_obj=vm_obj)
+ self.configure_cdrom(vm_obj=vm_obj)
+
+ # Find out whether we need network customizations (find keys in the dictionary that require customization)
+ network_changes = False
+ for nw in self.params['networks']:
+ for key in nw:
+ # We don't need customizations for these keys
+ if key == 'type' and nw['type'] == 'dhcp':
+ network_changes = True
+ break
+ if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected', 'dvswitch_name'):
+ network_changes = True
+ break
+
+ if len(self.params['customization']) > 0 or network_changes or self.params.get('customization_spec') is not None:
+ self.customize_vm(vm_obj=vm_obj)
+
+ clonespec = None
+ clone_method = None
+ try:
+ if self.params['template']:
+ # Only select specific host when ESXi hostname is provided
+ if self.params['esxi_hostname']:
+ self.relospec.host = self.select_host()
+ self.relospec.datastore = datastore
+
+ # Convert disk present in template if is set
+ if self.params['convert']:
+ for device in vm_obj.config.hardware.device:
+ if isinstance(device, vim.vm.device.VirtualDisk):
+ disk_locator = vim.vm.RelocateSpec.DiskLocator()
+ disk_locator.diskBackingInfo = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+ if self.params['convert'] in ['thin']:
+ disk_locator.diskBackingInfo.thinProvisioned = True
+ if self.params['convert'] in ['eagerzeroedthick']:
+ disk_locator.diskBackingInfo.eagerlyScrub = True
+ if self.params['convert'] in ['thick']:
+ disk_locator.diskBackingInfo.diskMode = "persistent"
+ disk_locator.diskId = device.key
+ disk_locator.datastore = datastore
+ self.relospec.disk.append(disk_locator)
+
+ # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
+ # > pool: For a clone operation from a template to a virtual machine, this argument is required.
+ self.relospec.pool = resource_pool
+ linked_clone = self.params.get('linked_clone')
+ snapshot_src = self.params.get('snapshot_src', None)
+ if linked_clone:
+ if snapshot_src is not None:
+ self.relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking
+ else:
+ self.module.fail_json(msg="Parameter 'linked_src' and 'snapshot_src' are"
+ " required together for linked clone operation.")
+
+ clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=self.relospec)
+ if self.customspec:
+ clonespec.customization = self.customspec
+
+ if snapshot_src is not None:
+ if vm_obj.snapshot is None:
+ self.module.fail_json(msg="No snapshots present for virtual machine or template [%(template)s]" % self.params)
+ snapshot = self.get_snapshots_by_name_recursively(snapshots=vm_obj.snapshot.rootSnapshotList,
+ snapname=snapshot_src)
+ if len(snapshot) != 1:
+ self.module.fail_json(msg='virtual machine "%(template)s" does not contain'
+ ' snapshot named "%(snapshot_src)s"' % self.params)
+
+ clonespec.snapshot = snapshot[0].snapshot
+
+ clonespec.config = self.configspec
+ clone_method = 'Clone'
+ try:
+ task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
+ except vim.fault.NoPermission as e:
+ self.module.fail_json(msg="Failed to clone virtual machine %s to folder %s "
+ "due to permission issue: %s" % (self.params['name'],
+ destfolder,
+ to_native(e.msg)))
+ self.change_detected = True
+ else:
+ # ConfigSpec requires a name for VM creation
+ self.configspec.name = self.params['name']
+ self.configspec.files = vim.vm.FileInfo(logDirectory=None,
+ snapshotDirectory=None,
+ suspendDirectory=None,
+ vmPathName="[" + datastore_name + "]")
+
+ clone_method = 'CreateVM_Task'
+ try:
+ task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool)
+ except vmodl.fault.InvalidRequest as e:
+ self.module.fail_json(msg="Failed to create virtual machine due to invalid configuration "
+ "parameter %s" % to_native(e.msg))
+ except vim.fault.RestrictedVersion as e:
+ self.module.fail_json(msg="Failed to create virtual machine due to "
+ "product versioning restrictions: %s" % to_native(e.msg))
+ self.change_detected = True
+ self.wait_for_task(task)
+ except TypeError as e:
+ self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs. %s" % to_text(e))
+
+ if task.info.state == 'error':
+ # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
+ # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
+
+ # provide these to the user for debugging
+ clonespec_json = serialize_spec(clonespec)
+ configspec_json = serialize_spec(self.configspec)
+ kwargs = {
+ 'changed': self.change_applied,
+ 'failed': True,
+ 'msg': task.info.error.msg,
+ 'clonespec': clonespec_json,
+ 'configspec': configspec_json,
+ 'clone_method': clone_method
+ }
+
+ return kwargs
+ else:
+ # set annotation
+ vm = task.info.result
+ if self.params['annotation']:
+ annotation_spec = vim.vm.ConfigSpec()
+ annotation_spec.annotation = str(self.params['annotation'])
+ task = vm.ReconfigVM_Task(annotation_spec)
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'annotation'}
+
+ if self.params['customvalues']:
+ vm_custom_spec = vim.vm.ConfigSpec()
+ self.customize_customvalues(vm_obj=vm, config_spec=vm_custom_spec)
+ task = vm.ReconfigVM_Task(vm_custom_spec)
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customvalues'}
+
+ if self.params['wait_for_ip_address'] or self.params['wait_for_customization'] or self.params['state'] in ['poweredon', 'restarted']:
+ set_vm_power_state(self.content, vm, 'poweredon', force=False)
+
+ if self.params['wait_for_ip_address']:
+ wait_for_vm_ip(self.content, vm, self.params['wait_for_ip_address_timeout'])
+
+ if self.params['wait_for_customization']:
+ is_customization_ok = self.wait_for_customization(vm=vm, timeout=self.params['wait_for_customization_timeout'])
+ if not is_customization_ok:
+ vm_facts = self.gather_facts(vm)
+ return {'changed': self.change_applied, 'failed': True, 'instance': vm_facts, 'op': 'customization'}
+
+ vm_facts = self.gather_facts(vm)
+ return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
+
+ def get_snapshots_by_name_recursively(self, snapshots, snapname):
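+ """Return all snapshots in the given snapshot tree whose name matches snapname."""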
+ snap_obj = []
+ for snapshot in snapshots:
+ if snapshot.name == snapname:
+ snap_obj.append(snapshot)
+ else:
+ snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname)
+ return snap_obj
+
+ def reconfigure_vm(self):
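+ """Reconfigure an existing VM: relocate, apply hardware/device changes, rename, convert to/from template and optionally customize the guest OS."""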
+ self.configspec = vim.vm.ConfigSpec()
+ self.configspec.deviceChange = []
+ # create the relocation spec
+ self.relospec = vim.vm.RelocateSpec()
+ self.relospec.deviceChange = []
+ self.configure_guestid(vm_obj=self.current_vm_obj)
+ self.configure_cpu_and_memory(vm_obj=self.current_vm_obj)
+ self.configure_hardware_params(vm_obj=self.current_vm_obj)
+ self.configure_disks(vm_obj=self.current_vm_obj)
+ self.configure_network(vm_obj=self.current_vm_obj)
+ self.configure_cdrom(vm_obj=self.current_vm_obj)
+ self.customize_customvalues(vm_obj=self.current_vm_obj, config_spec=self.configspec)
+ self.configure_resource_alloc_info(vm_obj=self.current_vm_obj)
+ self.configure_vapp_properties(vm_obj=self.current_vm_obj)
+
+ if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']:
+ self.configspec.annotation = str(self.params['annotation'])
+ self.change_detected = True
+
+ if self.params['resource_pool']:
+ self.relospec.pool = self.get_resource_pool()
+
+ if self.relospec.pool != self.current_vm_obj.resourcePool:
+ task = self.current_vm_obj.RelocateVM_Task(spec=self.relospec)
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'relocate'}
+
+ # Only send VMware task if we see a modification
+ if self.change_detected:
+ task = None
+ try:
+ task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec)
+ except vim.fault.RestrictedVersion as e:
+ self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
+ " product versioning restrictions: %s" % to_native(e.msg))
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'reconfig'}
+
+ # Rename VM
+ if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name:
+ task = self.current_vm_obj.Rename_Task(self.params['name'])
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'rename'}
+
+ # Mark VM as Template
+ if self.params['is_template'] and not self.current_vm_obj.config.template:
+ try:
+ self.current_vm_obj.MarkAsTemplate()
+ self.change_applied = True
+ except vmodl.fault.NotSupported as e:
+ self.module.fail_json(msg="Failed to mark virtual machine [%s] "
+ "as template: %s" % (self.params['name'], e.msg))
+
+ # Mark Template as VM
+ elif not self.params['is_template'] and self.current_vm_obj.config.template:
+ resource_pool = self.get_resource_pool()
+ kwargs = dict(pool=resource_pool)
+
+ if self.params.get('esxi_hostname', None):
+ host_system_obj = self.select_host()
+ kwargs.update(host=host_system_obj)
+
+ try:
+ self.current_vm_obj.MarkAsVirtualMachine(**kwargs)
+ self.change_applied = True
+ except vim.fault.InvalidState as invalid_state:
+ self.module.fail_json(msg="Virtual machine is not marked"
+ " as template : %s" % to_native(invalid_state.msg))
+ except vim.fault.InvalidDatastore as invalid_ds:
+ self.module.fail_json(msg="Converting template to virtual machine"
+ " operation cannot be performed on the"
+ " target datastores: %s" % to_native(invalid_ds.msg))
+ except vim.fault.CannotAccessVmComponent as cannot_access:
+ self.module.fail_json(msg="Failed to convert template to virtual machine"
+ " as operation unable access virtual machine"
+ " component: %s" % to_native(cannot_access.msg))
+ except vmodl.fault.InvalidArgument as invalid_argument:
+ self.module.fail_json(msg="Failed to convert template to virtual machine"
+ " due to : %s" % to_native(invalid_argument.msg))
+ except Exception as generic_exc:
+ self.module.fail_json(msg="Failed to convert template to virtual machine"
+ " due to generic error : %s" % to_native(generic_exc))
+
+ # Automatically update VMware UUID when converting template to VM.
+ # This avoids an interactive prompt during VM startup.
+ uuid_action = [x for x in self.current_vm_obj.config.extraConfig if x.key == "uuid.action"]
+ if not uuid_action:
+ uuid_action_opt = vim.option.OptionValue()
+ uuid_action_opt.key = "uuid.action"
+ uuid_action_opt.value = "create"
+ self.configspec.extraConfig.append(uuid_action_opt)
+
+ self.change_detected = True
+
+ # Customize the existing VM after reconfiguration, if requested
+ if 'existing_vm' in self.params['customization'] and self.params['customization']['existing_vm']:
+ if self.current_vm_obj.config.template:
+ self.module.fail_json(msg="VM is template, not support guest OS customization.")
+ if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
+ self.module.fail_json(msg="VM is not in poweroff state, can not do guest OS customization.")
+ cus_result = self.customize_exist_vm()
+ if cus_result['failed']:
+ return cus_result
+
+ vm_facts = self.gather_facts(self.current_vm_obj)
+ return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
+
+ def customize_exist_vm(self):
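+ """Run guest OS customization (CustomizeVM_Task) against an existing, powered-off VM."""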
+ task = None
+ # Find out whether we need network customizations (find keys in the dictionary that require customization)
+ network_changes = False
+ for nw in self.params['networks']:
+ for key in nw:
+ # We don't need customizations for these keys
+ if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected', 'dvswitch_name'):
+ network_changes = True
+ break
+ if len(self.params['customization']) > 1 or network_changes or self.params.get('customization_spec'):
+ self.customize_vm(vm_obj=self.current_vm_obj)
+ try:
+ task = self.current_vm_obj.CustomizeVM_Task(self.customspec)
+ except vim.fault.CustomizationFault as e:
+ self.module.fail_json(msg="Failed to customization virtual machine due to CustomizationFault: %s" % to_native(e.msg))
+ except vim.fault.RuntimeFault as e:
+ self.module.fail_json(msg="failed to customization virtual machine due to RuntimeFault: %s" % to_native(e.msg))
+ except Exception as e:
+ self.module.fail_json(msg="failed to customization virtual machine due to fault: %s" % to_native(e.msg))
+ self.wait_for_task(task)
+ if task.info.state == 'error':
+ return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customize_exist'}
+
+ if self.params['wait_for_customization']:
+ set_vm_power_state(self.content, self.current_vm_obj, 'poweredon', force=False)
+ is_customization_ok = self.wait_for_customization(vm=self.current_vm_obj, timeout=self.params['wait_for_customization_timeout'])
+ if not is_customization_ok:
+ return {'changed': self.change_applied, 'failed': True,
+ 'msg': 'Wait for customization failed due to timeout', 'op': 'wait_for_customize_exist'}
+
+ return {'changed': self.change_applied, 'failed': False}
+
+ def wait_for_task(self, task, poll_interval=1):
+ """
+ Wait for a VMware task to complete. Terminal states are 'error' and 'success'.
+
+ Inputs:
+ - task: the task to wait for
+ - poll_interval: polling interval to check the task, in seconds
+
+ Modifies:
+ - self.change_applied
+ """
+ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
+ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
+ # https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
+ while task.info.state not in ['error', 'success']:
+ time.sleep(poll_interval)
+ self.change_applied = self.change_applied or task.info.state == 'success'
+
+ def get_vm_events(self, vm, eventTypeIdList):
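+ """Query vCenter events of the given type IDs, scoped to this VM."""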
+ byEntity = vim.event.EventFilterSpec.ByEntity(entity=vm, recursion="self")
+ filterSpec = vim.event.EventFilterSpec(entity=byEntity, eventTypeId=eventTypeIdList)
+ eventManager = self.content.eventManager
+ return eventManager.QueryEvent(filterSpec)
+
+ def wait_for_customization(self, vm, timeout=3600, sleep=10):
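+ """Poll VM events until guest customization starts and then succeeds or fails; return True only on success."""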
+ poll = int(timeout // sleep)
+ thispoll = 0
+ while thispoll <= poll:
+ eventStarted = self.get_vm_events(vm, ['CustomizationStartedEvent'])
+ if len(eventStarted):
+ thispoll = 0
+ while thispoll <= poll:
+ eventsFinishedResult = self.get_vm_events(vm, ['CustomizationSucceeded', 'CustomizationFailed'])
+ if len(eventsFinishedResult):
+ if not isinstance(eventsFinishedResult[0], vim.event.CustomizationSucceeded):
+ self.module.warn("Customization failed with error {%s}:{%s}"
+ % (eventsFinishedResult[0]._wsdlName, eventsFinishedResult[0].fullFormattedMessage))
+ return False
+ else:
+ return True
+ else:
+ time.sleep(sleep)
+ thispoll += 1
+ if len(eventsFinishedResult) == 0:
+ self.module.warn('Waiting for customization result event timed out.')
+ return False
+ else:
+ time.sleep(sleep)
+ thispoll += 1
+ if len(eventStarted):
+ self.module.warn('Waiting for customization result event timed out.')
+ else:
+ self.module.warn('Waiting for customization start event timed out.')
+ return False
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present',
+ choices=['absent', 'poweredoff', 'poweredon', 'present', 'rebootguest', 'restarted', 'shutdownguest', 'suspended']),
+ template=dict(type='str', aliases=['template_src']),
+ is_template=dict(type='bool', default=False),
+ annotation=dict(type='str', aliases=['notes']),
+ customvalues=dict(type='list', default=[]),
+ name=dict(type='str'),
+ name_match=dict(type='str', choices=['first', 'last'], default='first'),
+ uuid=dict(type='str'),
+ use_instance_uuid=dict(type='bool', default=False),
+ folder=dict(type='str'),
+ guest_id=dict(type='str'),
+ disk=dict(type='list', default=[]),
+ cdrom=dict(type=list_or_dict, default=[]),
+ hardware=dict(type='dict', default={}),
+ force=dict(type='bool', default=False),
+ datacenter=dict(type='str', default='ha-datacenter'),
+ esxi_hostname=dict(type='str'),
+ cluster=dict(type='str'),
+ wait_for_ip_address=dict(type='bool', default=False),
+ wait_for_ip_address_timeout=dict(type='int', default=300),
+ state_change_timeout=dict(type='int', default=0),
+ snapshot_src=dict(type='str'),
+ linked_clone=dict(type='bool', default=False),
+ networks=dict(type='list', default=[]),
+ resource_pool=dict(type='str'),
+ customization=dict(type='dict', default={}, no_log=True),
+ customization_spec=dict(type='str', default=None),
+ wait_for_customization=dict(type='bool', default=False),
+ wait_for_customization_timeout=dict(type='int', default=3600),
+ vapp_properties=dict(type='list', default=[]),
+ datastore=dict(type='str'),
+ convert=dict(type='str', choices=['thin', 'thick', 'eagerzeroedthick']),
+ delete_from_inventory=dict(type='bool', default=False),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['cluster', 'esxi_hostname'],
+ ],
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ pyv = PyVmomiHelper(module)
+
+ # Check if the VM exists before continuing
+ vm = pyv.get_vm()
+
+ # VM already exists
+ if vm:
+ if module.params['state'] == 'absent':
+ # destroy it
+ if module.check_mode:
+ result.update(
+ vm_name=vm.name,
+ changed=True,
+ current_powerstate=vm.summary.runtime.powerState.lower(),
+ desired_operation='remove_vm',
+ )
+ module.exit_json(**result)
+ if module.params['force']:
+ # has to be poweredoff first
+ set_vm_power_state(pyv.content, vm, 'poweredoff', module.params['force'])
+ result = pyv.remove_vm(vm, module.params['delete_from_inventory'])
+ elif module.params['state'] == 'present':
+ if module.check_mode:
+ result.update(
+ vm_name=vm.name,
+ changed=True,
+ desired_operation='reconfigure_vm',
+ )
+ module.exit_json(**result)
+ result = pyv.reconfigure_vm()
+ elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']:
+ if module.check_mode:
+ result.update(
+ vm_name=vm.name,
+ changed=True,
+ current_powerstate=vm.summary.runtime.powerState.lower(),
+ desired_operation='set_vm_power_state',
+ )
+ module.exit_json(**result)
+ # set powerstate
+ tmp_result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'])
+ if tmp_result['changed']:
+ result["changed"] = True
+ if module.params['state'] in ['poweredon', 'restarted', 'rebootguest'] and module.params['wait_for_ip_address']:
+ wait_result = wait_for_vm_ip(pyv.content, vm, module.params['wait_for_ip_address_timeout'])
+ if not wait_result:
+ module.fail_json(msg='Waiting for IP address timed out')
+ tmp_result['instance'] = wait_result
+ if not tmp_result["failed"]:
+ result["failed"] = False
+ result['instance'] = tmp_result['instance']
+ if tmp_result["failed"]:
+ result["failed"] = True
+ result["msg"] = tmp_result["msg"]
+ else:
+ # This should not happen
+ raise AssertionError()
+ # VM doesn't exist
+ else:
+ if module.params['state'] in ['poweredon', 'poweredoff', 'present', 'restarted', 'suspended']:
+ if module.check_mode:
+ result.update(
+ changed=True,
+ desired_operation='deploy_vm',
+ )
+ module.exit_json(**result)
+ result = pyv.deploy_vm()
+ if result['failed']:
+ module.fail_json(msg='Failed to create a virtual machine: %s' % result['msg'])
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/vmware_guest_custom_attributes.py b/test/support/integration/plugins/modules/vmware_guest_custom_attributes.py
new file mode 100644
index 0000000000..e55a3ad754
--- /dev/null
+++ b/test/support/integration/plugins/modules/vmware_guest_custom_attributes.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright, (c) 2018, Ansible Project
+# Copyright, (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+
+DOCUMENTATION = '''
+---
+module: vmware_guest_custom_attributes
+short_description: Manage custom attributes from VMware for the given virtual machine
+description:
+ - This module can be used to add, remove and update custom attributes for the given virtual machine.
+version_added: 2.7
+author:
+ - Jimmy Conner (@cigamit)
+ - Abhijeet Kasurde (@Akasurde)
+notes:
+ - Tested on vSphere 6.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ name:
+ description:
+ - Name of the virtual machine to work with.
+ - This is a required parameter if C(uuid) or C(moid) is not supplied.
+ type: str
+ state:
+ description:
+ - The action to take.
+ - If set to C(present), then custom attribute is added or updated.
+ - If set to C(absent), then custom attribute is removed.
+ default: 'present'
+ choices: ['present', 'absent']
+ type: str
+ uuid:
+ description:
+ - UUID of the virtual machine to manage if known. This is VMware's unique identifier.
+ - This is a required parameter if C(name) or C(moid) is not supplied.
+ type: str
+ moid:
+ description:
+ - Managed Object ID of the instance to manage if known. This is a unique identifier only within a single vCenter instance.
+ - This is required if C(name) or C(uuid) is not supplied.
+ version_added: '2.9'
+ type: str
+ use_instance_uuid:
+ description:
+ - Whether to use the VMware instance UUID rather than the BIOS UUID.
+ default: no
+ type: bool
+ version_added: '2.8'
+ folder:
+ description:
+ - Absolute path to find an existing guest.
+ - This is a required parameter if C(name) is supplied and multiple virtual machines with the same name are found.
+ type: str
+ datacenter:
+ description:
+ - Datacenter name where the virtual machine is located.
+ required: True
+ type: str
+ attributes:
+ description:
+ - A list of names and values of the custom attributes to manage.
+ - The value of a custom attribute is not required and will be ignored if C(state) is set to C(absent).
+ default: []
+ type: list
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+- name: Add virtual machine custom attributes
+ vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: present
+ attributes:
+ - name: MyAttribute
+ value: MyValue
+ delegate_to: localhost
+ register: attributes
+
+- name: Add multiple virtual machine custom attributes
+ vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: present
+ attributes:
+ - name: MyAttribute
+ value: MyValue
+ - name: MyAttribute2
+ value: MyValue2
+ delegate_to: localhost
+ register: attributes
+
+- name: Remove virtual machine Attribute
+ vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: absent
+ attributes:
+ - name: MyAttribute
+ delegate_to: localhost
+ register: attributes
+
+- name: Remove virtual machine Attribute using Virtual Machine MoID
+ vmware_guest_custom_attributes:
+ hostname: "{{ vcenter_hostname }}"
+ username: "{{ vcenter_username }}"
+ password: "{{ vcenter_password }}"
+ moid: vm-42
+ state: absent
+ attributes:
+ - name: MyAttribute
+ delegate_to: localhost
+ register: attributes
+'''
+
+RETURN = """
+custom_attributes:
+ description: metadata about the virtual machine attributes
+ returned: always
+ type: dict
+ sample: {
+ "mycustom": "my_custom_value",
+ "mycustom_2": "my_custom_value_2",
+ "sample_1": "sample_1_value",
+ "sample_2": "sample_2_value",
+ "sample_3": "sample_3_value"
+ }
+"""
+
+try:
+ from pyVmomi import vim
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
+
+
+class VmAttributeManager(PyVmomi):
+ def __init__(self, module):
+ super(VmAttributeManager, self).__init__(module)
+
+ def set_custom_field(self, vm, user_fields):
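+ # Compare each requested attribute with the values currently set on the VM: create the
+ # field definition when it does not exist yet and set the value only when it is missing
+ # or differs, honouring check mode throughout.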
+ result_fields = dict()
+ change_list = list()
+ changed = False
+
+ for field in user_fields:
+ field_key = self.check_exists(field['name'])
+ found = False
+ field_value = field.get('value', '')
+
+ for k, v in [(x.name, v.value) for x in self.custom_field_mgr for v in vm.customValue if x.key == v.key]:
+ if k == field['name']:
+ found = True
+ if v != field_value:
+ if not self.module.check_mode:
+ self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
+ result_fields[k] = field_value
+ change_list.append(True)
+ if not found and field_value != "":
+ if not field_key and not self.module.check_mode:
+ field_key = self.content.customFieldsManager.AddFieldDefinition(name=field['name'], moType=vim.VirtualMachine)
+ change_list.append(True)
+ if not self.module.check_mode:
+ self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value)
+ result_fields[field['name']] = field_value
+
+ if any(change_list):
+ changed = True
+
+ return {'changed': changed, 'failed': False, 'custom_attributes': result_fields}
+
+ def check_exists(self, field):
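+ # Return the custom field definition matching the given name, or False when no such
+ # definition exists yet.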
+ for x in self.custom_field_mgr:
+ if x.name == field:
+ return x
+ return False
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ datacenter=dict(type='str'),
+ name=dict(type='str'),
+ folder=dict(type='str'),
+ uuid=dict(type='str'),
+ moid=dict(type='str'),
+ use_instance_uuid=dict(type='bool', default=False),
+ state=dict(type='str', default='present',
+ choices=['absent', 'present']),
+ attributes=dict(
+ type='list',
+ default=[],
+ options=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str'),
+ )
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid', 'moid']
+ ],
+ )
+
+ if module.params.get('folder'):
+ # FindByInventoryPath() does not require an absolute path,
+ # so only strip a trailing slash and otherwise leave the input folder path unmodified
+ module.params['folder'] = module.params['folder'].rstrip('/')
+
+ pyv = VmAttributeManager(module)
+ results = {'changed': False, 'failed': False, 'instance': dict()}
+
+ # Check if the virtual machine exists before continuing
+ vm = pyv.get_vm()
+
+ if vm:
+ # virtual machine already exists
+ if module.params['state'] == "present":
+ results = pyv.set_custom_field(vm, module.params['attributes'])
+ elif module.params['state'] == "absent":
+ results = pyv.set_custom_field(vm, module.params['attributes'])
+ module.exit_json(**results)
+ else:
+ # virtual machine does not exist
+ vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid'))
+ module.fail_json(msg="Unable to manage custom attributes for non-existing"
+ " virtual machine %s" % vm_id)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/support/integration/plugins/modules/vmware_host_hyperthreading.py b/test/support/integration/plugins/modules/vmware_host_hyperthreading.py
new file mode 100644
index 0000000000..ad579e1e5e
--- /dev/null
+++ b/test/support/integration/plugins/modules/vmware_host_hyperthreading.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = r'''
+---
+module: vmware_host_hyperthreading
+short_description: Enables/Disables Hyperthreading optimization for an ESXi host system
+description:
+- This module can be used to enable or disable Hyperthreading optimization for ESXi host systems in the given vCenter infrastructure.
+- It also checks if Hyperthreading is activated/deactivated and if the host needs to be restarted.
+- The module informs the user if Hyperthreading is enabled but inactive because the processor is vulnerable to L1 Terminal Fault (L1TF).
+version_added: 2.8
+author:
+- Christian Kotte (@ckotte)
+notes:
+- Tested on vSphere 6.5
+requirements:
+- python >= 2.6
+- PyVmomi
+options:
+ state:
+ description:
+ - Enable or disable Hyperthreading.
+ - You need to reboot the ESXi host if you change the configuration.
+ - Make sure that Hyperthreading is enabled in the BIOS. Otherwise, it will be enabled, but never activated.
+ type: str
+ choices: [ enabled, disabled ]
+ default: 'enabled'
+ esxi_hostname:
+ description:
+ - Name of the host system to work with.
+ - This parameter is required if C(cluster_name) is not specified.
+ type: str
+ cluster_name:
+ description:
+ - Name of the cluster from which all host systems will be used.
+ - This parameter is required if C(esxi_hostname) is not specified.
+ type: str
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = r'''
+- name: Enable Hyperthreading for a host system
+ vmware_host_hyperthreading:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: enabled
+ validate_certs: no
+ delegate_to: localhost
+
+- name: Disable Hyperthreading for a host system
+ vmware_host_hyperthreading:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ esxi_hostname: '{{ esxi_hostname }}'
+ state: disabled
+ validate_certs: no
+ delegate_to: localhost
+
+- name: Disable Hyperthreading for all host systems from cluster
+ vmware_host_hyperthreading:
+ hostname: '{{ vcenter_hostname }}'
+ username: '{{ vcenter_username }}'
+ password: '{{ vcenter_password }}'
+ cluster_name: '{{ cluster_name }}'
+ state: disabled
+ validate_certs: no
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+results:
+ description: metadata about the host system's Hyperthreading configuration
+ returned: always
+ type: dict
+ sample: {
+ "esxi01": {
+ "msg": "Hyperthreading is already enabled and active for host 'esxi01'",
+ "state_current": "active",
+ "state": "enabled",
+ },
+ }
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class VmwareHostHyperthreading(PyVmomi):
+ """Manage Hyperthreading for an ESXi host system"""
+ def __init__(self, module):
+ super(VmwareHostHyperthreading, self).__init__(module)
+ cluster_name = self.params.get('cluster_name')
+ esxi_host_name = self.params.get('esxi_hostname')
+ self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
+ if not self.hosts:
+ self.module.fail_json(msg="Failed to find host system.")
+
+ def ensure(self):
+ """Manage Hyperthreading for an ESXi host system"""
+ results = dict(changed=False, result=dict())
+ desired_state = self.params.get('state')
+ host_change_list = []
+ for host in self.hosts:
+ changed = False
+ results['result'][host.name] = dict(msg='')
+
+ hyperthreading_info = host.config.hyperThread
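+ # hyperThread exposes three booleans: 'available' (supported on this host),
+ # 'config' (the requested setting) and 'active' (the setting currently in effect).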
+
+ results['result'][host.name]['state'] = desired_state
+ if desired_state == 'enabled':
+ # Don't do anything if Hyperthreading is already enabled
+ if hyperthreading_info.config:
+ if hyperthreading_info.active:
+ results['result'][host.name]['changed'] = False
+ results['result'][host.name]['state_current'] = "active"
+ results['result'][host.name]['msg'] = "Hyperthreading is enabled and active"
+ if not hyperthreading_info.active:
+ # L1 Terminal Fault (L1TF)/Foreshadow mitigation workaround (https://kb.vmware.com/s/article/55806)
+ option_manager = host.configManager.advancedOption
+ try:
+ mitigation = option_manager.QueryOptions('VMkernel.Boot.hyperthreadingMitigation')
+ except vim.fault.InvalidName:
+ mitigation = None
+ if mitigation and mitigation[0].value:
+ results['result'][host.name]['changed'] = False
+ results['result'][host.name]['state_current'] = "enabled"
+ results['result'][host.name]['msg'] = ("Hyperthreading is enabled, but not active because the"
+ " processor is vulnerable to L1 Terminal Fault (L1TF).")
+ else:
+ changed = results['result'][host.name]['changed'] = True
+ results['result'][host.name]['state_current'] = "enabled"
+ results['result'][host.name]['msg'] = ("Hyperthreading is enabled, but not active."
+ " A reboot is required!")
+ # Enable Hyperthreading
+ else:
+ # Check if Hyperthreading is available
+ if hyperthreading_info.available:
+ if not self.module.check_mode:
+ try:
+ host.configManager.cpuScheduler.EnableHyperThreading()
+ changed = results['result'][host.name]['changed'] = True
+ results['result'][host.name]['state_previous'] = "disabled"
+ results['result'][host.name]['state_current'] = "enabled"
+ results['result'][host.name]['msg'] = (
+ "Hyperthreading enabled for host. Reboot the host to activate it."
+ )
+ except vmodl.fault.NotSupported as not_supported:
+ # This should never happen since Hyperthreading is available
+ self.module.fail_json(
+ msg="Failed to enable Hyperthreading for host '%s' : %s" %
+ (host.name, to_native(not_supported.msg))
+ )
+ except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
+ self.module.fail_json(
+ msg="Failed to enable Hyperthreading for host '%s' due to : %s" %
+ (host.name, to_native(runtime_fault.msg))
+ )
+ else:
+ changed = results['result'][host.name]['changed'] = True
+ results['result'][host.name]['state_previous'] = "disabled"
+ results['result'][host.name]['state_current'] = "enabled"
+ results['result'][host.name]['msg'] = "Hyperthreading will be enabled"
+ else:
+ self.module.fail_json(msg="Hyperthreading optimization is not available for host '%s'" % host.name)
+ elif desired_state == 'disabled':
+ # Don't do anything if Hyperthreading is already disabled
+ if not hyperthreading_info.config:
+ if not hyperthreading_info.active:
+ results['result'][host.name]['changed'] = False
+ results['result'][host.name]['state_current'] = "inactive"
+ results['result'][host.name]['msg'] = "Hyperthreading is disabled and inactive"
+ if hyperthreading_info.active:
+ changed = results['result'][host.name]['changed'] = True
+ results['result'][host.name]['state_current'] = "disabled"
+ results['result'][host.name]['msg'] = ("Hyperthreading is already disabled"
+ " but still active. A reboot is required!")
+ # Disable Hyperthreading
+ else:
+ # Check if Hyperthreading is available
+ if hyperthreading_info.available:
+ if not self.module.check_mode:
+ try:
+ host.configManager.cpuScheduler.DisableHyperThreading()
+ changed = results['result'][host.name]['changed'] = True
+ results['result'][host.name]['state_previous'] = "enabled"
+ results['result'][host.name]['state_current'] = "disabled"
+ results['result'][host.name]['msg'] = (
+ "Hyperthreading disabled. Reboot the host to deactivate it."
+ )
+ except vmodl.fault.NotSupported as not_supported:
+ # This should never happen since Hyperthreading is available
+ self.module.fail_json(
+ msg="Failed to disable Hyperthreading for host '%s' : %s" %
+ (host.name, to_native(not_supported.msg))
+ )
+ except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault:
+ self.module.fail_json(
+ msg="Failed to disable Hyperthreading for host '%s' due to : %s" %
+ (host.name, to_native(runtime_fault.msg))
+ )
+ else:
+ changed = results['result'][host.name]['changed'] = True
+ results['result'][host.name]['state_previous'] = "enabled"
+ results['result'][host.name]['state_current'] = "disabled"
+ results['result'][host.name]['msg'] = "Hyperthreading will be disabled"
+ else:
+ self.module.fail_json(msg="Hyperthreading optimization is not available for host '%s'" % host.name)
+
+ host_change_list.append(changed)
+
+ if any(host_change_list):
+ results['changed'] = True
+ self.module.exit_json(**results)
+
+
+def main():
+ """Main"""
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ state=dict(default='enabled', choices=['enabled', 'disabled']),
+ esxi_hostname=dict(type='str', required=False),
+ cluster_name=dict(type='str', required=False),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_one_of=[
+ ['cluster_name', 'esxi_hostname'],
+ ],
+ supports_check_mode=True
+ )
+
+ hyperthreading = VmwareHostHyperthreading(module)
+ hyperthreading.ensure()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/utils/shippable/incidental/aws.sh b/test/utils/shippable/incidental/aws.sh
new file mode 120000
index 0000000000..700ad3edcf
--- /dev/null
+++ b/test/utils/shippable/incidental/aws.sh
@@ -0,0 +1 @@
+cloud.sh \ No newline at end of file
diff --git a/test/utils/shippable/incidental/azure.sh b/test/utils/shippable/incidental/azure.sh
new file mode 120000
index 0000000000..700ad3edcf
--- /dev/null
+++ b/test/utils/shippable/incidental/azure.sh
@@ -0,0 +1 @@
+cloud.sh \ No newline at end of file
diff --git a/test/utils/shippable/incidental/cloud.sh b/test/utils/shippable/incidental/cloud.sh
new file mode 100755
index 0000000000..fdf8668acf
--- /dev/null
+++ b/test/utils/shippable/incidental/cloud.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
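+# the first field of "$1" selects the cloud provider, the second the python version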
+cloud="${args[0]}"
+python_version="${args[1]}"
+
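+# integration targets are selected by this alias prefix, e.g. shippable/aws/incidental/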
+target="shippable/${cloud}/incidental/"
+
+stage="${S:-prod}"
+
+# python versions to test in order
+# all versions run full tests
+python_versions=(
+ 2.7
+ 3.6
+)
+
+if [ "${python_version}" ]; then
+ # limit tests to a single python version
+ python_versions=("${python_version}")
+fi
+
+for python_version in "${python_versions[@]}"; do
+ # terminate remote instances on the final python version tested
+ if [ "${python_version}" = "${python_versions[-1]}" ]; then
+ terminate="always"
+ else
+ terminate="never"
+ fi
+
+ # shellcheck disable=SC2086
+ ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote-terminate "${terminate}" \
+ --remote-stage "${stage}" \
+ --enable-test-support \
+ --docker --python "${python_version}"
+done
diff --git a/test/utils/shippable/incidental/cs.sh b/test/utils/shippable/incidental/cs.sh
new file mode 120000
index 0000000000..700ad3edcf
--- /dev/null
+++ b/test/utils/shippable/incidental/cs.sh
@@ -0,0 +1 @@
+cloud.sh \ No newline at end of file
diff --git a/test/utils/shippable/incidental/hcloud.sh b/test/utils/shippable/incidental/hcloud.sh
new file mode 120000
index 0000000000..700ad3edcf
--- /dev/null
+++ b/test/utils/shippable/incidental/hcloud.sh
@@ -0,0 +1 @@
+cloud.sh \ No newline at end of file
diff --git a/test/utils/shippable/incidental/tower.sh b/test/utils/shippable/incidental/tower.sh
new file mode 120000
index 0000000000..700ad3edcf
--- /dev/null
+++ b/test/utils/shippable/incidental/tower.sh
@@ -0,0 +1 @@
+cloud.sh \ No newline at end of file
diff --git a/test/utils/shippable/incidental/vcenter.sh b/test/utils/shippable/incidental/vcenter.sh
new file mode 120000
index 0000000000..700ad3edcf
--- /dev/null
+++ b/test/utils/shippable/incidental/vcenter.sh
@@ -0,0 +1 @@
+cloud.sh \ No newline at end of file