author     Ansible Core Team <info@ansible.com>    2020-03-09 09:40:27 +0000
committer  Ansible Core Team <info@ansible.com>    2020-03-09 09:40:27 +0000
commit     cb06e04e718f73173a6dcae1b082637063bae786 (patch)
tree       deab34c907f665de2e033342ea8eec374f0775c8
parent     5c9a9974c09c041a3bddca744af41fea7e51835f (diff)
download   ansible-cb06e04e718f73173a6dcae1b082637063bae786.tar.gz
Migrated to community.amazon
-rw-r--r--contrib/inventory/ec2.ini219
-rwxr-xr-xcontrib/inventory/ec2.py1712
l---------lib/ansible/modules/cloud/amazon/_aws_acm_facts.py1
l---------lib/ansible/modules/cloud/amazon/_aws_kms_facts.py1
l---------lib/ansible/modules/cloud/amazon/_aws_region_facts.py1
l---------lib/ansible/modules/cloud/amazon/_aws_s3_bucket_facts.py1
l---------lib/ansible/modules/cloud/amazon/_aws_sgw_facts.py1
l---------lib/ansible/modules/cloud/amazon/_aws_waf_facts.py1
l---------lib/ansible/modules/cloud/amazon/_cloudfront_facts.py1
l---------lib/ansible/modules/cloud/amazon/_cloudwatchlogs_log_group_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_asg_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_customer_gateway_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_eip_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_elb_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_instance_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_lc_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_placement_group_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_vpc_endpoint_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_vpc_igw_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_vpc_nacl_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_vpc_nat_gateway_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_vpc_peering_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_vpc_route_table_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_vpc_vgw_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ec2_vpc_vpn_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ecs_service_facts.py1
l---------lib/ansible/modules/cloud/amazon/_ecs_taskdefinition_facts.py1
l---------lib/ansible/modules/cloud/amazon/_efs_facts.py1
l---------lib/ansible/modules/cloud/amazon/_elasticache_facts.py1
l---------lib/ansible/modules/cloud/amazon/_elb_application_lb_facts.py1
l---------lib/ansible/modules/cloud/amazon/_elb_classic_lb_facts.py1
l---------lib/ansible/modules/cloud/amazon/_elb_target_facts.py1
l---------lib/ansible/modules/cloud/amazon/_elb_target_group_facts.py1
l---------lib/ansible/modules/cloud/amazon/_iam_cert_facts.py1
l---------lib/ansible/modules/cloud/amazon/_iam_mfa_device_facts.py1
l---------lib/ansible/modules/cloud/amazon/_iam_role_facts.py1
l---------lib/ansible/modules/cloud/amazon/_iam_server_certificate_facts.py1
-rw-r--r--lib/ansible/modules/cloud/amazon/_lambda_facts.py389
l---------lib/ansible/modules/cloud/amazon/_rds_instance_facts.py1
l---------lib/ansible/modules/cloud/amazon/_rds_snapshot_facts.py1
l---------lib/ansible/modules/cloud/amazon/_redshift_facts.py1
l---------lib/ansible/modules/cloud/amazon/_route53_facts.py1
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_acm.py397
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_acm_info.py299
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_api_gateway.py375
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_application_scaling_policy.py543
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_batch_compute_environment.py490
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_batch_job_definition.py459
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_batch_job_queue.py316
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_codebuild.py408
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_codecommit.py247
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_codepipeline.py320
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_config_aggregation_authorization.py163
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_config_aggregator.py232
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_config_delivery_channel.py219
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_config_recorder.py213
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_config_rule.py275
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_direct_connect_connection.py343
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_direct_connect_gateway.py374
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_direct_connect_link_aggregation_group.py470
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_direct_connect_virtual_interface.py500
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_eks_cluster.py307
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_elasticbeanstalk_app.py228
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_glue_connection.py337
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_glue_job.py373
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_inspector_target.py248
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_kms.py1072
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_kms_info.py433
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_region_info.py96
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_s3_bucket_info.py119
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_s3_cors.py168
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_secret.py404
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_ses_identity.py546
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_ses_identity_policy.py201
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_ses_rule_set.py254
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_sgw_info.py361
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py262
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_step_functions_state_machine.py232
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_step_functions_state_machine_execution.py197
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_waf_condition.py736
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_waf_info.py149
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_waf_rule.py355
-rw-r--r--lib/ansible/modules/cloud/amazon/aws_waf_web_acl.py359
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudformation_exports_info.py87
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudformation_stack_set.py724
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudfront_distribution.py2264
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudfront_info.py729
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudfront_invalidation.py276
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudfront_origin_access_identity.py280
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudtrail.py618
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py464
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group.py319
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group_info.py132
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group_metric_filter.py221
-rw-r--r--lib/ansible/modules/cloud/amazon/data_pipeline.py652
-rw-r--r--lib/ansible/modules/cloud/amazon/dms_endpoint.py472
-rw-r--r--lib/ansible/modules/cloud/amazon/dms_replication_subnet_group.py238
-rw-r--r--lib/ansible/modules/cloud/amazon/dynamodb_table.py522
-rw-r--r--lib/ansible/modules/cloud/amazon/dynamodb_ttl.py174
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_ami_copy.py226
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_asg.py1831
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_asg_info.py414
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_asg_lifecycle_hook.py253
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py276
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_customer_gateway_info.py137
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_eip.py649
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_eip_info.py145
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_elb.py374
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_elb_info.py271
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_instance.py1805
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_instance_info.py571
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_launch_template.py702
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_lc.py714
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_lc_find.py217
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_lc_info.py237
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py410
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_placement_group.py209
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_placement_group_info.py129
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py193
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_snapshot_copy.py201
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_transit_gateway.py578
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_transit_gateway_info.py268
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_egress_igw.py191
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint.py400
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint_info.py200
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py283
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_igw_info.py159
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py634
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_info.py222
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py1020
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway_info.py156
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py448
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_peering_info.py149
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py750
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_info.py134
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py581
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_info.py165
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_vpn.py783
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_vpn_info.py218
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_win_password.py208
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_attribute.py311
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_cluster.py233
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_ecr.py531
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_service.py850
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_service_info.py258
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_tag.py224
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_task.py450
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py528
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_taskdefinition_info.py334
-rw-r--r--lib/ansible/modules/cloud/amazon/efs.py758
-rw-r--r--lib/ansible/modules/cloud/amazon/efs_info.py401
-rw-r--r--lib/ansible/modules/cloud/amazon/elasticache.py562
-rw-r--r--lib/ansible/modules/cloud/amazon/elasticache_info.py310
-rw-r--r--lib/ansible/modules/cloud/amazon/elasticache_parameter_group.py356
-rw-r--r--lib/ansible/modules/cloud/amazon/elasticache_snapshot.py233
-rw-r--r--lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py149
-rw-r--r--lib/ansible/modules/cloud/amazon/elb_application_lb.py659
-rw-r--r--lib/ansible/modules/cloud/amazon/elb_application_lb_info.py292
-rw-r--r--lib/ansible/modules/cloud/amazon/elb_classic_lb.py1365
-rw-r--r--lib/ansible/modules/cloud/amazon/elb_classic_lb_info.py217
-rw-r--r--lib/ansible/modules/cloud/amazon/elb_instance.py376
-rw-r--r--lib/ansible/modules/cloud/amazon/elb_network_lb.py469
-rw-r--r--lib/ansible/modules/cloud/amazon/elb_target.py354
-rw-r--r--lib/ansible/modules/cloud/amazon/elb_target_group.py860
-rw-r--r--lib/ansible/modules/cloud/amazon/elb_target_group_info.py329
-rw-r--r--lib/ansible/modules/cloud/amazon/elb_target_info.py439
-rw-r--r--lib/ansible/modules/cloud/amazon/execute_lambda.py286
-rw-r--r--lib/ansible/modules/cloud/amazon/iam.py873
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_cert.py315
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_group.py439
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_managed_policy.py384
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_mfa_device_info.py117
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_password_policy.py216
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_policy.py346
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_policy_info.py219
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_role.py673
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_role_info.py258
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_saml_federation.py249
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_server_certificate_info.py172
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_user.py370
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_user_info.py185
-rw-r--r--lib/ansible/modules/cloud/amazon/kinesis_stream.py1428
-rw-r--r--lib/ansible/modules/cloud/amazon/lambda.py628
-rw-r--r--lib/ansible/modules/cloud/amazon/lambda_alias.py389
-rw-r--r--lib/ansible/modules/cloud/amazon/lambda_event.py448
-rw-r--r--lib/ansible/modules/cloud/amazon/lambda_info.py380
-rw-r--r--lib/ansible/modules/cloud/amazon/lambda_policy.py439
-rw-r--r--lib/ansible/modules/cloud/amazon/lightsail.py340
-rw-r--r--lib/ansible/modules/cloud/amazon/rds.py1405
-rw-r--r--lib/ansible/modules/cloud/amazon/rds_instance.py1226
-rw-r--r--lib/ansible/modules/cloud/amazon/rds_instance_info.py407
-rw-r--r--lib/ansible/modules/cloud/amazon/rds_param_group.py356
-rw-r--r--lib/ansible/modules/cloud/amazon/rds_snapshot.py352
-rw-r--r--lib/ansible/modules/cloud/amazon/rds_snapshot_info.py396
-rw-r--r--lib/ansible/modules/cloud/amazon/rds_subnet_group.py202
-rw-r--r--lib/ansible/modules/cloud/amazon/redshift.py625
-rw-r--r--lib/ansible/modules/cloud/amazon/redshift_cross_region_snapshots.py205
-rw-r--r--lib/ansible/modules/cloud/amazon/redshift_info.py354
-rw-r--r--lib/ansible/modules/cloud/amazon/redshift_subnet_group.py182
-rw-r--r--lib/ansible/modules/cloud/amazon/route53.py721
-rw-r--r--lib/ansible/modules/cloud/amazon/route53_health_check.py375
-rw-r--r--lib/ansible/modules/cloud/amazon/route53_info.py499
-rw-r--r--lib/ansible/modules/cloud/amazon/route53_zone.py442
-rw-r--r--lib/ansible/modules/cloud/amazon/s3_bucket_notification.py265
-rw-r--r--lib/ansible/modules/cloud/amazon/s3_lifecycle.py520
-rw-r--r--lib/ansible/modules/cloud/amazon/s3_logging.py178
-rw-r--r--lib/ansible/modules/cloud/amazon/s3_sync.py567
-rw-r--r--lib/ansible/modules/cloud/amazon/s3_website.py335
-rw-r--r--lib/ansible/modules/cloud/amazon/sns.py237
-rw-r--r--lib/ansible/modules/cloud/amazon/sns_topic.py529
-rw-r--r--lib/ansible/modules/cloud/amazon/sqs_queue.py481
-rw-r--r--lib/ansible/modules/cloud/amazon/sts_assume_role.py180
-rw-r--r--lib/ansible/modules/cloud/amazon/sts_session_token.py158
-rw-r--r--lib/ansible/plugins/connection/aws_ssm.py557
-rw-r--r--test/integration/targets/aws_acm/aliases4
-rw-r--r--test/integration/targets/aws_acm/defaults/main.yml40
-rw-r--r--test/integration/targets/aws_acm/meta/main.yml2
-rw-r--r--test/integration/targets/aws_acm/tasks/full_acm_test.yml482
-rw-r--r--test/integration/targets/aws_acm/tasks/main.yml39
-rw-r--r--test/integration/targets/aws_api_gateway/aliases2
-rw-r--r--test/integration/targets/aws_api_gateway/meta/main.yml3
-rw-r--r--test/integration/targets/aws_api_gateway/tasks/main.yml207
-rw-r--r--test/integration/targets/aws_api_gateway/templates/minimal-swagger-api.yml.j233
-rw-r--r--test/integration/targets/aws_codebuild/aliases2
-rw-r--r--test/integration/targets/aws_codebuild/defaults/main.yml10
-rw-r--r--test/integration/targets/aws_codebuild/files/codebuild_iam_trust_policy.json12
-rw-r--r--test/integration/targets/aws_codebuild/tasks/main.yml119
-rw-r--r--test/integration/targets/aws_codebuild/vars/main.yml0
-rw-r--r--test/integration/targets/aws_codecommit/aliases2
-rw-r--r--test/integration/targets/aws_codecommit/tasks/main.yml105
-rw-r--r--test/integration/targets/aws_codepipeline/aliases2
-rw-r--r--test/integration/targets/aws_codepipeline/defaults/main.yml12
-rw-r--r--test/integration/targets/aws_codepipeline/files/codepipeline_iam_trust_policy.json12
-rw-r--r--test/integration/targets/aws_codepipeline/tasks/main.yml156
-rw-r--r--test/integration/targets/aws_config/aliases8
-rw-r--r--test/integration/targets/aws_config/defaults/main.yaml4
-rw-r--r--test/integration/targets/aws_config/files/config-trust-policy.json13
-rw-r--r--test/integration/targets/aws_config/tasks/main.yaml405
-rw-r--r--test/integration/targets/aws_config/templates/config-s3-policy.json.j223
-rw-r--r--test/integration/targets/aws_eks_cluster/aliases2
-rw-r--r--test/integration/targets/aws_eks_cluster/defaults/main.yml33
-rw-r--r--test/integration/targets/aws_eks_cluster/files/eks-trust-policy.json12
-rw-r--r--test/integration/targets/aws_eks_cluster/meta/main.yml2
-rw-r--r--test/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.10.1.yml12
-rw-r--r--test/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.12.38.yml13
-rw-r--r--test/integration/targets/aws_eks_cluster/tasks/full_test.yml245
-rw-r--r--test/integration/targets/aws_eks_cluster/tasks/main.yml66
-rw-r--r--test/integration/targets/aws_elasticbeanstalk_app/aliases2
-rw-r--r--test/integration/targets/aws_elasticbeanstalk_app/defaults/main.yml5
-rw-r--r--test/integration/targets/aws_elasticbeanstalk_app/meta/main.yml3
-rw-r--r--test/integration/targets/aws_elasticbeanstalk_app/tasks/main.yml156
-rw-r--r--test/integration/targets/aws_glue_connection/aliases2
-rw-r--r--test/integration/targets/aws_glue_connection/tasks/main.yml87
-rw-r--r--test/integration/targets/aws_inspector_target/aliases2
-rw-r--r--test/integration/targets/aws_inspector_target/defaults/main.yml3
-rw-r--r--test/integration/targets/aws_inspector_target/tasks/main.yml96
-rw-r--r--test/integration/targets/aws_kms/aliases3
-rw-r--r--test/integration/targets/aws_kms/meta/main.yml3
-rw-r--r--test/integration/targets/aws_kms/tasks/main.yml371
-rw-r--r--test/integration/targets/aws_kms/templates/console-policy.j272
-rw-r--r--test/integration/targets/aws_lambda/aliases5
-rw-r--r--test/integration/targets/aws_lambda/defaults/main.yml3
-rw-r--r--test/integration/targets/aws_lambda/files/mini_lambda.py44
-rw-r--r--test/integration/targets/aws_lambda/meta/main.yml3
-rw-r--r--test/integration/targets/aws_lambda/tasks/main.yml515
-rw-r--r--test/integration/targets/aws_secret/aliases2
-rw-r--r--test/integration/targets/aws_secret/defaults/main.yaml5
-rw-r--r--test/integration/targets/aws_secret/files/hello_world.zipbin401 -> 0 bytes
-rw-r--r--test/integration/targets/aws_secret/files/secretsmanager-trust-policy.json19
-rw-r--r--test/integration/targets/aws_secret/tasks/main.yaml250
-rw-r--r--test/integration/targets/aws_ses_identity/aliases2
-rw-r--r--test/integration/targets/aws_ses_identity/defaults/main.yaml4
-rw-r--r--test/integration/targets/aws_ses_identity/meta/main.yaml0
-rw-r--r--test/integration/targets/aws_ses_identity/tasks/assert_defaults.yaml23
-rw-r--r--test/integration/targets/aws_ses_identity/tasks/main.yaml648
-rw-r--r--test/integration/targets/aws_ses_identity_policy/aliases2
-rw-r--r--test/integration/targets/aws_ses_identity_policy/defaults/main.yaml3
-rw-r--r--test/integration/targets/aws_ses_identity_policy/tasks/main.yaml334
-rw-r--r--test/integration/targets/aws_ses_identity_policy/templates/policy.json.j213
-rw-r--r--test/integration/targets/aws_ses_rule_set/aliases2
-rw-r--r--test/integration/targets/aws_ses_rule_set/defaults/main.yaml9
-rw-r--r--test/integration/targets/aws_ses_rule_set/tasks/active-rule-set-tests.yaml349
-rw-r--r--test/integration/targets/aws_ses_rule_set/tasks/cleanup-lock.yaml15
-rw-r--r--test/integration/targets/aws_ses_rule_set/tasks/inactive-rule-set-tests.yaml187
-rw-r--r--test/integration/targets/aws_ses_rule_set/tasks/main.yaml36
-rw-r--r--test/integration/targets/aws_ses_rule_set/tasks/obtain-lock-wrapper.yaml26
-rw-r--r--test/integration/targets/aws_ses_rule_set/tasks/obtain-lock.yaml126
-rw-r--r--test/integration/targets/aws_ssm_parameter_store/aliases2
-rw-r--r--test/integration/targets/aws_ssm_parameter_store/defaults/main.yml3
-rw-r--r--test/integration/targets/aws_ssm_parameter_store/tasks/main.yml131
-rw-r--r--test/integration/targets/aws_step_functions_state_machine/aliases3
-rw-r--r--test/integration/targets/aws_step_functions_state_machine/defaults/main.yml4
-rw-r--r--test/integration/targets/aws_step_functions_state_machine/files/alternative_state_machine.json15
-rw-r--r--test/integration/targets/aws_step_functions_state_machine/files/state_machine.json10
-rw-r--r--test/integration/targets/aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json12
-rw-r--r--test/integration/targets/aws_step_functions_state_machine/tasks/main.yml300
-rw-r--r--test/integration/targets/aws_waf_web_acl/aliases6
-rw-r--r--test/integration/targets/aws_waf_web_acl/tasks/main.yml1199
-rw-r--r--test/integration/targets/cloudformation_exports_info/aliases2
-rw-r--r--test/integration/targets/cloudformation_exports_info/defaults/main.yml1
-rw-r--r--test/integration/targets/cloudformation_exports_info/files/test_stack.yml24
-rw-r--r--test/integration/targets/cloudformation_exports_info/tasks/main.yml39
-rw-r--r--test/integration/targets/cloudformation_stack_set/aliases2
-rw-r--r--test/integration/targets/cloudformation_stack_set/files/test_bucket_stack.yml6
-rw-r--r--test/integration/targets/cloudformation_stack_set/files/test_modded_bucket_stack.yml9
-rw-r--r--test/integration/targets/cloudformation_stack_set/playbooks/full_test.yml6
-rwxr-xr-xtest/integration/targets/cloudformation_stack_set/runme.sh8
-rw-r--r--test/integration/targets/cloudformation_stack_set/tasks/main.yml186
-rw-r--r--test/integration/targets/cloudfront_distribution/aliases2
-rw-r--r--test/integration/targets/cloudfront_distribution/defaults/main.yml49
-rw-r--r--test/integration/targets/cloudfront_distribution/meta/main.yml1
-rw-r--r--test/integration/targets/cloudfront_distribution/tasks/main.yml422
-rw-r--r--test/integration/targets/cloudtrail/aliases2
-rw-r--r--test/integration/targets/cloudtrail/defaults/main.yml7
-rw-r--r--test/integration/targets/cloudtrail/tasks/main.yml1423
-rw-r--r--test/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j213
-rw-r--r--test/integration/targets/cloudtrail/templates/cloudwatch-policy.j217
-rw-r--r--test/integration/targets/cloudtrail/templates/kms-policy.j234
-rw-r--r--test/integration/targets/cloudtrail/templates/s3-policy.j234
-rw-r--r--test/integration/targets/cloudtrail/templates/sns-policy.j234
-rw-r--r--test/integration/targets/cloudwatchlogs/aliases4
-rw-r--r--test/integration/targets/cloudwatchlogs/defaults/main.yml3
-rw-r--r--test/integration/targets/cloudwatchlogs/tasks/main.yml157
-rw-r--r--test/integration/targets/connection_aws_ssm/aliases7
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup.yml3
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/README.md43
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/defaults/main.yml4
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/files/ec2-trust-policy.json13
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/debian.yml25
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/main.yml156
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/redhat.yml13
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/aws-env-vars.j24
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_linux_vars_to_delete.yml.j22
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_windows_vars_to_delete.yml.j22
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/iam_role_vars_to_delete.yml.j22
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-linux.aws_ssm.j212
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-windows.aws_ssm.j212
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/s3_vars_to_delete.yml.j22
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown.yml3
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/README.md43
-rw-r--r--test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/tasks/main.yml85
-rw-r--r--test/integration/targets/connection_aws_ssm/inventory.aws_ssm.template10
-rwxr-xr-xtest/integration/targets/connection_aws_ssm/runme.sh42
-rw-r--r--test/integration/targets/dms_endpoint/aliases2
-rw-r--r--test/integration/targets/dms_endpoint/tasks/main.yml136
-rw-r--r--test/integration/targets/dms_replication_subnet_group/aliases2
-rw-r--r--test/integration/targets/dms_replication_subnet_group/defaults/main.yml2
-rw-r--r--test/integration/targets/dms_replication_subnet_group/files/dmsAssumeRolePolicyDocument.json12
-rw-r--r--test/integration/targets/dms_replication_subnet_group/tasks/main.yml175
-rw-r--r--test/integration/targets/ec2_asg/aliases2
-rw-r--r--test/integration/targets/ec2_asg/defaults/main.yml4
-rw-r--r--test/integration/targets/ec2_asg/tasks/main.yml782
-rw-r--r--test/integration/targets/ec2_asg/vars/main.yml0
-rw-r--r--test/integration/targets/ec2_eip/aliases2
-rw-r--r--test/integration/targets/ec2_eip/defaults/main.yml5
-rw-r--r--test/integration/targets/ec2_eip/meta/main.yml3
-rw-r--r--test/integration/targets/ec2_eip/tasks/main.yml767
-rw-r--r--test/integration/targets/ec2_instance/aliases3
-rw-r--r--test/integration/targets/ec2_instance/inventory17
-rw-r--r--test/integration/targets/ec2_instance/main.yml43
-rw-r--r--test/integration/targets/ec2_instance/meta/main.yml4
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/defaults/main.yml14
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/files/assume-role-policy.json13
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/meta/main.yml3
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/block_devices.yml82
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml172
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/cpu_options.yml86
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml57
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml41
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml93
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_setup.yml79
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml129
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/find_ami.yml15
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml127
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml68
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/main.yml48
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml158
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/termination_protection.yml184
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail.yml29
-rw-r--r--test/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml30
-rwxr-xr-xtest/integration/targets/ec2_instance/runme.sh12
-rw-r--r--test/integration/targets/ec2_launch_template/aliases2
-rw-r--r--test/integration/targets/ec2_launch_template/meta/main.yml3
-rw-r--r--test/integration/targets/ec2_launch_template/playbooks/full_test.yml5
-rw-r--r--test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/defaults/main.yml18
-rw-r--r--test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/files/assume-role-policy.json13
-rw-r--r--test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/meta/main.yml3
-rw-r--r--test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/cpu_options.yml38
-rw-r--r--test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/iam_instance_role.yml104
-rw-r--r--test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/main.yml24
-rw-r--r--test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/tags_and_vpc_settings.yml208
-rw-r--r--test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/versions.yml62
-rw-r--r--test/integration/targets/ec2_launch_template/playbooks/version_fail.yml36
-rwxr-xr-xtest/integration/targets/ec2_launch_template/runme.sh15
-rw-r--r--test/integration/targets/ec2_metric_alarm/aliases2
-rw-r--r--test/integration/targets/ec2_metric_alarm/defaults/main.yml6
-rw-r--r--test/integration/targets/ec2_metric_alarm/meta/main.yml3
-rw-r--r--test/integration/targets/ec2_metric_alarm/tasks/env_cleanup.yml94
-rw-r--r--test/integration/targets/ec2_metric_alarm/tasks/env_setup.yml62
-rw-r--r--test/integration/targets/ec2_metric_alarm/tasks/main.yml228
-rw-r--r--test/integration/targets/ec2_metric_alarm/vars/main.yml1
-rw-r--r--test/integration/targets/ec2_transit_gateway/aliases3
-rw-r--r--test/integration/targets/ec2_transit_gateway/tasks/main.yml175
-rw-r--r--test/integration/targets/ec2_vpc_egress_igw/aliases2
-rw-r--r--test/integration/targets/ec2_vpc_egress_igw/tasks/main.yml112
-rw-r--r--test/integration/targets/ec2_vpc_igw/aliases2
-rw-r--r--test/integration/targets/ec2_vpc_igw/tasks/main.yml84
-rw-r--r--test/integration/targets/ec2_vpc_nacl/aliases3
-rw-r--r--test/integration/targets/ec2_vpc_nacl/meta/main.yml3
-rw-r--r--test/integration/targets/ec2_vpc_nacl/tasks/ingress_and_egress.yml162
-rw-r--r--test/integration/targets/ec2_vpc_nacl/tasks/ipv6.yml178
-rw-r--r--test/integration/targets/ec2_vpc_nacl/tasks/main.yml170
-rw-r--r--test/integration/targets/ec2_vpc_nacl/tasks/subnet_ids.yml174
-rw-r--r--test/integration/targets/ec2_vpc_nacl/tasks/subnet_names.yml140
-rw-r--r--test/integration/targets/ec2_vpc_nacl/tasks/tags.yml117
-rw-r--r--test/integration/targets/ec2_vpc_nat_gateway/aliases2
-rw-r--r--test/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml82
-rw-r--r--test/integration/targets/ec2_vpc_route_table/aliases4
-rw-r--r--test/integration/targets/ec2_vpc_route_table/meta/main.yml3
-rw-r--r--test/integration/targets/ec2_vpc_route_table/tasks/main.yml757
-rw-r--r--test/integration/targets/ec2_vpc_vgw/aliases2
-rw-r--r--test/integration/targets/ec2_vpc_vgw/tasks/main.yml171
-rw-r--r--test/integration/targets/ec2_vpc_vpn_info/aliases2
-rw-r--r--test/integration/targets/ec2_vpc_vpn_info/tasks/main.yml124
-rw-r--r--test/integration/targets/ecs_cluster/aliases6
-rw-r--r--test/integration/targets/ecs_cluster/defaults/main.yml38
-rw-r--r--test/integration/targets/ecs_cluster/files/ec2-trust-policy.json13
-rw-r--r--test/integration/targets/ecs_cluster/files/ecs-trust-policy.json16
-rw-r--r--test/integration/targets/ecs_cluster/meta/main.yml2
-rw-r--r--test/integration/targets/ecs_cluster/tasks/full_test.yml1169
-rw-r--r--test/integration/targets/ecs_cluster/tasks/main.yml53
-rw-r--r--test/integration/targets/ecs_cluster/tasks/network_assign_public_ip_fail.yml123
-rw-r--r--test/integration/targets/ecs_cluster/tasks/network_fail.yml216
-rw-r--r--test/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml124
-rw-r--r--test/integration/targets/ecs_cluster/tasks/network_force_new_deployment_fail.yml125
-rw-r--r--test/integration/targets/ecs_ecr/aliases2
-rw-r--r--test/integration/targets/ecs_ecr/defaults/main.yml22
-rw-r--r--test/integration/targets/ecs_ecr/meta/main.yml3
-rw-r--r--test/integration/targets/ecs_ecr/tasks/main.yml543
-rw-r--r--test/integration/targets/ecs_tag/aliases3
-rw-r--r--test/integration/targets/ecs_tag/tasks/main.yml320
-rw-r--r--test/integration/targets/efs/aliases3
-rw-r--r--test/integration/targets/efs/playbooks/full_test.yml9
-rw-r--r--test/integration/targets/efs/playbooks/roles/efs/tasks/main.yml327
-rw-r--r--test/integration/targets/efs/playbooks/version_fail.yml32
-rwxr-xr-xtest/integration/targets/efs/runme.sh15
-rw-r--r--test/integration/targets/elb_application_lb/aliases2
-rw-r--r--test/integration/targets/elb_application_lb/defaults/main.yml6
-rw-r--r--test/integration/targets/elb_application_lb/meta/main.yml2
-rw-r--r--test/integration/targets/elb_application_lb/tasks/full_test.yml259
-rw-r--r--test/integration/targets/elb_application_lb/tasks/main.yml44
-rw-r--r--test/integration/targets/elb_application_lb/tasks/multiple_actions_fail.yml253
-rw-r--r--test/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml71
-rw-r--r--test/integration/targets/elb_application_lb/tasks/test_alb_tags.yml93
-rw-r--r--test/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml89
-rw-r--r--test/integration/targets/elb_application_lb/tasks/test_creating_alb.yml52
-rw-r--r--test/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml52
-rw-r--r--test/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml240
-rw-r--r--test/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml467
-rw-r--r--test/integration/targets/elb_application_lb/tasks/test_multiple_actions_fail.yml53
-rw-r--r--test/integration/targets/elb_classic_lb/aliases2
-rw-r--r--test/integration/targets/elb_classic_lb/defaults/main.yml3
-rw-r--r--test/integration/targets/elb_classic_lb/meta/main.yml3
-rw-r--r--test/integration/targets/elb_classic_lb/tasks/main.yml425
-rw-r--r--test/integration/targets/elb_classic_lb/vars/main.yml2
-rw-r--r--test/integration/targets/elb_network_lb/aliases2
-rw-r--r--test/integration/targets/elb_network_lb/defaults/main.yml7
-rw-r--r--test/integration/targets/elb_network_lb/files/cert.pem32
-rw-r--r--test/integration/targets/elb_network_lb/files/key.pem52
-rw-r--r--test/integration/targets/elb_network_lb/meta/main.yml3
-rw-r--r--test/integration/targets/elb_network_lb/tasks/main.yml248
-rw-r--r--test/integration/targets/elb_network_lb/tasks/test_creating_nlb.yml82
-rw-r--r--test/integration/targets/elb_network_lb/tasks/test_deleting_nlb.yml50
-rw-r--r--test/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml88
-rw-r--r--test/integration/targets/elb_network_lb/tasks/test_nlb_bad_listener_options.yml72
-rw-r--r--test/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml101
-rw-r--r--test/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml90
-rw-r--r--test/integration/targets/elb_target/aliases4
-rw-r--r--test/integration/targets/elb_target/playbooks/full_test.yml7
-rw-r--r--test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/defaults/main.yml5
-rw-r--r--test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/ansible_lambda_target.py10
-rw-r--r--test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/assume-role.json8
-rw-r--r--test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/tasks/main.yml126
-rw-r--r--test/integration/targets/elb_target/playbooks/roles/elb_target/defaults/main.yml10
-rw-r--r--test/integration/targets/elb_target/playbooks/roles/elb_target/tasks/main.yml482
-rw-r--r--test/integration/targets/elb_target/playbooks/version_fail.yml41
-rwxr-xr-xtest/integration/targets/elb_target/runme.sh13
-rw-r--r--test/integration/targets/elb_target_info/aliases2
-rw-r--r--test/integration/targets/elb_target_info/playbooks/full_test.yml6
-rw-r--r--test/integration/targets/elb_target_info/playbooks/roles/elb_target_info/defaults/main.yml5
-rw-r--r--test/integration/targets/elb_target_info/playbooks/roles/elb_target_info/tasks/main.yml505
-rwxr-xr-xtest/integration/targets/elb_target_info/runme.sh5
-rw-r--r--test/integration/targets/iam_group/aliases2
-rw-r--r--test/integration/targets/iam_group/defaults/main.yml3
-rw-r--r--test/integration/targets/iam_group/meta/main.yml3
-rw-r--r--test/integration/targets/iam_group/tasks/main.yml125
-rw-r--r--test/integration/targets/iam_password_policy/aliases2
-rw-r--r--test/integration/targets/iam_password_policy/tasks/main.yaml105
-rw-r--r--test/integration/targets/iam_policy/aliases3
-rw-r--r--test/integration/targets/iam_policy/defaults/main.yml6
-rw-r--r--test/integration/targets/iam_policy/files/no_access.json10
-rw-r--r--test/integration/targets/iam_policy/files/no_access_with_id.json11
-rw-r--r--test/integration/targets/iam_policy/files/no_access_with_second_id.json11
-rw-r--r--test/integration/targets/iam_policy/files/no_trust.json10
-rw-r--r--test/integration/targets/iam_policy/tasks/main.yml97
-rw-r--r--test/integration/targets/iam_policy/tasks/object.yml1065
-rw-r--r--test/integration/targets/iam_role/aliases3
-rw-r--r--test/integration/targets/iam_role/defaults/main.yml8
-rw-r--r--test/integration/targets/iam_role/files/deny-all-a.json13
-rw-r--r--test/integration/targets/iam_role/files/deny-all-b.json13
-rw-r--r--test/integration/targets/iam_role/files/deny-all.json12
-rw-r--r--test/integration/targets/iam_role/files/deny-assume.json10
-rw-r--r--test/integration/targets/iam_role/meta/main.yml3
-rw-r--r--test/integration/targets/iam_role/tasks/main.yml1519
-rw-r--r--test/integration/targets/iam_saml_federation/aliases2
-rw-r--r--test/integration/targets/iam_saml_federation/files/example1.xml22
-rw-r--r--test/integration/targets/iam_saml_federation/files/example2.xml22
-rw-r--r--test/integration/targets/iam_saml_federation/meta/main.yml3
-rw-r--r--test/integration/targets/iam_saml_federation/tasks/main.yml87
-rw-r--r--test/integration/targets/iam_user/aliases3
-rw-r--r--test/integration/targets/iam_user/defaults/main.yml7
-rw-r--r--test/integration/targets/iam_user/meta/main.yml3
-rw-r--r--test/integration/targets/iam_user/tasks/main.yml480
-rw-r--r--test/integration/targets/lambda_policy/aliases2
-rw-r--r--test/integration/targets/lambda_policy/defaults/main.yml3
-rw-r--r--test/integration/targets/lambda_policy/files/mini_http_lambda.py36
-rw-r--r--test/integration/targets/lambda_policy/files/minimal_trust_policy.json12
-rw-r--r--test/integration/targets/lambda_policy/meta/main.yml3
-rw-r--r--test/integration/targets/lambda_policy/tasks/main.yml206
-rw-r--r--test/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j239
-rw-r--r--test/integration/targets/lightsail/aliases2
-rw-r--r--test/integration/targets/lightsail/defaults/main.yml2
-rw-r--r--test/integration/targets/lightsail/tasks/main.yml122
-rw-r--r--test/integration/targets/rds_instance/aliases2
-rw-r--r--test/integration/targets/rds_instance/defaults/main.yml23
-rw-r--r--test/integration/targets/rds_instance/tasks/credential_tests.yml36
-rw-r--r--test/integration/targets/rds_instance/tasks/main.yml29
-rw-r--r--test/integration/targets/rds_instance/tasks/test_aurora.yml144
-rw-r--r--test/integration/targets/rds_instance/tasks/test_bad_options.yml41
-rw-r--r--test/integration/targets/rds_instance/tasks/test_encryption.yml53
-rw-r--r--test/integration/targets/rds_instance/tasks/test_final_snapshot.yml75
-rw-r--r--test/integration/targets/rds_instance/tasks/test_modification.yml195
-rw-r--r--test/integration/targets/rds_instance/tasks/test_processor_features.yml126
-rw-r--r--test/integration/targets/rds_instance/tasks/test_read_replica.yml142
-rw-r--r--test/integration/targets/rds_instance/tasks/test_restore_instance.yml95
-rw-r--r--test/integration/targets/rds_instance/tasks/test_snapshot.yml85
-rw-r--r--test/integration/targets/rds_instance/tasks/test_states.yml277
-rw-r--r--test/integration/targets/rds_instance/tasks/test_tags.yml265
-rw-r--r--test/integration/targets/rds_instance/tasks/test_vpc_security_groups.yml166
-rw-r--r--test/integration/targets/rds_param_group/aliases2
-rw-r--r--test/integration/targets/rds_param_group/defaults/main.yml30
-rw-r--r--test/integration/targets/rds_param_group/meta/main.yml3
-rw-r--r--test/integration/targets/rds_param_group/tasks/main.yml321
-rw-r--r--test/integration/targets/rds_subnet_group/aliases2
-rw-r--r--test/integration/targets/rds_subnet_group/defaults/main.yml8
-rw-r--r--test/integration/targets/rds_subnet_group/meta/main.yml3
-rw-r--r--test/integration/targets/rds_subnet_group/tasks/main.yml113
-rw-r--r--test/integration/targets/rds_subnet_group/tasks/params.yml62
-rw-r--r--test/integration/targets/rds_subnet_group/tasks/tests.yml221
-rw-r--r--test/integration/targets/redshift/aliases2
-rw-r--r--test/integration/targets/redshift/defaults/main.yml6
-rw-r--r--test/integration/targets/redshift/meta/main.yml3
-rw-r--r--test/integration/targets/redshift/tasks/main.yml276
-rw-r--r--test/integration/targets/route53/aliases3
-rw-r--r--test/integration/targets/route53/defaults/main.yml2
-rw-r--r--test/integration/targets/route53/tasks/main.yml252
-rw-r--r--test/integration/targets/route53/vars/main.yml0
-rw-r--r--test/integration/targets/route53_zone/aliases2
-rw-r--r--test/integration/targets/route53_zone/tasks/main.yml393
-rw-r--r--test/integration/targets/s3_bucket_notification/aliases2
-rw-r--r--test/integration/targets/s3_bucket_notification/defaults/main.yml3
-rw-r--r--test/integration/targets/s3_bucket_notification/files/mini_lambda.py8
-rw-r--r--test/integration/targets/s3_bucket_notification/meta/main.yml3
-rw-r--r--test/integration/targets/s3_bucket_notification/tasks/main.yml335
-rw-r--r--test/integration/targets/s3_lifecycle/aliases3
-rw-r--r--test/integration/targets/s3_lifecycle/tasks/main.yml435
-rw-r--r--test/integration/targets/s3_logging/aliases4
-rw-r--r--test/integration/targets/s3_logging/defaults/main.yml4
-rw-r--r--test/integration/targets/s3_logging/tasks/main.yml203
-rw-r--r--test/integration/targets/script_inventory_ec2/aliases2
-rwxr-xr-xtest/integration/targets/script_inventory_ec2/ec2.sh5
-rwxr-xr-xtest/integration/targets/script_inventory_ec2/inventory_diff.py67
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/__init__.py0
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/boto/__init__.py5
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/boto/ec2/__init__.py48
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/boto/elasticache/__init__.py33
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/boto/exception.py22
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/boto/exceptions.py22
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/boto/mocks/__init__.py0
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/boto/mocks/instances.py348
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/boto/rds.py0
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/boto/route53.py0
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/boto/session.py76
-rw-r--r--test/integration/targets/script_inventory_ec2/lib/boto/sts.py0
-rwxr-xr-xtest/integration/targets/script_inventory_ec2/runme.sh151
-rw-r--r--test/integration/targets/sns/aliases2
-rw-r--r--test/integration/targets/sns/defaults/main.yml1
-rw-r--r--test/integration/targets/sns/tasks/main.yml53
-rw-r--r--test/integration/targets/sns_topic/aliases2
-rw-r--r--test/integration/targets/sns_topic/defaults/main.yml8
-rw-r--r--test/integration/targets/sns_topic/files/lambda-policy.json14
-rw-r--r--test/integration/targets/sns_topic/files/lambda-trust-policy.json12
-rw-r--r--test/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py6
-rw-r--r--test/integration/targets/sns_topic/tasks/main.yml360
-rw-r--r--test/integration/targets/sns_topic/templates/initial-policy.json20
-rw-r--r--test/integration/targets/sns_topic/templates/updated-policy.json20
-rw-r--r--test/integration/targets/sqs_queue/aliases2
-rw-r--r--test/integration/targets/sqs_queue/defaults/main.yml1
-rw-r--r--test/integration/targets/sqs_queue/tasks/main.yml106
-rw-r--r--test/integration/targets/sts_assume_role/aliases2
-rw-r--r--test/integration/targets/sts_assume_role/meta/main.yml3
-rw-r--r--test/integration/targets/sts_assume_role/tasks/main.yml384
-rw-r--r--test/integration/targets/sts_assume_role/templates/policy.json.j212
-rw-r--r--test/sanity/ignore.txt144
-rw-r--r--test/units/modules/cloud/amazon/test_aws_acm.py122
-rw-r--r--test/units/modules/cloud/amazon/test_aws_api_gateway.py69
-rw-r--r--test/units/modules/cloud/amazon/test_aws_direct_connect_connection.py92
-rw-r--r--test/units/modules/cloud/amazon/test_aws_direct_connect_link_aggregation_group.py168
-rw-r--r--test/units/modules/cloud/amazon/test_data_pipeline.py250
-rw-r--r--test/units/modules/cloud/amazon/test_ec2_vpc_nat_gateway.py207
-rw-r--r--test/units/modules/cloud/amazon/test_ec2_vpc_vpn.py352
-rw-r--r--test/units/modules/cloud/amazon/test_iam_password_policy.py30
-rw-r--r--test/units/modules/cloud/amazon/test_kinesis_stream.py330
-rw-r--r--test/units/modules/cloud/amazon/test_lambda.py273
-rw-r--r--test/units/modules/cloud/amazon/test_lambda_policy.py155
-rw-r--r--test/units/modules/cloud/amazon/test_redshift_cross_region_snapshots.py52
-rw-r--r--test/units/modules/cloud/amazon/test_route53_zone.py610
-rw-r--r--test/units/modules/cloud/amazon/test_s3_bucket_notification.py262
-rw-r--r--test/units/plugins/connection/test_aws_ssm.py194
629 files changed, 0 insertions, 112826 deletions
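After this change the modules listed above are provided by the collection named
in the commit message instead of the ansible/ansible tree, so playbooks refer to
them by their fully qualified collection name. A minimal, illustrative task
(the 'community.amazon' namespace is taken from the commit message and
aws_acm_info from the file list above; treat the exact names as assumptions):

    - name: List ACM certificates via the migrated collection
      community.amazon.aws_acm_info:
        region: us-east-1
      register: acm_certs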
diff --git a/contrib/inventory/ec2.ini b/contrib/inventory/ec2.ini
deleted file mode 100644
index d5e4742032..0000000000
--- a/contrib/inventory/ec2.ini
+++ /dev/null
@@ -1,219 +0,0 @@
-# Ansible EC2 external inventory script settings
-#
-
-[ec2]
-
-# To talk to a private Eucalyptus instance, uncomment these lines and edit
-# eucalyptus_host to be the host name of your cloud controller.
-#eucalyptus = True
-#eucalyptus_host = clc.cloud.domain.org
-
-# AWS regions to make calls to. Set this to 'all' to make requests to all
-# regions in AWS and merge the results together. Alternatively, set this to a
-# comma-separated list of regions, e.g. 'us-east-1,us-west-1,us-west-2', and do
-# not provide the 'regions_exclude' option. If this is set to 'auto', the
-# AWS_REGION or AWS_DEFAULT_REGION environment variable will be read to
-# determine the region.
-regions = all
-regions_exclude = us-gov-west-1, cn-north-1
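-#
-# Example (illustrative values): restrict the inventory to two regions:
-#regions = us-east-1,us-west-2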
-
-# When generating inventory, Ansible needs to know how to address a server.
-# Each EC2 instance has a lot of variables associated with it. Here is the list:
-# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
-# Below are 2 variables that are used as the address of a server:
-# - destination_variable
-# - vpc_destination_variable
-
-# This is the normal destination variable to use. If you are running Ansible
-# from outside EC2, then 'public_dns_name' makes the most sense. If you are
-# running Ansible from within EC2, then perhaps you want to use the internal
-# address, and should set this to 'private_dns_name'. The key of an EC2 tag
-# may optionally be used; however, the boto instance variables take precedence
-# in the event of a collision.
-destination_variable = public_dns_name
-
-# This allows you to override the inventory_name with an ec2 variable, instead
-# of using the destination_variable above. Addressing (aka ansible_ssh_host)
-# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
-#hostname_variable = tag_Name
-
-# For servers inside a VPC, using DNS names may not make sense. When an
-# instance has 'subnet_id' set, this variable is used. If the subnet is public,
-# setting this to 'ip_address' will return the public IP address. For instances
-# in a private subnet, this should be set to 'private_ip_address', and Ansible
-# must be run from within EC2. The key of an EC2 tag may optionally be used;
-# however, the boto instance variables take precedence in the event of a
-# collision.
-# WARNING: instances in a private VPC _without_ a public IP address will not be
-# listed in the inventory until you set:
-# vpc_destination_variable = private_ip_address
-vpc_destination_variable = ip_address
-
-# The following two settings allow flexible ansible host naming based on a
-# python format string and a comma-separated list of ec2 tags. Note that:
-#
-# 1) If the tags referenced are not present for some instances, empty strings
-# will be substituted in the format string.
-# 2) This overrides both destination_variable and vpc_destination_variable.
-#
-#destination_format = {0}.{1}.example.com
-#destination_format_tags = Name,environment
-
-# To tag instances on EC2 with the resource records that point to them from
-# Route53, set 'route53' to True.
-route53 = False
-
-# To use Route53 records as the inventory hostnames, uncomment and set
-# to equal the domain name you wish to use. You must also have 'route53' (above)
-# set to True.
-# route53_hostnames = .example.com
-
-# To exclude RDS instances from the inventory, uncomment and set to False.
-#rds = False
-
-# To exclude ElastiCache instances from the inventory, uncomment and set to False.
-#elasticache = False
-
-# Additionally, you can specify a comma-separated list of zones to exclude from
-# the Route53 lookup in 'route53_excluded_zones'.
-# route53_excluded_zones = samplezone1.com, samplezone2.com
-
-# By default, only EC2 instances in the 'running' state are returned. Set
-# 'all_instances' to True to return all instances regardless of state.
-all_instances = False
-
-# By default, only EC2 instances in the 'running' state are returned. Specify
-# EC2 instance states to return as a comma-separated list. This
-# option is overridden when 'all_instances' is True.
-# instance_states = pending, running, shutting-down, terminated, stopping, stopped
-
-# By default, only RDS instances in the 'available' state are returned. Set
-# 'all_rds_instances' to True to return all RDS instances regardless of state.
-all_rds_instances = False
-
-# Include RDS cluster information (Aurora etc.)
-include_rds_clusters = False
-
-# By default, only ElastiCache clusters and nodes in the 'available' state
-# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
-# to True to return all ElastiCache clusters and nodes, regardless of state.
-#
-# Note that all_elasticache_nodes only applies to listed clusters. That means
-# if you set all_elasticache_clusters to False, no nodes will be returned from
-# unavailable clusters, regardless of their state and of what you set for
-# all_elasticache_nodes.
-all_elasticache_replication_groups = False
-all_elasticache_clusters = False
-all_elasticache_nodes = False
-
-# API calls to EC2 are slow. For this reason, we cache the results of an API
-# call. Set this to the path you want cache files to be written to. Two files
-# will be written to this directory:
-# - ansible-ec2.cache
-# - ansible-ec2.index
-cache_path = ~/.ansible/tmp
-
-# The number of seconds a cache file is considered valid. After this many
-# seconds, a new API call will be made, and the cache file will be updated.
-# To disable the cache, set this value to 0
-cache_max_age = 300
-
-# Organize groups into a nested hierarchy instead of a flat namespace.
-nested_groups = False
-
-# Replace dashes with underscores when creating group names, to avoid issues
-# with Ansible.
-replace_dash_in_groups = True
-
-# If set to true, any tag of the form "a,b,c" is expanded into a list
-# and the results are used to create additional tag_* inventory groups.
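-# For example (illustrative): with expand_csv_tags = True, an instance tagged
-# roles=web,db would appear in both the tag_roles_web and tag_roles_db groups.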
-expand_csv_tags = False
-
-# The EC2 inventory output can become very large. To manage its size,
-# configure which groups should be created.
-group_by_instance_id = True
-group_by_region = True
-group_by_availability_zone = True
-group_by_aws_account = False
-group_by_ami_id = True
-group_by_instance_type = True
-group_by_instance_state = False
-group_by_platform = True
-group_by_key_pair = True
-group_by_vpc_id = True
-group_by_security_group = True
-group_by_tag_keys = True
-group_by_tag_none = True
-group_by_route53_names = True
-group_by_rds_engine = True
-group_by_rds_parameter_group = True
-group_by_elasticache_engine = True
-group_by_elasticache_cluster = True
-group_by_elasticache_parameter_group = True
-group_by_elasticache_replication_group = True
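-# For example (illustrative): with group_by_region = True and
-# replace_dash_in_groups = True, an instance in us-east-1 is placed in a group
-# named us_east_1.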
-
-# If you only want to include hosts that match a certain regular expression
-# pattern_include = staging-*
-
-# If you want to exclude any hosts that match a certain regular expression
-# pattern_exclude = staging-*
-
-# Instance filters can be used to control which instances are retrieved for
-# inventory. For the full list of possible filters, please read the EC2 API
-# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
-# Filters are key/value pairs separated by '='; to list multiple filters,
-# separate them with commas. To "AND" criteria together, use "&". Note that
-# "&" is not useful in combination with stack_filters, so such usage is not
-# allowed.
-# See examples below.
-
-# If you want to apply multiple filters simultaneously, set stack_filters to
-# True. The default behaviour is to combine (union) the results of all filters.
-# Stacking instead applies the filters one after another, so an instance must
-# match every filter (for example, filtering by environment and then by type
-# of host).
-stack_filters = False
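-
-# Example (illustrative): stack two filters so that only staging t1.micro
-# instances are returned:
-#stack_filters = True
-#instance_filters = tag:env=staging,instance-type=t1.micro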
-
-# Retrieve only instances with (key=value) env=staging tag
-# instance_filters = tag:env=staging
-
-# Retrieve only instances with role=webservers OR role=dbservers tag
-# instance_filters = tag:role=webservers,tag:role=dbservers
-
-# Retrieve only t1.micro instances OR instances with tag env=staging
-# instance_filters = instance-type=t1.micro,tag:env=staging
-
-# You can also use wildcards in filter values. The filter below lists instances
-# whose Name tag matches webservers1*
-# (e.g. webservers15, webservers1a, webservers123)
-# instance_filters = tag:Name=webservers1*
-
-# Retrieve only instances of type t1.micro that also have tag env=stage
-# instance_filters = instance-type=t1.micro&tag:env=stage
-
-# Retrieve instances of type t1.micro AND tag env=stage, as well as any
-# instances of type m3.large, regardless of env tag
-# instance_filters = instance-type=t1.micro&tag:env=stage,instance-type=m3.large
-
-# An IAM role can be assumed, so all requests are run as that role.
-# This can be useful for connecting across different accounts, or for limiting
-# user access.
-# iam_role = role-arn
-
-# A boto configuration profile may be used to separate out credentials
-# see https://boto.readthedocs.io/en/latest/boto_config_tut.html
-# boto_profile = some-boto-profile-name
-
-
-[credentials]
-
-# The AWS credentials can optionally be specified here. Credentials specified
-# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
-# AWS_PROFILE is set, or if the boto_profile property above is set.
-#
-# Supplying AWS credentials here is not recommended, as it introduces
-# non-trivial security concerns. When going down this route, please make sure
-# to set access permissions for this file correctly, e.g. handle it the same
-# way as you would a private SSH key.
-#
-# Unlike the boto and AWS configure files, this section does not support
-# profiles.
-#
-# aws_access_key_id = AXXXXXXXXXXXXXX
-# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
-# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
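-#
-# (Illustrative:) if you do keep credentials in this file, restrict who can
-# read it, for example with: chmod 600 ec2.ini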
diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py
deleted file mode 100755
index feb3153987..0000000000
--- a/contrib/inventory/ec2.py
+++ /dev/null
@@ -1,1712 +0,0 @@
-#!/usr/bin/env python
-
-'''
-EC2 external inventory script
-=================================
-
-Generates inventory that Ansible can understand by making API requests to
-AWS EC2 using the Boto library.
-
-NOTE: This script assumes Ansible is being executed where the environment
-variables needed for Boto have already been set:
- export AWS_ACCESS_KEY_ID='AK123'
- export AWS_SECRET_ACCESS_KEY='abc123'
-
-If 'regions' is set to 'auto', the AWS_REGION or AWS_DEFAULT_REGION
-environment variable is read to determine the region.
-
-This script also assumes that there is an ec2.ini file alongside it. To specify a
-different path to ec2.ini, define the EC2_INI_PATH environment variable:
-
- export EC2_INI_PATH=/path/to/my_ec2.ini
-
-If you're using Eucalyptus you need to set the above variables and also
-define:
-
- export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
-
-If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
-using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
-the AWS_PROFILE variable:
-
- AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
-
-For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
-
-You can filter for specific EC2 instances by creating an environment variable
-named EC2_INSTANCE_FILTERS, which has the same format as the instance_filters
-entry documented in ec2.ini. For example, to find all hosts whose name begins
-with 'webserver', one might use:
-
- export EC2_INSTANCE_FILTERS='tag:Name=webserver*'
-
-When run against a specific host, this script returns the following variables:
- - ec2_ami_launch_index
- - ec2_architecture
- - ec2_association
- - ec2_attachTime
- - ec2_attachment
- - ec2_attachmentId
- - ec2_block_devices
- - ec2_client_token
- - ec2_deleteOnTermination
- - ec2_description
- - ec2_deviceIndex
- - ec2_dns_name
- - ec2_eventsSet
- - ec2_group_name
- - ec2_hypervisor
- - ec2_id
- - ec2_image_id
- - ec2_instanceState
- - ec2_instance_type
- - ec2_ipOwnerId
- - ec2_ip_address
- - ec2_item
- - ec2_kernel
- - ec2_key_name
- - ec2_launch_time
- - ec2_monitored
- - ec2_monitoring
- - ec2_networkInterfaceId
- - ec2_ownerId
- - ec2_persistent
- - ec2_placement
- - ec2_platform
- - ec2_previous_state
- - ec2_private_dns_name
- - ec2_private_ip_address
- - ec2_publicIp
- - ec2_public_dns_name
- - ec2_ramdisk
- - ec2_reason
- - ec2_region
- - ec2_requester_id
- - ec2_root_device_name
- - ec2_root_device_type
- - ec2_security_group_ids
- - ec2_security_group_names
- - ec2_shutdown_state
- - ec2_sourceDestCheck
- - ec2_spot_instance_request_id
- - ec2_state
- - ec2_state_code
- - ec2_state_reason
- - ec2_status
- - ec2_subnet_id
- - ec2_tenancy
- - ec2_virtualization_type
- - ec2_vpc_id
-
-These variables are pulled out of a boto.ec2.instance object. The variable
-spellings are inconsistent (a mix of camelCase and underscores) because the
-script simply loops over every attribute the object exposes. Prefer the
-underscore variants when both exist.
-
-In addition, if an instance has AWS tags associated with it, each tag is a new
-variable named:
- - ec2_tag_[Key] = [Value]
-
-Security groups are comma-separated in 'ec2_security_group_ids' and
-'ec2_security_group_names'.
-
-When destination_format and destination_format_tags are specified,
-the destination address is built from the instance tags and attributes.
-The script first checks the user-defined tags, then the instance
-attributes, and finally falls back to 'nil' if neither is found.
-
-'my_instance': {
- 'region': 'us-east-1', # attribute
- 'availability_zone': 'us-east-1a', # attribute
- 'private_dns_name': '172.31.0.1', # attribute
- 'ec2_tag_deployment': 'blue', # tag
- 'ec2_tag_clusterid': 'ansible', # tag
- 'ec2_tag_Name': 'webserver', # tag
- ...
-}
-
-Inside the ec2.ini file, the following settings are specified:
-...
-destination_format: {0}-{1}-{2}-{3}
-destination_format_tags: Name,clusterid,deployment,private_dns_name
-...
-
-These settings would produce the following destination:
-'webserver-ansible-blue-172.31.0.1'
-'''
-
-# (c) 2012, Peter Sankauskas
-#
-# This file is part of Ansible,
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-######################################################################
-
-import sys
-import os
-import argparse
-import re
-from time import time
-from copy import deepcopy
-from datetime import date, datetime
-import boto
-from boto import ec2
-from boto import rds
-from boto import elasticache
-from boto import route53
-from boto import sts
-
-from ansible.module_utils import six
-from ansible.module_utils import ec2 as ec2_utils
-from ansible.module_utils.six.moves import configparser
-
-HAS_BOTO3 = False
-try:
- import boto3 # noqa
- HAS_BOTO3 = True
-except ImportError:
- pass
-
-from collections import defaultdict
-
-import json
-
-DEFAULTS = {
- 'all_elasticache_clusters': 'False',
- 'all_elasticache_nodes': 'False',
- 'all_elasticache_replication_groups': 'False',
- 'all_instances': 'False',
- 'all_rds_instances': 'False',
- 'aws_access_key_id': '',
- 'aws_secret_access_key': '',
- 'aws_security_token': '',
- 'boto_profile': '',
- 'cache_max_age': '300',
- 'cache_path': '~/.ansible/tmp',
- 'destination_variable': 'public_dns_name',
- 'elasticache': 'True',
- 'eucalyptus': 'False',
- 'eucalyptus_host': '',
- 'expand_csv_tags': 'False',
- 'group_by_ami_id': 'True',
- 'group_by_availability_zone': 'True',
- 'group_by_aws_account': 'False',
- 'group_by_elasticache_cluster': 'True',
- 'group_by_elasticache_engine': 'True',
- 'group_by_elasticache_parameter_group': 'True',
- 'group_by_elasticache_replication_group': 'True',
- 'group_by_instance_id': 'True',
- 'group_by_instance_state': 'False',
- 'group_by_instance_type': 'True',
- 'group_by_key_pair': 'True',
- 'group_by_platform': 'True',
- 'group_by_rds_engine': 'True',
- 'group_by_rds_parameter_group': 'True',
- 'group_by_region': 'True',
- 'group_by_route53_names': 'True',
- 'group_by_security_group': 'True',
- 'group_by_tag_keys': 'True',
- 'group_by_tag_none': 'True',
- 'group_by_vpc_id': 'True',
- 'hostname_variable': '',
- 'iam_role': '',
- 'include_rds_clusters': 'False',
- 'nested_groups': 'False',
- 'pattern_exclude': '',
- 'pattern_include': '',
- 'rds': 'False',
- 'regions': 'all',
- 'regions_exclude': 'us-gov-west-1, cn-north-1',
- 'replace_dash_in_groups': 'True',
- 'route53': 'False',
- 'route53_excluded_zones': '',
- 'route53_hostnames': '',
- 'stack_filters': 'False',
- 'vpc_destination_variable': 'ip_address'
-}
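-
-# NB: the values above are all strings because this dict is passed to
-# configparser as its set of defaults (see read_settings below).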
-
-
-class Ec2Inventory(object):
-
- def _empty_inventory(self):
- return {"_meta": {"hostvars": {}}}
-
- def _json_serial(self, obj):
- """JSON serializer for objects not serializable by default json code"""
-
- if isinstance(obj, (datetime, date)):
- return obj.isoformat()
- raise TypeError("Type %s not serializable" % type(obj))
-
- def __init__(self):
- ''' Main execution path '''
-
- # Inventory grouped by instance IDs, tags, security groups, regions,
- # and availability zones
- self.inventory = self._empty_inventory()
-
- self.aws_account_id = None
-
- # Index of hostname (address) to instance ID
- self.index = {}
-
- # Boto profile to use (if any)
- self.boto_profile = None
-
- # AWS credentials.
- self.credentials = {}
-
- # Read settings and parse CLI arguments
- self.parse_cli_args()
- self.read_settings()
-
- # Make sure that profile_name is not passed at all if not set
- # as pre 2.24 boto will fall over otherwise
- if self.boto_profile:
- if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
- self.fail_with_error("boto version must be >= 2.24 to use profile")
-
- # Cache
- if self.args.refresh_cache:
- self.do_api_calls_update_cache()
- elif not self.is_cache_valid():
- self.do_api_calls_update_cache()
-
- # Data to print
- if self.args.host:
- data_to_print = self.get_host_info()
-
- elif self.args.list:
- # Display list of instances for inventory
- if self.inventory == self._empty_inventory():
- data_to_print = self.get_inventory_from_cache()
- else:
- data_to_print = self.json_format_dict(self.inventory, True)
-
- print(data_to_print)
-
- def is_cache_valid(self):
- ''' Determines whether the cache files have expired or are still valid '''
-
- if os.path.isfile(self.cache_path_cache):
- mod_time = os.path.getmtime(self.cache_path_cache)
- current_time = time()
- if (mod_time + self.cache_max_age) > current_time:
- if os.path.isfile(self.cache_path_index):
- return True
-
- return False
-
- def read_settings(self):
- ''' Reads the settings from the ec2.ini file '''
-
- scriptbasename = __file__
- scriptbasename = os.path.basename(scriptbasename)
- scriptbasename = scriptbasename.replace('.py', '')
-
- defaults = {
- 'ec2': {
- 'ini_fallback': os.path.join(os.path.dirname(__file__), 'ec2.ini'),
- 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
- }
- }
-
- if six.PY3:
- config = configparser.ConfigParser(DEFAULTS)
- else:
- config = configparser.SafeConfigParser(DEFAULTS)
- ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path'])
- ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path))
-
- if not os.path.isfile(ec2_ini_path):
- ec2_ini_path = os.path.expanduser(defaults['ec2']['ini_fallback'])
-
- if os.path.isfile(ec2_ini_path):
- config.read(ec2_ini_path)
-
- # Add empty sections if they don't exist
- try:
- config.add_section('ec2')
- except configparser.DuplicateSectionError:
- pass
-
- try:
- config.add_section('credentials')
- except configparser.DuplicateSectionError:
- pass
-
- # is eucalyptus?
- self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
- self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
-
- # Regions
- self.regions = []
- config_regions = config.get('ec2', 'regions')
- if (config_regions == 'all'):
- if self.eucalyptus_host:
- self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
- else:
- config_regions_exclude = config.get('ec2', 'regions_exclude')
-
- for region_info in ec2.regions():
- if region_info.name not in config_regions_exclude:
- self.regions.append(region_info.name)
- else:
- self.regions = config_regions.split(",")
- if 'auto' in self.regions:
- env_region = os.environ.get('AWS_REGION')
- if env_region is None:
- env_region = os.environ.get('AWS_DEFAULT_REGION')
- self.regions = [env_region]
-
- # Destination addresses
- self.destination_variable = config.get('ec2', 'destination_variable')
- self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
- self.hostname_variable = config.get('ec2', 'hostname_variable')
-
- if config.has_option('ec2', 'destination_format') and \
- config.has_option('ec2', 'destination_format_tags'):
- self.destination_format = config.get('ec2', 'destination_format')
- self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
- else:
- self.destination_format = None
- self.destination_format_tags = None
-
- # Route53
- self.route53_enabled = config.getboolean('ec2', 'route53')
- self.route53_hostnames = config.get('ec2', 'route53_hostnames')
-
- self.route53_excluded_zones = []
- self.route53_excluded_zones = [a for a in config.get('ec2', 'route53_excluded_zones').split(',') if a]
-
- # Include RDS instances?
- self.rds_enabled = config.getboolean('ec2', 'rds')
-
- # Include RDS cluster instances?
- self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
-
- # Include ElastiCache instances?
- self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
-
- # Return all EC2 instances?
- self.all_instances = config.getboolean('ec2', 'all_instances')
-
- # Instance states to be gathered in inventory. Default is 'running'.
- # Setting 'all_instances' to 'yes' overrides this option.
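- # e.g. in ec2.ini (illustrative): instance_states = running, stopped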
- ec2_valid_instance_states = [
- 'pending',
- 'running',
- 'shutting-down',
- 'terminated',
- 'stopping',
- 'stopped'
- ]
- self.ec2_instance_states = []
- if self.all_instances:
- self.ec2_instance_states = ec2_valid_instance_states
- elif config.has_option('ec2', 'instance_states'):
- for instance_state in config.get('ec2', 'instance_states').split(','):
- instance_state = instance_state.strip()
- if instance_state not in ec2_valid_instance_states:
- continue
- self.ec2_instance_states.append(instance_state)
- else:
- self.ec2_instance_states = ['running']
-
- # Return all RDS instances? (if RDS is enabled)
- self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
-
- # Return all ElastiCache replication groups? (if ElastiCache is enabled)
- self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
-
- # Return all ElastiCache clusters? (if ElastiCache is enabled)
- self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
-
- # Return all ElastiCache nodes? (if ElastiCache is enabled)
- self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
-
- # boto configuration profile (prefer CLI argument then environment variables then config file)
- self.boto_profile = self.args.boto_profile or \
- os.environ.get('AWS_PROFILE') or \
- config.get('ec2', 'boto_profile')
-
- # AWS credentials (prefer environment variables)
- if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
- os.environ.get('AWS_PROFILE')):
-
- aws_access_key_id = config.get('credentials', 'aws_access_key_id')
- aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
- aws_security_token = config.get('credentials', 'aws_security_token')
-
- if aws_access_key_id:
- self.credentials = {
- 'aws_access_key_id': aws_access_key_id,
- 'aws_secret_access_key': aws_secret_access_key
- }
- if aws_security_token:
- self.credentials['security_token'] = aws_security_token
-
- # Cache related
- cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
- if self.boto_profile:
- cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
- if not os.path.exists(cache_dir):
- os.makedirs(cache_dir)
-
- cache_name = 'ansible-ec2'
- cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id'))
- if cache_id:
- cache_name = '%s-%s' % (cache_name, cache_id)
- cache_name += '-' + str(abs(hash(__file__)))[1:7]
- self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name)
- self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name)
- self.cache_max_age = config.getint('ec2', 'cache_max_age')
-
- self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
-
- # Configure nested groups instead of flat namespace.
- self.nested_groups = config.getboolean('ec2', 'nested_groups')
-
- # Replace dash or not in group names
- self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
-
- # IAM role to assume for connection
- self.iam_role = config.get('ec2', 'iam_role')
-
- # Configure which groups should be created.
-
- group_by_options = [a for a in DEFAULTS if a.startswith('group_by')]
- for option in group_by_options:
- setattr(self, option, config.getboolean('ec2', option))
-
- # Do we need to just include hosts that match a pattern?
- self.pattern_include = config.get('ec2', 'pattern_include')
- if self.pattern_include:
- self.pattern_include = re.compile(self.pattern_include)
-
- # Do we need to exclude hosts that match a pattern?
- self.pattern_exclude = config.get('ec2', 'pattern_exclude')
- if self.pattern_exclude:
- self.pattern_exclude = re.compile(self.pattern_exclude)
-
- # Do we want to stack multiple filters?
- self.stack_filters = config.getboolean('ec2', 'stack_filters')
-
- # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
- self.ec2_instance_filters = []
-
- if config.has_option('ec2', 'instance_filters') or 'EC2_INSTANCE_FILTERS' in os.environ:
- filters = os.getenv('EC2_INSTANCE_FILTERS', config.get('ec2', 'instance_filters') if config.has_option('ec2', 'instance_filters') else '')
-
- if self.stack_filters and '&' in filters:
- self.fail_with_error("AND filters along with stack_filter enabled is not supported.\n")
-
- filter_sets = [f for f in filters.split(',') if f]
-
- for filter_set in filter_sets:
- filters = {}
- filter_set = filter_set.strip()
- for instance_filter in filter_set.split("&"):
- instance_filter = instance_filter.strip()
- if not instance_filter or '=' not in instance_filter:
- continue
- filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
- if not filter_key:
- continue
- filters[filter_key] = filter_value
- self.ec2_instance_filters.append(filters.copy())
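- # Illustrative example (not executed): the value
- # 'instance-type=t1.micro&tag:env=stage,instance-type=m3.large'
- # parses into:
- #   [{'instance-type': 't1.micro', 'tag:env': 'stage'},
- #    {'instance-type': 'm3.large'}]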
-
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument('--host', action='store',
- help='Get all the variables about a specific instance')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
- parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
- help='Use boto profile for connections to EC2')
- self.args = parser.parse_args()
-
- def do_api_calls_update_cache(self):
- ''' Do API calls to each region, and save data in cache files '''
-
- if self.route53_enabled:
- self.get_route53_records()
-
- for region in self.regions:
- self.get_instances_by_region(region)
- if self.rds_enabled:
- self.get_rds_instances_by_region(region)
- if self.elasticache_enabled:
- self.get_elasticache_clusters_by_region(region)
- self.get_elasticache_replication_groups_by_region(region)
- if self.include_rds_clusters:
- self.include_rds_clusters_by_region(region)
-
- self.write_to_cache(self.inventory, self.cache_path_cache)
- self.write_to_cache(self.index, self.cache_path_index)
-
- def connect(self, region):
- ''' Create a connection to the API server '''
- if self.eucalyptus:
- conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
- conn.APIVersion = '2010-08-31'
- else:
- conn = self.connect_to_aws(ec2, region)
- return conn
-
- def boto_fix_security_token_in_profile(self, connect_args):
- ''' monkey patch for boto issue boto/boto#2100 '''
- profile = 'profile ' + self.boto_profile
- if boto.config.has_option(profile, 'aws_security_token'):
- connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
- return connect_args
-
- def connect_to_aws(self, module, region):
- connect_args = deepcopy(self.credentials)
-
- # only pass the profile name if it's set (as it is not supported by older boto versions)
- if self.boto_profile:
- connect_args['profile_name'] = self.boto_profile
- self.boto_fix_security_token_in_profile(connect_args)
- elif os.environ.get('AWS_SESSION_TOKEN'):
- connect_args['security_token'] = os.environ.get('AWS_SESSION_TOKEN')
-
- if self.iam_role:
- sts_conn = sts.connect_to_region(region, **connect_args)
- role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory')
- connect_args['aws_access_key_id'] = role.credentials.access_key
- connect_args['aws_secret_access_key'] = role.credentials.secret_key
- connect_args['security_token'] = role.credentials.session_token
-
- conn = module.connect_to_region(region, **connect_args)
- # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
- if conn is None:
- self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
- return conn
-
- def get_instances_by_region(self, region):
- ''' Makes an AWS EC2 API call to get the list of instances in a
- particular region '''
-
- try:
- conn = self.connect(region)
- reservations = []
- if self.ec2_instance_filters:
- if self.stack_filters:
- filters_dict = {}
- for filters in self.ec2_instance_filters:
- filters_dict.update(filters)
- reservations.extend(conn.get_all_instances(filters=filters_dict))
- else:
- for filters in self.ec2_instance_filters:
- reservations.extend(conn.get_all_instances(filters=filters))
- else:
- reservations = conn.get_all_instances()
-
- # Pull the tags back in a second step
- # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
- # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
- instance_ids = []
- for reservation in reservations:
- instance_ids.extend([instance.id for instance in reservation.instances])
-
- max_filter_value = 199
- tags = []
- for i in range(0, len(instance_ids), max_filter_value):
- tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]}))
-
- tags_by_instance_id = defaultdict(dict)
- for tag in tags:
- tags_by_instance_id[tag.res_id][tag.name] = tag.value
-
- if (not self.aws_account_id) and reservations:
- self.aws_account_id = reservations[0].owner_id
-
- for reservation in reservations:
- for instance in reservation.instances:
- instance.tags = tags_by_instance_id[instance.id]
- self.add_instance(instance, region)
-
- except boto.exception.BotoServerError as e:
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- else:
- backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
- error = "Error connecting to %s backend.\n%s" % (backend, e.message)
- self.fail_with_error(error, 'getting EC2 instances')
-
- def tags_match_filters(self, tags):
- ''' return True if given tags match configured filters '''
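- # Behaviour sketch (matching the logic below): with filters
- # [{'tag:env': 'stage'}, {'tag:role': 'web'}], tags {'env': 'stage'}
- # match when stack_filters is off (OR across filter sets); with
- # stack_filters on, every tag filter must match (AND semantics).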
- if not self.ec2_instance_filters:
- return True
-
- for filters in self.ec2_instance_filters:
- for filter_name, filter_value in filters.items():
- if filter_name[:4] != 'tag:':
- continue
- filter_name = filter_name[4:]
- if filter_name not in tags:
- if self.stack_filters:
- return False
- continue
- if isinstance(filter_value, list):
- if self.stack_filters and tags[filter_name] not in filter_value:
- return False
- if not self.stack_filters and tags[filter_name] in filter_value:
- return True
- if isinstance(filter_value, six.string_types):
- if self.stack_filters and tags[filter_name] != filter_value:
- return False
- if not self.stack_filters and tags[filter_name] == filter_value:
- return True
-
- return self.stack_filters
-
- def get_rds_instances_by_region(self, region):
- ''' Makes an AWS API call to get the list of RDS instances in a
- particular region '''
-
- if not HAS_BOTO3:
- self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again",
- "getting RDS instances")
-
- client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
- db_instances = client.describe_db_instances()
-
- try:
- conn = self.connect_to_aws(rds, region)
- if conn:
- marker = None
- while True:
- instances = conn.get_all_dbinstances(marker=marker)
- marker = instances.marker
- for index, instance in enumerate(instances):
- # Add tags to instances.
- instance.arn = db_instances['DBInstances'][index]['DBInstanceArn']
- tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList']
- instance.tags = {}
- for tag in tags:
- instance.tags[tag['Key']] = tag['Value']
- if self.tags_match_filters(instance.tags):
- self.add_rds_instance(instance, region)
- if not marker:
- break
- except boto.exception.BotoServerError as e:
- error = e.reason
-
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- elif e.error_code == "OptInRequired":
- error = "RDS hasn't been enabled for this account yet. " \
- "You must either log in to the RDS service through the AWS console to enable it, " \
- "or set 'rds = False' in ec2.ini"
- elif e.reason != "Forbidden":
- error = "Looks like AWS RDS is down:\n%s" % e.message
- self.fail_with_error(error, 'getting RDS instances')
-
- def include_rds_clusters_by_region(self, region):
- if not HAS_BOTO3:
- self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
- "getting RDS clusters")
-
- client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
-
- marker, clusters = '', []
- while marker is not None:
- resp = client.describe_db_clusters(Marker=marker)
- clusters.extend(resp["DBClusters"])
- marker = resp.get('Marker', None)
-
- account_id = boto.connect_iam().get_user().arn.split(':')[4]
- c_dict = {}
- for c in clusters:
- if not self.ec2_instance_filters:
- matches_filter = True
- else:
- matches_filter = False
-
- try:
- # arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
- tags = client.list_tags_for_resource(
- ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
- c['Tags'] = tags['TagList']
-
- if self.ec2_instance_filters:
- for filters in self.ec2_instance_filters:
- for filter_key, filter_values in filters.items():
- # get AWS tag key e.g. tag:env will be 'env'
- tag_name = filter_key.split(":", 1)[1]
- # Filter values is a list (if you put multiple values for the same tag name)
- matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
-
- if matches_filter:
- # it matches a filter, so stop looking for further matches
- break
-
- if matches_filter:
- break
-
- except Exception as e:
- if 'DBInstanceNotFound' in str(e):
- # An AWS RDS bug (2016-01-06) means deletion does not fully complete,
- # leaving an 'empty' cluster behind.
- # Ignore errors when trying to find tags for these clusters.
- pass
-
- # ignore empty clusters caused by AWS bug
- if len(c['DBClusterMembers']) == 0:
- continue
- elif matches_filter:
- c_dict[c['DBClusterIdentifier']] = c
-
- self.inventory['db_clusters'] = c_dict
-
- def get_elasticache_clusters_by_region(self, region):
- ''' Makes an AWS API call to get the list of ElastiCache clusters
- (including node info) in a particular region. '''
-
- # The ElastiCache boto module doesn't provide a get_all_instances
- # method, so we need to call describe directly (it would be called by
- # the shorthand method anyway...)
- clusters = []
- try:
- conn = self.connect_to_aws(elasticache, region)
- if conn:
- # show_cache_node_info = True
- # because we also want nodes' information
- _marker = 1
- while _marker:
- if _marker == 1:
- _marker = None
- response = conn.describe_cache_clusters(None, None, _marker, True)
- _marker = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['Marker']
- try:
- # Boto also doesn't provide wrapper classes to CacheClusters or
- # CacheNodes. Because of that we can't make use of the get_list
- # method in the AWSQueryConnection. Let's do the work manually
- clusters = clusters + response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
- except KeyError as e:
- error = "ElastiCache query to AWS failed (unexpected format)."
- self.fail_with_error(error, 'getting ElastiCache clusters')
- except boto.exception.BotoServerError as e:
- error = e.reason
-
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- elif e.error_code == "OptInRequired":
- error = "ElastiCache hasn't been enabled for this account yet. " \
- "You must either log in to the ElastiCache service through the AWS console to enable it, " \
- "or set 'elasticache = False' in ec2.ini"
- elif e.reason != "Forbidden":
- error = "Looks like AWS ElastiCache is down:\n%s" % e.message
- self.fail_with_error(error, 'getting ElastiCache clusters')
-
- for cluster in clusters:
- self.add_elasticache_cluster(cluster, region)
-
- def get_elasticache_replication_groups_by_region(self, region):
- ''' Makes an AWS API call to get the list of ElastiCache replication
- groups in a particular region. '''
-
- # The ElastiCache boto module doesn't provide a get_all_instances
- # method, so we need to call describe directly (it would be called by
- # the shorthand method anyway...)
- try:
- conn = self.connect_to_aws(elasticache, region)
- if conn:
- response = conn.describe_replication_groups()
-
- except boto.exception.BotoServerError as e:
- error = e.reason
-
- if e.error_code == 'AuthFailure':
- error = self.get_auth_error_message()
- elif e.reason != "Forbidden":
- error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
- self.fail_with_error(error, 'getting ElastiCache replication groups')
-
- try:
- # Boto also doesn't provide wrapper classes to ReplicationGroups
- # Because of that we can't make use of the get_list method in the
- # AWSQueryConnection. Let's do the work manually
- replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
-
- except KeyError as e:
- error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
- self.fail_with_error(error, 'getting ElastiCache replication groups')
-
- for replication_group in replication_groups:
- self.add_elasticache_replication_group(replication_group, region)
-
- def get_auth_error_message(self):
- ''' create an informative error message if there is an issue authenticating'''
- errors = ["Authentication error retrieving ec2 inventory."]
- if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
- errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
- else:
- errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
-
- boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
- boto_config_found = [p for p in boto_paths if os.path.isfile(os.path.expanduser(p))]
- if len(boto_config_found) > 0:
- errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
- else:
- errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
-
- return '\n'.join(errors)
-
- def fail_with_error(self, err_msg, err_operation=None):
- ''' Log an error to stderr for ansible-playbook to consume, then exit '''
- if err_operation:
- err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
- err_msg=err_msg, err_operation=err_operation)
- sys.stderr.write(err_msg)
- sys.exit(1)
-
- def get_instance(self, region, instance_id):
- conn = self.connect(region)
-
- reservations = conn.get_all_instances([instance_id])
- for reservation in reservations:
- for instance in reservation.instances:
- return instance
-
- def add_instance(self, instance, region):
- ''' Adds an instance to the inventory and index, as long as it is
- addressable '''
-
- # Only return instances with desired instance states
- if instance.state not in self.ec2_instance_states:
- return
-
- # Select the best destination address
- # When destination_format and destination_format_tags are specified
- # the following code looks at the instance tags first, then the
- # instance attributes, and finally assigns 'nil' for the desired
- # destination format attribute if neither is found.
- if self.destination_format and self.destination_format_tags:
- dest_vars = []
- inst_tags = getattr(instance, 'tags')
- for tag in self.destination_format_tags:
- if tag in inst_tags:
- dest_vars.append(inst_tags[tag])
- elif hasattr(instance, tag):
- dest_vars.append(getattr(instance, tag))
- else:
- dest_vars.append('nil')
-
- dest = self.destination_format.format(*dest_vars)
- elif instance.subnet_id:
- dest = getattr(instance, self.vpc_destination_variable, None)
- if dest is None:
- dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
- else:
- dest = getattr(instance, self.destination_variable, None)
- if dest is None:
- dest = getattr(instance, 'tags').get(self.destination_variable, None)
-
- if not dest:
- # Skip instances we cannot address (e.g. private VPC subnet)
- return
-
- # Set the inventory name
- hostname = None
- if self.hostname_variable:
- if self.hostname_variable.startswith('tag_'):
- hostname = instance.tags.get(self.hostname_variable[4:], None)
- else:
- hostname = getattr(instance, self.hostname_variable)
-
- # set the hostname from route53
- if self.route53_enabled and self.route53_hostnames:
- route53_names = self.get_instance_route53_names(instance)
- for name in route53_names:
- if name.endswith(self.route53_hostnames):
- hostname = name
-
- # If we can't get a nice hostname, use the destination address
- if not hostname:
- hostname = dest
- # to_safe strips hostname characters like dots, so don't strip route53 hostnames
- elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames):
- hostname = hostname.lower()
- else:
- hostname = self.to_safe(hostname).lower()
-
- # if we only want to include hosts that match a pattern, skip those that don't
- if self.pattern_include and not self.pattern_include.match(hostname):
- return
-
- # if we need to exclude hosts that match a pattern, skip those
- if self.pattern_exclude and self.pattern_exclude.match(hostname):
- return
-
- # Add to index
- self.index[hostname] = [region, instance.id]
-
- # Inventory: Group by instance ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[instance.id] = [hostname]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', instance.id)
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone:
- self.push(self.inventory, instance.placement, hostname)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, instance.placement)
- self.push_group(self.inventory, 'zones', instance.placement)
-
- # Inventory: Group by Amazon Machine Image (AMI) ID
- if self.group_by_ami_id:
- ami_id = self.to_safe(instance.image_id)
- self.push(self.inventory, ami_id, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'images', ami_id)
-
- # Inventory: Group by instance type
- if self.group_by_instance_type:
- type_name = self.to_safe('type_' + instance.instance_type)
- self.push(self.inventory, type_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by instance state
- if self.group_by_instance_state:
- state_name = self.to_safe('instance_state_' + instance.state)
- self.push(self.inventory, state_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'instance_states', state_name)
-
- # Inventory: Group by platform
- if self.group_by_platform:
- if instance.platform:
- platform = self.to_safe('platform_' + instance.platform)
- else:
- platform = self.to_safe('platform_undefined')
- self.push(self.inventory, platform, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'platforms', platform)
-
- # Inventory: Group by key pair
- if self.group_by_key_pair and instance.key_name:
- key_name = self.to_safe('key_' + instance.key_name)
- self.push(self.inventory, key_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'keys', key_name)
-
- # Inventory: Group by VPC
- if self.group_by_vpc_id and instance.vpc_id:
- vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
- self.push(self.inventory, vpc_id_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'vpcs', vpc_id_name)
-
- # Inventory: Group by security group
- if self.group_by_security_group:
- try:
- for group in instance.groups:
- key = self.to_safe("security_group_" + group.name)
- self.push(self.inventory, key, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
- except AttributeError:
- self.fail_with_error('\n'.join(['Package boto seems a bit older.',
- 'Please upgrade boto >= 2.3.0.']))
-
- # Inventory: Group by AWS account ID
- if self.group_by_aws_account:
- self.push(self.inventory, self.aws_account_id, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'accounts', self.aws_account_id)
-
- # Inventory: Group by tag keys
- if self.group_by_tag_keys:
- for k, v in instance.tags.items():
- if self.expand_csv_tags and v and ',' in v:
- values = map(lambda x: x.strip(), v.split(','))
- else:
- values = [v]
-
- for v in values:
- if v:
- key = self.to_safe("tag_" + k + "=" + v)
- else:
- key = self.to_safe("tag_" + k)
- self.push(self.inventory, key, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
- if v:
- self.push_group(self.inventory, self.to_safe("tag_" + k), key)
-
- # Inventory: Group by Route53 domain names if enabled
- if self.route53_enabled and self.group_by_route53_names:
- route53_names = self.get_instance_route53_names(instance)
- for name in route53_names:
- self.push(self.inventory, name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'route53', name)
-
- # Global Tag: instances without tags
- if self.group_by_tag_none and len(instance.tags) == 0:
- self.push(self.inventory, 'tag_none', hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', 'tag_none')
-
- # Global Tag: tag all EC2 instances
- self.push(self.inventory, 'ec2', hostname)
-
- self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
- self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
-
- def add_rds_instance(self, instance, region):
- ''' Adds an RDS instance to the inventory and index, as long as it is
- addressable '''
-
- # Only want available instances unless all_rds_instances is True
- if not self.all_rds_instances and instance.status != 'available':
- return
-
- # Select the best destination address
- dest = instance.endpoint[0]
-
- if not dest:
- # Skip instances we cannot address (e.g. private VPC subnet)
- return
-
- # Set the inventory name
- hostname = None
- if self.hostname_variable:
- if self.hostname_variable.startswith('tag_'):
- hostname = instance.tags.get(self.hostname_variable[4:], None)
- else:
- hostname = getattr(instance, self.hostname_variable)
-
- # If we can't get a nice hostname, use the destination address
- if not hostname:
- hostname = dest
-
- hostname = self.to_safe(hostname).lower()
-
- # Add to index
- self.index[hostname] = [region, instance.id]
-
- # Inventory: Group by instance ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[instance.id] = [hostname]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', instance.id)
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone:
- self.push(self.inventory, instance.availability_zone, hostname)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, instance.availability_zone)
- self.push_group(self.inventory, 'zones', instance.availability_zone)
-
- # Inventory: Group by instance type
- if self.group_by_instance_type:
- type_name = self.to_safe('type_' + instance.instance_class)
- self.push(self.inventory, type_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by VPC
- if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
- vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
- self.push(self.inventory, vpc_id_name, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'vpcs', vpc_id_name)
-
- # Inventory: Group by security group
- if self.group_by_security_group:
- try:
- if instance.security_group:
- key = self.to_safe("security_group_" + instance.security_group.name)
- self.push(self.inventory, key, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
-
- except AttributeError:
- self.fail_with_error('\n'.join(['Package boto seems a bit older.',
- 'Please upgrade boto >= 2.3.0.']))
- # Inventory: Group by tag keys
- if self.group_by_tag_keys:
- for k, v in instance.tags.items():
- if self.expand_csv_tags and v and ',' in v:
- values = map(lambda x: x.strip(), v.split(','))
- else:
- values = [v]
-
- for v in values:
- if v:
- key = self.to_safe("tag_" + k + "=" + v)
- else:
- key = self.to_safe("tag_" + k)
- self.push(self.inventory, key, hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
- if v:
- self.push_group(self.inventory, self.to_safe("tag_" + k), key)
-
- # Inventory: Group by engine
- if self.group_by_rds_engine:
- self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
-
- # Inventory: Group by parameter group
- if self.group_by_rds_parameter_group:
- self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
-
- # Global Tag: instances without tags
- if self.group_by_tag_none and len(instance.tags) == 0:
- self.push(self.inventory, 'tag_none', hostname)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', 'tag_none')
-
- # Global Tag: all RDS instances
- self.push(self.inventory, 'rds', hostname)
-
- self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
- self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest
-
- def add_elasticache_cluster(self, cluster, region):
- ''' Adds an ElastiCache cluster to the inventory and index, as long as
- its nodes are addressable '''
-
- # Only want available clusters unless all_elasticache_clusters is True
- if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
- return
-
- # Select the best destination address
- if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
- # Memcached cluster
- dest = cluster['ConfigurationEndpoint']['Address']
- is_redis = False
- else:
- # Redis single-node cluster
- # Because all Redis clusters are single nodes, we'll merge the
- # info from the cluster with info about the node
- dest = cluster['CacheNodes'][0]['Endpoint']['Address']
- is_redis = True
-
- if not dest:
- # Skip clusters we cannot address (e.g. private VPC subnet)
- return
-
- # Add to index
- self.index[dest] = [region, cluster['CacheClusterId']]
-
- # Inventory: Group by instance ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[cluster['CacheClusterId']] = [dest]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
-
- # Inventory: Group by region
- if self.group_by_region and not is_redis:
- self.push(self.inventory, region, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone and not is_redis:
- self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
- self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
-
- # Inventory: Group by node type
- if self.group_by_instance_type and not is_redis:
- type_name = self.to_safe('type_' + cluster['CacheNodeType'])
- self.push(self.inventory, type_name, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by VPC (information not available in the current
- # AWS API version for ElastiCache)
-
- # Inventory: Group by security group
- if self.group_by_security_group and not is_redis:
-
- # Check for the existence of the 'SecurityGroups' key and also if
- # this key has some value. When the cluster is not placed in a SG
- # the query can return None here and cause an error.
- if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
- for security_group in cluster['SecurityGroups']:
- key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
- self.push(self.inventory, key, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
-
- # Inventory: Group by engine
- if self.group_by_elasticache_engine and not is_redis:
- self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
-
- # Inventory: Group by parameter group
- if self.group_by_elasticache_parameter_group:
- self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
-
- # Inventory: Group by replication group
- if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
- self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
-
- # Global Tag: all ElastiCache clusters
- self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
-
- host_info = self.get_host_info_dict_from_describe_dict(cluster)
-
- self.inventory["_meta"]["hostvars"][dest] = host_info
-
- # Add the nodes
- for node in cluster['CacheNodes']:
- self.add_elasticache_node(node, cluster, region)
-
- def add_elasticache_node(self, node, cluster, region):
- ''' Adds an ElastiCache node to the inventory and index, as long as
- it is addressable '''
-
- # Only want available nodes unless all_elasticache_nodes is True
- if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
- return
-
- # Select the best destination address
- dest = node['Endpoint']['Address']
-
- if not dest:
- # Skip nodes we cannot address (e.g. private VPC subnet)
- return
-
- node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
-
- # Add to index
- self.index[dest] = [region, node_id]
-
- # Inventory: Group by node ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[node_id] = [dest]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', node_id)
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone
- if self.group_by_availability_zone:
- self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
- if self.nested_groups:
- if self.group_by_region:
- self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
- self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
-
- # Inventory: Group by node type
- if self.group_by_instance_type:
- type_name = self.to_safe('type_' + cluster['CacheNodeType'])
- self.push(self.inventory, type_name, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'types', type_name)
-
- # Inventory: Group by VPC (information not available in the current
- # AWS API version for ElastiCache)
-
- # Inventory: Group by security group
- if self.group_by_security_group:
-
- # Check for the existence of the 'SecurityGroups' key and also if
- # this key has some value. When the cluster is not placed in a SG
- # the query can return None here and cause an error.
- if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
- for security_group in cluster['SecurityGroups']:
- key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
- self.push(self.inventory, key, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'security_groups', key)
-
- # Inventory: Group by engine
- if self.group_by_elasticache_engine:
- self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
-
- # Inventory: Group by parameter group (done at cluster level)
-
- # Inventory: Group by replication group (done at cluster level)
-
- # Inventory: Group by ElastiCache Cluster
- if self.group_by_elasticache_cluster:
- self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
-
- # Global Tag: all ElastiCache nodes
- self.push(self.inventory, 'elasticache_nodes', dest)
-
- host_info = self.get_host_info_dict_from_describe_dict(node)
-
- if dest in self.inventory["_meta"]["hostvars"]:
- self.inventory["_meta"]["hostvars"][dest].update(host_info)
- else:
- self.inventory["_meta"]["hostvars"][dest] = host_info
-
- def add_elasticache_replication_group(self, replication_group, region):
- ''' Adds an ElastiCache replication group to the inventory and index '''
-
- # Only want available clusters unless all_elasticache_replication_groups is True
- if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
- return
-
- # Skip clusters we cannot address (e.g. private VPC subnet or clustered redis)
- if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \
- replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None:
- return
-
- # Select the best destination address (PrimaryEndpoint)
- dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
-
- # Add to index
- self.index[dest] = [region, replication_group['ReplicationGroupId']]
-
- # Inventory: Group by ID (always a group of 1)
- if self.group_by_instance_id:
- self.inventory[replication_group['ReplicationGroupId']] = [dest]
- if self.nested_groups:
- self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
-
- # Inventory: Group by region
- if self.group_by_region:
- self.push(self.inventory, region, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'regions', region)
-
- # Inventory: Group by availability zone (doesn't apply to replication groups)
-
- # Inventory: Group by node type (doesn't apply to replication groups)
-
- # Inventory: Group by VPC (information not available in the current
- # AWS API version for replication groups)
-
- # Inventory: Group by security group (doesn't apply to replication groups)
- # Check this value at the cluster level
-
- # Inventory: Group by engine (replication groups are always Redis)
- if self.group_by_elasticache_engine:
- self.push(self.inventory, 'elasticache_redis', dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'elasticache_engines', 'redis')
-
- # Global Tag: all ElastiCache replication groups
- self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
-
- host_info = self.get_host_info_dict_from_describe_dict(replication_group)
-
- self.inventory["_meta"]["hostvars"][dest] = host_info
-
- def get_route53_records(self):
- ''' Get and store the map of resource records to domain names that
- point to them. '''
-
- if self.boto_profile:
- r53_conn = route53.Route53Connection(profile_name=self.boto_profile)
- else:
- r53_conn = route53.Route53Connection()
- all_zones = r53_conn.get_zones()
-
- route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones]
-
- self.route53_records = {}
-
- for zone in route53_zones:
- rrsets = r53_conn.get_all_rrsets(zone.id)
-
- for record_set in rrsets:
- record_name = record_set.name
-
- if record_name.endswith('.'):
- record_name = record_name[:-1]
-
- for resource in record_set.resource_records:
- self.route53_records.setdefault(resource, set())
- self.route53_records[resource].add(record_name)
-
- def get_instance_route53_names(self, instance):
- ''' Check if an instance is referenced in the records we have from
- Route53. If it is, return the list of domain names pointing to said
- instance. If nothing points to it, return an empty list. '''
-
- instance_attributes = ['public_dns_name', 'private_dns_name',
- 'ip_address', 'private_ip_address']
-
- name_list = set()
-
- for attrib in instance_attributes:
- try:
- value = getattr(instance, attrib)
- except AttributeError:
- continue
-
- if value in self.route53_records:
- name_list.update(self.route53_records[value])
-
- return list(name_list)
-
- def get_host_info_dict_from_instance(self, instance):
- instance_vars = {}
- for key in vars(instance):
- value = getattr(instance, key)
- key = self.to_safe('ec2_' + key)
-
- # Handle complex types
- # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
- if key == 'ec2__state':
- instance_vars['ec2_state'] = instance.state or ''
- instance_vars['ec2_state_code'] = instance.state_code
- elif key == 'ec2__previous_state':
- instance_vars['ec2_previous_state'] = instance.previous_state or ''
- instance_vars['ec2_previous_state_code'] = instance.previous_state_code
- elif isinstance(value, (int, bool)):
- instance_vars[key] = value
- elif isinstance(value, six.string_types):
- instance_vars[key] = value.strip()
- elif value is None:
- instance_vars[key] = ''
- elif key == 'ec2_region':
- instance_vars[key] = value.name
- elif key == 'ec2__placement':
- instance_vars['ec2_placement'] = value.zone
- elif key == 'ec2_tags':
- for k, v in value.items():
- if self.expand_csv_tags and ',' in v:
- v = list(map(lambda x: x.strip(), v.split(',')))
- key = self.to_safe('ec2_tag_' + k)
- instance_vars[key] = v
- elif key == 'ec2_groups':
- group_ids = []
- group_names = []
- for group in value:
- group_ids.append(group.id)
- group_names.append(group.name)
- instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
- instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
- elif key == 'ec2_block_device_mapping':
- instance_vars["ec2_block_devices"] = {}
- for k, v in value.items():
- instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id
- else:
- pass
- # TODO Product codes if someone finds them useful
- # print key
- # print type(value)
- # print value
-
- instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id
-
- return instance_vars
-
- def get_host_info_dict_from_describe_dict(self, describe_dict):
- ''' Parses the dictionary returned by the API call into a flat
- dictionary of parameters. This method should be used only when
- 'describe' is used directly because Boto doesn't provide specific
- classes. '''
-
- # I really don't agree with prefixing everything with 'ec2'
- # because EC2, RDS and ElastiCache are different services.
- # I'm just following the pattern used until now to not break any
- # compatibility.
-
- host_info = {}
- for key in describe_dict:
- value = describe_dict[key]
- key = self.to_safe('ec2_' + self.uncammelize(key))
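- # e.g. an input key of 'CacheClusterId' becomes 'ec2_cache_cluster_id'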
-
- # Handle complex types
-
- # Target: Memcached Cache Clusters
- if key == 'ec2_configuration_endpoint' and value:
- host_info['ec2_configuration_endpoint_address'] = value['Address']
- host_info['ec2_configuration_endpoint_port'] = value['Port']
-
- # Target: Cache Nodes and Redis Cache Clusters (single node)
- if key == 'ec2_endpoint' and value:
- host_info['ec2_endpoint_address'] = value['Address']
- host_info['ec2_endpoint_port'] = value['Port']
-
- # Target: Redis Replication Groups
- if key == 'ec2_node_groups' and value:
- host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
- host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
- replica_count = 0
- for node in value[0]['NodeGroupMembers']:
- if node['CurrentRole'] == 'primary':
- host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
- host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
- host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
- elif node['CurrentRole'] == 'replica':
- host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address']
- host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port']
- host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId']
- replica_count += 1
-
- # Target: Redis Replication Groups
- if key == 'ec2_member_clusters' and value:
- host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
-
- # Target: All Cache Clusters
- elif key == 'ec2_cache_parameter_group':
- host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
- host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
- host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
-
- # Target: Almost everything
- elif key == 'ec2_security_groups':
-
- # Skip if SecurityGroups is None
- # (it is possible to have the key defined but no value in it).
- if value is not None:
- sg_ids = []
- for sg in value:
- sg_ids.append(sg['SecurityGroupId'])
- host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
-
- # Target: Everything
- # Preserve booleans and integers
- elif isinstance(value, (int, bool)):
- host_info[key] = value
-
- # Target: Everything
- # Sanitize string values
- elif isinstance(value, six.string_types):
- host_info[key] = value.strip()
-
- # Target: Everything
- # Replace None by an empty string
- elif value is None:
- host_info[key] = ''
-
- else:
- # Remove non-processed complex types
- pass
-
- return host_info
-
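A short, self-contained sketch of the flattening for the Memcached configuration-endpoint case handled above; the describe value is hypothetical and the key is shown already prefixed and uncamelized:

    key = 'ec2_configuration_endpoint'
    value = {'Address': 'cache.example.com', 'Port': 11211}
    host_info = {}
    if key == 'ec2_configuration_endpoint' and value:
        # The nested Address/Port are lifted into flat, scalar host variables.
        host_info['ec2_configuration_endpoint_address'] = value['Address']
        host_info['ec2_configuration_endpoint_port'] = value['Port']
    assert host_info == {'ec2_configuration_endpoint_address': 'cache.example.com',
                         'ec2_configuration_endpoint_port': 11211}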
- def get_host_info(self):
- ''' Get variables about a specific host '''
-
- if len(self.index) == 0:
- # Need to load index from cache
- self.load_index_from_cache()
-
- if self.args.host not in self.index:
- # try updating the cache
- self.do_api_calls_update_cache()
- if self.args.host not in self.index:
- # host might not exist anymore
- return self.json_format_dict({}, True)
-
- (region, instance_id) = self.index[self.args.host]
-
- instance = self.get_instance(region, instance_id)
- return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
-
- def push(self, my_dict, key, element):
- ''' Push an element onto an array that may not have been defined in
- the dict '''
- group_info = my_dict.setdefault(key, [])
- if isinstance(group_info, dict):
- host_list = group_info.setdefault('hosts', [])
- host_list.append(element)
- else:
- group_info.append(element)
-
- def push_group(self, my_dict, key, element):
- ''' Push a group as a child of another group. '''
- parent_group = my_dict.setdefault(key, {})
- if not isinstance(parent_group, dict):
- parent_group = my_dict[key] = {'hosts': parent_group}
- child_groups = parent_group.setdefault('children', [])
- if element not in child_groups:
- child_groups.append(element)
-
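A sketch of how push() and push_group() cooperate to build the inventory structure; the group and host names are invented:

    inventory = {}

    # push(): append a host, creating the group's list on demand.
    inventory.setdefault('tag_env_prod', []).append('10.0.0.5')

    # push_group(): attach a child group, first converting a bare host list
    # into the {'hosts': [...], 'children': [...]} form.
    parent = inventory['tag_env_prod']
    if not isinstance(parent, dict):
        parent = inventory['tag_env_prod'] = {'hosts': parent}
    parent.setdefault('children', []).append('us_east_1')

    assert inventory == {'tag_env_prod': {'hosts': ['10.0.0.5'],
                                          'children': ['us_east_1']}}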
- def get_inventory_from_cache(self):
- ''' Reads the inventory from the cache file and returns it as a JSON
- object '''
-
- with open(self.cache_path_cache, 'r') as f:
- json_inventory = f.read()
- return json_inventory
-
- def load_index_from_cache(self):
- ''' Reads the index from the cache file and sets self.index '''
-
- with open(self.cache_path_index, 'rb') as f:
- self.index = json.load(f)
-
- def write_to_cache(self, data, filename):
- ''' Writes data in JSON format to a file '''
-
- json_data = self.json_format_dict(data, True)
- with open(filename, 'w') as f:
- f.write(json_data)
-
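A minimal round-trip sketch of the cache helpers above, using a temporary file in place of the real cache paths:

    import json
    import os
    import tempfile

    data = {'tag_env_prod': ['10.0.0.5']}
    path = os.path.join(tempfile.mkdtemp(), 'ansible-ec2.cache')
    with open(path, 'w') as f:
        f.write(json.dumps(data, sort_keys=True, indent=2))
    with open(path, 'r') as f:
        assert json.loads(f.read()) == data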
- def uncammelize(self, key):
- temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
- return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
-
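The two regex passes split on lower-to-upper transitions while keeping acronym runs together. A standalone copy for illustration:

    import re

    def uncammelize(key):
        temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()

    assert uncammelize('BlockDeviceMapping') == 'block_device_mapping'
    assert uncammelize('CacheNodeIdsToReboot') == 'cache_node_ids_to_reboot'
    assert uncammelize('DBInstanceIdentifier') == 'db_instance_identifier'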
- def to_safe(self, word):
- ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
- regex = r"[^A-Za-z0-9\_"
- if not self.replace_dash_in_groups:
- regex += r"\-"
- return re.sub(regex + "]", "_", word)
-
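Likewise, a standalone sketch of to_safe(); note the dash is only replaced when replace_dash_in_groups is enabled:

    import re

    def to_safe(word, replace_dash_in_groups=False):
        regex = r"[^A-Za-z0-9\_"
        if not replace_dash_in_groups:
            regex += r"\-"
        return re.sub(regex + "]", "_", word)

    assert to_safe('tag_Name=web server') == 'tag_Name_web_server'
    assert to_safe('us-east-1') == 'us-east-1'
    assert to_safe('us-east-1', replace_dash_in_groups=True) == 'us_east_1'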
- def json_format_dict(self, data, pretty=False):
- ''' Converts a dict to a JSON object and dumps it as a formatted
- string '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2, default=self._json_serial)
- else:
- return json.dumps(data, default=self._json_serial)
-
-
-if __name__ == '__main__':
- # Run the script
- Ec2Inventory()
diff --git a/lib/ansible/modules/cloud/amazon/_aws_acm_facts.py b/lib/ansible/modules/cloud/amazon/_aws_acm_facts.py
deleted file mode 120000
index 42dbcf0df9..0000000000
--- a/lib/ansible/modules/cloud/amazon/_aws_acm_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-aws_acm_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_aws_kms_facts.py b/lib/ansible/modules/cloud/amazon/_aws_kms_facts.py
deleted file mode 120000
index ccd052f519..0000000000
--- a/lib/ansible/modules/cloud/amazon/_aws_kms_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-aws_kms_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_aws_region_facts.py b/lib/ansible/modules/cloud/amazon/_aws_region_facts.py
deleted file mode 120000
index 03b0d29932..0000000000
--- a/lib/ansible/modules/cloud/amazon/_aws_region_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-aws_region_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_aws_s3_bucket_facts.py b/lib/ansible/modules/cloud/amazon/_aws_s3_bucket_facts.py
deleted file mode 120000
index 88f68b437a..0000000000
--- a/lib/ansible/modules/cloud/amazon/_aws_s3_bucket_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-aws_s3_bucket_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_aws_sgw_facts.py b/lib/ansible/modules/cloud/amazon/_aws_sgw_facts.py
deleted file mode 120000
index 0af0560a3b..0000000000
--- a/lib/ansible/modules/cloud/amazon/_aws_sgw_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-aws_sgw_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_aws_waf_facts.py b/lib/ansible/modules/cloud/amazon/_aws_waf_facts.py
deleted file mode 120000
index 3fd538387a..0000000000
--- a/lib/ansible/modules/cloud/amazon/_aws_waf_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-aws_waf_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_cloudfront_facts.py b/lib/ansible/modules/cloud/amazon/_cloudfront_facts.py
deleted file mode 120000
index 700056e714..0000000000
--- a/lib/ansible/modules/cloud/amazon/_cloudfront_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-cloudfront_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_cloudwatchlogs_log_group_facts.py b/lib/ansible/modules/cloud/amazon/_cloudwatchlogs_log_group_facts.py
deleted file mode 120000
index 402937478a..0000000000
--- a/lib/ansible/modules/cloud/amazon/_cloudwatchlogs_log_group_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-cloudwatchlogs_log_group_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_asg_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_asg_facts.py
deleted file mode 120000
index 88ec952458..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_asg_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_asg_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_customer_gateway_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_customer_gateway_facts.py
deleted file mode 120000
index 2e1aec0aba..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_customer_gateway_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_customer_gateway_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_eip_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_eip_facts.py
deleted file mode 120000
index 0ba519697b..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_eip_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_eip_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_elb_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_elb_facts.py
deleted file mode 120000
index a029c6d0b0..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_elb_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_elb_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_instance_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_instance_facts.py
deleted file mode 120000
index 7010fdcb95..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_instance_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_instance_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_lc_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_lc_facts.py
deleted file mode 120000
index cb62597c07..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_lc_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_lc_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_placement_group_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_placement_group_facts.py
deleted file mode 120000
index 7d33ef0167..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_placement_group_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_placement_group_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc_endpoint_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc_endpoint_facts.py
deleted file mode 120000
index d2a144a7b8..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc_endpoint_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vpc_endpoint_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc_igw_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc_igw_facts.py
deleted file mode 120000
index b3eeb3fee6..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc_igw_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vpc_igw_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc_nacl_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc_nacl_facts.py
deleted file mode 120000
index a88962d88f..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc_nacl_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vpc_nacl_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc_nat_gateway_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc_nat_gateway_facts.py
deleted file mode 120000
index fd96998997..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc_nat_gateway_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vpc_nat_gateway_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc_peering_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc_peering_facts.py
deleted file mode 120000
index 074baf65a0..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc_peering_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vpc_peering_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc_route_table_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc_route_table_facts.py
deleted file mode 120000
index ed0f72a1aa..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc_route_table_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vpc_route_table_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc_vgw_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc_vgw_facts.py
deleted file mode 120000
index bbcf44bef4..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc_vgw_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vpc_vgw_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc_vpn_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc_vpn_facts.py
deleted file mode 120000
index 671a1a3034..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc_vpn_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ec2_vpc_vpn_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ecs_service_facts.py b/lib/ansible/modules/cloud/amazon/_ecs_service_facts.py
deleted file mode 120000
index fead2dab76..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ecs_service_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ecs_service_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_ecs_taskdefinition_facts.py b/lib/ansible/modules/cloud/amazon/_ecs_taskdefinition_facts.py
deleted file mode 120000
index 0eb6f10b8f..0000000000
--- a/lib/ansible/modules/cloud/amazon/_ecs_taskdefinition_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-ecs_taskdefinition_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_efs_facts.py b/lib/ansible/modules/cloud/amazon/_efs_facts.py
deleted file mode 120000
index 781c362da4..0000000000
--- a/lib/ansible/modules/cloud/amazon/_efs_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-efs_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_elasticache_facts.py b/lib/ansible/modules/cloud/amazon/_elasticache_facts.py
deleted file mode 120000
index d6cd32eb0c..0000000000
--- a/lib/ansible/modules/cloud/amazon/_elasticache_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-elasticache_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_elb_application_lb_facts.py b/lib/ansible/modules/cloud/amazon/_elb_application_lb_facts.py
deleted file mode 120000
index c5ee0eaca8..0000000000
--- a/lib/ansible/modules/cloud/amazon/_elb_application_lb_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-elb_application_lb_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_elb_classic_lb_facts.py b/lib/ansible/modules/cloud/amazon/_elb_classic_lb_facts.py
deleted file mode 120000
index d182d5e144..0000000000
--- a/lib/ansible/modules/cloud/amazon/_elb_classic_lb_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-elb_classic_lb_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_elb_target_facts.py b/lib/ansible/modules/cloud/amazon/_elb_target_facts.py
deleted file mode 120000
index 897c23897d..0000000000
--- a/lib/ansible/modules/cloud/amazon/_elb_target_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-elb_target_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_elb_target_group_facts.py b/lib/ansible/modules/cloud/amazon/_elb_target_group_facts.py
deleted file mode 120000
index 3abd2ee5a6..0000000000
--- a/lib/ansible/modules/cloud/amazon/_elb_target_group_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-elb_target_group_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_iam_cert_facts.py b/lib/ansible/modules/cloud/amazon/_iam_cert_facts.py
deleted file mode 120000
index 63244caa58..0000000000
--- a/lib/ansible/modules/cloud/amazon/_iam_cert_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-iam_server_certificate_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_iam_mfa_device_facts.py b/lib/ansible/modules/cloud/amazon/_iam_mfa_device_facts.py
deleted file mode 120000
index 63be2b059f..0000000000
--- a/lib/ansible/modules/cloud/amazon/_iam_mfa_device_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-iam_mfa_device_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_iam_role_facts.py b/lib/ansible/modules/cloud/amazon/_iam_role_facts.py
deleted file mode 120000
index e15c454b71..0000000000
--- a/lib/ansible/modules/cloud/amazon/_iam_role_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-iam_role_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_iam_server_certificate_facts.py b/lib/ansible/modules/cloud/amazon/_iam_server_certificate_facts.py
deleted file mode 120000
index 63244caa58..0000000000
--- a/lib/ansible/modules/cloud/amazon/_iam_server_certificate_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-iam_server_certificate_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_lambda_facts.py b/lib/ansible/modules/cloud/amazon/_lambda_facts.py
deleted file mode 100644
index f332c2d9be..0000000000
--- a/lib/ansible/modules/cloud/amazon/_lambda_facts.py
+++ /dev/null
@@ -1,389 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['deprecated'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: lambda_facts
-deprecated:
- removed_in: '2.13'
- why: Deprecated in favour of C(_info) module.
- alternative: Use M(lambda_info) instead.
-short_description: Gathers AWS Lambda function details as Ansible facts
-description:
- - Gathers various details related to Lambda functions, including aliases, versions and event source mappings.
- Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases and
- M(lambda_event) to manage lambda event source mappings.
-
-version_added: "2.2"
-
-options:
- query:
- description:
- - Specifies the resource type for which to gather facts. Leave blank to retrieve all facts.
- choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ]
- default: "all"
- type: str
- function_name:
- description:
- - The name of the lambda function for which facts are requested.
- aliases: [ "function", "name"]
- type: str
- event_source_arn:
- description:
- - For query type 'mappings', this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream.
- type: str
-author: Pierre Jodouin (@pjodouin)
-requirements:
- - boto3
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
----
-# Simple example of listing all info for a function
-- name: List all facts for a specific function
- lambda_facts:
- query: all
- function_name: myFunction
- register: my_function_details
-# List all versions of a function
-- name: List function versions
- lambda_facts:
- query: versions
- function_name: myFunction
- register: my_function_versions
-# List all lambda functions
-- name: List all functions
- lambda_facts:
- query: all
- register: all_function_details
-- name: Show Lambda facts
- debug:
- var: lambda_facts
-'''
-
-RETURN = '''
----
-lambda_facts:
- description: lambda facts
- returned: success
- type: dict
-lambda_facts.function:
- description: lambda function list
- returned: success
- type: dict
-lambda_facts.function.TheName:
- description: lambda function information, including event, mapping, and version information
- returned: success
- type: dict
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-import json
-import datetime
-import sys
-import re
-
-
-try:
- from botocore.exceptions import ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def fix_return(node):
- """
- fixup returned dictionary
-
- :param node:
- :return:
- """
-
- if isinstance(node, datetime.datetime):
- node_value = str(node)
-
- elif isinstance(node, list):
- node_value = [fix_return(item) for item in node]
-
- elif isinstance(node, dict):
- node_value = dict([(item, fix_return(node[item])) for item in node.keys()])
-
- else:
- node_value = node
-
- return node_value
-
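A small demonstration of the recursive fix-up, assuming fix_return() as defined above; the payload is invented:

    import datetime

    payload = {
        'LastModified': datetime.datetime(2020, 3, 9, 9, 40, 27),
        'Versions': [{'Created': datetime.datetime(2020, 1, 1)}],
    }
    # Datetimes are stringified at any nesting depth; other values pass through.
    assert fix_return(payload) == {
        'LastModified': '2020-03-09 09:40:27',
        'Versions': [{'Created': '2020-01-01 00:00:00'}],
    }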
-
-def alias_details(client, module):
- """
- Returns list of aliases for a specified function.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- lambda_facts = dict()
-
- function_name = module.params.get('function_name')
- if function_name:
- params = dict()
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('next_marker'):
- params['Marker'] = module.params.get('next_marker')
- try:
- lambda_facts.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases'])
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_facts.update(aliases=[])
- else:
- module.fail_json_aws(e, msg="Trying to get aliases")
- else:
- module.fail_json(msg='Parameter function_name required for query=aliases.')
-
- return {function_name: camel_dict_to_snake_dict(lambda_facts)}
-
-
-def all_details(client, module):
- """
- Returns all lambda related facts.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- if module.params.get('max_items') or module.params.get('next_marker'):
- module.fail_json(msg='Cannot specify max_items or next_marker for query=all.')
-
- lambda_facts = dict()
-
- function_name = module.params.get('function_name')
- if function_name:
- lambda_facts[function_name] = {}
- lambda_facts[function_name].update(config_details(client, module)[function_name])
- lambda_facts[function_name].update(alias_details(client, module)[function_name])
- lambda_facts[function_name].update(policy_details(client, module)[function_name])
- lambda_facts[function_name].update(version_details(client, module)[function_name])
- lambda_facts[function_name].update(mapping_details(client, module)[function_name])
- else:
- lambda_facts.update(config_details(client, module))
-
- return lambda_facts
-
-
-def config_details(client, module):
- """
- Returns configuration details for one or all lambda functions.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- lambda_facts = dict()
-
- function_name = module.params.get('function_name')
- if function_name:
- try:
- lambda_facts.update(client.get_function_configuration(FunctionName=function_name))
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_facts.update(function={})
- else:
- module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name))
- else:
- params = dict()
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('next_marker'):
- params['Marker'] = module.params.get('next_marker')
-
- try:
- lambda_facts.update(function_list=client.list_functions(**params)['Functions'])
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_facts.update(function_list=[])
- else:
- module.fail_json_aws(e, msg="Trying to get function list")
-
- functions = dict()
- for func in lambda_facts.pop('function_list', []):
- functions[func['FunctionName']] = camel_dict_to_snake_dict(func)
- return functions
-
- return {function_name: camel_dict_to_snake_dict(lambda_facts)}
-
-
-def mapping_details(client, module):
- """
- Returns all lambda event source mappings.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- lambda_facts = dict()
- params = dict()
- function_name = module.params.get('function_name')
-
- if function_name:
- params['FunctionName'] = module.params.get('function_name')
-
- if module.params.get('event_source_arn'):
- params['EventSourceArn'] = module.params.get('event_source_arn')
-
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('next_marker'):
- params['Marker'] = module.params.get('next_marker')
-
- try:
- lambda_facts.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings'])
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_facts.update(mappings=[])
- else:
- module.fail_json_aws(e, msg="Trying to get source event mappings")
-
- if function_name:
- return {function_name: camel_dict_to_snake_dict(lambda_facts)}
-
- return camel_dict_to_snake_dict(lambda_facts)
-
-
-def policy_details(client, module):
- """
- Returns policy attached to a lambda function.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- if module.params.get('max_items') or module.params.get('next_marker'):
- module.fail_json(msg='Cannot specify max_items or next_marker for query=policy.')
-
- lambda_facts = dict()
-
- function_name = module.params.get('function_name')
- if function_name:
- try:
- # get_policy returns a JSON string so must convert to dict before reassigning to its key
- lambda_facts.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy']))
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_facts.update(policy={})
- else:
- module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name))
- else:
- module.fail_json(msg='Parameter function_name required for query=policy.')
-
- return {function_name: camel_dict_to_snake_dict(lambda_facts)}
-
-
-def version_details(client, module):
- """
- Returns all lambda function versions.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- lambda_facts = dict()
-
- function_name = module.params.get('function_name')
- if function_name:
- params = dict()
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('next_marker'):
- params['Marker'] = module.params.get('next_marker')
-
- try:
- lambda_facts.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions'])
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_facts.update(versions=[])
- else:
- module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name))
- else:
- module.fail_json(msg='Parameter function_name required for query=versions.')
-
- return {function_name: camel_dict_to_snake_dict(lambda_facts)}
-
-
-def main():
- """
- Main entry point.
-
- :return dict: ansible facts
- """
- argument_spec = dict(
- function_name=dict(required=False, default=None, aliases=['function', 'name']),
- query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'),
- event_source_arn=dict(required=False, default=None)
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[],
- required_together=[]
- )
-
- # validate function_name if present
- function_name = module.params['function_name']
- if function_name:
- if not re.search(r"^[\w\-:]+$", function_name):
- module.fail_json(
- msg='Function name {0} is invalid. Names must contain only alphanumeric characters, hyphens, underscores and colons.'.format(function_name)
- )
- if len(function_name) > 64:
- module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
-
- client = module.client('lambda')
-
- this_module = sys.modules[__name__]
-
- invocations = dict(
- aliases='alias_details',
- all='all_details',
- config='config_details',
- mappings='mapping_details',
- policy='policy_details',
- versions='version_details',
- )
-
- this_module_function = getattr(this_module, invocations[module.params['query']])
- all_facts = fix_return(this_module_function(client, module))
-
- results = dict(ansible_facts={'lambda_facts': {'function': all_facts}}, changed=False)
-
- if module.check_mode:
- results['msg'] = 'Check mode is set but ignored because this module only gathers facts.'
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
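The query parameter is dispatched to its handler by name via sys.modules and getattr; a minimal standalone sketch of that pattern:

    import sys

    def alias_details(client, module):
        return {'aliases': []}

    def version_details(client, module):
        return {'versions': []}

    invocations = dict(aliases='alias_details', versions='version_details')
    handler = getattr(sys.modules[__name__], invocations['aliases'])
    assert handler(None, None) == {'aliases': []}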
diff --git a/lib/ansible/modules/cloud/amazon/_rds_instance_facts.py b/lib/ansible/modules/cloud/amazon/_rds_instance_facts.py
deleted file mode 120000
index f3dda86727..0000000000
--- a/lib/ansible/modules/cloud/amazon/_rds_instance_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-rds_instance_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_rds_snapshot_facts.py b/lib/ansible/modules/cloud/amazon/_rds_snapshot_facts.py
deleted file mode 120000
index 7281d3b696..0000000000
--- a/lib/ansible/modules/cloud/amazon/_rds_snapshot_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-rds_snapshot_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_redshift_facts.py b/lib/ansible/modules/cloud/amazon/_redshift_facts.py
deleted file mode 120000
index 40a774faad..0000000000
--- a/lib/ansible/modules/cloud/amazon/_redshift_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-redshift_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/_route53_facts.py b/lib/ansible/modules/cloud/amazon/_route53_facts.py
deleted file mode 120000
index 6b40f0529b..0000000000
--- a/lib/ansible/modules/cloud/amazon/_route53_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-route53_info.py \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/aws_acm.py b/lib/ansible/modules/cloud/amazon/aws_acm.py
deleted file mode 100644
index 9504b4c078..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_acm.py
+++ /dev/null
@@ -1,397 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2019 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-#
-# Author:
-# - Matthew Davis <Matthew.Davis.2@team.telstra.com>
-# on behalf of Telstra Corporation Limited
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: aws_acm
-short_description: Upload and delete certificates in the AWS Certificate Manager service
-description:
- - Import and delete certificates in Amazon Web Service's Certificate Manager (AWS ACM).
- - >
- This module does not currently interact with AWS-provided certificates.
- It only manages certificates provided to AWS by the user.
- - The ACM API allows users to upload multiple certificates for the same domain name,
- and even multiple identical certificates.
- This module attempts to restrict such freedoms, to be idempotent, as per the Ansible philosophy.
- It does this through applying AWS resource "Name" tags to ACM certificates.
- - >
- When I(state=present),
- if there is one certificate in ACM
- with a C(Name) tag equal to the C(name_tag) parameter,
- and an identical body and chain,
- this task will succeed without effect.
- - >
- When I(state=present),
- if there is one certificate in ACM
- with a I(Name) tag equal to the I(name_tag) parameter,
- and a different body,
- this task will overwrite that certificate.
- - >
- When I(state=present),
- if there are multiple certificates in ACM
- with a I(Name) tag equal to the I(name_tag) parameter,
- this task will fail.
- - >
- When I(state=absent) and I(certificate_arn) is defined,
- this module will delete the ACM resource with that ARN if it exists in this region,
- and succeed without effect if it doesn't exist.
- - >
- When I(state=absent) and I(domain_name) is defined,
- this module will delete all ACM resources in this AWS region with a corresponding domain name.
- If there are none, it will succeed without effect.
- - >
- When I(state=absent) and I(certificate_arn) is not defined,
- and I(domain_name) is not defined,
- this module will delete all ACM resources in this AWS region with a corresponding I(Name) tag.
- If there are none, it will succeed without effect.
- - Note that this may not work properly with keys of size 4096 bits, due to a limitation of the ACM API.
-version_added: "2.10"
-options:
- certificate:
- description:
- - The body of the PEM encoded public certificate.
- - Required when I(state) is not C(absent).
- - If your certificate is in a file, use C(lookup('file', 'path/to/cert.pem')).
- type: str
-
- certificate_arn:
- description:
- The ARN of a certificate in ACM to delete.
- - Ignored when I(state=present).
- - If I(state=absent), you must provide one of I(certificate_arn), I(domain_name) or I(name_tag).
- - >
- If I(state=absent) and no resource exists with this ARN in this region,
- the task will succeed with no effect.
- - >
- If I(state=absent) and the corresponding resource exists in a different region,
- this task may report success without deleting that resource.
- type: str
- aliases: [arn]
-
- certificate_chain:
- description:
- - The body of the PEM encoded chain for your certificate.
- - If your certificate chain is in a file, use C(lookup('file', 'path/to/chain.pem')).
- Ignored when I(state=absent).
- type: str
-
- domain_name:
- description:
- - The domain name of the certificate.
- - >
- If I(state=absent) and I(domain_name) is specified,
- this task will delete all ACM certificates with this domain.
- - Exactly one of I(domain_name), I(name_tag) and I(certificate_arn) must be provided.
- - >
- If I(state=present) this must not be specified.
- (Since the domain name is encoded within the public certificate's body.)
- type: str
- aliases: [domain]
-
- name_tag:
- description:
- - The unique identifier for tagging resources using AWS tags, with key I(Name).
- - This can be any set of characters accepted by AWS for tag values.
- - >
- This is to ensure Ansible can treat certificates idempotently,
- even though the ACM API allows duplicate certificates.
- If I(state=present), this must be specified.
- - >
- If I(state=absent), you must provide exactly one of
- I(certificate_arn), I(domain_name) or I(name_tag).
- type: str
- aliases: [name]
-
- private_key:
- description:
- - The body of the PEM encoded private key.
- - Required when I(state=present).
- - Ignored when I(state=absent).
- - If your private key is in a file, use C(lookup('file', 'path/to/key.pem')).
- type: str
-
- state:
- description:
- - >
- If I(state=present), the specified public certificate and private key
- will be uploaded, with I(Name) tag equal to I(name_tag).
- - >
- If I(state=absent), any certificates in this region
- with a corresponding I(domain_name), I(name_tag) or I(certificate_arn)
- will be deleted.
- choices: [present, absent]
- default: present
- type: str
-requirements:
- - boto3
-author:
- - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-
-- name: upload a self-signed certificate
- aws_acm:
- certificate: "{{ lookup('file', 'cert.pem' ) }}"
- private_key: "{{ lookup('file', 'key.pem' ) }}"
- name_tag: my_cert # to be applied through an AWS tag as "Name":"my_cert"
- region: ap-southeast-2 # AWS region
-
-- name: create/update a certificate with a chain
- aws_acm:
- certificate: "{{ lookup('file', 'cert.pem' ) }}"
- private_key: "{{ lookup('file', 'key.pem' ) }}"
- name_tag: my_cert
- certificate_chain: "{{ lookup('file', 'chain.pem' ) }}"
- state: present
- region: ap-southeast-2
- register: cert_create
-
-- name: print ARN of cert we just created
- debug:
- var: cert_create.certificate.arn
-
-- name: delete the cert we just created
- aws_acm:
- name_tag: my_cert
- state: absent
- region: ap-southeast-2
-
-- name: delete a certificate with a particular ARN
- aws_acm:
- certificate_arn: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
- state: absent
- region: ap-southeast-2
-
-- name: delete all certificates with a particular domain name
- aws_acm:
- domain_name: acm.ansible.com
- state: absent
- region: ap-southeast-2
-
-'''
-
-RETURN = '''
-certificate:
- description: Information about the certificate which was uploaded
- type: complex
- returned: when I(state=present)
- contains:
- arn:
- description: The ARN of the certificate in ACM
- type: str
- returned: when I(state=present)
- sample: "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
- domain_name:
- description: The domain name encoded within the public certificate
- type: str
- returned: when I(state=present)
- sample: acm.ansible.com
-arns:
- description: A list of the ARNs of the certificates in ACM which were deleted
- type: list
- elements: str
- returned: when I(state=absent)
- sample:
- - "arn:aws:acm:ap-southeast-2:123456789012:certificate/01234567-abcd-abcd-abcd-012345678901"
-'''
-
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.acm import ACMServiceManager
-from ansible.module_utils._text import to_text
-import base64
-import re # regex library
-
-
-# Takes in two text arguments
-# Each a PEM encoded certificate
-# Or a chain of PEM encoded certificates
-# May include some lines between each cert in the chain, e.g. "Subject: ..."
-# Returns True iff the chains/certs are functionally identical (including chain order)
-def chain_compare(module, a, b):
-
- chain_a_pem = pem_chain_split(module, a)
- chain_b_pem = pem_chain_split(module, b)
-
- if len(chain_a_pem) != len(chain_b_pem):
- return False
-
- # Chain length is the same
- for (ca, cb) in zip(chain_a_pem, chain_b_pem):
- der_a = PEM_body_to_DER(module, ca)
- der_b = PEM_body_to_DER(module, cb)
- if der_a != der_b:
- return False
-
- return True
-
-
-# Takes in PEM encoded data with no headers
-# Returns the equivalent DER as a byte array
-def PEM_body_to_DER(module, pem):
- try:
- der = base64.b64decode(to_text(pem))
- except (ValueError, TypeError) as e:
- module.fail_json_aws(e, msg="Unable to decode certificate chain")
- return der
-
-
-# Store this globally to avoid repeated recompilation
-pem_chain_split_regex = re.compile(r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?([a-zA-Z0-9\+\/=\s]+)------?END [A-Z0-9. ]*CERTIFICATE------?")
-
-
-# Use regex to split up a chain or single cert into an array of base64 encoded data
-# Using "-----BEGIN CERTIFICATE-----" and "----END CERTIFICATE----"
-# Noting that some chains have non-pem data in between each cert
-# This function returns only what's between the headers, excluding the headers
-def pem_chain_split(module, pem):
-
- pem_arr = re.findall(pem_chain_split_regex, to_text(pem))
-
- if len(pem_arr) == 0:
- # This happens if the regex doesn't match at all
- module.fail_json(msg="Unable to split certificate chain. Possibly zero-length chain?")
-
- return pem_arr
-
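A hedged, self-contained sketch of the split-and-compare idea; the base64 body below is made up and is not a real certificate:

    import base64
    import re

    pem_regex = re.compile(
        r"------?BEGIN [A-Z0-9. ]*CERTIFICATE------?"
        r"([a-zA-Z0-9\+\/=\s]+)"
        r"------?END [A-Z0-9. ]*CERTIFICATE------?")

    pem = "-----BEGIN CERTIFICATE-----\nTUlJQmZh\n-----END CERTIFICATE-----\n"
    bodies = pem_regex.findall(pem)
    assert len(bodies) == 1
    # Comparing decoded DER bytes rather than raw text makes the comparison
    # insensitive to line wrapping and surrounding whitespace.
    assert base64.b64decode(bodies[0]) == base64.b64decode("TUlJQmZh")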
-
-def main():
- argument_spec = dict(
- certificate=dict(),
- certificate_arn=dict(aliases=['arn']),
- certificate_chain=dict(),
- domain_name=dict(aliases=['domain']),
- name_tag=dict(aliases=['name']),
- private_key=dict(no_log=True),
- state=dict(default='present', choices=['present', 'absent'])
- )
- required_if = [
- ['state', 'present', ['certificate', 'name_tag', 'private_key']],
- ]
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
- acm = ACMServiceManager(module)
-
- # Check argument requirements
- if module.params['state'] == 'present':
- if module.params['certificate_arn']:
- module.fail_json(msg="Parameter 'certificate_arn' is only valid if parameter 'state' is specified as 'absent'")
- else: # absent
- # exactly one of these should be specified
- absent_args = ['certificate_arn', 'domain_name', 'name_tag']
- if sum([(module.params[a] is not None) for a in absent_args]) != 1:
- for a in absent_args:
- module.debug("%s is %s" % (a, module.params[a]))
- module.fail_json(msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', certificate_arn' or 'domain_name' must be specified")
-
- if module.params['name_tag']:
- tags = dict(Name=module.params['name_tag'])
- else:
- tags = None
-
- client = module.client('acm')
-
- # fetch the list of certificates currently in ACM
- certificates = acm.get_certificates(client=client,
- module=module,
- domain_name=module.params['domain_name'],
- arn=module.params['certificate_arn'],
- only_tags=tags)
-
- module.debug("Found %d corresponding certificates in ACM" % len(certificates))
-
- if module.params['state'] == 'present':
- if len(certificates) > 1:
- msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params['name_tag']
- module.fail_json(msg=msg, certificates=certificates)
- elif len(certificates) == 1:
- # update the existing certificate
- module.debug("Existing certificate found in ACM")
- old_cert = certificates[0] # existing cert in ACM
- if ('tags' not in old_cert) or ('Name' not in old_cert['tags']) or (old_cert['tags']['Name'] != module.params['name_tag']):
- # shouldn't happen
- module.fail_json(msg="Internal error, unsure which certificate to update", certificate=old_cert)
-
- if 'certificate' not in old_cert:
- # shouldn't happen
- module.fail_json(msg="Internal error, unsure what the existing cert in ACM is", certificate=old_cert)
-
- # Are the existing certificate in ACM and the local certificate the same?
- same = True
- same &= chain_compare(module, old_cert['certificate'], module.params['certificate'])
- if module.params['certificate_chain']:
- # Need to test this
- # not sure if Amazon appends the cert itself to the chain when self-signed
- same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate_chain'])
- else:
- # When there is no chain with a cert
- # it seems Amazon returns the cert itself as the chain
- same &= chain_compare(module, old_cert['certificate_chain'], module.params['certificate'])
-
- if same:
- module.debug("Existing certificate in ACM is the same, doing nothing")
- domain = acm.get_domain_of_cert(client=client, module=module, arn=old_cert['certificate_arn'])
- module.exit_json(certificate=dict(domain_name=domain, arn=old_cert['certificate_arn']), changed=False)
- else:
- module.debug("Existing certificate in ACM is different, overwriting")
-
- # update cert in ACM
- arn = acm.import_certificate(client, module,
- certificate=module.params['certificate'],
- private_key=module.params['private_key'],
- certificate_chain=module.params['certificate_chain'],
- arn=old_cert['certificate_arn'],
- tags=tags)
- domain = acm.get_domain_of_cert(client=client, module=module, arn=arn)
- module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True)
- else: # len(certificates) == 0
- module.debug("No certificate in ACM. Creating new one.")
- arn = acm.import_certificate(client=client,
- module=module,
- certificate=module.params['certificate'],
- private_key=module.params['private_key'],
- certificate_chain=module.params['certificate_chain'],
- tags=tags)
- domain = acm.get_domain_of_cert(client=client, module=module, arn=arn)
-
- module.exit_json(certificate=dict(domain_name=domain, arn=arn), changed=True)
-
- else: # state == absent
- for cert in certificates:
- acm.delete_certificate(client, module, cert['certificate_arn'])
- module.exit_json(arns=[cert['certificate_arn'] for cert in certificates],
- changed=(len(certificates) > 0))
-
-
-if __name__ == '__main__':
- # tests()
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_acm_info.py b/lib/ansible/modules/cloud/amazon/aws_acm_info.py
deleted file mode 100644
index 0687e13d38..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_acm_info.py
+++ /dev/null
@@ -1,299 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: aws_acm_info
-short_description: Retrieve certificate information from AWS Certificate Manager service
-description:
- - Retrieve information for ACM certificates
- - This module was called C(aws_acm_facts) before Ansible 2.9. The usage did not change.
- - Note that this will not return information about uploaded keys of size 4096 bits, due to a limitation of the ACM API.
-version_added: "2.5"
-options:
- certificate_arn:
- description:
- - If provided, the results will be filtered to show only the certificate with this ARN.
- - If no certificate with this ARN exists, this task will fail.
- If a certificate with this ARN exists in a different region, this task will fail.
- aliases:
- - arn
- version_added: '2.10'
- type: str
- domain_name:
- description:
- The domain name of an ACM certificate to limit the search to.
- aliases:
- - name
- type: str
- statuses:
- description:
- Status to filter the certificate results.
- choices: ['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']
- type: list
- elements: str
- tags:
- description:
- - Filter results to show only certificates with tags that match all the tags specified here.
- type: dict
- version_added: '2.10'
-requirements:
- - boto3
-author:
- - Will Thames (@willthames)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: obtain all ACM certificates
- aws_acm_info:
-
-- name: obtain all information for a single ACM certificate
- aws_acm_info:
- domain_name: "*.example_com"
-
-- name: obtain all certificates pending validation
- aws_acm_info:
- statuses:
- - PENDING_VALIDATION
-
-- name: obtain all certificates with tag Name=foo and myTag=bar
- aws_acm_info:
- tags:
- Name: foo
- myTag: bar
-
-
-# The output is still a list of certificates, just one item long.
-- name: obtain information about a certificate with a particular ARN
- aws_acm_info:
- certificate_arn: "arn:aws:acm:ap-southeast-2:123456789876:certificate/abcdeabc-abcd-1234-4321-abcdeabcde12"
-
-'''
-
-RETURN = '''
-certificates:
- description: A list of certificates
- returned: always
- type: complex
- contains:
- certificate:
- description: The ACM Certificate body
- returned: when certificate creation is complete
- sample: '-----BEGIN CERTIFICATE-----\\nMII.....-----END CERTIFICATE-----\\n'
- type: str
- certificate_arn:
- description: Certificate ARN
- returned: always
- sample: arn:aws:acm:ap-southeast-2:123456789012:certificate/abcd1234-abcd-1234-abcd-123456789abc
- type: str
- certificate_chain:
- description: Full certificate chain for the certificate
- returned: when certificate creation is complete
- sample: '-----BEGIN CERTIFICATE-----\\nMII...\\n-----END CERTIFICATE-----\\n-----BEGIN CERTIFICATE-----\\n...'
- type: str
- created_at:
- description: Date certificate was created
- returned: always
- sample: '2017-08-15T10:31:19+10:00'
- type: str
- domain_name:
- description: Domain name for the certificate
- returned: always
- sample: '*.example.com'
- type: str
- domain_validation_options:
- description: Options used by ACM to validate the certificate
- returned: when certificate type is AMAZON_ISSUED
- type: complex
- contains:
- domain_name:
- description: Fully qualified domain name of the certificate
- returned: always
- sample: example.com
- type: str
- validation_domain:
- description: The domain name ACM used to send validation emails
- returned: always
- sample: example.com
- type: str
- validation_emails:
- description: A list of email addresses that ACM used to send domain validation emails
- returned: always
- sample:
- - admin@example.com
- - postmaster@example.com
- type: list
- elements: str
- validation_status:
- description: Validation status of the domain
- returned: always
- sample: SUCCESS
- type: str
- failure_reason:
- description: Reason certificate request failed
- returned: only when certificate issuing failed
- type: str
- sample: NO_AVAILABLE_CONTACTS
- in_use_by:
- description: A list of ARNs for the AWS resources that are using the certificate.
- returned: always
- sample: []
- type: list
- elements: str
- issued_at:
- description: Date certificate was issued
- returned: always
- sample: '2017-01-01T00:00:00+10:00'
- type: str
- issuer:
- description: Issuer of the certificate
- returned: always
- sample: Amazon
- type: str
- key_algorithm:
- description: Algorithm used to generate the certificate
- returned: always
- sample: RSA-2048
- type: str
- not_after:
- description: Date after which the certificate is not valid
- returned: always
- sample: '2019-01-01T00:00:00+10:00'
- type: str
- not_before:
- description: Date before which the certificate is not valid
- returned: always
- sample: '2017-01-01T00:00:00+10:00'
- type: str
- renewal_summary:
- description: Information about managed renewal process
- returned: when certificate is issued by Amazon and a renewal has been started
- type: complex
- contains:
- domain_validation_options:
- description: Options used by ACM to validate the certificate
- returned: when certificate type is AMAZON_ISSUED
- type: complex
- contains:
- domain_name:
- description: Fully qualified domain name of the certificate
- returned: always
- sample: example.com
- type: str
- validation_domain:
- description: The domain name ACM used to send validation emails
- returned: always
- sample: example.com
- type: str
- validation_emails:
- description: A list of email addresses that ACM used to send domain validation emails
- returned: always
- sample:
- - admin@example.com
- - postmaster@example.com
- type: list
- elements: str
- validation_status:
- description: Validation status of the domain
- returned: always
- sample: SUCCESS
- type: str
- renewal_status:
- description: Status of the domain renewal
- returned: always
- sample: PENDING_AUTO_RENEWAL
- type: str
- revocation_reason:
- description: Reason for certificate revocation
- returned: when the certificate has been revoked
- sample: SUPERCEDED
- type: str
- revoked_at:
- description: Date certificate was revoked
- returned: when the certificate has been revoked
- sample: '2017-09-01T10:00:00+10:00'
- type: str
- serial:
- description: The serial number of the certificate
- returned: always
- sample: 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
- type: str
- signature_algorithm:
- description: Algorithm used to sign the certificate
- returned: always
- sample: SHA256WITHRSA
- type: str
- status:
- description: Status of the certificate in ACM
- returned: always
- sample: ISSUED
- type: str
- subject:
- description: The name of the entity that is associated with the public key contained in the certificate
- returned: always
- sample: CN=*.example.com
- type: str
- subject_alternative_names:
- description: Subject Alternative Names for the certificate
- returned: always
- sample:
- - '*.example.com'
- type: list
- elements: str
- tags:
- description: Tags associated with the certificate
- returned: always
- type: dict
- sample:
- Application: helloworld
- Environment: test
- type:
- description: The source of the certificate
- returned: always
- sample: AMAZON_ISSUED
- type: str
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.acm import ACMServiceManager
-
-
-def main():
- argument_spec = dict(
- certificate_arn=dict(aliases=['arn']),
- domain_name=dict(aliases=['name']),
- statuses=dict(type='list', choices=['PENDING_VALIDATION', 'ISSUED', 'INACTIVE', 'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED', 'FAILED']),
- tags=dict(type='dict'),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- acm_info = ACMServiceManager(module)
-
- if module._name == 'aws_acm_facts':
- module.deprecate("The 'aws_acm_facts' module has been renamed to 'aws_acm_info'", version='2.13')
-
- client = module.client('acm')
-
- certificates = acm_info.get_certificates(client, module,
- domain_name=module.params['domain_name'],
- statuses=module.params['statuses'],
- arn=module.params['certificate_arn'],
- only_tags=module.params['tags'])
-
- if module.params['certificate_arn'] and len(certificates) != 1:
- module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params['certificate_arn'])
-
- module.exit_json(certificates=certificates)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_api_gateway.py b/lib/ansible/modules/cloud/amazon/aws_api_gateway.py
deleted file mode 100644
index 769e5b45c9..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_api_gateway.py
+++ /dev/null
@@ -1,375 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: aws_api_gateway
-short_description: Manage AWS API Gateway APIs
-description:
- - Allows for the management of API Gateway APIs
- - Normally you should give the api_id since there is no other
- stable guaranteed unique identifier for the API. If you do
- not give api_id then a new API will be created each time
- this is run.
- Beware that there are very hard limits on the rate at which
- you can call API Gateway's REST API. You may need to patch
- your boto. See U(https://github.com/boto/boto3/issues/876)
- and discuss with your AWS rep.
- - swagger_file and swagger_text are passed directly on to AWS
- transparently whilst swagger_dict is an Ansible dict which is
- converted to JSON before the API definitions are uploaded.
-version_added: '2.4'
-requirements: [ boto3 ]
-options:
- api_id:
- description:
- - The ID of the API you want to manage.
- type: str
- state:
- description: Create or delete API Gateway.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- swagger_file:
- description:
- - JSON or YAML file containing swagger definitions for API.
- Exactly one of swagger_file, swagger_text or swagger_dict must
- be present.
- type: path
- aliases: ['src', 'api_file']
- swagger_text:
- description:
- - Swagger definitions for API in JSON or YAML as a string direct
- from playbook.
- type: str
- swagger_dict:
- description:
- Swagger definitions for the API as an Ansible dictionary which will be
- converted to JSON and uploaded.
- type: json
- stage:
- description:
- - The name of the stage the API should be deployed to.
- type: str
- deploy_desc:
- description:
- - Description of the deployment - recorded and visible in the
- AWS console.
- default: Automatic deployment by Ansible.
- type: str
- cache_enabled:
- description:
- - Enable API GW caching of backend responses. Defaults to false.
- type: bool
- default: false
- version_added: '2.10'
- cache_size:
- description:
- - Size in GB of the API GW cache, becomes effective when cache_enabled is true.
- choices: ['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']
- type: str
- default: '0.5'
- version_added: '2.10'
- stage_variables:
- description:
- - ENV variables for the stage. Define a dict of key values pairs for variables.
- type: dict
- version_added: '2.10'
- stage_canary_settings:
- description:
- - Canary settings for the deployment of the stage.
- - 'Dict with following settings:'
- - 'percentTraffic: The percent (0-100) of traffic diverted to a canary deployment.'
- - 'deploymentId: The ID of the canary deployment.'
- - 'stageVariableOverrides: Stage variables overridden for a canary release deployment.'
- - 'useStageCache: A Boolean flag to indicate whether the canary deployment uses the stage cache or not.'
- - See docs U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/apigateway.html#APIGateway.Client.create_stage)
- type: dict
- version_added: '2.10'
- tracing_enabled:
- description:
- - Specifies whether active tracing with X-ray is enabled for the API GW stage.
- type: bool
- version_added: '2.10'
- endpoint_type:
- description:
- - Type of endpoint configuration, use C(EDGE) for an edge optimized API endpoint,
- C(REGIONAL) for just a regional deploy or C(PRIVATE) for a private API.
- This flag will only be used when creating a new API Gateway setup, not for updates.
- choices: ['EDGE', 'REGIONAL', 'PRIVATE']
- type: str
- default: EDGE
- version_added: '2.10'
-author:
- - 'Michael De La Rue (@mikedlr)'
-extends_documentation_fragment:
- - aws
- - ec2
-notes:
- - A future version of this module will probably use tags or another
- ID so that an API can be created only once.
- As an early workaround, an intermediate version will probably do
- the same using a tag embedded in the API name.
-
-'''
-
-EXAMPLES = '''
-- name: Setup AWS API Gateway setup on AWS and deploy API definition
- aws_api_gateway:
- swagger_file: my_api.yml
- stage: production
- cache_enabled: true
- cache_size: '1.6'
- tracing_enabled: true
- endpoint_type: EDGE
- state: present
-
-- name: Update API definition to deploy new version
- aws_api_gateway:
- api_id: 'abc123321cba'
- swagger_file: my_api.yml
- deploy_desc: Make auth fix available.
- cache_enabled: true
- cache_size: '1.6'
- endpoint_type: EDGE
- state: present
-
-- name: Update API definitions and settings and deploy as canary
- aws_api_gateway:
- api_id: 'abc123321cba'
- swagger_file: my_api.yml
- cache_enabled: true
- cache_size: '6.1'
- stage_canary_settings: { percentTraffic: 50.0, deploymentId: '123', useStageCache: True }
- state: present
-'''
-
-RETURN = '''
-api_id:
- description: API id of the API endpoint created
- returned: success
- type: str
- sample: '0ln4zq7p86'
-configure_response:
- description: AWS response from the API configure call
- returned: success
- type: dict
- sample: { api_key_source: "HEADER", created_at: "2020-01-01T11:37:59+00:00", id: "0ln4zq7p86" }
-deploy_response:
- description: AWS response from the API deploy call
- returned: success
- type: dict
- sample: { created_date: "2020-01-01T11:36:59+00:00", id: "rptv4b", description: "Automatic deployment by Ansible." }
-resource_actions:
- description: Actions performed against AWS API
- returned: always
- type: list
- sample: ["apigateway:CreateRestApi", "apigateway:CreateDeployment", "apigateway:PutRestApi"]
-'''
-
-import json
-
-try:
- import botocore
-except ImportError:
- # HAS_BOTOCORE taken care of in AnsibleAWSModule
- pass
-
-import traceback
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict)
-
-
-def main():
- argument_spec = dict(
- api_id=dict(type='str', required=False),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']),
- swagger_dict=dict(type='json', default=None),
- swagger_text=dict(type='str', default=None),
- stage=dict(type='str', default=None),
- deploy_desc=dict(type='str', default="Automatic deployment by Ansible."),
- cache_enabled=dict(type='bool', default=False),
- cache_size=dict(type='str', default='0.5', choices=['0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118', '237']),
- stage_variables=dict(type='dict', default={}),
- stage_canary_settings=dict(type='dict', default={}),
- tracing_enabled=dict(type='bool', default=False),
- endpoint_type=dict(type='str', default='EDGE', choices=['EDGE', 'REGIONAL', 'PRIVATE'])
- )
-
- mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']]
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=False,
- mutually_exclusive=mutually_exclusive,
- )
-
- api_id = module.params.get('api_id')
- state = module.params.get('state')
- swagger_file = module.params.get('swagger_file')
- swagger_dict = module.params.get('swagger_dict')
- swagger_text = module.params.get('swagger_text')
- endpoint_type = module.params.get('endpoint_type')
-
- client = module.client('apigateway')
-
- changed = True # always reported as changed until the module can detect when no change is needed
- conf_res = None
- dep_res = None
- del_res = None
-
- if state == "present":
- if api_id is None:
- api_id = create_empty_api(module, client, endpoint_type)
- api_data = get_api_definitions(module, swagger_file=swagger_file,
- swagger_dict=swagger_dict, swagger_text=swagger_text)
- conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id, api_data)
- if state == "absent":
- del_res = delete_rest_api(module, client, api_id)
-
- exit_args = {"changed": changed, "api_id": api_id}
-
- if conf_res is not None:
- exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res)
- if dep_res is not None:
- exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res)
- if del_res is not None:
- exit_args['delete_response'] = camel_dict_to_snake_dict(del_res)
-
- module.exit_json(**exit_args)
-
-
-def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_text=None):
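- # Exactly one of swagger_file / swagger_dict / swagger_text is expected here;
- # main() declares them mutually exclusive, and a missing definition is
- # caught by the fail_json below.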
- apidata = None
- if swagger_file is not None:
- try:
- with open(swagger_file) as f:
- apidata = f.read()
- except (IOError, OSError) as e:
- msg = "Failed trying to read swagger file {0}: {1}".format(str(swagger_file), str(e))
- module.fail_json(msg=msg, exception=traceback.format_exc())
- if swagger_dict is not None:
- apidata = json.dumps(swagger_dict)
- if swagger_text is not None:
- apidata = swagger_text
-
- if apidata is None:
- module.fail_json(msg='No swagger definition provided. One of swagger_file, swagger_dict or swagger_text is required.')
- return apidata
-
-
-def create_empty_api(module, client, endpoint_type):
- """
- Creates a new empty API, ready to be configured. The description is
- temporarily set to show the API as incomplete and should be
- updated when the API is configured.
- """
- desc = "Incomplete API creation by ansible aws_api_gateway module"
- try:
- awsret = create_api(client, name="ansible-temp-api", description=desc, endpoint_type=endpoint_type)
- except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
- module.fail_json_aws(e, msg="creating API")
- return awsret["id"]
-
-
-def delete_rest_api(module, client, api_id):
- """
- Deletes entire REST API setup
- """
- try:
- delete_response = delete_api(client, api_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
- module.fail_json_aws(e, msg="deleting API {0}".format(api_id))
- return delete_response
-
-
-def ensure_api_in_correct_state(module, client, api_id, api_data):
- """Make sure that we have the API configured and deployed as instructed.
-
- This function first configures the API by uploading the swagger
- definitions and then deploys them. Configuration and deployment
- should be closely tied because there is only one set of definitions;
- if we pause between the two steps, someone else may update the
- definitions and we would then deploy the wrong configuration.
- """
-
- configure_response = None
- try:
- configure_response = configure_api(client, api_id, api_data=api_data)
- except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
- module.fail_json_aws(e, msg="configuring API {0}".format(api_id))
-
- deploy_response = None
-
- stage = module.params.get('stage')
- if stage:
- try:
- deploy_response = create_deployment(client, api_id, **module.params)
- except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
- msg = "deploying api {0} to stage {1}".format(api_id, stage)
- module.fail_json_aws(e, msg)
-
- return configure_response, deploy_response
-
-
-retry_params = {"tries": 10, "delay": 5, "backoff": 1.2}
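-# A rough sketch of the resulting schedule (assuming AWSRetry multiplies the
-# delay by the backoff factor after each failed try): sleeps of roughly
-# 5s, 6s, 7.2s, 8.6s, ... totalling in the order of 100 seconds across 10 tries.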
-
-
-@AWSRetry.backoff(**retry_params)
-def create_api(client, name=None, description=None, endpoint_type=None):
- return client.create_rest_api(name=name, description=description, endpointConfiguration={'types': [endpoint_type]})
-
-
-@AWSRetry.backoff(**retry_params)
-def delete_api(client, api_id):
- return client.delete_rest_api(restApiId=api_id)
-
-
-@AWSRetry.backoff(**retry_params)
-def configure_api(client, api_id, api_data=None, mode="overwrite"):
- return client.put_rest_api(restApiId=api_id, mode=mode, body=api_data)
-
-
-@AWSRetry.backoff(**retry_params)
-def create_deployment(client, rest_api_id, **params):
- # Build the common arguments once; canarySettings is only passed through
- # when the user supplied a non-empty stage_canary_settings dict.
- kwargs = dict(
- restApiId=rest_api_id,
- stageName=params.get('stage'),
- description=params.get('deploy_desc'),
- cacheClusterEnabled=params.get('cache_enabled'),
- cacheClusterSize=params.get('cache_size'),
- variables=params.get('stage_variables'),
- tracingEnabled=params.get('tracing_enabled')
- )
- canary_settings = params.get('stage_canary_settings')
- if canary_settings:
- kwargs['canarySettings'] = canary_settings
-
- return client.create_deployment(**kwargs)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_application_scaling_policy.py b/lib/ansible/modules/cloud/amazon/aws_application_scaling_policy.py
deleted file mode 100644
index 6a3fca9a1e..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_application_scaling_policy.py
+++ /dev/null
@@ -1,543 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: aws_application_scaling_policy
-short_description: Manage Application Auto Scaling Scaling Policies
-notes:
- - For details of the parameters and return values, see
- U(http://boto3.readthedocs.io/en/latest/reference/services/application-autoscaling.html#ApplicationAutoScaling.Client.put_scaling_policy)
-description:
- - Creates, updates or removes a Scaling Policy
-version_added: "2.5"
-author:
- - Gustavo Maia (@gurumaia)
- - Chen Leibovich (@chenl87)
-requirements: [ json, botocore, boto3 ]
-options:
- state:
- description: Whether a policy should be present or absent
- required: yes
- choices: ['absent', 'present']
- type: str
- policy_name:
- description: The name of the scaling policy.
- required: yes
- type: str
- service_namespace:
- description: The namespace of the AWS service.
- required: yes
- choices: ['ecs', 'elasticmapreduce', 'ec2', 'appstream', 'dynamodb']
- type: str
- resource_id:
- description: The identifier of the resource associated with the scalable target.
- required: yes
- type: str
- scalable_dimension:
- description: The scalable dimension associated with the scalable target.
- required: yes
- choices: [ 'ecs:service:DesiredCount',
- 'ec2:spot-fleet-request:TargetCapacity',
- 'elasticmapreduce:instancegroup:InstanceCount',
- 'appstream:fleet:DesiredCapacity',
- 'dynamodb:table:ReadCapacityUnits',
- 'dynamodb:table:WriteCapacityUnits',
- 'dynamodb:index:ReadCapacityUnits',
- 'dynamodb:index:WriteCapacityUnits']
- type: str
- policy_type:
- description: The policy type.
- required: yes
- choices: ['StepScaling', 'TargetTrackingScaling']
- type: str
- step_scaling_policy_configuration:
- description: A step scaling policy. This parameter is required if you are creating a policy and the policy type is StepScaling.
- required: no
- type: dict
- target_tracking_scaling_policy_configuration:
- description:
- - A target tracking policy. This parameter is required if you are creating a new policy and the policy type is TargetTrackingScaling.
- - 'Full documentation of the suboptions can be found in the API documentation:'
- - 'U(https://docs.aws.amazon.com/autoscaling/application/APIReference/API_TargetTrackingScalingPolicyConfiguration.html)'
- required: no
- type: dict
- suboptions:
- CustomizedMetricSpecification:
- description: The metric to use if using a customized metric.
- type: dict
- DisableScaleIn:
- description: Whether scaling-in should be disabled.
- type: bool
- PredefinedMetricSpecification:
- description: The metric to use if using a predefined metric.
- type: dict
- ScaleInCooldown:
- description: The time (in seconds) to wait after scaling-in before another scaling action can occur.
- type: int
- ScaleOutCooldown:
- description: The time (in seconds) to wait after scaling-out before another scaling action can occur.
- type: int
- TargetValue:
- description: The target value for the metric
- type: float
- minimum_tasks:
- description: The minimum value to scale to in response to a scale in event.
- This parameter is required if you are creating the first policy for the specified service.
- required: no
- version_added: "2.6"
- type: int
- maximum_tasks:
- description: The maximum value to scale to in response to a scale out event.
- This parameter is required if you are creating the first policy for the specified service.
- required: no
- version_added: "2.6"
- type: int
- override_task_capacity:
- description: Whether or not to override the minimum and/or maximum task values if they are already set.
- required: no
- default: no
- type: bool
- version_added: "2.6"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create step scaling policy for ECS Service
-- name: scaling_policy
- aws_application_scaling_policy:
- state: present
- policy_name: test_policy
- service_namespace: ecs
- resource_id: service/poc-pricing/test-as
- scalable_dimension: ecs:service:DesiredCount
- policy_type: StepScaling
- minimum_tasks: 1
- maximum_tasks: 6
- step_scaling_policy_configuration:
- AdjustmentType: ChangeInCapacity
- StepAdjustments:
- - MetricIntervalUpperBound: 123
- ScalingAdjustment: 2
- - MetricIntervalLowerBound: 123
- ScalingAdjustment: -2
- Cooldown: 123
- MetricAggregationType: Average
-
-# Create target tracking scaling policy for ECS Service
-- name: scaling_policy
- aws_application_scaling_policy:
- state: present
- policy_name: test_policy
- service_namespace: ecs
- resource_id: service/poc-pricing/test-as
- scalable_dimension: ecs:service:DesiredCount
- policy_type: TargetTrackingScaling
- minimum_tasks: 1
- maximum_tasks: 6
- target_tracking_scaling_policy_configuration:
- TargetValue: 60
- PredefinedMetricSpecification:
- PredefinedMetricType: ECSServiceAverageCPUUtilization
- ScaleOutCooldown: 60
- ScaleInCooldown: 60
-
-# Remove scalable target for ECS Service
-- name: scaling_policy
- aws_application_scaling_policy:
- state: absent
- policy_name: test_policy
- policy_type: StepScaling
- service_namespace: ecs
- resource_id: service/cluster-name/service-name
- scalable_dimension: ecs:service:DesiredCount
-'''
-
-RETURN = '''
-alarms:
- description: List of the CloudWatch alarms associated with the scaling policy
- returned: when state present
- type: complex
- contains:
- alarm_arn:
- description: The Amazon Resource Name (ARN) of the alarm
- returned: when state present
- type: str
- alarm_name:
- description: The name of the alarm
- returned: when state present
- type: str
-service_namespace:
- description: The namespace of the AWS service.
- returned: when state present
- type: str
- sample: ecs
-resource_id:
- description: The identifier of the resource associated with the scalable target.
- returned: when state present
- type: str
- sample: service/cluster-name/service-name
-scalable_dimension:
- description: The scalable dimension associated with the scalable target.
- returned: when state present
- type: str
- sample: ecs:service:DesiredCount
-policy_arn:
- description: The Amazon Resource Name (ARN) of the scaling policy.
- returned: when state present
- type: str
-policy_name:
- description: The name of the scaling policy.
- returned: when state present
- type: str
-policy_type:
- description: The policy type.
- returned: when state present
- type: str
-min_capacity:
- description: The minimum value to scale to in response to a scale in event. Required if I(state) is C(present).
- returned: when state present
- type: int
- sample: 1
-max_capacity:
- description: The maximum value to scale to in response to a scale out event. Required if I(state) is C(present).
- returned: when state present
- type: int
- sample: 2
-role_arn:
- description: The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. Required if I(state) is C(present).
- returned: when state present
- type: str
- sample: arn:aws:iam::123456789123:role/roleName
-step_scaling_policy_configuration:
- description: The step scaling policy.
- returned: when state present and the policy type is StepScaling
- type: complex
- contains:
- adjustment_type:
- description: The adjustment type
- returned: when state present and the policy type is StepScaling
- type: str
- sample: "ChangeInCapacity, PercentChangeInCapacity, ExactCapacity"
- cooldown:
- description: The amount of time, in seconds, after a scaling activity completes
- where previous trigger-related scaling activities can influence future scaling events
- returned: when state present and the policy type is StepScaling
- type: int
- sample: 60
- metric_aggregation_type:
- description: The aggregation type for the CloudWatch metrics
- returned: when state present and the policy type is StepScaling
- type: str
- sample: "Average, Minimum, Maximum"
- step_adjustments:
- description: A set of adjustments that enable you to scale based on the size of the alarm breach
- returned: when state present and the policy type is StepScaling
- type: list
- elements: dict
-target_tracking_scaling_policy_configuration:
- description: The target tracking policy.
- returned: when state present and the policy type is TargetTrackingScaling
- type: complex
- contains:
- predefined_metric_specification:
- description: A predefined metric
- returned: when state present and the policy type is TargetTrackingScaling
- type: complex
- contains:
- predefined_metric_type:
- description: The metric type
- returned: when state present and the policy type is TargetTrackingScaling
- type: str
- sample: "ECSServiceAverageCPUUtilization, ECSServiceAverageMemoryUtilization"
- resource_label:
- description: Identifies the resource associated with the metric type
- returned: when metric type is ALBRequestCountPerTarget
- type: str
- scale_in_cooldown:
- description: The amount of time, in seconds, after a scale in activity completes before another scale in activity can start
- returned: when state present and the policy type is TargetTrackingScaling
- type: int
- sample: 60
- scale_out_cooldown:
- description: The amount of time, in seconds, after a scale out activity completes before another scale out activity can start
- returned: when state present and the policy type is TargetTrackingScaling
- type: int
- sample: 60
- target_value:
- description: The target value for the metric
- returned: when state present and the policy type is TargetTrackingScaling
- type: int
- sample: 70
-creation_time:
- description: The Unix timestamp for when the scalable target was created.
- returned: when state present
- type: str
- sample: '2017-09-28T08:22:51.881000-03:00'
-''' # NOQA
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import _camel_to_snake, camel_dict_to_snake_dict
-
-try:
- import botocore
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-
-# Merge the results of the scalable target creation and policy deletion/creation
-# There's no risk in overwriting values since keys present in both responses hold the same values in our case
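-# A small illustrative sketch (values made up): merging
-# {'changed': False, 'response': {'resource_id': 'svc/a/b'}} with
-# {'changed': True, 'response': {'policy_name': 'p1'}} yields
-# {'changed': True, 'response': {'resource_id': 'svc/a/b', 'policy_name': 'p1'}}.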
-def merge_results(scalable_target_result, policy_result):
- changed = scalable_target_result['changed'] or policy_result['changed']
-
- merged_response = scalable_target_result['response'].copy()
- merged_response.update(policy_result['response'])
-
- return {"changed": changed, "response": merged_response}
-
-
-def delete_scaling_policy(connection, module):
- changed = False
- try:
- scaling_policy = connection.describe_scaling_policies(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceId=module.params.get('resource_id'),
- ScalableDimension=module.params.get('scalable_dimension'),
- PolicyNames=[module.params.get('policy_name')],
- MaxResults=1
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to describe scaling policies")
-
- if scaling_policy['ScalingPolicies']:
- try:
- connection.delete_scaling_policy(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceId=module.params.get('resource_id'),
- ScalableDimension=module.params.get('scalable_dimension'),
- PolicyName=module.params.get('policy_name'),
- )
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to delete scaling policy")
-
- return {"changed": changed}
-
-
-def create_scalable_target(connection, module):
- changed = False
-
- try:
- scalable_targets = connection.describe_scalable_targets(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceIds=[
- module.params.get('resource_id'),
- ],
- ScalableDimension=module.params.get('scalable_dimension')
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to describe scalable targets")
-
- # Scalable target registration will occur if:
- # 1. There is no scalable target registered for this service
- # 2. A scalable target exists, different min/max values are defined and override is set to "yes"
- if (
- not scalable_targets['ScalableTargets']
- or (
- module.params.get('override_task_capacity')
- and (
- scalable_targets['ScalableTargets'][0]['MinCapacity'] != module.params.get('minimum_tasks')
- or scalable_targets['ScalableTargets'][0]['MaxCapacity'] != module.params.get('maximum_tasks')
- )
- )
- ):
- changed = True
- try:
- connection.register_scalable_target(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceId=module.params.get('resource_id'),
- ScalableDimension=module.params.get('scalable_dimension'),
- MinCapacity=module.params.get('minimum_tasks'),
- MaxCapacity=module.params.get('maximum_tasks')
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to register scalable target")
-
- try:
- response = connection.describe_scalable_targets(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceIds=[
- module.params.get('resource_id'),
- ],
- ScalableDimension=module.params.get('scalable_dimension')
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to describe scalable targets")
-
- if response['ScalableTargets']:
- snaked_response = camel_dict_to_snake_dict(response['ScalableTargets'][0])
- else:
- snaked_response = {}
-
- return {"changed": changed, "response": snaked_response}
-
-
-def create_scaling_policy(connection, module):
- try:
- scaling_policy = connection.describe_scaling_policies(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceId=module.params.get('resource_id'),
- ScalableDimension=module.params.get('scalable_dimension'),
- PolicyNames=[module.params.get('policy_name')],
- MaxResults=1
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to describe scaling policies")
-
- changed = False
-
- if scaling_policy['ScalingPolicies']:
- scaling_policy = scaling_policy['ScalingPolicies'][0]
- # check if the input parameters are equal to what's already configured
- for attr in ('PolicyName',
- 'ServiceNamespace',
- 'ResourceId',
- 'ScalableDimension',
- 'PolicyType',
- 'StepScalingPolicyConfiguration',
- 'TargetTrackingScalingPolicyConfiguration'):
- if attr in scaling_policy and scaling_policy[attr] != module.params.get(_camel_to_snake(attr)):
- changed = True
- scaling_policy[attr] = module.params.get(_camel_to_snake(attr))
- else:
- changed = True
- scaling_policy = {
- 'PolicyName': module.params.get('policy_name'),
- 'ServiceNamespace': module.params.get('service_namespace'),
- 'ResourceId': module.params.get('resource_id'),
- 'ScalableDimension': module.params.get('scalable_dimension'),
- 'PolicyType': module.params.get('policy_type'),
- 'StepScalingPolicyConfiguration': module.params.get('step_scaling_policy_configuration'),
- 'TargetTrackingScalingPolicyConfiguration': module.params.get('target_tracking_scaling_policy_configuration')
- }
-
- if changed:
- try:
- if module.params.get('step_scaling_policy_configuration'):
- connection.put_scaling_policy(
- PolicyName=scaling_policy['PolicyName'],
- ServiceNamespace=scaling_policy['ServiceNamespace'],
- ResourceId=scaling_policy['ResourceId'],
- ScalableDimension=scaling_policy['ScalableDimension'],
- PolicyType=scaling_policy['PolicyType'],
- StepScalingPolicyConfiguration=scaling_policy['StepScalingPolicyConfiguration']
- )
- elif module.params.get('target_tracking_scaling_policy_configuration'):
- connection.put_scaling_policy(
- PolicyName=scaling_policy['PolicyName'],
- ServiceNamespace=scaling_policy['ServiceNamespace'],
- ResourceId=scaling_policy['ResourceId'],
- ScalableDimension=scaling_policy['ScalableDimension'],
- PolicyType=scaling_policy['PolicyType'],
- TargetTrackingScalingPolicyConfiguration=scaling_policy['TargetTrackingScalingPolicyConfiguration']
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to create scaling policy")
-
- try:
- response = connection.describe_scaling_policies(
- ServiceNamespace=module.params.get('service_namespace'),
- ResourceId=module.params.get('resource_id'),
- ScalableDimension=module.params.get('scalable_dimension'),
- PolicyNames=[module.params.get('policy_name')],
- MaxResults=1
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to describe scaling policies")
-
- if response['ScalingPolicies']:
- snaked_response = camel_dict_to_snake_dict(response['ScalingPolicies'][0])
- else:
- snaked_response = {}
-
- return {"changed": changed, "response": snaked_response}
-
-
-def main():
- argument_spec = dict(
- state=dict(type='str', required=True, choices=['present', 'absent']),
- policy_name=dict(type='str', required=True),
- service_namespace=dict(type='str', required=True, choices=['appstream', 'dynamodb', 'ec2', 'ecs', 'elasticmapreduce']),
- resource_id=dict(type='str', required=True),
- scalable_dimension=dict(type='str',
- required=True,
- choices=['ecs:service:DesiredCount',
- 'ec2:spot-fleet-request:TargetCapacity',
- 'elasticmapreduce:instancegroup:InstanceCount',
- 'appstream:fleet:DesiredCapacity',
- 'dynamodb:table:ReadCapacityUnits',
- 'dynamodb:table:WriteCapacityUnits',
- 'dynamodb:index:ReadCapacityUnits',
- 'dynamodb:index:WriteCapacityUnits']),
- policy_type=dict(type='str', required=True, choices=['StepScaling', 'TargetTrackingScaling']),
- step_scaling_policy_configuration=dict(type='dict'),
- target_tracking_scaling_policy_configuration=dict(
- type='dict',
- options=dict(
- CustomizedMetricSpecification=dict(type='dict'),
- DisableScaleIn=dict(type='bool'),
- PredefinedMetricSpecification=dict(type='dict'),
- ScaleInCooldown=dict(type='int'),
- ScaleOutCooldown=dict(type='int'),
- TargetValue=dict(type='float'),
- )
- ),
- minimum_tasks=dict(type='int'),
- maximum_tasks=dict(type='int'),
- override_task_capacity=dict(type='bool'),
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
- connection = module.client('application-autoscaling')
-
- # Remove any target_tracking_scaling_policy_configuration suboptions that are None
- policy_config_options = [
- 'CustomizedMetricSpecification', 'DisableScaleIn', 'PredefinedMetricSpecification', 'ScaleInCooldown', 'ScaleOutCooldown', 'TargetValue'
- ]
- if isinstance(module.params['target_tracking_scaling_policy_configuration'], dict):
- for option in policy_config_options:
- if module.params['target_tracking_scaling_policy_configuration'][option] is None:
- module.params['target_tracking_scaling_policy_configuration'].pop(option)
-
- if module.params.get("state") == 'present':
- # A scalable target must be registered prior to creating a scaling policy
- scalable_target_result = create_scalable_target(connection, module)
- policy_result = create_scaling_policy(connection, module)
- # Merge the results of the scalable target creation and policy deletion/creation
- # There's no risk in overwriting values since keys present in both responses hold the same values in our case
- merged_result = merge_results(scalable_target_result, policy_result)
- module.exit_json(**merged_result)
- else:
- policy_result = delete_scaling_policy(connection, module)
- module.exit_json(**policy_result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_batch_compute_environment.py b/lib/ansible/modules/cloud/amazon/aws_batch_compute_environment.py
deleted file mode 100644
index aee7b5c058..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_batch_compute_environment.py
+++ /dev/null
@@ -1,490 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: aws_batch_compute_environment
-short_description: Manage AWS Batch Compute Environments
-description:
- - This module allows the management of AWS Batch Compute Environments.
- It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
- environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
-
-version_added: "2.5"
-
-author: Jon Meran (@jonmer85)
-options:
- compute_environment_name:
- description:
- - The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, and underscores
- are allowed.
- required: true
- type: str
- type:
- description:
- - The type of the compute environment.
- required: true
- choices: ["MANAGED", "UNMANAGED"]
- type: str
- state:
- description:
- - Describes the desired state.
- default: "present"
- choices: ["present", "absent"]
- type: str
- compute_environment_state:
- description:
- - The state of the compute environment. If the state is ENABLED, then the compute environment accepts jobs
- from a queue and can scale out automatically based on queues.
- default: "ENABLED"
- choices: ["ENABLED", "DISABLED"]
- type: str
- service_role:
- description:
- - The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS
- services on your behalf.
- required: true
- type: str
- compute_resource_type:
- description:
- - The type of compute resource.
- required: true
- choices: ["EC2", "SPOT"]
- type: str
- minv_cpus:
- description:
- - The minimum number of EC2 vCPUs that an environment should maintain.
- required: true
- type: int
- maxv_cpus:
- description:
- - The maximum number of EC2 vCPUs that an environment can reach.
- required: true
- type: int
- desiredv_cpus:
- description:
- - The desired number of EC2 vCPUS in the compute environment.
- type: int
- instance_types:
- description:
- - The instance types that may be launched.
- required: true
- type: list
- elements: str
- image_id:
- description:
- - The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.
- type: str
- subnets:
- description:
- - The VPC subnets into which the compute resources are launched.
- required: true
- type: list
- elements: str
- security_group_ids:
- description:
- - The EC2 security groups that are associated with instances launched in the compute environment.
- required: true
- type: list
- elements: str
- ec2_key_pair:
- description:
- - The EC2 key pair that is used for instances launched in the compute environment.
- type: str
- instance_role:
- description:
- - The Amazon ECS instance role applied to Amazon EC2 instances in a compute environment.
- required: true
- type: str
- tags:
- description:
- - Key-value pair tags to be applied to resources that are launched in the compute environment.
- type: dict
- bid_percentage:
- description:
- - The minimum percentage that a Spot Instance price must be when compared with the On-Demand price for that
- instance type before instances are launched. For example, if your bid percentage is 20%, then the Spot price
- must be below 20% of the current On-Demand price for that EC2 instance.
- type: int
- spot_iam_fleet_role:
- description:
- - The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment.
- type: str
-
-requirements:
- - boto3
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
----
-- hosts: localhost
- gather_facts: no
- vars:
- state: present
- tasks:
- - name: My Batch Compute Environment
- aws_batch_compute_environment:
- compute_environment_name: computeEnvironmentName
- state: present
- region: us-east-1
- compute_environment_state: ENABLED
- type: MANAGED
- compute_resource_type: EC2
- minv_cpus: 0
- maxv_cpus: 2
- desiredv_cpus: 1
- instance_types:
- - optimal
- subnets:
- - my-subnet1
- - my-subnet2
- security_group_ids:
- - my-sg1
- - my-sg2
- instance_role: arn:aws:iam::<account>:instance-profile/<role>
- tags:
- tag1: value1
- tag2: value2
- service_role: arn:aws:iam::<account>:role/service-role/<role>
- register: aws_batch_compute_environment_action
-
- - name: show results
- debug:
- var: aws_batch_compute_environment_action
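-
- # A hedged extra task (not from the original docs): a SPOT compute
- # environment using the documented bid_percentage and spot_iam_fleet_role
- # options; all names and ARNs below are placeholders.
- - name: My Spot Batch Compute Environment
- aws_batch_compute_environment:
- compute_environment_name: spotComputeEnvironmentName
- state: present
- region: us-east-1
- compute_environment_state: ENABLED
- type: MANAGED
- compute_resource_type: SPOT
- bid_percentage: 20
- spot_iam_fleet_role: arn:aws:iam::<account>:role/<fleet-role>
- minv_cpus: 0
- maxv_cpus: 2
- instance_types:
- - optimal
- subnets:
- - my-subnet1
- security_group_ids:
- - my-sg1
- instance_role: arn:aws:iam::<account>:instance-profile/<role>
- service_role: arn:aws:iam::<account>:role/service-role/<role>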
-'''
-
-RETURN = '''
----
-output:
- description: "returns what action was taken, whether something was changed, invocation and response"
- returned: always
- sample:
- batch_compute_environment_action: none
- changed: false
- invocation:
- module_args:
- aws_access_key: ~
- aws_secret_key: ~
- bid_percentage: ~
- compute_environment_name: <name>
- compute_environment_state: ENABLED
- compute_resource_type: EC2
- desiredv_cpus: 0
- ec2_key_pair: ~
- ec2_url: ~
- image_id: ~
- instance_role: "arn:aws:iam::..."
- instance_types:
- - optimal
- maxv_cpus: 8
- minv_cpus: 0
- profile: ~
- region: us-east-1
- security_group_ids:
- - "*******"
- security_token: ~
- service_role: "arn:aws:iam::...."
- spot_iam_fleet_role: ~
- state: present
- subnets:
- - "******"
- tags:
- Environment: <name>
- Name: <name>
- type: MANAGED
- validate_certs: true
- response:
- computeEnvironmentArn: "arn:aws:batch:...."
- computeEnvironmentName: <name>
- computeResources:
- desiredvCpus: 0
- instanceRole: "arn:aws:iam::..."
- instanceTypes:
- - optimal
- maxvCpus: 8
- minvCpus: 0
- securityGroupIds:
- - "******"
- subnets:
- - "*******"
- tags:
- Environment: <name>
- Name: <name>
- type: EC2
- ecsClusterArn: "arn:aws:ecs:....."
- serviceRole: "arn:aws:iam::..."
- state: ENABLED
- status: VALID
- statusReason: "ComputeEnvironment Healthy"
- type: MANAGED
- type: dict
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
-import re
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-
-# ---------------------------------------------------------------------------------------------------
-#
-# Helper Functions & classes
-#
-# ---------------------------------------------------------------------------------------------------
-
-def set_api_params(module, module_params):
- """
- Sets module parameters to those expected by the boto3 API.
-
- :param module:
- :param module_params:
- :return:
- """
- api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
- return snake_dict_to_camel_dict(api_params)
-
-
-def validate_params(module):
- """
- Performs basic parameter validation.
-
- :param module:
- :return:
- """
-
- compute_environment_name = module.params['compute_environment_name']
-
- # validate compute environment name
- if not re.search(r'^[\w:]+$', compute_environment_name):
- module.fail_json(
- msg="compute_environment_name {0} is invalid. Names must contain only alphanumeric characters, "
- "underscores and colons.".format(compute_environment_name)
- )
- if not compute_environment_name.startswith('arn:aws:batch:'):
- if len(compute_environment_name) > 128:
- module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit'
- .format(compute_environment_name))
-
- return
-
-
-# ---------------------------------------------------------------------------------------------------
-#
-# Batch Compute Environment functions
-#
-# ---------------------------------------------------------------------------------------------------
-
-def get_current_compute_environment(module, client):
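- # The describe call doubles as the existence check: any ClientError is
- # treated the same as "no such environment", so callers fall through to creation.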
- try:
- environments = client.describe_compute_environments(
- computeEnvironments=[module.params['compute_environment_name']]
- )
- if len(environments['computeEnvironments']) > 0:
- return environments['computeEnvironments'][0]
- else:
- return None
- except ClientError:
- return None
-
-
-def create_compute_environment(module, client):
- """
- Adds a Batch compute environment
-
- :param module:
- :param client:
- :return:
- """
-
- changed = False
-
- # set API parameters
- params = (
- 'compute_environment_name', 'type', 'service_role')
- api_params = set_api_params(module, params)
-
- if module.params['compute_environment_state'] is not None:
- api_params['state'] = module.params['compute_environment_state']
-
- compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets',
- 'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage',
- 'spot_iam_fleet_role')
- compute_resources_params = set_api_params(module, compute_resources_param_list)
-
- if module.params['compute_resource_type'] is not None:
- compute_resources_params['type'] = module.params['compute_resource_type']
-
- api_params['computeResources'] = compute_resources_params
-
- try:
- if not module.check_mode:
- client.create_compute_environment(**api_params)
- changed = True
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg='Error creating compute environment')
-
- return changed
-
-
-def remove_compute_environment(module, client):
- """
- Remove a Batch compute environment
-
- :param module:
- :param client:
- :return:
- """
-
- changed = False
-
- # set API parameters
- api_params = {'computeEnvironment': module.params['compute_environment_name']}
-
- try:
- if not module.check_mode:
- client.delete_compute_environment(**api_params)
- changed = True
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg='Error removing compute environment')
- return changed
-
-
-def manage_state(module, client):
- changed = False
- current_state = 'absent'
- state = module.params['state']
- compute_environment_state = module.params['compute_environment_state']
- compute_environment_name = module.params['compute_environment_name']
- service_role = module.params['service_role']
- minv_cpus = module.params['minv_cpus']
- maxv_cpus = module.params['maxv_cpus']
- desiredv_cpus = module.params['desiredv_cpus']
- action_taken = 'none'
- update_env_response = ''
-
- check_mode = module.check_mode
-
- # check if the compute environment exists
- current_compute_environment = get_current_compute_environment(module, client)
- response = current_compute_environment
- if current_compute_environment:
- current_state = 'present'
-
- if state == 'present':
- if current_state == 'present':
- updates = False
- # Update Batch Compute Environment configuration
- compute_kwargs = {'computeEnvironment': compute_environment_name}
-
- # Update configuration if needed
- compute_resources = {}
- if compute_environment_state and current_compute_environment['state'] != compute_environment_state:
- compute_kwargs.update({'state': compute_environment_state})
- updates = True
- if service_role and current_compute_environment['serviceRole'] != service_role:
- compute_kwargs.update({'serviceRole': service_role})
- updates = True
- if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus:
- compute_resources['minvCpus'] = minv_cpus
- if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus:
- compute_resources['maxvCpus'] = maxv_cpus
- if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus:
- compute_resources['desiredvCpus'] = desiredv_cpus
- if len(compute_resources) > 0:
- compute_kwargs['computeResources'] = compute_resources
- updates = True
- if updates:
- try:
- if not check_mode:
- update_env_response = client.update_compute_environment(**compute_kwargs)
- if not update_env_response:
- module.fail_json(msg='Unable to get compute environment information after updating')
- changed = True
- action_taken = "updated"
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update environment.")
-
- else:
- # Create Batch Compute Environment
- changed = create_compute_environment(module, client)
- # Describe compute environment
- action_taken = 'added'
- response = get_current_compute_environment(module, client)
- if not response:
- module.fail_json(msg='Unable to get compute environment information after creating')
- else:
- if current_state == 'present':
- # remove the compute environment
- changed = remove_compute_environment(module, client)
- action_taken = 'deleted'
- return dict(changed=changed, batch_compute_environment_action=action_taken, response=response)
-
-
-# ---------------------------------------------------------------------------------------------------
-#
-# MAIN
-#
-# ---------------------------------------------------------------------------------------------------
-
-def main():
- """
- Main entry point.
-
- :return dict: changed, batch_compute_environment_action, response
- """
-
- argument_spec = dict(
- state=dict(default='present', choices=['present', 'absent']),
- compute_environment_name=dict(required=True),
- type=dict(required=True, choices=['MANAGED', 'UNMANAGED']),
- compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
- service_role=dict(required=True),
- compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']),
- minv_cpus=dict(type='int', required=True),
- maxv_cpus=dict(type='int', required=True),
- desiredv_cpus=dict(type='int'),
- instance_types=dict(type='list', required=True),
- image_id=dict(),
- subnets=dict(type='list', required=True),
- security_group_ids=dict(type='list', required=True),
- ec2_key_pair=dict(),
- instance_role=dict(required=True),
- tags=dict(type='dict'),
- bid_percentage=dict(type='int'),
- spot_iam_fleet_role=dict(),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- client = module.client('batch')
-
- validate_params(module)
-
- results = manage_state(module, client)
-
- module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags']))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_batch_job_definition.py b/lib/ansible/modules/cloud/amazon/aws_batch_job_definition.py
deleted file mode 100644
index 959677c42d..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_batch_job_definition.py
+++ /dev/null
@@ -1,459 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: aws_batch_job_definition
-short_description: Manage AWS Batch Job Definitions
-description:
- - This module allows the management of AWS Batch Job Definitions.
- It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
- environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
-
-version_added: "2.5"
-
-author: Jon Meran (@jonmer85)
-options:
- job_definition_arn:
- description:
- - The ARN for the job definition.
- type: str
- job_definition_name:
- description:
- - The name for the job definition.
- required: true
- type: str
- state:
- description:
- - Describes the desired state.
- default: "present"
- choices: ["present", "absent"]
- type: str
- type:
- description:
- - The type of job definition.
- required: true
- type: str
- parameters:
- description:
- - Default parameter substitution placeholders to set in the job definition. Parameters are specified as a
- key-value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from
- the job definition.
- type: dict
- image:
- description:
- - The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker
- Hub registry are available by default. Other repositories are specified with `` repository-url/image<colon>tag ``.
- Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes,
- and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker
- Remote API and the IMAGE parameter of docker run.
- required: true
- type: str
- vcpus:
- description:
- - The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container
- section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to
- 1,024 CPU shares.
- required: true
- type: int
- memory:
- description:
- - The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory
- specified here, the container is killed. This parameter maps to Memory in the Create a container section of the
- Docker Remote API and the --memory option to docker run.
- required: true
- type: int
- command:
- description:
- - The command that is passed to the container. This parameter maps to Cmd in the Create a container section of
- the Docker Remote API and the COMMAND parameter to docker run. For more information,
- see U(https://docs.docker.com/engine/reference/builder/#cmd).
- type: list
- elements: str
- job_role_arn:
- description:
- - The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions.
- type: str
- volumes:
- description:
- - A list of data volumes used in a job.
- suboptions:
- host:
- description:
- - The contents of the host parameter determine whether your data volume persists on the host container
- instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host
- path for your data volume, but the data is not guaranteed to persist after the containers associated with
- it stop running.
- This is a dictionary with one property, sourcePath - The path on the host container
- instance that is presented to the container. If this parameter is empty, then the Docker daemon has assigned
- a host path for you. If the host parameter contains a sourcePath file location, then the data volume
- persists at the specified location on the host container instance until you delete it manually. If the
- sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the
- location does exist, the contents of the source path folder are exported.
- name:
- description:
- - The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are
- allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.
- type: list
- elements: dict
- environment:
- description:
- - The environment variables to pass to a container. This parameter maps to Env in the Create a container section
- of the Docker Remote API and the --env option to docker run.
- suboptions:
- name:
- description:
- - The name of the key value pair. For environment variables, this is the name of the environment variable.
- value:
- description:
- - The value of the key value pair. For environment variables, this is the value of the environment variable.
- type: list
- elements: dict
- mount_points:
- description:
- - The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container
- section of the Docker Remote API and the --volume option to docker run.
- suboptions:
- containerPath:
- description:
- - The path on the container at which to mount the host volume.
- readOnly:
- description:
- - If this value is true , the container has read-only access to the volume; otherwise, the container can write
- to the volume. The default value is C(false).
- sourceVolume:
- description:
- - The name of the volume to mount.
- type: list
- elements: dict
- readonly_root_filesystem:
- description:
- - When this parameter is true, the container is given read-only access to its root file system. This parameter
- maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option
- to docker run.
- type: str
- privileged:
- description:
- - When this parameter is true, the container is given elevated privileges on the host container instance
- (similar to the root user). This parameter maps to Privileged in the Create a container section of the
- Docker Remote API and the --privileged option to docker run.
- type: str
- ulimits:
- description:
- - A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section
- of the Docker Remote API and the --ulimit option to docker run.
- suboptions:
- hardLimit:
- description:
- - The hard limit for the ulimit type.
- name:
- description:
- - The type of the ulimit.
- softLimit:
- description:
- - The soft limit for the ulimit type.
- type: list
- elements: dict
- user:
- description:
- - The user name to use inside the container. This parameter maps to User in the Create a container section of
- the Docker Remote API and the --user option to docker run.
- type: str
- attempts:
- description:
- - Retry strategy - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10
- attempts. If attempts is greater than one, the job is retried on failure until it has moved to RUNNABLE that
- many times.
- type: int
-requirements:
- - boto3
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
----
-- hosts: localhost
- gather_facts: no
- vars:
- state: present
- tasks:
- - name: My Batch Job Definition
- aws_batch_job_definition:
- job_definition_name: My Batch Job Definition
- state: present
- type: container
- parameters:
- Param1: Val1
- Param2: Val2
- image: <Docker Image URL>
- vcpus: 1
- memory: 512
- command:
- - python
- - run_my_script.py
- - arg1
- job_role_arn: <Job Role ARN>
- attempts: 3
- register: job_definition_create_result
-
- - name: show results
- debug: var=job_definition_create_result
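-
- # A hedged extra task (not from the original docs): passing environment
- # variables and a retry strategy via the documented suboptions; all names
- # and values below are placeholders.
- - name: Job definition with environment variables
- aws_batch_job_definition:
- job_definition_name: my-env-aware-job
- state: present
- type: container
- image: <Docker Image URL>
- vcpus: 1
- memory: 512
- environment:
- - name: STAGE
- value: production
- attempts: 3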
-'''
-
-RETURN = '''
----
-output:
- description: "returns what action was taken, whether something was changed, invocation and response"
- returned: always
- sample:
- aws_batch_job_definition_action: none
- changed: false
- response:
- job_definition_arn: "arn:aws:batch:...."
- job_definition_name: <name>
- status: INACTIVE
- type: container
- type: dict
-'''
-
-from ansible.module_utils.aws.batch import cc, set_api_params
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-
-# ---------------------------------------------------------------------------------------------------
-#
-# Helper Functions & classes
-#
-# ---------------------------------------------------------------------------------------------------
-
-
-def validate_params(module, batch_client):
- """
- Performs basic parameter validation.
-
- :param module:
- :param batch_client:
- :return:
- """
- return
-
-
-# ---------------------------------------------------------------------------------------------------
-#
-# Batch Job Definition functions
-#
-# ---------------------------------------------------------------------------------------------------
-
-def get_current_job_definition(module, batch_client):
- try:
- environments = batch_client.describe_job_definitions(
- jobDefinitionName=module.params['job_definition_name']
- )
- if len(environments['jobDefinitions']) > 0:
- latest_revision = max(map(lambda d: d['revision'], environments['jobDefinitions']))
- latest_definition = next((x for x in environments['jobDefinitions'] if x['revision'] == latest_revision),
- None)
- return latest_definition
- return None
- except ClientError:
- return None
-
-
-def create_job_definition(module, batch_client):
- """
- Adds a Batch job definition
-
- :param module:
- :param batch_client:
- :return:
- """
-
- changed = False
-
- # set API parameters
- api_params = set_api_params(module, get_base_params())
- container_properties_params = set_api_params(module, get_container_property_params())
- retry_strategy_params = set_api_params(module, get_retry_strategy_params())
-
- api_params['retryStrategy'] = retry_strategy_params
- api_params['containerProperties'] = container_properties_params
-
- try:
- if not module.check_mode:
- batch_client.register_job_definition(**api_params)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Error registering job definition')
-
- return changed
-
-
-def get_retry_strategy_params():
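- # The trailing comma below makes this a one-element tuple, so callers can
- # iterate over it just like the other parameter lists in this module.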
- return 'attempts',
-
-
-def get_container_property_params():
- return ('image', 'vcpus', 'memory', 'command', 'job_role_arn', 'volumes', 'environment', 'mount_points',
- 'readonly_root_filesystem', 'privileged', 'ulimits', 'user')
-
-
-def get_base_params():
- return 'job_definition_name', 'type', 'parameters'
-
-
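-# Note: this helper appears to be unused in this module; compute_environment_order
-# is a parameter of aws_batch_job_queue, not one declared in this argument_spec.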
-def get_compute_environment_order_list(module):
- compute_environment_order_list = []
- for ceo in module.params['compute_environment_order']:
- compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment']))
- return compute_environment_order_list
-
-
-def remove_job_definition(module, batch_client):
- """
- Remove a Batch job definition
-
- :param module:
- :param batch_client:
- :return:
- """
-
- changed = False
-
- try:
- if not module.check_mode:
- batch_client.deregister_job_definition(jobDefinition=module.params['job_definition_arn'])
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Error removing job definition')
- return changed
-
-
-def job_definition_equal(module, current_definition):
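- # Field-by-field comparison of the desired module params against the latest
- # registered revision; cc() converts the snake_case param names to the
- # camelCase keys that the Batch API returns.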
- equal = True
-
- for param in get_base_params():
- if module.params.get(param) != current_definition.get(cc(param)):
- equal = False
- break
-
- for param in get_container_property_params():
- if module.params.get(param) != current_definition.get('containerProperties').get(cc(param)):
- equal = False
- break
-
- for param in get_retry_strategy_params():
- if module.params.get(param) != current_definition.get('retryStrategy').get(cc(param)):
- equal = False
- break
-
- return equal
-
-
-def manage_state(module, batch_client):
- changed = False
- current_state = 'absent'
- state = module.params['state']
- job_definition_name = module.params['job_definition_name']
- action_taken = 'none'
- response = None
-
- check_mode = module.check_mode
-
- # check if the job definition exists
- current_job_definition = get_current_job_definition(module, batch_client)
- if current_job_definition:
- current_state = 'present'
-
- if state == 'present':
- if current_state == 'present':
- # check if definition has changed and register a new version if necessary
- if not job_definition_equal(module, current_job_definition):
- create_job_definition(module, batch_client)
- action_taken = 'updated with new version'
- changed = True
- else:
- # Create Job definition
- changed = create_job_definition(module, batch_client)
- action_taken = 'added'
-
- response = get_current_job_definition(module, batch_client)
- if not response:
- module.fail_json(msg='Unable to get job definition information after creating/updating')
- else:
- if current_state == 'present':
- # remove the Job definition
- changed = remove_job_definition(module, batch_client)
- action_taken = 'deregistered'
- return dict(changed=changed, batch_job_definition_action=action_taken, response=response)
-
-
-# ---------------------------------------------------------------------------------------------------
-#
-# MAIN
-#
-# ---------------------------------------------------------------------------------------------------
-
-def main():
- """
- Main entry point.
-
- :return dict: ansible facts
- """
-
- argument_spec = dict(
- state=dict(required=False, default='present', choices=['present', 'absent']),
- job_definition_name=dict(required=True),
- job_definition_arn=dict(),
- type=dict(required=True),
- parameters=dict(type='dict'),
- image=dict(required=True),
- vcpus=dict(type='int', required=True),
- memory=dict(type='int', required=True),
- command=dict(type='list', default=[]),
- job_role_arn=dict(),
- volumes=dict(type='list', default=[]),
- environment=dict(type='list', default=[]),
- mount_points=dict(type='list', default=[]),
- readonly_root_filesystem=dict(),
- privileged=dict(),
- ulimits=dict(type='list', default=[]),
- user=dict(),
- attempts=dict(type='int')
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- batch_client = module.client('batch')
-
- validate_params(module, batch_client)
-
- results = manage_state(module, batch_client)
-
- module.exit_json(**camel_dict_to_snake_dict(results))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_batch_job_queue.py b/lib/ansible/modules/cloud/amazon/aws_batch_job_queue.py
deleted file mode 100644
index 9c61c69efe..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_batch_job_queue.py
+++ /dev/null
@@ -1,316 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: aws_batch_job_queue
-short_description: Manage AWS Batch Job Queues
-description:
- - This module allows the management of AWS Batch Job Queues.
- It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
- environment, M(aws_batch_job_queue) to manage job queues, and M(aws_batch_job_definition) to manage job definitions.
-
-version_added: "2.5"
-
-author: Jon Meran (@jonmer85)
-options:
- job_queue_name:
- description:
- - The name for the job queue
- required: true
- type: str
- state:
- description:
- - Describes the desired state.
- default: "present"
- choices: ["present", "absent"]
- type: str
- job_queue_state:
- description:
- - The state of the job queue. If the job queue state is ENABLED, it is able to accept jobs.
- default: "ENABLED"
- choices: ["ENABLED", "DISABLED"]
- type: str
- priority:
- description:
- - The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority
- parameter) are evaluated first when associated with the same compute environment. Priority is determined in
- descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a
- job queue with a priority value of 1.
- required: true
- type: int
- compute_environment_order:
- description:
- - The set of compute environments mapped to a job queue and their order relative to each other. The job
- scheduler uses this parameter to determine which compute environment should execute a given job. Compute
- environments must be in the VALID state before you can associate them with a job queue. You can associate up to
- 3 compute environments with a job queue.
- required: true
- type: list
- elements: dict
- suboptions:
- order:
- type: int
- description: The relative priority of the environment.
- compute_environment:
- type: str
- description: The name of the compute environment.
-requirements:
- - boto3
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
----
-- hosts: localhost
- gather_facts: no
- vars:
- state: present
- tasks:
- - name: My Batch Job Queue
- aws_batch_job_queue:
- job_queue_name: jobQueueName
- state: present
- region: us-east-1
- job_queue_state: ENABLED
- priority: 1
- compute_environment_order:
- - order: 1
- compute_environment: my_compute_env1
- - order: 2
- compute_environment: my_compute_env2
- register: batch_job_queue_action
-
- - name: show results
- debug:
- var: batch_job_queue_action
-'''
-
-RETURN = '''
----
-output:
- description: "returns what action was taken, whether something was changed, invocation and response"
- returned: always
- sample:
- batch_job_queue_action: updated
- changed: false
- response:
- job_queue_arn: "arn:aws:batch:...."
- job_queue_name: <name>
- priority: 1
- state: DISABLED
- status: UPDATING
- status_reason: "JobQueue Healthy"
- type: dict
-'''
-
-from ansible.module_utils.aws.batch import set_api_params
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-# ---------------------------------------------------------------------------------------------------
-#
-# Helper Functions & classes
-#
-# ---------------------------------------------------------------------------------------------------
-
-
-def validate_params(module):
- """
- Performs basic parameter validation.
-
- Currently a no-op: the argument_spec in main() already enforces all of the
- validation this module needs.
-
- :param module:
- """
- return
-
-
-# ---------------------------------------------------------------------------------------------------
-#
-# Batch Job Queue functions
-#
-# ---------------------------------------------------------------------------------------------------
-
-def get_current_job_queue(module, client):
- try:
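- # describe_job_queues filtered by an explicit queue name returns at most one entry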
- environments = client.describe_job_queues(
- jobQueues=[module.params['job_queue_name']]
- )
- return environments['jobQueues'][0] if len(environments['jobQueues']) > 0 else None
- except ClientError:
- return None
-
-
-def create_job_queue(module, client):
- """
- Adds a Batch job queue
-
- :param module:
- :param client:
- :return:
- """
-
- changed = False
-
- # set API parameters
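- # set_api_params copies the named module params (skipping None values) into a
- # dict keyed by their camelCase equivalents, e.g. {'jobQueueName': ..., 'priority': ...}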
- params = ('job_queue_name', 'priority')
- api_params = set_api_params(module, params)
-
- if module.params['job_queue_state'] is not None:
- api_params['state'] = module.params['job_queue_state']
-
- api_params['computeEnvironmentOrder'] = get_compute_environment_order_list(module)
-
- try:
- if not module.check_mode:
- client.create_job_queue(**api_params)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Error creating job queue')
-
- return changed
-
-
-def get_compute_environment_order_list(module):
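- # Maps each entry from the module's snake_case form, e.g.
- #   {'order': 1, 'compute_environment': 'my_compute_env1'}
- # to the camelCase shape the Batch API expects:
- #   {'order': 1, 'computeEnvironment': 'my_compute_env1'}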
- compute_environment_order_list = []
- for ceo in module.params['compute_environment_order']:
- compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment']))
- return compute_environment_order_list
-
-
-def remove_job_queue(module, client):
- """
- Remove a Batch job queue
-
- :param module:
- :param client:
- :return:
- """
-
- changed = False
-
- # set API parameters
- api_params = {'jobQueue': module.params['job_queue_name']}
-
- try:
- if not module.check_mode:
- client.delete_job_queue(**api_params)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Error removing job queue')
- return changed
-
-
-def manage_state(module, client):
- changed = False
- current_state = 'absent'
- state = module.params['state']
- job_queue_state = module.params['job_queue_state']
- job_queue_name = module.params['job_queue_name']
- priority = module.params['priority']
- action_taken = 'none'
- response = None
-
- check_mode = module.check_mode
-
- # check if the job queue exists
- current_job_queue = get_current_job_queue(module, client)
- if current_job_queue:
- current_state = 'present'
-
- if state == 'present':
- if current_state == 'present':
- updates = False
- # Update Batch Job Queue configuration
- job_kwargs = {'jobQueue': job_queue_name}
-
- # Update configuration if needed
- if job_queue_state and current_job_queue['state'] != job_queue_state:
- job_kwargs.update({'state': job_queue_state})
- updates = True
- if priority is not None and current_job_queue['priority'] != priority:
- job_kwargs.update({'priority': priority})
- updates = True
-
- new_compute_environment_order_list = get_compute_environment_order_list(module)
- if new_compute_environment_order_list != current_job_queue['computeEnvironmentOrder']:
- job_kwargs['computeEnvironmentOrder'] = new_compute_environment_order_list
- updates = True
-
- if updates:
- try:
- if not check_mode:
- client.update_job_queue(**job_kwargs)
- changed = True
- action_taken = "updated"
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update job queue")
-
- else:
- # Create Job Queue
- changed = create_job_queue(module, client)
- action_taken = 'added'
-
- # Describe job queue
- response = get_current_job_queue(module, client)
- if not response:
- module.fail_json(msg='Unable to get job queue information after creating/updating')
- else:
- if current_state == 'present':
- # remove the Job Queue
- changed = remove_job_queue(module, client)
- action_taken = 'deleted'
- return dict(changed=changed, batch_job_queue_action=action_taken, response=response)
-
-
-# ---------------------------------------------------------------------------------------------------
-#
-# MAIN
-#
-# ---------------------------------------------------------------------------------------------------
-
-def main():
- """
- Main entry point.
-
- :return dict: changed, batch_job_queue_action, response
- """
-
- argument_spec = dict(
- state=dict(required=False, default='present', choices=['present', 'absent']),
- job_queue_name=dict(required=True),
- job_queue_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
- priority=dict(type='int', required=True),
- compute_environment_order=dict(type='list', required=True),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- client = module.client('batch')
-
- validate_params(module)
-
- results = manage_state(module, client)
-
- module.exit_json(**camel_dict_to_snake_dict(results))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_codebuild.py b/lib/ansible/modules/cloud/amazon/aws_codebuild.py
deleted file mode 100644
index 837e22e005..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_codebuild.py
+++ /dev/null
@@ -1,408 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: aws_codebuild
-short_description: Create or delete an AWS CodeBuild project
-notes:
- - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html).
-description:
- - Create or delete a CodeBuild project on AWS, used for building code artifacts from source code.
-version_added: "2.9"
-author:
- - Stefan Horning (@stefanhorning) <horning@mediapeers.com>
-requirements: [ botocore, boto3 ]
-options:
- name:
- description:
- - Name of the CodeBuild project.
- required: true
- type: str
- description:
- description:
- - Descriptive text of the CodeBuild project.
- type: str
- source:
- description:
- - Configure service and location for the build input source.
- required: true
- suboptions:
- type:
- description:
- - "The type of the source. Allows one of these: C(CODECOMMIT), C(CODEPIPELINE), C(GITHUB), C(S3), C(BITBUCKET), C(GITHUB_ENTERPRISE)."
- required: true
- type: str
- location:
- description:
- - Information about the location of the source code to be built. For type CODEPIPELINE, location should not be specified.
- type: str
- git_clone_depth:
- description:
- - When using git you can specify the clone depth as an integer here.
- type: int
- buildspec:
- description:
- - The build spec declaration to use for the builds in this build project. Leave empty if part of the code project.
- type: str
- insecure_ssl:
- description:
- - Enable this flag to ignore SSL warnings while connecting to the project source code.
- type: bool
- type: dict
- artifacts:
- description:
- - Information about the build output artifacts for the build project.
- required: true
- suboptions:
- type:
- description:
- - "The type of build output for artifacts. Can be one of the following: C(CODEPIPELINE), C(NO_ARTIFACTS), C(S3)."
- required: true
- location:
- description:
- - Information about the build output artifact location. When choosing type S3, set the bucket name here.
- path:
- description:
- - Along with namespace_type and name, the pattern that AWS CodeBuild will use to name and store the output artifacts.
- - Used for path in S3 bucket when type is C(S3).
- namespace_type:
- description:
- - Along with path and name, the pattern that AWS CodeBuild will use to determine the name and location to store the output artifacts.
- - Accepts C(BUILD_ID) and C(NONE).
- - "See docs here: U(http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project)."
- name:
- description:
- - Along with path and namespace_type, the pattern that AWS CodeBuild will use to name and store the output artifact.
- packaging:
- description:
- - The type of build output artifact to create on S3, can be NONE for creating a folder or ZIP for a ZIP file.
- type: dict
- cache:
- description:
- - Caching params to speed up following builds.
- suboptions:
- type:
- description:
- - Cache type. Can be C(NO_CACHE) or C(S3).
- required: true
- location:
- description:
- - Caching location on S3.
- required: true
- type: dict
- environment:
- description:
- - Information about the build environment for the build project.
- suboptions:
- type:
- description:
- - The type of build environment to use for the project. Usually C(LINUX_CONTAINER).
- required: true
- image:
- description:
- - The ID of the Docker image to use for this build project.
- required: true
- compute_type:
- description:
- - Information about the compute resources the build project will use.
- - "Available values include: C(BUILD_GENERAL1_SMALL), C(BUILD_GENERAL1_MEDIUM), C(BUILD_GENERAL1_LARGE)."
- required: true
- environment_variables:
- description:
- - A set of environment variables to make available to builds for the build project. List of dictionaries with name and value fields.
- - "Example: { name: 'MY_ENV_VARIABLE', value: 'test' }"
- privileged_mode:
- description:
- - Enables running the Docker daemon inside a Docker container. Set to true only if the build project is used to build Docker images.
- type: dict
- service_role:
- description:
- - The ARN of the AWS IAM role that enables AWS CodeBuild to interact with dependent AWS services on behalf of the AWS account.
- type: str
- timeout_in_minutes:
- description:
- - How long CodeBuild should wait until timing out any build that has not been marked as completed.
- default: 60
- type: int
- encryption_key:
- description:
- - The AWS Key Management Service (AWS KMS) customer master key (CMK) to be used for encrypting the build output artifacts.
- type: str
- tags:
- description:
- - A set of tags for the build project.
- type: list
- elements: dict
- suboptions:
- key:
- description: The name of the Tag.
- type: str
- value:
- description: The value of the Tag.
- type: str
- vpc_config:
- description:
- - The VPC config enables AWS CodeBuild to access resources in an Amazon VPC.
- type: dict
- state:
- description:
- - Create or remove code build project.
- default: 'present'
- choices: ['present', 'absent']
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- aws_codebuild:
- name: my_project
- description: My nice little project
- service_role: "arn:aws:iam::123123:role/service-role/code-build-service-role"
- source:
- # Possible values: BITBUCKET, CODECOMMIT, CODEPIPELINE, GITHUB, S3
- type: CODEPIPELINE
- buildspec: ''
- artifacts:
- namespaceType: NONE
- packaging: NONE
- type: CODEPIPELINE
- name: my_project
- environment:
- computeType: BUILD_GENERAL1_SMALL
- privilegedMode: "true"
- image: "aws/codebuild/docker:17.09.0"
- type: LINUX_CONTAINER
- environmentVariables:
- - { name: 'PROFILE', value: 'staging' }
- encryption_key: "arn:aws:kms:us-east-1:123123:alias/aws/s3"
- region: us-east-1
- state: present
-'''
-
-RETURN = '''
-project:
- description: Returns the dictionary describing the code project configuration.
- returned: success
- type: complex
- contains:
- name:
- description: Name of the CodeBuild project
- returned: always
- type: str
- sample: my_project
- arn:
- description: ARN of the CodeBuild project
- returned: always
- type: str
- sample: arn:aws:codebuild:us-east-1:123123123:project/vod-api-app-builder
- description:
- description: A description of the build project
- returned: always
- type: str
- sample: My nice little project
- source:
- description: Information about the build input source code.
- returned: always
- type: complex
- contains:
- type:
- description: The type of the repository
- returned: always
- type: str
- sample: CODEPIPELINE
- location:
- description: Location identifier, depending on the source type.
- returned: when configured
- type: str
- git_clone_depth:
- description: The git clone depth
- returned: when configured
- type: int
- build_spec:
- description: The build spec declaration to use for the builds in this build project.
- returned: always
- type: str
- auth:
- description: Information about the authorization settings for AWS CodeBuild to access the source code to be built.
- returned: when configured
- type: complex
- insecure_ssl:
- description: True if set to ignore SSL warnings.
- returned: when configured
- type: bool
- artifacts:
- description: Information about the output of build artifacts
- returned: always
- type: complex
- contains:
- type:
- description: The type of build artifact.
- returned: always
- type: str
- sample: CODEPIPELINE
- location:
- description: Output location for build artifacts
- returned: when configured
- type: str
- # and more... see http://boto3.readthedocs.io/en/latest/reference/services/codebuild.html#CodeBuild.Client.create_project
- cache:
- description: Cache settings for the build project.
- returned: when configured
- type: dict
- environment:
- description: Environment settings for the build
- returned: always
- type: dict
- service_role:
- description: IAM role to be used during build to access other AWS services.
- returned: always
- type: str
- sample: arn:aws:iam::123123123:role/codebuild-service-role
- timeout_in_minutes:
- description: The timeout of a build in minutes
- returned: always
- type: int
- sample: 60
- tags:
- description: Tags added to the project
- returned: when configured
- type: list
- created:
- description: Timestamp of the create time of the project
- returned: always
- type: str
- sample: "2018-04-17T16:56:03.245000+02:00"
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule, get_boto3_client_method_parameters
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
-
-
-try:
- import botocore
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-
-def create_or_update_project(client, params, module):
- resp = {}
- name = params['name']
- # clean up params
- formatted_params = snake_dict_to_camel_dict(dict((k, v) for k, v in params.items() if v is not None))
- permitted_create_params = get_boto3_client_method_parameters(client, 'create_project')
- permitted_update_params = get_boto3_client_method_parameters(client, 'update_project')
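- # permitted_* hold the argument names the boto3 client accepts for each call;
- # filtering the module params against them lets one dict drive both API shapes.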
-
- formatted_create_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_create_params)
- formatted_update_params = dict((k, v) for k, v in formatted_params.items() if k in permitted_update_params)
-
- # Check if project with that name already exists and if so update existing:
- found = describe_project(client=client, name=name, module=module)
- changed = False
-
- if 'name' in found:
- found_project = found
- resp = update_project(client=client, params=formatted_update_params, module=module)
- updated_project = resp['project']
-
- # Prep both dicts for sensible change comparison:
- found_project.pop('lastModified')
- updated_project.pop('lastModified')
- if 'tags' not in updated_project:
- updated_project['tags'] = []
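- # the update_project response may omit the tags key when no tags are set;
- # normalize it so the change comparison below is not skewed by a missing key.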
-
- if updated_project != found_project:
- changed = True
- return resp, changed
- # Or create new project:
- try:
- resp = client.create_project(**formatted_create_params)
- changed = True
- return resp, changed
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to create CodeBuild project")
-
-
-def update_project(client, params, module):
-
- try:
- resp = client.update_project(**params)
- return resp
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to update CodeBuild project")
-
-
-def delete_project(client, name, module):
- found = describe_project(client=client, name=name, module=module)
- changed = False
- if 'name' in found:
- # Mark as changed when a project with that name existed before calling delete
- changed = True
- try:
- resp = client.delete_project(name=name)
- return resp, changed
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to delete CodeBuild project")
-
-
-def describe_project(client, name, module):
- project = {}
- try:
- projects = client.batch_get_projects(names=[name])['projects']
- if len(projects) > 0:
- project = projects[0]
- return project
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to describe CodeBuild projects")
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- description=dict(),
- source=dict(required=True, type='dict'),
- artifacts=dict(required=True, type='dict'),
- cache=dict(type='dict'),
- environment=dict(type='dict'),
- service_role=dict(),
- timeout_in_minutes=dict(type='int', default=60),
- encryption_key=dict(),
- tags=dict(type='list'),
- vpc_config=dict(type='dict'),
- state=dict(choices=['present', 'absent'], default='present')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
- client_conn = module.client('codebuild')
-
- state = module.params.get('state')
- changed = False
-
- if state == 'present':
- project_result, changed = create_or_update_project(
- client=client_conn,
- params=module.params,
- module=module)
- elif state == 'absent':
- project_result, changed = delete_project(client=client_conn, name=module.params['name'], module=module)
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(project_result))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_codecommit.py b/lib/ansible/modules/cloud/amazon/aws_codecommit.py
deleted file mode 100644
index 51b752a38a..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_codecommit.py
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2018, Shuang Wang <ooocamel@icloud.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'status': ['preview'],
- 'supported_by': 'community',
- 'metadata_version': '1.1'}
-
-DOCUMENTATION = '''
----
-module: aws_codecommit
-version_added: "2.8"
-short_description: Manage repositories in AWS CodeCommit
-description:
- - Supports creation and deletion of CodeCommit repositories.
- - See U(https://aws.amazon.com/codecommit/) for more information about CodeCommit.
-author: Shuang Wang (@ptux)
-
-requirements:
- - botocore
- - boto3
- - python >= 2.6
-
-options:
- name:
- description:
- - name of repository.
- required: true
- type: str
- description:
- description:
- - description or comment of repository.
- required: false
- aliases:
- - comment
- type: str
- state:
- description:
- - Specifies the state of repository.
- required: true
- choices: [ 'present', 'absent' ]
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-RETURN = '''
-repository_metadata:
- description: "Information about the repository."
- returned: always
- type: complex
- contains:
- account_id:
- description: "The ID of the AWS account associated with the repository."
- returned: when state is present
- type: str
- sample: "268342293637"
- arn:
- description: "The Amazon Resource Name (ARN) of the repository."
- returned: when state is present
- type: str
- sample: "arn:aws:codecommit:ap-northeast-1:268342293637:username"
- clone_url_http:
- description: "The URL to use for cloning the repository over HTTPS."
- returned: when state is present
- type: str
- sample: "https://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
- clone_url_ssh:
- description: "The URL to use for cloning the repository over SSH."
- returned: when state is present
- type: str
- sample: "ssh://git-codecommit.ap-northeast-1.amazonaws.com/v1/repos/reponame"
- creation_date:
- description: "The date and time the repository was created, in timestamp format."
- returned: when state is present
- type: str
- sample: "2018-10-16T13:21:41.261000+09:00"
- last_modified_date:
- description: "The date and time the repository was last modified, in timestamp format."
- returned: when state is present
- type: str
- sample: "2018-10-16T13:21:41.261000+09:00"
- repository_description:
- description: "A comment or description about the repository."
- returned: when state is present
- type: str
- sample: "test from ptux"
- repository_id:
- description: "The ID of the repository that was created or deleted"
- returned: always
- type: str
- sample: "e62a5c54-i879-497b-b62f-9f99e4ebfk8e"
- repository_name:
- description: "The repository's name."
- returned: when state is present
- type: str
- sample: "reponame"
-
-response_metadata:
- description: "Information about the response."
- returned: always
- type: complex
- contains:
- http_headers:
- description: "http headers of http response"
- returned: always
- type: dict
- http_status_code:
- description: "http status code of http response"
- returned: always
- type: str
- sample: "200"
- request_id:
- description: "http request id"
- returned: always
- type: str
- sample: "fb49cfca-d0fa-11e8-85cb-b3cc4b5045ef"
- retry_attempts:
- description: "numbers of retry attempts"
- returned: always
- type: str
- sample: "0"
-'''
-
-EXAMPLES = '''
-# Create a new repository
-- aws_codecommit:
- name: repo
- state: present
-
-# Delete a repository
-- aws_codecommit:
- name: repo
- state: absent
-'''
-
-try:
- import botocore
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-
-class CodeCommit(object):
- def __init__(self, module=None):
- self._module = module
- self._client = self._module.client('codecommit')
- self._check_mode = self._module.check_mode
-
- def process(self):
- result = dict(changed=False)
-
- if self._module.params['state'] == 'present':
- if not self._repository_exists():
- if not self._check_mode:
- result = self._create_repository()
- result['changed'] = True
- else:
- metadata = self._get_repository()['repositoryMetadata']
- if metadata['repositoryDescription'] != self._module.params['description']:
- if not self._check_mode:
- self._update_repository()
- result['changed'] = True
- result.update(self._get_repository())
- if self._module.params['state'] == 'absent' and self._repository_exists():
- if not self._check_mode:
- result = self._delete_repository()
- result['changed'] = True
- return result
-
- def _repository_exists(self):
- try:
- paginator = self._client.get_paginator('list_repositories')
- for page in paginator.paginate():
- repositories = page['repositories']
- for item in repositories:
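- # each listing entry carries repositoryName and repositoryId, so the
- # membership test below matches the requested name against either value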
- if self._module.params['name'] in item.values():
- return True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="couldn't get repository")
- return False
-
- def _get_repository(self):
- try:
- result = self._client.get_repository(
- repositoryName=self._module.params['name']
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="couldn't get repository")
- return result
-
- def _update_repository(self):
- try:
- result = self._client.update_repository_description(
- repositoryName=self._module.params['name'],
- repositoryDescription=self._module.params['description']
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="couldn't create repository")
- return result
-
- def _create_repository(self):
- try:
- result = self._client.create_repository(
- repositoryName=self._module.params['name'],
- repositoryDescription=self._module.params['description']
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="couldn't create repository")
- return result
-
- def _delete_repository(self):
- try:
- result = self._client.delete_repository(
- repositoryName=self._module.params['name']
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="couldn't delete repository")
- return result
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- state=dict(choices=['present', 'absent'], required=True),
- description=dict(default='', aliases=['comment'])
- )
-
- ansible_aws_module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- aws_codecommit = CodeCommit(module=ansible_aws_module)
- result = aws_codecommit.process()
- ansible_aws_module.exit_json(**camel_dict_to_snake_dict(result))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_codepipeline.py b/lib/ansible/modules/cloud/amazon/aws_codepipeline.py
deleted file mode 100644
index 3d3d683445..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_codepipeline.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: aws_codepipeline
-short_description: Create or delete AWS CodePipelines
-notes:
- - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/codepipeline.html).
-description:
- - Create or delete a CodePipeline on AWS.
-version_added: "2.9"
-author:
- - Stefan Horning (@stefanhorning) <horning@mediapeers.com>
-requirements: [ botocore, boto3 ]
-options:
- name:
- description:
- - Name of the pipeline.
- required: true
- type: str
- role_arn:
- description:
- - ARN of the IAM role to use when executing the pipeline.
- required: true
- type: str
- artifact_store:
- description:
- - Location information where artifacts are stored (on S3). Dictionary with fields type and location.
- required: true
- suboptions:
- type:
- description:
- - Type of the artifacts storage (only 'S3' is currently supported).
- type: str
- location:
- description:
- - Bucket name for artifacts.
- type: str
- type: dict
- stages:
- description:
- - List of stages to perform in the CodePipeline. List of dictionaries containing name and actions for each stage.
- required: true
- suboptions:
- name:
- description:
- - Name of the stage (step) in the CodePipeline.
- type: str
- actions:
- description:
- - List of action configurations for that stage.
- - 'See the boto3 documentation for full documentation of suboptions:'
- - 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/codepipeline.html#CodePipeline.Client.create_pipeline)'
- type: list
- elements: dict
- elements: dict
- type: list
- version:
- description:
- - Version number of the pipeline. This number is automatically incremented when a pipeline is updated.
- required: false
- type: int
- state:
- description:
- - Create or remove the CodePipeline.
- default: 'present'
- choices: ['present', 'absent']
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Example for creating a pipeline for continuous deploy of Github code to an ECS cluster (container)
-- aws_codepipeline:
- name: my_deploy_pipeline
- role_arn: arn:aws:iam::123456:role/AWS-CodePipeline-Service
- artifact_store:
- type: S3
- location: my_s3_codepipline_bucket
- stages:
- - name: Get_source
- actions:
- -
- name: Git_pull
- actionTypeId:
- category: Source
- owner: ThirdParty
- provider: GitHub
- version: '1'
- outputArtifacts:
- - { name: my-app-source }
- configuration:
- Owner: mediapeers
- Repo: my_gh_repo
- PollForSourceChanges: 'true'
- Branch: master
- # Generate token like this:
- # https://docs.aws.amazon.com/codepipeline/latest/userguide/GitHub-rotate-personal-token-CLI.html
- # GH Link: https://github.com/settings/tokens
- OAuthToken: 'abc123def456'
- runOrder: 1
- - name: Build
- actions:
- -
- name: CodeBuild
- actionTypeId:
- category: Build
- owner: AWS
- provider: CodeBuild
- version: '1'
- inputArtifacts:
- - { name: my-app-source }
- outputArtifacts:
- - { name: my-app-build }
- configuration:
- # A project with that name needs to be setup on AWS CodeBuild already (use code_build module).
- ProjectName: codebuild-project-name
- runOrder: 1
- - name: ECS_deploy
- actions:
- -
- name: ECS_deploy
- actionTypeId:
- category: Deploy
- owner: AWS
- provider: ECS
- version: '1'
- inputArtifacts:
- - { name: my-app-build }
- configuration:
- # an ECS cluster with that name needs to be setup on AWS ECS already (use ecs_cluster and ecs_service module)
- ClusterName: ecs-cluster-name
- ServiceName: ecs-cluster-service-name
- FileName: imagedefinitions.json
- region: us-east-1
- state: present
-'''
-
-RETURN = '''
-pipeline:
- description: Returns the dictionary describing the code pipeline configuration.
- returned: success
- type: complex
- contains:
- name:
- description: Name of the CodePipeline
- returned: always
- type: str
- sample: my_deploy_pipeline
- role_arn:
- description: ARN of the IAM role attached to the code pipeline
- returned: always
- type: str
- sample: arn:aws:iam::123123123:role/codepipeline-service-role
- artifact_store:
- description: Information about where the build artifacts are stored
- returned: always
- type: complex
- contains:
- type:
- description: The type of the artifacts store, such as S3
- returned: always
- type: str
- sample: S3
- location:
- description: The location of the artifacts storage (s3 bucket name)
- returned: always
- type: str
- sample: my_s3_codepipline_bucket
- encryption_key:
- description: The encryption key used to encrypt the artifacts store, such as an AWS KMS key.
- returned: when configured
- type: str
- stages:
- description: List of stages configured for this pipeline
- returned: always
- type: list
- version:
- description: The version number of the pipeline. This number is auto incremented when pipeline params are changed.
- returned: always
- type: int
-'''
-
-import copy
-import traceback
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
-
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def create_pipeline(client, name, role_arn, artifact_store, stages, version, module):
- pipeline_dict = {'name': name, 'roleArn': role_arn, 'artifactStore': artifact_store, 'stages': stages}
- if version:
- pipeline_dict['version'] = version
- try:
- resp = client.create_pipeline(pipeline=pipeline_dict)
- return resp
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable create pipeline {0}: {1}".format(name, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to create pipeline {0}: {1}".format(name, to_native(e)),
- exception=traceback.format_exc())
-
-
-def update_pipeline(client, pipeline_dict, module):
- try:
- resp = client.update_pipeline(pipeline=pipeline_dict)
- return resp
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to update pipeline {0}: {1}".format(pipeline_dict['name'], to_native(e)),
- exception=traceback.format_exc())
-
-
-def delete_pipeline(client, name, module):
- try:
- resp = client.delete_pipeline(name=name)
- return resp
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable delete pipeline {0}: {1}".format(name, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)),
- exception=traceback.format_exc())
-
-
-def describe_pipeline(client, name, version, module):
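- # get_pipeline returns the requested version when one is given, otherwise the
- # current (latest) version of the pipeline definition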
- pipeline = {}
- try:
- if version is not None:
- pipeline = client.get_pipeline(name=name, version=version)
- return pipeline
- else:
- pipeline = client.get_pipeline(name=name)
- return pipeline
- except is_boto3_error_code('PipelineNotFoundException'):
- return pipeline
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e)
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True, type='str'),
- role_arn=dict(required=True, type='str'),
- artifact_store=dict(required=True, type='dict'),
- stages=dict(required=True, type='list'),
- version=dict(type='int'),
- state=dict(choices=['present', 'absent'], default='present')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
- client_conn = module.client('codepipeline')
-
- state = module.params.get('state')
- changed = False
-
- # Determine if the CodePipeline exists
- found_code_pipeline = describe_pipeline(client=client_conn, name=module.params['name'], version=module.params['version'], module=module)
- pipeline_result = {}
-
- if state == 'present':
- if 'pipeline' in found_code_pipeline:
- pipeline_dict = copy.deepcopy(found_code_pipeline['pipeline'])
- # Update dictionary with provided module params:
- pipeline_dict['roleArn'] = module.params['role_arn']
- pipeline_dict['artifactStore'] = module.params['artifact_store']
- pipeline_dict['stages'] = module.params['stages']
- if module.params['version'] is not None:
- pipeline_dict['version'] = module.params['version']
-
- pipeline_result = update_pipeline(client=client_conn, pipeline_dict=pipeline_dict, module=module)
-
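- # compare_policies does an order-insensitive deep comparison and returns True
- # when the two dicts differ, so it doubles here as a generic change detector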
- if compare_policies(found_code_pipeline['pipeline'], pipeline_result['pipeline']):
- changed = True
- else:
- pipeline_result = create_pipeline(
- client=client_conn,
- name=module.params['name'],
- role_arn=module.params['role_arn'],
- artifact_store=module.params['artifact_store'],
- stages=module.params['stages'],
- version=module.params['version'],
- module=module)
- changed = True
- elif state == 'absent':
- if found_code_pipeline:
- pipeline_result = delete_pipeline(client=client_conn, name=module.params['name'], module=module)
- changed = True
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(pipeline_result))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_config_aggregation_authorization.py b/lib/ansible/modules/cloud/amazon/aws_config_aggregation_authorization.py
deleted file mode 100644
index 687e5cb125..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_config_aggregation_authorization.py
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: aws_config_aggregation_authorization
-short_description: Manage cross-account AWS Config authorizations
-description:
- - This module manages cross-account authorizations for AWS Config aggregations.
-version_added: "2.6"
-requirements: [ 'botocore', 'boto3' ]
-author:
- - "Aaron Smith (@slapula)"
-options:
- state:
- description:
- - Whether the Config rule should be present or absent.
- default: present
- choices: ['present', 'absent']
- type: str
- authorized_account_id:
- description:
- - The 12-digit account ID of the account authorized to aggregate data.
- type: str
- required: true
- authorized_aws_region:
- description:
- - The region authorized to collect aggregated data.
- type: str
- required: true
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Get current account ID
- aws_caller_info:
- register: whoami
-- aws_config_aggregation_authorization:
- state: present
- authorized_account_id: '{{ whoami.account }}'
- authorized_aws_region: us-east-1
-'''
-
-RETURN = '''#'''
-
-
-try:
- import botocore
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import AWSRetry
-
-
-def resource_exists(client, module, params):
- try:
- current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
- authorization_exists = next(
- (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
- None
- )
- if authorization_exists:
- return True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
- return False
-
-
-def create_resource(client, module, params, result):
- try:
- response = client.put_aggregation_authorization(
- AuthorizedAccountId=params['AuthorizedAccountId'],
- AuthorizedAwsRegion=params['AuthorizedAwsRegion']
- )
- result['changed'] = True
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
-
-
-def update_resource(client, module, params, result):
- current_authorizations = client.describe_aggregation_authorizations()['AggregationAuthorizations']
- current_params = next(
- (item for item in current_authorizations if item["AuthorizedAccountId"] == params['AuthorizedAccountId']),
- None
- )
-
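- # drop the server-generated fields so only user-settable parameters are compared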
- del current_params['AggregationAuthorizationArn']
- del current_params['CreationTime']
-
- if params != current_params:
- try:
- response = client.put_aggregation_authorization(
- AuthorizedAccountId=params['AuthorizedAccountId'],
- AuthorizedAwsRegion=params['AuthorizedAwsRegion']
- )
- result['changed'] = True
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create AWS Aggregation authorization")
-
-
-def delete_resource(client, module, params, result):
- try:
- response = client.delete_aggregation_authorization(
- AuthorizedAccountId=params['AuthorizedAccountId'],
- AuthorizedAwsRegion=params['AuthorizedAwsRegion']
- )
- result['changed'] = True
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete AWS Aggregation authorization")
-
-
-def main():
- module = AnsibleAWSModule(
- argument_spec={
- 'state': dict(type='str', choices=['present', 'absent'], default='present'),
- 'authorized_account_id': dict(type='str', required=True),
- 'authorized_aws_region': dict(type='str', required=True),
- },
- supports_check_mode=False,
- )
-
- result = {'changed': False}
-
- params = {
- 'AuthorizedAccountId': module.params.get('authorized_account_id'),
- 'AuthorizedAwsRegion': module.params.get('authorized_aws_region'),
- }
-
- client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
- resource_status = resource_exists(client, module, params)
-
- if module.params.get('state') == 'present':
- if not resource_status:
- create_resource(client, module, params, result)
- else:
- update_resource(client, module, params, result)
-
- if module.params.get('state') == 'absent':
- if resource_status:
- delete_resource(client, module, params, result)
-
- module.exit_json(changed=result['changed'])
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_config_aggregator.py b/lib/ansible/modules/cloud/amazon/aws_config_aggregator.py
deleted file mode 100644
index 7108a47f02..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_config_aggregator.py
+++ /dev/null
@@ -1,232 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: aws_config_aggregator
-short_description: Manage AWS Config aggregations across multiple accounts
-description:
- - This module manages AWS Config configuration aggregators.
-version_added: "2.6"
-requirements: [ 'botocore', 'boto3' ]
-author:
- - "Aaron Smith (@slapula)"
-options:
- name:
- description:
- - The name of the AWS Config resource.
- required: true
- type: str
- state:
- description:
- - Whether the Config rule should be present or absent.
- default: present
- choices: ['present', 'absent']
- type: str
- account_sources:
- description:
- - Provides a list of source accounts and regions to be aggregated.
- suboptions:
- account_ids:
- description:
- - A list of 12-digit account IDs of accounts being aggregated.
- type: list
- elements: str
- aws_regions:
- description:
- - A list of source regions being aggregated.
- type: list
- elements: str
- all_aws_regions:
- description:
- - If true, aggregate existing AWS Config regions and future regions.
- type: bool
- type: list
- elements: dict
- required: true
- organization_source:
- description:
- - Provides the AWS Organizations source (role ARN and regions) whose data is aggregated.
- suboptions:
- role_arn:
- description:
- - ARN of the IAM role used to retrieve AWS Organization details associated with the aggregator account.
- type: str
- aws_regions:
- description:
- - The source regions being aggregated.
- type: list
- elements: str
- all_aws_regions:
- description:
- - If true, aggregate existing AWS Config regions and future regions.
- type: bool
- type: dict
- required: true
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Create cross-account aggregator
- aws_config_aggregator:
- name: test_config_rule
- state: present
- account_sources:
- account_ids:
- - 1234567890
- - 0123456789
- - 9012345678
- all_aws_regions: yes
-'''
-
-RETURN = '''#'''
-
-
-try:
- import botocore
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
-
-
-def resource_exists(client, module, params):
- try:
- aggregator = client.describe_configuration_aggregators(
- ConfigurationAggregatorNames=[params['name']]
- )
- return aggregator['ConfigurationAggregators'][0]
- except is_boto3_error_code('NoSuchConfigurationAggregatorException'):
- return
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e)
-
-
-def create_resource(client, module, params, result):
- try:
- client.put_configuration_aggregator(
- ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
- AccountAggregationSources=params['AccountAggregationSources'],
- OrganizationAggregationSource=params['OrganizationAggregationSource']
- )
- result['changed'] = True
- result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
-
-
-def update_resource(client, module, params, result):
- current_params = client.describe_configuration_aggregators(
- ConfigurationAggregatorNames=[params['name']]
- )['ConfigurationAggregators'][0]
-
- # compare only the user-settable fields; the ARN and timestamps are server-generated
- del current_params['ConfigurationAggregatorArn']
- del current_params['CreationTime']
- del current_params['LastUpdatedTime']
-
- if params != current_params:
- try:
- client.put_configuration_aggregator(
- ConfigurationAggregatorName=params['ConfigurationAggregatorName'],
- AccountAggregationSources=params['AccountAggregationSources'],
- OrganizationAggregationSource=params['OrganizationAggregationSource']
- )
- result['changed'] = True
- result['aggregator'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create AWS Config configuration aggregator")
-
-
-def delete_resource(client, module, params, result):
- try:
- client.delete_configuration_aggregator(
- ConfigurationAggregatorName=params['ConfigurationAggregatorName']
- )
- result['changed'] = True
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration aggregator")
-
-
-def main():
- module = AnsibleAWSModule(
- argument_spec={
- 'name': dict(type='str', required=True),
- 'state': dict(type='str', choices=['present', 'absent'], default='present'),
- 'account_sources': dict(type='list', required=True),
- 'organization_source': dict(type='dict', required=True)
- },
- supports_check_mode=False,
- )
-
- result = {
- 'changed': False
- }
-
- name = module.params.get('name')
- state = module.params.get('state')
-
- params = {}
- if name:
- params['ConfigurationAggregatorName'] = name
- if module.params.get('account_sources'):
- params['AccountAggregationSources'] = []
- for i in module.params.get('account_sources'):
- tmp_dict = {}
- if i.get('account_ids'):
- tmp_dict['AccountIds'] = i.get('account_ids')
- if i.get('aws_regions'):
- tmp_dict['AwsRegions'] = i.get('aws_regions')
- if i.get('all_aws_regions') is not None:
- tmp_dict['AllAwsRegions'] = i.get('all_aws_regions')
- params['AccountAggregationSources'].append(tmp_dict)
- if module.params.get('organization_source'):
- params['OrganizationAggregationSource'] = {}
- if module.params.get('organization_source').get('role_arn'):
- params['OrganizationAggregationSource'].update({
- 'RoleArn': module.params.get('organization_source').get('role_arn')
- })
- if module.params.get('organization_source').get('aws_regions'):
- params['OrganizationAggregationSource'].update({
- 'AwsRegions': module.params.get('organization_source').get('aws_regions')
- })
- if module.params.get('organization_source').get('all_aws_regions') is not None:
- params['OrganizationAggregationSource'].update({
- 'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions')
- })
-
- client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
-
- resource_status = resource_exists(client, module, params)
-
- if state == 'present':
- if not resource_status:
- create_resource(client, module, params, result)
- else:
- update_resource(client, module, params, result)
-
- if state == 'absent':
- if resource_status:
- delete_resource(client, module, params, result)
-
- module.exit_json(changed=result['changed'])
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_config_delivery_channel.py b/lib/ansible/modules/cloud/amazon/aws_config_delivery_channel.py
deleted file mode 100644
index 9ef4802f3e..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_config_delivery_channel.py
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: aws_config_delivery_channel
-short_description: Manage AWS Config delivery channels
-description:
- - This module manages AWS Config delivery locations for rule checks and configuration info.
-version_added: "2.6"
-requirements: [ 'botocore', 'boto3' ]
-author:
- - "Aaron Smith (@slapula)"
-options:
- name:
- description:
- - The name of the AWS Config resource.
- required: true
- type: str
- state:
- description:
- - Whether the Config rule should be present or absent.
- default: present
- choices: ['present', 'absent']
- type: str
- s3_bucket:
- description:
- - The name of the Amazon S3 bucket to which AWS Config delivers configuration snapshots and configuration history files.
- type: str
- required: true
- s3_prefix:
- description:
- - The prefix for the specified Amazon S3 bucket.
- type: str
- sns_topic_arn:
- description:
- - The Amazon Resource Name (ARN) of the Amazon SNS topic to which AWS Config sends notifications about configuration changes.
- type: str
- delivery_frequency:
- description:
- - The frequency with which AWS Config delivers configuration snapshots.
- choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours']
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Create Delivery Channel for AWS Config
- aws_config_delivery_channel:
- name: test_delivery_channel
- state: present
- s3_bucket: 'test_aws_config_bucket'
- sns_topic_arn: 'arn:aws:sns:us-east-1:123456789012:aws_config_topic:1234ab56-cdef-7g89-01hi-2jk34l5m67no'
- delivery_frequency: 'Twelve_Hours'
-'''
-
-RETURN = '''#'''
-
-
-try:
- import botocore
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
-
-
-# this waits for an IAM role to become fully available, at the cost of
-# taking a long time to fail when the IAM role/policy really is invalid
-retry_unavailable_iam_on_put_delivery = AWSRetry.backoff(
- catch_extra_error_codes=['InsufficientDeliveryPolicyException'],
-)
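-# Usage sketch: the decorator wraps the client method at each call site, e.g.
-#   retry_unavailable_iam_on_put_delivery(client.put_delivery_channel)(
-#       DeliveryChannel=params,
-#   )
-# so put_delivery_channel is retried with backoff while IAM permissions propagate.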
-
-
-def resource_exists(client, module, params):
- try:
- channel = client.describe_delivery_channels(
- DeliveryChannelNames=[params['name']],
- aws_retry=True,
- )
- return channel['DeliveryChannels'][0]
- except is_boto3_error_code('NoSuchDeliveryChannelException'):
- return
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e)
-
-
-def create_resource(client, module, params, result):
- try:
- retry_unavailable_iam_on_put_delivery(
- client.put_delivery_channel,
- )(
- DeliveryChannel=params,
- )
- result['changed'] = True
- result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
- return result
- except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
- module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
- except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
- "Make sure the bucket exists and is available")
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel")
-
-
-def update_resource(client, module, params, result):
- current_params = client.describe_delivery_channels(
- DeliveryChannelNames=[params['name']],
- aws_retry=True,
- )
-
- if params != current_params['DeliveryChannels'][0]:
- try:
- retry_unavailable_iam_on_put_delivery(
- client.put_delivery_channel,
- )(
- DeliveryChannel=params,
- )
- result['changed'] = True
- result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
- return result
- except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
- module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
- except is_boto3_error_code('InsufficientDeliveryPolicyException') as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
- "Make sure the bucket exists and is available")
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel")
-
-
-def delete_resource(client, module, params, result):
- try:
- response = client.delete_delivery_channel(
- DeliveryChannelName=params['name']
- )
- result['changed'] = True
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete AWS Config delivery channel")
-
-
-def main():
- module = AnsibleAWSModule(
- argument_spec={
- 'name': dict(type='str', required=True),
- 'state': dict(type='str', choices=['present', 'absent'], default='present'),
- 's3_bucket': dict(type='str', required=True),
- 's3_prefix': dict(type='str'),
- 'sns_topic_arn': dict(type='str'),
- 'delivery_frequency': dict(
- type='str',
- choices=[
- 'One_Hour',
- 'Three_Hours',
- 'Six_Hours',
- 'Twelve_Hours',
- 'TwentyFour_Hours'
- ]
- ),
- },
- supports_check_mode=False,
- )
-
- result = {
- 'changed': False
- }
-
- name = module.params.get('name')
- state = module.params.get('state')
-
- params = {}
- if name:
- params['name'] = name
- if module.params.get('s3_bucket'):
- params['s3BucketName'] = module.params.get('s3_bucket')
- if module.params.get('s3_prefix'):
- params['s3KeyPrefix'] = module.params.get('s3_prefix')
- if module.params.get('sns_topic_arn'):
- params['snsTopicARN'] = module.params.get('sns_topic_arn')
- if module.params.get('delivery_frequency'):
- params['configSnapshotDeliveryProperties'] = {
- 'deliveryFrequency': module.params.get('delivery_frequency')
- }
-
- client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
-
- resource_status = resource_exists(client, module, params)
-
- if state == 'present':
- if not resource_status:
- create_resource(client, module, params, result)
- else:
- update_resource(client, module, params, result)
-
- if state == 'absent':
- if resource_status:
- delete_resource(client, module, params, result)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
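The decorate-then-call shape used above can be easy to misread: AWSRetry.backoff() returns a decorator, which is applied to the boto3 method at call time rather than at definition time. A minimal, dependency-free sketch of the same pattern follows; the names (backoff, RetryableError, flaky_put) are illustrative, not part of the module:

    import time

    class RetryableError(Exception):
        pass

    def backoff(tries=3, delay=0.1):
        # Returns a decorator, mirroring AWSRetry.backoff(catch_extra_error_codes=[...]).
        def decorator(func):
            def wrapper(*args, **kwargs):
                for attempt in range(tries):
                    try:
                        return func(*args, **kwargs)
                    except RetryableError:
                        if attempt == tries - 1:
                            raise
                        time.sleep(delay * (2 ** attempt))
            return wrapper
        return decorator

    # Same shape as: retry_unavailable_iam_on_put_delivery(client.put_delivery_channel)(DeliveryChannel=params)
    calls = {'n': 0}

    def flaky_put(**kwargs):
        calls['n'] += 1
        if calls['n'] < 3:
            raise RetryableError('InsufficientDeliveryPolicyException')
        return {'ok': True}

    assert backoff(tries=5)(flaky_put)(DeliveryChannel={'name': 'example'}) == {'ok': True}
    assert calls['n'] == 3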
diff --git a/lib/ansible/modules/cloud/amazon/aws_config_recorder.py b/lib/ansible/modules/cloud/amazon/aws_config_recorder.py
deleted file mode 100644
index d986ed0ebb..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_config_recorder.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: aws_config_recorder
-short_description: Manage AWS Config Recorders
-description:
- - Module manages AWS Config configuration recorder settings.
-version_added: "2.6"
-requirements: [ 'botocore', 'boto3' ]
-author:
- - "Aaron Smith (@slapula)"
-options:
- name:
- description:
- - The name of the AWS Config resource.
- required: true
- type: str
- state:
- description:
- - Whether the configuration recorder should be present or absent.
- default: present
- choices: ['present', 'absent']
- type: str
- role_arn:
- description:
- - Amazon Resource Name (ARN) of the IAM role used to describe the AWS resources associated with the account.
- - Required when I(state=present).
- type: str
- recording_group:
- description:
- - Specifies the types of AWS resources for which AWS Config records configuration changes.
- - Required when I(state=present).
- suboptions:
- all_supported:
- description:
- - Specifies whether AWS Config records configuration changes for every supported type of regional resource.
- - If I(all_supported=true), when AWS Config adds support for a new type of regional resource, it starts
- recording resources of that type automatically.
- - If I(all_supported=true), you cannot enumerate a list of I(resource_types).
- include_global_types:
- description:
- - Specifies whether AWS Config includes all supported types of global resources (for example, IAM resources)
- with the resources that it records.
- - The configuration details for any global resource are the same in all regions. To prevent duplicate configuration items,
- you should consider customizing AWS Config in only one region to record global resources.
- - If you set I(include_global_types=true), you must also set I(all_supported=true).
- - If you set I(include_global_types=true), when AWS Config adds support for a new type of global resource, it starts recording
- resources of that type automatically.
- resource_types:
- description:
- - A list that specifies the types of AWS resources for which AWS Config records configuration changes (for example,
- C(AWS::EC2::Instance) or C(AWS::CloudTrail::Trail)).
- - Before you can set this option, you must set I(all_supported=false).
- type: dict
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Create Configuration Recorder for AWS Config
- aws_config_recorder:
- name: test_configuration_recorder
- state: present
- role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
- recording_group:
- all_supported: true
- include_global_types: true
-'''
-
-RETURN = '''#'''
-
-
-try:
- import botocore
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
-
-
-def resource_exists(client, module, params):
- try:
- recorder = client.describe_configuration_recorders(
- ConfigurationRecorderNames=[params['name']]
- )
- return recorder['ConfigurationRecorders'][0]
- except is_boto3_error_code('NoSuchConfigurationRecorderException'):
- return
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e)
-
-
-def create_resource(client, module, params, result):
- try:
- response = client.put_configuration_recorder(
- ConfigurationRecorder=params
- )
- result['changed'] = True
- result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create AWS Config configuration recorder")
-
-
-def update_resource(client, module, params, result):
- current_params = client.describe_configuration_recorders(
- ConfigurationRecorderNames=[params['name']]
- )
-
- if params != current_params['ConfigurationRecorders'][0]:
- try:
- response = client.put_configuration_recorder(
- ConfigurationRecorder=params
- )
- result['changed'] = True
- result['recorder'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't update AWS Config configuration recorder")
-
-
-def delete_resource(client, module, params, result):
- try:
- response = client.delete_configuration_recorder(
- ConfigurationRecorderName=params['name']
- )
- result['changed'] = True
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete AWS Config configuration recorder")
-
-
-def main():
-
- module = AnsibleAWSModule(
- argument_spec={
- 'name': dict(type='str', required=True),
- 'state': dict(type='str', choices=['present', 'absent'], default='present'),
- 'role_arn': dict(type='str'),
- 'recording_group': dict(type='dict'),
- },
- supports_check_mode=False,
- required_if=[
- ('state', 'present', ['role_arn', 'recording_group']),
- ],
- )
-
- result = {
- 'changed': False
- }
-
- name = module.params.get('name')
- state = module.params.get('state')
-
- params = {}
- if name:
- params['name'] = name
- if module.params.get('role_arn'):
- params['roleARN'] = module.params.get('role_arn')
- if module.params.get('recording_group'):
- params['recordingGroup'] = {}
- if module.params.get('recording_group').get('all_supported') is not None:
- params['recordingGroup'].update({
- 'allSupported': module.params.get('recording_group').get('all_supported')
- })
- if module.params.get('recording_group').get('include_global_types') is not None:
- params['recordingGroup'].update({
- 'includeGlobalResourceTypes': module.params.get('recording_group').get('include_global_types')
- })
- if module.params.get('recording_group').get('resource_types'):
- params['recordingGroup'].update({
- 'resourceTypes': module.params.get('recording_group').get('resource_types')
- })
- else:
- params['recordingGroup'].update({
- 'resourceTypes': []
- })
-
- client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
-
- resource_status = resource_exists(client, module, params)
-
- if state == 'present':
- if not resource_status:
- create_resource(client, module, params, result)
- else:
- update_resource(client, module, params, result)
-
- if state == 'absent':
- if resource_status:
- delete_resource(client, module, params, result)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
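For clarity, the snake_case-to-camelCase translation that main() above performs for recording_group can be restated as a small standalone function. This is an illustrative sketch, not part of the module:

    def build_recording_group(recording_group):
        # Mirrors the recordingGroup construction in main() above.
        rg = {}
        if recording_group.get('all_supported') is not None:
            rg['allSupported'] = recording_group['all_supported']
        if recording_group.get('include_global_types') is not None:
            rg['includeGlobalResourceTypes'] = recording_group['include_global_types']
        # resourceTypes falls back to an empty list, as in the module.
        rg['resourceTypes'] = recording_group.get('resource_types') or []
        return rg

    assert build_recording_group({'all_supported': True, 'include_global_types': True}) == {
        'allSupported': True,
        'includeGlobalResourceTypes': True,
        'resourceTypes': [],
    }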
diff --git a/lib/ansible/modules/cloud/amazon/aws_config_rule.py b/lib/ansible/modules/cloud/amazon/aws_config_rule.py
deleted file mode 100644
index 97d39e4dcc..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_config_rule.py
+++ /dev/null
@@ -1,275 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: aws_config_rule
-short_description: Manage AWS Config rules
-description:
- - Module manages AWS Config rules
-version_added: "2.6"
-requirements: [ 'botocore', 'boto3' ]
-author:
- - "Aaron Smith (@slapula)"
-options:
- name:
- description:
- - The name of the AWS Config resource.
- required: true
- type: str
- state:
- description:
- - Whether the Config rule should be present or absent.
- default: present
- choices: ['present', 'absent']
- type: str
- description:
- description:
- - The description that you provide for the AWS Config rule.
- type: str
- scope:
- description:
- - Defines which resources can trigger an evaluation for the rule.
- suboptions:
- compliance_types:
- description:
- - The resource types of only those AWS resources that you want to trigger an evaluation for the rule.
- You can only specify one type if you also specify a resource ID for I(compliance_id).
- compliance_id:
- description:
- - The ID of the only AWS resource that you want to trigger an evaluation for the rule. If you specify a resource ID,
- you must specify one resource type for I(compliance_types).
- tag_key:
- description:
- - The tag key that is applied to only those AWS resources that you want to trigger an evaluation for the rule.
- tag_value:
- description:
- - The tag value applied to only those AWS resources that you want to trigger an evaluation for the rule.
- If you specify a value for I(tag_value), you must also specify a value for I(tag_key).
- type: dict
- source:
- description:
- - Provides the rule owner (AWS or customer), the rule identifier, and the notifications that cause the function to
- evaluate your AWS resources.
- suboptions:
- owner:
- description:
- - Indicates whether AWS or the customer owns and manages the AWS Config rule.
- identifier:
- description:
- - The identifier of the rule. For AWS managed rules, a predefined identifier such as C(S3_BUCKET_PUBLIC_WRITE_PROHIBITED);
- for custom rules, the Amazon Resource Name (ARN) of the rule's Lambda function.
- details:
- description:
- - Provides the source and type of the event that causes AWS Config to evaluate your AWS resources.
- - This parameter expects a list of dictionaries. Each dictionary expects the following key/value pairs.
- - Key C(EventSource): The source of the event, such as an AWS service, that triggers AWS Config to evaluate your AWS resources.
- - Key C(MessageType): The type of notification that triggers AWS Config to run an evaluation for a rule.
- - Key C(MaximumExecutionFrequency): The frequency at which you want AWS Config to run evaluations for a custom rule with a periodic trigger.
- type: dict
- required: true
- input_parameters:
- description:
- - A string, in JSON format, that is passed to the AWS Config rule Lambda function.
- type: str
- execution_frequency:
- description:
- - The maximum frequency with which AWS Config runs evaluations for a rule.
- choices: ['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours']
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Create Config Rule for AWS Config
- aws_config_rule:
- name: test_config_rule
- state: present
- description: 'This AWS Config rule checks for public write access on S3 buckets'
- scope:
- compliance_types:
- - 'AWS::S3::Bucket'
- source:
- owner: AWS
- identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED'
-
-'''
-
-RETURN = '''#'''
-
-
-try:
- import botocore
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
-
-
-def rule_exists(client, module, params):
- try:
- rule = client.describe_config_rules(
- ConfigRuleNames=[params['ConfigRuleName']],
- aws_retry=True,
- )
- return rule['ConfigRules'][0]
- except is_boto3_error_code('NoSuchConfigRuleException'):
- return
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e)
-
-
-def create_resource(client, module, params, result):
- try:
- client.put_config_rule(
- ConfigRule=params
- )
- result['changed'] = True
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create AWS Config rule")
-
-
-def update_resource(client, module, params, result):
- current_params = client.describe_config_rules(
- ConfigRuleNames=[params['ConfigRuleName']],
- aws_retry=True,
- )
-
- del current_params['ConfigRules'][0]['ConfigRuleArn']
- del current_params['ConfigRules'][0]['ConfigRuleId']
-
- if params != current_params['ConfigRules'][0]:
- try:
- client.put_config_rule(
- ConfigRule=params
- )
- result['changed'] = True
- result['rule'] = camel_dict_to_snake_dict(rule_exists(client, module, params))
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create AWS Config rule")
-
-
-def delete_resource(client, module, params, result):
- try:
- response = client.delete_config_rule(
- ConfigRuleName=params['ConfigRuleName'],
- aws_retry=True,
- )
- result['changed'] = True
- result['rule'] = {}
- return result
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete AWS Config rule")
-
-
-def main():
- module = AnsibleAWSModule(
- argument_spec={
- 'name': dict(type='str', required=True),
- 'state': dict(type='str', choices=['present', 'absent'], default='present'),
- 'description': dict(type='str'),
- 'scope': dict(type='dict'),
- 'source': dict(type='dict', required=True),
- 'input_parameters': dict(type='str'),
- 'execution_frequency': dict(
- type='str',
- choices=[
- 'One_Hour',
- 'Three_Hours',
- 'Six_Hours',
- 'Twelve_Hours',
- 'TwentyFour_Hours'
- ]
- ),
- },
- supports_check_mode=False,
- )
-
- result = {
- 'changed': False
- }
-
- name = module.params.get('name')
- state = module.params.get('state')
-
- params = {}
- if name:
- params['ConfigRuleName'] = name
- if module.params.get('description'):
- params['Description'] = module.params.get('description')
- if module.params.get('scope'):
- params['Scope'] = {}
- if module.params.get('scope').get('compliance_types'):
- params['Scope'].update({
- 'ComplianceResourceTypes': module.params.get('scope').get('compliance_types')
- })
- if module.params.get('scope').get('tag_key'):
- params['Scope'].update({
- 'TagKey': module.params.get('scope').get('tag_key')
- })
- if module.params.get('scope').get('tag_value'):
- params['Scope'].update({
- 'TagValue': module.params.get('scope').get('tag_value')
- })
- if module.params.get('scope').get('compliance_id'):
- params['Scope'].update({
- 'ComplianceResourceId': module.params.get('scope').get('compliance_id')
- })
- if module.params.get('source'):
- params['Source'] = {}
- if module.params.get('source').get('owner'):
- params['Source'].update({
- 'Owner': module.params.get('source').get('owner')
- })
- if module.params.get('source').get('identifier'):
- params['Source'].update({
- 'SourceIdentifier': module.params.get('source').get('identifier')
- })
- if module.params.get('source').get('details'):
- params['Source'].update({
- 'SourceDetails': module.params.get('source').get('details')
- })
- if module.params.get('input_parameters'):
- params['InputParameters'] = module.params.get('input_parameters')
- if module.params.get('execution_frequency'):
- params['MaximumExecutionFrequency'] = module.params.get('execution_frequency')
- params['ConfigRuleState'] = 'ACTIVE'
-
- client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
-
- existing_rule = rule_exists(client, module, params)
-
- if state == 'present':
- if not existing_rule:
- create_resource(client, module, params, result)
- else:
- update_resource(client, module, params, result)
-
- if state == 'absent':
- if existing_rule:
- delete_resource(client, module, params, result)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
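The idempotency check in update_resource() above compares the desired ConfigRule against what AWS returns, after stripping the server-assigned fields. A restatement as a pure function, with hypothetical names (pop() is used instead of del so a missing key cannot raise):

    def rule_needs_update(desired, current):
        # Drop server-assigned fields before comparing, as update_resource() does.
        current = dict(current)
        for server_side_key in ('ConfigRuleArn', 'ConfigRuleId'):
            current.pop(server_side_key, None)
        return desired != current

    desired = {'ConfigRuleName': 'test_config_rule', 'ConfigRuleState': 'ACTIVE'}
    current = dict(desired,
                   ConfigRuleArn='arn:aws:config:us-east-1:123456789012:config-rule/config-rule-abc123',
                   ConfigRuleId='config-rule-abc123')
    assert rule_needs_update(desired, current) is False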
diff --git a/lib/ansible/modules/cloud/amazon/aws_direct_connect_connection.py b/lib/ansible/modules/cloud/amazon/aws_direct_connect_connection.py
deleted file mode 100644
index 2d603571f8..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_direct_connect_connection.py
+++ /dev/null
@@ -1,343 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: aws_direct_connect_connection
-short_description: Creates, deletes, or modifies a Direct Connect connection
-description:
- - Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location.
- Upon creation the connection may be added to a link aggregation group or established as a standalone connection.
- The connection may later be associated or disassociated with a link aggregation group.
-version_added: "2.4"
-author: "Sloane Hertel (@s-hertel)"
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - boto3
- - botocore
-options:
- state:
- description:
- - The state of the Direct Connect connection.
- choices:
- - present
- - absent
- type: str
- required: true
- name:
- description:
- - The name of the Direct Connect connection. This is required to create a
- new connection.
- - One of I(connection_id) or I(name) must be specified.
- type: str
- connection_id:
- description:
- - The ID of the Direct Connect connection.
- - Modifying attributes of a connection with I(forced_update) will result in a new Direct Connect connection ID.
- - One of I(connection_id) or I(name) must be specified.
- type: str
- location:
- description:
- - Where the Direct Connect connection is located.
- - Required when I(state=present).
- type: str
- bandwidth:
- description:
- - The bandwidth of the Direct Connect connection.
- - Required when I(state=present).
- choices:
- - 1Gbps
- - 10Gbps
- type: str
- link_aggregation_group:
- description:
- - The ID of the link aggregation group you want to associate with the connection.
- - This is optional when a stand-alone connection is desired.
- type: str
- forced_update:
- description:
- - To modify the bandwidth or location, the connection will need to be deleted and recreated.
- By default this will not happen; this option must be set to true to allow it.
- type: bool
-"""
-
-EXAMPLES = """
-
-# create a Direct Connect connection
-- aws_direct_connect_connection:
- name: ansible-test-connection
- state: present
- location: EqDC2
- link_aggregation_group: dxlag-xxxxxxxx
- bandwidth: 1Gbps
- register: dc
-
-# disassociate the LAG from the connection
-- aws_direct_connect_connection:
- state: present
- connection_id: '{{ dc.connection.connection_id }}'
- location: EqDC2
- bandwidth: 1Gbps
-
-# replace the connection with one with more bandwidth
-- aws_direct_connect_connection:
- state: present
- name: ansible-test-connection
- location: EqDC2
- bandwidth: 10Gbps
- forced_update: True
-
-# delete the connection
-- aws_direct_connect_connection:
- state: absent
- name: ansible-test-connection
-"""
-
-RETURN = """
-connection:
- description: The attributes of the direct connect connection.
- type: complex
- returned: when I(state=present)
- contains:
- aws_device:
- description: The endpoint which the physical connection terminates on.
- returned: when the requested state is no longer 'requested'
- type: str
- sample: EqDC2-12pmo7hemtz1z
- bandwidth:
- description: The bandwidth of the connection.
- returned: always
- type: str
- sample: 1Gbps
- connection_id:
- description: The ID of the connection.
- returned: always
- type: str
- sample: dxcon-ffy9ywed
- connection_name:
- description: The name of the connection.
- returned: always
- type: str
- sample: ansible-test-connection
- connection_state:
- description: The state of the connection.
- returned: always
- type: str
- sample: pending
- loa_issue_time:
- description: The issue time of the connection's Letter of Authorization - Connecting Facility Assignment.
- returned: when the LOA-CFA has been issued (the connection state will no longer be 'requested')
- type: str
- sample: '2018-03-20T17:36:26-04:00'
- location:
- description: The location of the connection.
- returned: always
- type: str
- sample: EqDC2
- owner_account:
- description: The account that owns the direct connect connection.
- returned: always
- type: str
- sample: '123456789012'
- region:
- description: The region in which the connection exists.
- returned: always
- type: str
- sample: us-east-1
-"""
-
-import traceback
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry)
-from ansible.module_utils.aws.direct_connect import (DirectConnectError, delete_connection,
- associate_connection_and_lag, disassociate_connection_and_lag)
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except Exception:
- pass
- # handled by imported AnsibleAWSModule
-
-retry_params = {"tries": 10, "delay": 5, "backoff": 1.2, "catch_extra_error_codes": ["DirectConnectClientException"]}
-
-
-def connection_status(client, connection_id):
- return connection_exists(client, connection_id=connection_id, connection_name=None, verify=False)
-
-
-def connection_exists(client, connection_id=None, connection_name=None, verify=True):
- params = {}
- if connection_id:
- params['connectionId'] = connection_id
- try:
- response = AWSRetry.backoff(**retry_params)(client.describe_connections)(**params)
- except (BotoCoreError, ClientError) as e:
- if connection_id:
- msg = "Failed to describe DirectConnect ID {0}".format(connection_id)
- else:
- msg = "Failed to describe DirectConnect connections"
- raise DirectConnectError(msg=msg,
- last_traceback=traceback.format_exc(),
- exception=e)
-
- match = []
- connection = []
-
- # look for matching connections
-
- if len(response.get('connections', [])) == 1 and connection_id:
- if response['connections'][0]['connectionState'] != 'deleted':
- match.append(response['connections'][0]['connectionId'])
- connection.extend(response['connections'])
-
- for conn in response.get('connections', []):
- if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
- match.append(conn['connectionId'])
- connection.append(conn)
-
- # verifying if the connections exists; if true, return connection identifier, otherwise return False
- if verify and len(match) == 1:
- return match[0]
- elif verify:
- return False
- # not verifying if the connection exists; just return current connection info
- elif len(connection) == 1:
- return {'connection': connection[0]}
- return {'connection': {}}
-
-
-def create_connection(client, location, bandwidth, name, lag_id):
- if not name:
- raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.")
- params = {
- 'location': location,
- 'bandwidth': bandwidth,
- 'connectionName': name,
- }
- if lag_id:
- params['lagId'] = lag_id
-
- try:
- connection = AWSRetry.backoff(**retry_params)(client.create_connection)(**params)
- except (BotoCoreError, ClientError) as e:
- raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name),
- last_traceback=traceback.format_exc(),
- exception=e)
- return connection['connectionId']
-
-
-def changed_properties(current_status, location, bandwidth):
- current_bandwidth = current_status['bandwidth']
- current_location = current_status['location']
-
- return current_bandwidth != bandwidth or current_location != location
-
-
-@AWSRetry.backoff(**retry_params)
-def update_associations(client, latest_state, connection_id, lag_id):
- changed = False
- if 'lagId' in latest_state and lag_id != latest_state['lagId']:
- disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId'])
- changed = True
- if (changed and lag_id) or (lag_id and 'lagId' not in latest_state):
- associate_connection_and_lag(client, connection_id, lag_id)
- changed = True
- return changed
-
-
-def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update):
- # the connection is found; get the latest state and see if it needs to be updated
- if connection_id:
- latest_state = connection_status(client, connection_id=connection_id)['connection']
- if changed_properties(latest_state, location, bandwidth) and forced_update:
- ensure_absent(client, connection_id)
- return ensure_present(client=client,
- connection_id=None,
- connection_name=connection_name,
- location=location,
- bandwidth=bandwidth,
- lag_id=lag_id,
- forced_update=forced_update)
- elif update_associations(client, latest_state, connection_id, lag_id):
- return True, connection_id
-
- # no connection found; create a new one
- else:
- return True, create_connection(client, location, bandwidth, connection_name, lag_id)
-
- return False, connection_id
-
-
-@AWSRetry.backoff(**retry_params)
-def ensure_absent(client, connection_id):
- changed = False
- if connection_id:
- delete_connection(client, connection_id)
- changed = True
-
- return changed
-
-
-def main():
- argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent']),
- name=dict(),
- location=dict(),
- bandwidth=dict(choices=['1Gbps', '10Gbps']),
- link_aggregation_group=dict(),
- connection_id=dict(),
- forced_update=dict(type='bool', default=False)
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- required_one_of=[('connection_id', 'name')],
- required_if=[('state', 'present', ('location', 'bandwidth'))]
- )
-
- connection = module.client('directconnect')
-
- state = module.params.get('state')
- try:
- connection_id = connection_exists(
- connection,
- connection_id=module.params.get('connection_id'),
- connection_name=module.params.get('name')
- )
- if not connection_id and module.params.get('connection_id'):
- module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id')))
-
- if state == 'present':
- changed, connection_id = ensure_present(connection,
- connection_id=connection_id,
- connection_name=module.params.get('name'),
- location=module.params.get('location'),
- bandwidth=module.params.get('bandwidth'),
- lag_id=module.params.get('link_aggregation_group'),
- forced_update=module.params.get('forced_update'))
- response = connection_status(connection, connection_id)
- elif state == 'absent':
- changed = ensure_absent(connection, connection_id)
- response = {}
- except DirectConnectError as e:
- if e.last_traceback:
- module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response))
- else:
- module.fail_json(msg=e.msg)
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
-
-
-if __name__ == '__main__':
- main()
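connection_exists() above doubles as an existence check (verify=True, returning a connection ID or False) and a status fetch (verify=False, returning the raw connection data). A dependency-free sketch of that dual-return convention, using hypothetical names and sample data:

    def match_connections(connections, connection_id=None, connection_name=None, verify=True):
        # Ignore deleted connections, then match on ID or name, as connection_exists() does.
        live = [c for c in connections if c['connectionState'] != 'deleted']
        if connection_id:
            matches = [c for c in live if c['connectionId'] == connection_id]
        else:
            matches = [c for c in live if c['connectionName'] == connection_name]
        if verify:
            return matches[0]['connectionId'] if len(matches) == 1 else False
        return {'connection': matches[0] if len(matches) == 1 else {}}

    conns = [{'connectionId': 'dxcon-ffy9ywed',
              'connectionName': 'ansible-test-connection',
              'connectionState': 'pending'}]
    assert match_connections(conns, connection_name='ansible-test-connection') == 'dxcon-ffy9ywed'
    assert match_connections(conns, connection_id='dxcon-ffy9ywed', verify=False)['connection']['connectionState'] == 'pending'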
diff --git a/lib/ansible/modules/cloud/amazon/aws_direct_connect_gateway.py b/lib/ansible/modules/cloud/amazon/aws_direct_connect_gateway.py
deleted file mode 100644
index d885368540..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_direct_connect_gateway.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
-module: aws_direct_connect_gateway
-author: Gobin Sougrakpam (@gobins)
-version_added: "2.5"
-short_description: Manage AWS Direct Connect gateway
-description:
- - Creates AWS Direct Connect Gateway.
- - Deletes AWS Direct Connect Gateway.
- - Attaches Virtual Gateways to Direct Connect Gateway.
- - Detaches Virtual Gateways from Direct Connect Gateway.
-extends_documentation_fragment:
- - aws
- - ec2
-requirements: [ boto3 ]
-options:
- state:
- description:
- - Set I(state=present) to ensure a resource is created.
- - Set I(state=absent) to remove a resource.
- default: present
- choices: [ "present", "absent"]
- type: str
- name:
- description:
- - Name of the Direct Connect Gateway to be created or deleted.
- type: str
- amazon_asn:
- description:
- - The Amazon side ASN.
- - Required when I(state=present).
- type: str
- direct_connect_gateway_id:
- description:
- - The ID of an existing Direct Connect Gateway.
- - Required when I(state=absent).
- type: str
- virtual_gateway_id:
- description:
- - The VPN gateway ID of an existing virtual gateway.
- type: str
- wait_timeout:
- description:
- - How long to wait for the association to be deleted.
- type: int
- default: 320
-'''
-
-EXAMPLES = '''
-- name: Create a new direct connect gateway attached to virtual private gateway
- aws_direct_connect_gateway:
- state: present
- name: my-dx-gateway
- amazon_asn: 7224
- virtual_gateway_id: vpg-12345
- register: created_dxgw
-
-- name: Create a new unattached dxgw
- aws_direct_connect_gateway:
- state: present
- name: my-dx-gateway
- amazon_asn: 7224
- register: created_dxgw
-
-'''
-
-RETURN = '''
-result:
- description:
- - The attributes of the Direct Connect Gateway
- type: complex
- returned: when I(state=present)
- contains:
- amazon_side_asn:
- description: ASN on the amazon side.
- type: str
- direct_connect_gateway_id:
- description: The ID of the direct connect gateway.
- type: str
- direct_connect_gateway_name:
- description: The name of the direct connect gateway.
- type: str
- direct_connect_gateway_state:
- description: The state of the direct connect gateway.
- type: str
- owner_account:
- description: The AWS account ID of the owner of the direct connect gateway.
- type: str
-'''
-
-import time
-import traceback
-
-try:
- import botocore
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec,
- get_aws_connection_info, boto3_conn)
-from ansible.module_utils._text import to_native
-
-
-def dx_gateway_info(client, gateway_id, module):
- try:
- resp = client.describe_direct_connect_gateways(
- directConnectGatewayId=gateway_id)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- if resp['directConnectGateways']:
- return resp['directConnectGateways'][0]
-
-
-def wait_for_status(client, module, gateway_id, virtual_gateway_id, status):
- polling_increment_secs = 15
- max_retries = 3
- status_achieved = False
-
- for x in range(0, max_retries):
- try:
- response = check_dxgw_association(
- client,
- module,
- gateway_id=gateway_id,
- virtual_gateway_id=virtual_gateway_id)
- if response['directConnectGatewayAssociations']:
- if response['directConnectGatewayAssociations'][0]['associationState'] == status:
- status_achieved = True
- break
- else:
- time.sleep(polling_increment_secs)
- else:
- status_achieved = True
- break
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- result = response
- return status_achieved, result
-
-
-def associate_direct_connect_gateway(client, module, gateway_id):
- params = dict()
- params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
- try:
- response = client.create_direct_connect_gateway_association(
- directConnectGatewayId=gateway_id,
- virtualGatewayId=params['virtual_gateway_id'])
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- status_achieved, dxgw = wait_for_status(client, module, gateway_id, params['virtual_gateway_id'], 'associating')
- if not status_achieved:
- module.fail_json(msg='Error waiting for dxgw to attach to vpg - please check the AWS console')
-
- result = response
- return result
-
-
-def delete_association(client, module, gateway_id, virtual_gateway_id):
- try:
- response = client.delete_direct_connect_gateway_association(
- directConnectGatewayId=gateway_id,
- virtualGatewayId=virtual_gateway_id)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- status_achieved, dxgw = wait_for_status(client, module, gateway_id, virtual_gateway_id, 'disassociating')
- if not status_achieved:
- module.fail_json(msg='Error waiting for dxgw to detach from vpg - please check the AWS console')
-
- result = response
- return result
-
-
-def create_dx_gateway(client, module):
- params = dict()
- params['name'] = module.params.get('name')
- params['amazon_asn'] = module.params.get('amazon_asn')
- try:
- response = client.create_direct_connect_gateway(
- directConnectGatewayName=params['name'],
- amazonSideAsn=int(params['amazon_asn']))
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- result = response
- return result
-
-
-def find_dx_gateway(client, module, gateway_id=None):
- params = dict()
- gateways = list()
- if gateway_id is not None:
- params['directConnectGatewayId'] = gateway_id
- while True:
- try:
- resp = client.describe_direct_connect_gateways(**params)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- gateways.extend(resp['directConnectGateways'])
- if 'nextToken' in resp:
- params['nextToken'] = resp['nextToken']
- else:
- break
- for gateway in gateways:
- if module.params.get('name') == gateway['directConnectGatewayName']:
- return gateway
- return None
-
-
-def check_dxgw_association(client, module, gateway_id, virtual_gateway_id=None):
- try:
- if virtual_gateway_id is None:
- resp = client.describe_direct_connect_gateway_associations(
- directConnectGatewayId=gateway_id
- )
- else:
- resp = client.describe_direct_connect_gateway_associations(
- directConnectGatewayId=gateway_id,
- virtualGatewayId=virtual_gateway_id,
- )
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- return resp
-
-
-def ensure_present(client, module):
- # If an existing direct connect gateway matches our args
- # then a match is considered to have been found and we will not create another dxgw.
-
- changed = False
- params = dict()
- result = dict()
- params['name'] = module.params.get('name')
- params['amazon_asn'] = module.params.get('amazon_asn')
- params['virtual_gateway_id'] = module.params.get('virtual_gateway_id')
-
- # check if a gateway matching our module args already exists
- existing_dxgw = find_dx_gateway(client, module)
-
- if existing_dxgw is not None and existing_dxgw['directConnectGatewayState'] != 'deleted':
- gateway_id = existing_dxgw['directConnectGatewayId']
- # if a virtual_gateway_id was provided, check whether it is attached to the DXGW
- if params['virtual_gateway_id']:
- resp = check_dxgw_association(
- client,
- module,
- gateway_id=gateway_id,
- virtual_gateway_id=params['virtual_gateway_id'])
- if not resp["directConnectGatewayAssociations"]:
- # attach the dxgw to the supplied virtual_gateway_id
- associate_direct_connect_gateway(client, module, gateway_id)
- changed = True
- # if params['virtual_gateway_id'] is not provided, check whether the dxgw is attached to a VPG; if so, detach it.
- else:
- resp = check_dxgw_association(client, module, gateway_id=gateway_id)
- if resp["directConnectGatewayAssociations"]:
- for association in resp['directConnectGatewayAssociations']:
- if association['associationState'] not in ['disassociating', 'disassociated']:
- delete_association(
- client,
- module,
- gateway_id=gateway_id,
- virtual_gateway_id=association['virtualGatewayId'])
- else:
- # create a new dxgw
- new_dxgw = create_dx_gateway(client, module)
- changed = True
- gateway_id = new_dxgw['directConnectGateway']['directConnectGatewayId']
-
- # if a virtual_gateway_id was supplied, attempt to attach it to the dxgw
- if params['virtual_gateway_id']:
- associate_direct_connect_gateway(client, module, gateway_id)
- resp = check_dxgw_association(client,
- module,
- gateway_id=gateway_id
- )
- if resp["directConnectGatewayAssociations"]:
- changed = True
-
- result = dx_gateway_info(client, gateway_id, module)
- return changed, result
-
-
-def ensure_absent(client, module):
- # If an existing direct connect gateway matches the supplied gateway ID,
- # disassociate any attached virtual gateways before deleting it.
-
- changed = False
- result = dict()
- dx_gateway_id = module.params.get('direct_connect_gateway_id')
- existing_dxgw = find_dx_gateway(client, module, dx_gateway_id)
- if existing_dxgw is not None:
- resp = check_dxgw_association(client, module,
- gateway_id=dx_gateway_id)
- if resp["directConnectGatewayAssociations"]:
- for association in resp['directConnectGatewayAssociations']:
- if association['associationState'] not in ['disassociating', 'disassociated']:
- delete_association(client, module,
- gateway_id=dx_gateway_id,
- virtual_gateway_id=association['virtualGatewayId'])
- # wait for deleting association
- timeout = time.time() + module.params.get('wait_timeout')
- while time.time() < timeout:
- resp = check_dxgw_association(client,
- module,
- gateway_id=dx_gateway_id)
- if resp["directConnectGatewayAssociations"] != []:
- time.sleep(15)
- else:
- break
-
- try:
- resp = client.delete_direct_connect_gateway(
- directConnectGatewayId=dx_gateway_id
- )
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- result = resp['directConnectGateway']
- changed = True
- return changed
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(state=dict(default='present', choices=['present', 'absent']),
- name=dict(),
- amazon_asn=dict(),
- virtual_gateway_id=dict(),
- direct_connect_gateway_id=dict(),
- wait_timeout=dict(type='int', default=320)))
- required_if = [('state', 'present', ['name', 'amazon_asn']),
- ('state', 'absent', ['direct_connect_gateway_id'])]
- module = AnsibleModule(argument_spec=argument_spec,
- required_if=required_if)
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required for this module')
-
- state = module.params.get('state')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- client = boto3_conn(module, conn_type='client', resource='directconnect', region=region, endpoint=ec2_url, **aws_connect_kwargs)
-
- if state == 'present':
- (changed, results) = ensure_present(client, module)
- elif state == 'absent':
- changed = ensure_absent(client, module)
- results = {}
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(results))
-
-
-if __name__ == '__main__':
- main()
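wait_for_status() and the association-deletion timeout loop in ensure_absent() above are both instances of the same poll-until pattern. A generic sketch; the helper name and the fake clock are illustrative:

    import time

    def wait_for(predicate, timeout=320, interval=15, clock=time.time, sleep=time.sleep):
        # Poll predicate() until it is true or the timeout elapses.
        deadline = clock() + timeout
        while clock() < deadline:
            if predicate():
                return True
            sleep(interval)
        return False

    # Fake clock so the example runs instantly; the association "disappears" after 30s.
    state = {'now': 0.0, 'associations': ['vgw-12345']}

    def fake_clock():
        return state['now']

    def fake_sleep(seconds):
        state['now'] += seconds
        if state['now'] >= 30:
            state['associations'] = []

    assert wait_for(lambda: not state['associations'], clock=fake_clock, sleep=fake_sleep) is True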
diff --git a/lib/ansible/modules/cloud/amazon/aws_direct_connect_link_aggregation_group.py b/lib/ansible/modules/cloud/amazon/aws_direct_connect_link_aggregation_group.py
deleted file mode 100644
index 4fd5b2b21f..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_direct_connect_link_aggregation_group.py
+++ /dev/null
@@ -1,470 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: aws_direct_connect_link_aggregation_group
-short_description: Manage Direct Connect LAG bundles
-description:
- - Create, delete, or modify a Direct Connect link aggregation group.
-version_added: "2.4"
-author: "Sloane Hertel (@s-hertel)"
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - boto3
- - botocore
-options:
- state:
- description:
- - The state of the Direct Connect link aggregation group.
- choices:
- - present
- - absent
- type: str
- required: true
- name:
- description:
- - The name of the Direct Connect link aggregation group.
- type: str
- link_aggregation_group_id:
- description:
- - The ID of the Direct Connect link aggregation group.
- type: str
- num_connections:
- description:
- - The number of connections with which to initialize the link aggregation group.
- type: int
- min_links:
- description:
- - The minimum number of physical connections that must be operational for the LAG itself to be operational.
- type: int
- location:
- description:
- - The location of the link aggregation group.
- type: str
- bandwidth:
- description:
- - The bandwidth of the link aggregation group.
- type: str
- force_delete:
- description:
- - This allows the minimum number of links to be set to 0, any hosted connections disassociated,
- and any virtual interfaces associated with the LAG deleted.
- type: bool
- connection_id:
- description:
- - A connection ID to link with the link aggregation group upon creation.
- type: str
- delete_with_disassociation:
- description:
- - To be used with I(state=absent) to delete connections after disassociating them from the LAG.
- type: bool
- wait:
- description:
- - Whether or not to wait for the operation to complete.
- - May be useful when waiting for virtual interfaces to be deleted.
- - The time to wait can be controlled by setting I(wait_timeout).
- type: bool
- wait_timeout:
- description:
- - The duration in seconds to wait if I(wait=true).
- default: 120
- type: int
-"""
-
-EXAMPLES = """
-
-# create a Direct Connect link aggregation group
-- aws_direct_connect_link_aggregation_group:
- state: present
- location: EqDC2
- link_aggregation_group_id: dxlag-xxxxxxxx
- bandwidth: 1Gbps
-
-"""
-
-RETURN = """
-changed:
- type: bool
- description: Whether or not the LAG has changed.
- returned: always
-aws_device:
- type: str
- description: The AWS Direct Connection endpoint that hosts the LAG.
- sample: "EqSe2-1bwfvazist2k0"
- returned: when I(state=present)
-connections:
- type: list
- description: A list of connections bundled by this LAG.
- sample:
- "connections": [
- {
- "aws_device": "EqSe2-1bwfvazist2k0",
- "bandwidth": "1Gbps",
- "connection_id": "dxcon-fgzjah5a",
- "connection_name": "Requested Connection 1 for Lag dxlag-fgtoh97h",
- "connection_state": "down",
- "lag_id": "dxlag-fgnsp4rq",
- "location": "EqSe2",
- "owner_account": "448830907657",
- "region": "us-west-2"
- }
- ]
- returned: when I(state=present)
-connections_bandwidth:
- type: str
- description: The individual bandwidth of the physical connections bundled by the LAG.
- sample: "1Gbps"
- returned: when I(state=present)
-lag_id:
- type: str
- description: Unique identifier for the link aggregation group.
- sample: "dxlag-fgnsp4rq"
- returned: when I(state=present)
-lag_name:
- type: str
- description: User-provided name for the link aggregation group.
- returned: when I(state=present)
-lag_state:
- type: str
- description: State of the LAG.
- sample: "pending"
- returned: when I(state=present)
-location:
- type: str
- description: Where the connection is located.
- sample: "EqSe2"
- returned: when I(state=present)
-minimum_links:
- type: int
- description: The minimum number of physical connections that must be operational for the LAG itself to be operational.
- returned: when I(state=present)
-number_of_connections:
- type: int
- description: The number of physical connections bundled by the LAG.
- returned: when I(state=present)
-owner_account:
- type: str
- description: Owner account ID of the LAG.
- returned: when I(state=present)
-region:
- type: str
- description: The region in which the LAG exists.
- returned: when I(state=present)
-"""
-
-from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec, HAS_BOTO3,
- get_aws_connection_info, boto3_conn, AWSRetry)
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.aws.direct_connect import (DirectConnectError,
- delete_connection,
- delete_virtual_interface,
- disassociate_connection_and_lag)
-import traceback
-import time
-
-try:
- import botocore
-except Exception:
- pass
- # handled by imported HAS_BOTO3
-
-
-def lag_status(client, lag_id):
- return lag_exists(client, lag_id=lag_id, lag_name=None, verify=False)
-
-
-def lag_exists(client, lag_id=None, lag_name=None, verify=True):
- """ If verify=True, returns the LAG ID or None
- If verify=False, returns the LAG's data (or an empty dict)
- """
- try:
- if lag_id:
- response = client.describe_lags(lagId=lag_id)
- else:
- response = client.describe_lags()
- except botocore.exceptions.ClientError as e:
- if lag_id and verify:
- return False
- elif lag_id:
- return {}
- else:
- failed_op = "Failed to describe DirectConnect link aggregation groups."
- raise DirectConnectError(msg=failed_op,
- last_traceback=traceback.format_exc(),
- exception=e)
-
- match = [] # List of LAG IDs that are exact matches
- lag = [] # List of LAG data that are exact matches
-
- # look for matching connections
- if len(response.get('lags', [])) == 1 and lag_id:
- if response['lags'][0]['lagState'] != 'deleted':
- match.append(response['lags'][0]['lagId'])
- lag.append(response['lags'][0])
- else:
- for each in response.get('lags', []):
- if each['lagState'] != 'deleted':
- if not lag_id:
- if lag_name == each['lagName']:
- match.append(each['lagId'])
- else:
- match.append(each['lagId'])
-
- # verifying if the connections exists; if true, return connection identifier, otherwise return False
- if verify and len(match) == 1:
- return match[0]
- elif verify:
- return False
-
- # not verifying if the connection exists; just return current connection info
- else:
- if len(lag) == 1:
- return lag[0]
- else:
- return {}
-
-
-def create_lag(client, num_connections, location, bandwidth, name, connection_id):
- if not name:
- raise DirectConnectError(msg="Failed to create a Direct Connect link aggregation group: name required.",
- last_traceback=None,
- exception="")
-
- parameters = dict(numberOfConnections=num_connections,
- location=location,
- connectionsBandwidth=bandwidth,
- lagName=name)
- if connection_id:
- parameters.update(connectionId=connection_id)
- try:
- lag = client.create_lag(**parameters)
- except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to create DirectConnect link aggregation group {0}".format(name),
- last_traceback=traceback.format_exc(),
- exception=e)
-
- return lag['lagId']
-
-
-def delete_lag(client, lag_id):
- try:
- client.delete_lag(lagId=lag_id)
- except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id),
- last_traceback=traceback.format_exc(),
- exception=e)
-
-
-@AWSRetry.backoff(tries=5, delay=2, backoff=2.0, catch_extra_error_codes=['DirectConnectClientException'])
-def _update_lag(client, lag_id, lag_name, min_links):
- params = {}
- if min_links:
- params.update(minimumLinks=min_links)
- if lag_name:
- params.update(lagName=lag_name)
-
- client.update_lag(lagId=lag_id, **params)
-
-
-def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout):
- start = time.time()
-
- if min_links and num_connections and min_links > num_connections:
- raise DirectConnectError(
- msg="The number of connections {0} must be at least the minimum number of links "
- "{1} to update the LAG {2}".format(num_connections, min_links, lag_id),
- last_traceback=None,
- exception=None
- )
-
- while True:
- try:
- _update_lag(client, lag_id, lag_name, min_links)
- except botocore.exceptions.ClientError as e:
- if wait and time.time() - start <= wait_timeout:
- continue
- msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id)
- if "MinimumLinks cannot be set higher than the number of connections" in e.response['Error']['Message']:
- msg += "Unable to set the min number of links to {0} while the LAG connections are being requested".format(min_links)
- raise DirectConnectError(msg=msg,
- last_traceback=traceback.format_exc(),
- exception=e)
- else:
- break
-
-
-def lag_changed(current_status, name, min_links):
- """ Determines if a modifiable link aggregation group attribute has been modified. """
- return (name and name != current_status['lagName']) or (min_links and min_links != current_status['minimumLinks'])
-
-
-def ensure_present(client, num_connections, lag_id, lag_name, location, bandwidth, connection_id, min_links, wait, wait_timeout):
- exists = lag_exists(client, lag_id, lag_name)
- if not exists and lag_id:
- raise DirectConnectError(msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id),
- last_traceback=None,
- exception="")
-
- # the connection is found; get the latest state and see if it needs to be updated
- if exists:
- lag_id = exists
- latest_state = lag_status(client, lag_id)
- if lag_changed(latest_state, lag_name, min_links):
- update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
- return True, lag_id
- return False, lag_id
-
- # no connection found; create a new one
- else:
- lag_id = create_lag(client, num_connections, location, bandwidth, lag_name, connection_id)
- update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_timeout)
- return True, lag_id
-
-
-def describe_virtual_interfaces(client, lag_id):
- try:
- response = client.describe_virtual_interfaces(connectionId=lag_id)
- except botocore.exceptions.ClientError as e:
- raise DirectConnectError(msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id),
- last_traceback=traceback.format_exc(),
- exception=e)
- return response.get('virtualInterfaces', [])
-
-
-def get_connections_and_virtual_interfaces(client, lag_id):
- virtual_interfaces = describe_virtual_interfaces(client, lag_id)
- connections = lag_status(client, lag_id=lag_id).get('connections', [])
- return virtual_interfaces, connections
-
-
-def disassociate_vis(client, lag_id, virtual_interfaces):
- for vi in virtual_interfaces:
- # delete_virtual_interface() raises DirectConnectError with details on failure
- delete_virtual_interface(client, vi['virtualInterfaceId'])
-
-
-def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassociation, wait, wait_timeout):
- lag_id = lag_exists(client, lag_id, lag_name)
- if not lag_id:
- return False
-
- latest_status = lag_status(client, lag_id)
-
- # determine the associated connections and virtual interfaces to disassociate
- virtual_interfaces, connections = get_connections_and_virtual_interfaces(client, lag_id)
-
- # If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete
- if any((latest_status['minimumLinks'], virtual_interfaces, connections)) and not force_delete:
- raise DirectConnectError(msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. "
- "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). "
- "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True "
- "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id),
- last_traceback=None,
- exception=None)
-
- # update min_links to be 0 so we can remove the LAG
- update_lag(client, lag_id, None, 0, len(connections), wait, wait_timeout)
-
- # if virtual_interfaces and not delete_vi_with_disassociation: Raise failure; can't delete while vi attached
- for connection in connections:
- disassociate_connection_and_lag(client, connection['connectionId'], lag_id)
- if delete_with_disassociation:
- delete_connection(client, connection['connectionId'])
-
- for vi in virtual_interfaces:
- delete_virtual_interface(client, vi['virtualInterfaceId'])
-
- start_time = time.time()
- while True:
- try:
- delete_lag(client, lag_id)
- except DirectConnectError as e:
- if ('until its Virtual Interfaces are deleted' in str(e.exception)) and (time.time() - start_time < wait_timeout) and wait:
- continue
- raise
- return True
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(required=True, choices=['present', 'absent']),
- name=dict(),
- link_aggregation_group_id=dict(),
- num_connections=dict(type='int'),
- min_links=dict(type='int'),
- location=dict(),
- bandwidth=dict(),
- connection_id=dict(),
- delete_with_disassociation=dict(type='bool', default=False),
- force_delete=dict(type='bool', default=False),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=120),
- ))
-
- module = AnsibleModule(argument_spec=argument_spec,
- required_one_of=[('link_aggregation_group_id', 'name')],
- required_if=[('state', 'present', ('location', 'bandwidth'))])
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- if not region:
- module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")
-
- connection = boto3_conn(module, conn_type='client',
- resource='directconnect', region=region,
- endpoint=ec2_url, **aws_connect_kwargs)
-
- state = module.params.get('state')
- response = {}
- try:
- if state == 'present':
- changed, lag_id = ensure_present(connection,
- num_connections=module.params.get("num_connections"),
- lag_id=module.params.get("link_aggregation_group_id"),
- lag_name=module.params.get("name"),
- location=module.params.get("location"),
- bandwidth=module.params.get("bandwidth"),
- connection_id=module.params.get("connection_id"),
- min_links=module.params.get("min_links"),
- wait=module.params.get("wait"),
- wait_timeout=module.params.get("wait_timeout"))
- response = lag_status(connection, lag_id)
- elif state == "absent":
- changed = ensure_absent(connection,
- lag_id=module.params.get("link_aggregation_group_id"),
- lag_name=module.params.get("name"),
- force_delete=module.params.get("force_delete"),
- delete_with_disassociation=module.params.get("delete_with_disassociation"),
- wait=module.params.get('wait'),
- wait_timeout=module.params.get('wait_timeout'))
- except DirectConnectError as e:
- if e.last_traceback:
- module.fail_json(msg=e.msg, exception=e.last_traceback, **camel_dict_to_snake_dict(e.exception.response))
- else:
- module.fail_json(msg=e.msg)
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
-
-
-if __name__ == '__main__':
- main()
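The deletion path above boils down to a deadline-bounded retry around delete_lag(): retry while the "Virtual Interfaces are deleted" error persists, give up at the timeout, return True on success. A minimal standalone sketch of that pattern (not the module's code; op, is_retryable, and the 5-second pause are hypothetical stand-ins):

import time

def retry_until_deadline(op, is_retryable, wait_timeout=120):
    # Keep calling op() until it succeeds, the error is not retryable,
    # or the deadline passes; mirrors the delete retry loop in ensure_absent().
    deadline = time.time() + wait_timeout
    while True:
        try:
            op()
        except Exception as e:
            if is_retryable(e) and time.time() < deadline:
                time.sleep(5)  # pause between attempts; the interval is an assumption
                continue
            raise
        else:
            return True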
diff --git a/lib/ansible/modules/cloud/amazon/aws_direct_connect_virtual_interface.py b/lib/ansible/modules/cloud/amazon/aws_direct_connect_virtual_interface.py
deleted file mode 100644
index b73685d763..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_direct_connect_virtual_interface.py
+++ /dev/null
@@ -1,500 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: aws_direct_connect_virtual_interface
-short_description: Manage Direct Connect virtual interfaces
-description:
- - Create, delete, or modify a Direct Connect public or private virtual interface.
-version_added: "2.5"
-author: "Sloane Hertel (@s-hertel)"
-requirements:
- - boto3
- - botocore
-options:
- state:
- description:
- - The desired state of the Direct Connect virtual interface.
- choices: [present, absent]
- type: str
- required: true
- id_to_associate:
- description:
- - The ID of the link aggregation group or connection to associate with the virtual interface.
- aliases: [link_aggregation_group_id, connection_id]
- type: str
- required: true
- public:
- description:
-      - Whether the virtual interface is public (true) or private (false).
- type: bool
- name:
- description:
- - The name of the virtual interface.
- type: str
- vlan:
- description:
- - The VLAN ID.
- default: 100
- type: int
- bgp_asn:
- description:
- - The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
- default: 65000
- type: int
- authentication_key:
- description:
- - The authentication key for BGP configuration.
- type: str
- amazon_address:
- description:
-      - The Amazon address CIDR with which to create the virtual interface.
- type: str
- customer_address:
- description:
- - The customer address CIDR with which to create the virtual interface.
- type: str
- address_type:
- description:
- - The type of IP address for the BGP peer.
- type: str
- cidr:
- description:
- - A list of route filter prefix CIDRs with which to create the public virtual interface.
- type: list
- elements: str
- virtual_gateway_id:
- description:
- - The virtual gateway ID required for creating a private virtual interface.
- type: str
- virtual_interface_id:
- description:
- - The virtual interface ID.
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-RETURN = '''
-address_family:
- description: The address family for the BGP peer.
- returned: always
- type: str
- sample: ipv4
-amazon_address:
- description: IP address assigned to the Amazon interface.
- returned: always
- type: str
- sample: 169.254.255.1/30
-asn:
- description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
- returned: always
- type: int
- sample: 65000
-auth_key:
- description: The authentication key for BGP configuration.
- returned: always
- type: str
- sample: 0xZ59Y1JZ2oDOSh6YriIlyRE
-bgp_peers:
- description: A list of the BGP peers configured on this virtual interface.
- returned: always
- type: complex
- contains:
- address_family:
- description: The address family for the BGP peer.
- returned: always
- type: str
- sample: ipv4
- amazon_address:
- description: IP address assigned to the Amazon interface.
- returned: always
- type: str
- sample: 169.254.255.1/30
- asn:
- description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
- returned: always
- type: int
- sample: 65000
- auth_key:
- description: The authentication key for BGP configuration.
- returned: always
- type: str
- sample: 0xZ59Y1JZ2oDOSh6YriIlyRE
- bgp_peer_state:
-      description: The state of the BGP peer (verifying, pending, available).
- returned: always
- type: str
- sample: available
- bgp_status:
- description: The up/down state of the BGP peer.
- returned: always
- type: str
- sample: up
- customer_address:
- description: IP address assigned to the customer interface.
- returned: always
- type: str
- sample: 169.254.255.2/30
-changed:
-  description: Indicates whether the virtual interface has been created, modified, or deleted.
- returned: always
- type: bool
- sample: false
-connection_id:
- description:
- - The ID of the connection. This field is also used as the ID type for operations that
- use multiple connection types (LAG, interconnect, and/or connection).
- returned: always
- type: str
- sample: dxcon-fgb175av
-customer_address:
- description: IP address assigned to the customer interface.
- returned: always
- type: str
- sample: 169.254.255.2/30
-customer_router_config:
- description: Information for generating the customer router configuration.
- returned: always
- type: str
-location:
- description: Where the connection is located.
- returned: always
- type: str
- sample: EqDC2
-owner_account:
- description: The AWS account that will own the new virtual interface.
- returned: always
- type: str
- sample: '123456789012'
-route_filter_prefixes:
- description: A list of routes to be advertised to the AWS network in this region (public virtual interface).
- returned: always
- type: complex
- contains:
- cidr:
-      description: A route to be advertised to the AWS network in this region.
- returned: always
- type: str
- sample: 54.227.92.216/30
-virtual_gateway_id:
- description: The ID of the virtual private gateway to a VPC. This only applies to private virtual interfaces.
- returned: when I(public=False)
- type: str
- sample: vgw-f3ce259a
-virtual_interface_id:
- description: The ID of the virtual interface.
- returned: always
- type: str
- sample: dxvif-fh0w7cex
-virtual_interface_name:
- description: The name of the virtual interface assigned by the customer.
- returned: always
- type: str
- sample: test_virtual_interface
-virtual_interface_state:
- description: State of the virtual interface (confirming, verifying, pending, available, down, rejected).
- returned: always
- type: str
- sample: available
-virtual_interface_type:
- description: The type of virtual interface (private, public).
- returned: always
- type: str
- sample: private
-vlan:
- description: The VLAN ID.
- returned: always
- type: int
- sample: 100
-'''
-
-EXAMPLES = '''
----
-- name: create an association between a LAG and connection
- aws_direct_connect_virtual_interface:
- state: present
- name: "{{ name }}"
- link_aggregation_group_id: LAG-XXXXXXXX
- connection_id: dxcon-XXXXXXXX
-
-- name: remove an association between a connection and virtual interface
- aws_direct_connect_virtual_interface:
- state: absent
- connection_id: dxcon-XXXXXXXX
-    virtual_interface_id: dxvif-XXXXXXXX
-
-'''
-
-import traceback
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.direct_connect import DirectConnectError, delete_virtual_interface
-from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- # handled by AnsibleAWSModule
- pass
-
-
-def try_except_ClientError(failure_msg):
- '''
- Wrapper for boto3 calls that uses AWSRetry and handles exceptions
- '''
- def wrapper(f):
- def run_func(*args, **kwargs):
- try:
- result = AWSRetry.backoff(tries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs)
- except (ClientError, BotoCoreError) as e:
- raise DirectConnectError(failure_msg, traceback.format_exc(), e)
- return result
- return run_func
- return wrapper
-
-
-def find_unique_vi(client, connection_id, virtual_interface_id, name):
- '''
- Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found.
- If multiple matches are found False is returned. If no matches are found None is returned.
- '''
-
- # Get the virtual interfaces, filtering by the ID if provided.
- vi_params = {}
- if virtual_interface_id:
- vi_params = {'virtualInterfaceId': virtual_interface_id}
-
- virtual_interfaces = try_except_ClientError(
- failure_msg="Failed to describe virtual interface")(
- client.describe_virtual_interfaces)(**vi_params).get('virtualInterfaces')
-
- # Remove deleting/deleted matches from the results.
- virtual_interfaces = [vi for vi in virtual_interfaces if vi['virtualInterfaceState'] not in ('deleting', 'deleted')]
-
- matching_virtual_interfaces = filter_virtual_interfaces(virtual_interfaces, name, connection_id)
- return exact_match(matching_virtual_interfaces)
-
-
-def exact_match(virtual_interfaces):
- '''
-    Returns the virtual interface ID if exactly one match was found,
-    None if no matches were found (the virtual interface needs to be created),
-    False if more than one match was found
- '''
-
- if not virtual_interfaces:
- return None
- if len(virtual_interfaces) == 1:
- return virtual_interfaces[0]['virtualInterfaceId']
- else:
- return False
-
-
-def filter_virtual_interfaces(virtual_interfaces, name, connection_id):
- '''
- Filters the available virtual interfaces to try to find a unique match
- '''
- # Filter by name if provided.
- if name:
- matching_by_name = find_virtual_interface_by_name(virtual_interfaces, name)
- if len(matching_by_name) == 1:
- return matching_by_name
-    else:
-        matching_by_name = virtual_interfaces
-
- # If there isn't a unique match filter by connection ID as last resort (because connection_id may be a connection yet to be associated)
- if connection_id and len(matching_by_name) > 1:
- matching_by_connection_id = find_virtual_interface_by_connection_id(matching_by_name, connection_id)
- if len(matching_by_connection_id) == 1:
- return matching_by_connection_id
-    else:
-        matching_by_connection_id = matching_by_name
-
- return matching_by_connection_id
-
-
-def find_virtual_interface_by_connection_id(virtual_interfaces, connection_id):
- '''
- Return virtual interfaces that have the connection_id associated
- '''
- return [vi for vi in virtual_interfaces if vi['connectionId'] == connection_id]
-
-
-def find_virtual_interface_by_name(virtual_interfaces, name):
- '''
- Return virtual interfaces that match the provided name
- '''
- return [vi for vi in virtual_interfaces if vi['virtualInterfaceName'] == name]
-
-
-def vi_state(client, virtual_interface_id):
- '''
- Returns the state of the virtual interface.
- '''
- err_msg = "Failed to describe virtual interface: {0}".format(virtual_interface_id)
- vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)(virtualInterfaceId=virtual_interface_id)
- return vi['virtualInterfaces'][0]
-
-
-def assemble_params_for_creating_vi(params):
- '''
- Returns kwargs to use in the call to create the virtual interface
-
- Params for public virtual interfaces:
- virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr
- Params for private virtual interfaces:
- virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId
- '''
-
- public = params['public']
- name = params['name']
- vlan = params['vlan']
- bgp_asn = params['bgp_asn']
- auth_key = params['authentication_key']
- amazon_addr = params['amazon_address']
- customer_addr = params['customer_address']
- family_addr = params['address_type']
- cidr = params['cidr']
- virtual_gateway_id = params['virtual_gateway_id']
-
- parameters = dict(virtualInterfaceName=name, vlan=vlan, asn=bgp_asn)
- opt_params = dict(authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr)
-
- for name, value in opt_params.items():
- if value:
- parameters[name] = value
-
- # virtual interface type specific parameters
- if public and cidr:
- parameters['routeFilterPrefixes'] = [{'cidr': c} for c in cidr]
- if not public:
- parameters['virtualGatewayId'] = virtual_gateway_id
-
- return parameters
-
-
-def create_vi(client, public, associated_id, creation_params):
- '''
- :param public: a boolean
- :param associated_id: a link aggregation group ID or connection ID to associate
- with the virtual interface.
- :param creation_params: a dict of parameters to use in the boto call
- :return The ID of the created virtual interface
- '''
- err_msg = "Failed to create virtual interface"
- if public:
- vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)(connectionId=associated_id,
- newPublicVirtualInterface=creation_params)
- else:
- vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)(connectionId=associated_id,
- newPrivateVirtualInterface=creation_params)
- return vi['virtualInterfaceId']
-
-
-def modify_vi(client, virtual_interface_id, connection_id):
- '''
- Associate a new connection ID
- '''
- err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id)
- try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)(virtualInterfaceId=virtual_interface_id,
- connectionId=connection_id)
-
-
-def needs_modification(client, virtual_interface_id, connection_id):
- '''
- Determine if the associated connection ID needs to be updated
- '''
- return vi_state(client, virtual_interface_id).get('connectionId') != connection_id
-
-
-def ensure_state(connection, module):
- changed = False
-
- state = module.params['state']
- connection_id = module.params['id_to_associate']
- public = module.params['public']
- name = module.params['name']
-
- virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get('virtual_interface_id'), name)
-
- if virtual_interface_id is False:
- module.fail_json(msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, "
- "and connection_id options if applicable to find a unique match.")
-
- if state == 'present':
-
- if not virtual_interface_id and module.params['virtual_interface_id']:
- module.fail_json(msg="The virtual interface {0} does not exist.".format(module.params['virtual_interface_id']))
-
- elif not virtual_interface_id:
- assembled_params = assemble_params_for_creating_vi(module.params)
- virtual_interface_id = create_vi(connection, public, connection_id, assembled_params)
- changed = True
-
- if needs_modification(connection, virtual_interface_id, connection_id):
- modify_vi(connection, virtual_interface_id, connection_id)
- changed = True
-
- latest_state = vi_state(connection, virtual_interface_id)
-
- else:
- if virtual_interface_id:
- delete_virtual_interface(connection, virtual_interface_id)
- changed = True
-
- latest_state = {}
-
- return changed, latest_state
-
-
-def main():
- argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent']),
- id_to_associate=dict(required=True, aliases=['link_aggregation_group_id', 'connection_id']),
- public=dict(type='bool'),
- name=dict(),
- vlan=dict(type='int', default=100),
- bgp_asn=dict(type='int', default=65000),
- authentication_key=dict(),
- amazon_address=dict(),
- customer_address=dict(),
- address_type=dict(),
- cidr=dict(type='list'),
- virtual_gateway_id=dict(),
- virtual_interface_id=dict()
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_one_of=[['virtual_interface_id', 'name']],
- required_if=[['state', 'present', ['public']],
- ['public', False, ['virtual_gateway_id']],
- ['public', True, ['amazon_address']],
- ['public', True, ['customer_address']],
- ['public', True, ['cidr']]])
-
- connection = module.client('directconnect')
-
- try:
- changed, latest_state = ensure_state(connection, module)
- except DirectConnectError as e:
- if e.exception:
- module.fail_json_aws(exception=e.exception, msg=e.msg)
- else:
- module.fail_json(msg=e.msg)
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state))
-
-
-if __name__ == '__main__':
- main()
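find_unique_vi() above narrows the candidate virtual interfaces by name first and by connection ID second, with three possible outcomes: a unique match, no match (create one), or an ambiguous result (fail). A sketch of that three-way narrowing in isolation, assuming plain dicts for virtual interfaces (an illustration, not the module's API):

def narrow_to_unique(candidates, predicates):
    # Apply each predicate in turn, keeping a filter only when it leaves
    # something; return the single match, None when nothing matches, or
    # False when the result stays ambiguous (mirrors exact_match()).
    for predicate in predicates:
        filtered = [c for c in candidates if predicate(c)]
        if len(filtered) == 1:
            return filtered[0]
        if filtered:
            candidates = filtered
    if not candidates:
        return None
    return candidates[0] if len(candidates) == 1 else False

# Example: two interfaces share a name but differ by connection ID.
vis = [{'virtualInterfaceName': 'a', 'connectionId': 'dxcon-1'},
       {'virtualInterfaceName': 'a', 'connectionId': 'dxcon-2'}]
match = narrow_to_unique(vis, [lambda vi: vi['virtualInterfaceName'] == 'a',
                               lambda vi: vi['connectionId'] == 'dxcon-2'])
assert match == vis[1]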
diff --git a/lib/ansible/modules/cloud/amazon/aws_eks_cluster.py b/lib/ansible/modules/cloud/amazon/aws_eks_cluster.py
deleted file mode 100644
index ae0f9d1a0f..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_eks_cluster.py
+++ /dev/null
@@ -1,307 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = '''
----
-module: aws_eks_cluster
-short_description: Manage Elastic Kubernetes Service Clusters
-description:
- - Manage Elastic Kubernetes Service Clusters
-version_added: "2.7"
-
-author: Will Thames (@willthames)
-
-options:
- name:
- description: Name of EKS cluster
- required: True
- type: str
- version:
- description: Kubernetes version - defaults to latest
- type: str
- role_arn:
- description: ARN of IAM role used by the EKS cluster
- type: str
- subnets:
- description: list of subnet IDs for the Kubernetes cluster
- type: list
- elements: str
- security_groups:
- description: list of security group names or IDs
- type: list
- elements: str
- state:
- description: desired state of the EKS cluster
- choices:
- - absent
- - present
- default: present
- type: str
- wait:
- description: >-
- Specifies whether the module waits until the cluster is active or deleted
- before moving on. It takes "usually less than 10 minutes" per AWS documentation.
- type: bool
- default: false
- wait_timeout:
- description: >-
- The duration in seconds to wait for the cluster to become active. Defaults
- to 1200 seconds (20 minutes).
- default: 1200
- type: int
-
-requirements: [ 'botocore', 'boto3' ]
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create an EKS cluster
- aws_eks_cluster:
- name: my_cluster
-    version: '1.14'
- role_arn: my_eks_role
- subnets:
- - subnet-aaaa1111
- security_groups:
- - my_eks_sg
- - sg-abcd1234
- register: caller_facts
-
-- name: Remove an EKS cluster
- aws_eks_cluster:
- name: my_cluster
- wait: yes
- state: absent
-'''
-
-RETURN = '''
-arn:
- description: ARN of the EKS cluster
- returned: when state is present
- type: str
- sample: arn:aws:eks:us-west-2:111111111111:cluster/my-eks-cluster
-certificate_authority:
- description: Dictionary containing Certificate Authority Data for cluster
- returned: after creation
- type: complex
- contains:
- data:
- description: Base-64 encoded Certificate Authority Data for cluster
- returned: when the cluster has been created and is active
- type: str
-endpoint:
- description: Kubernetes API server endpoint
- returned: when the cluster has been created and is active
- type: str
- sample: https://API_SERVER_ENDPOINT.yl4.us-west-2.eks.amazonaws.com
-created_at:
- description: Cluster creation date and time
- returned: when state is present
- type: str
- sample: '2018-06-06T11:56:56.242000+00:00'
-name:
- description: EKS cluster name
- returned: when state is present
- type: str
- sample: my-eks-cluster
-resources_vpc_config:
- description: VPC configuration of the cluster
- returned: when state is present
- type: complex
- contains:
- security_group_ids:
- description: List of security group IDs
- returned: always
- type: list
- sample:
- - sg-abcd1234
- - sg-aaaa1111
- subnet_ids:
- description: List of subnet IDs
- returned: always
- type: list
- sample:
- - subnet-abcdef12
- - subnet-345678ab
- - subnet-cdef1234
- vpc_id:
- description: VPC id
- returned: always
- type: str
- sample: vpc-a1b2c3d4
-role_arn:
- description: ARN of the IAM role used by the cluster
- returned: when state is present
- type: str
- sample: arn:aws:iam::111111111111:role/aws_eks_cluster_role
-status:
- description: status of the EKS cluster
- returned: when state is present
- type: str
- sample:
- - CREATING
- - ACTIVE
-version:
- description: Kubernetes version of the cluster
- returned: when state is present
- type: str
- sample: '1.10'
-'''
-
-
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names
-from ansible.module_utils.aws.waiters import get_waiter
-
-try:
- import botocore.exceptions
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def ensure_present(client, module):
- name = module.params.get('name')
- subnets = module.params['subnets']
- groups = module.params['security_groups']
- wait = module.params.get('wait')
- cluster = get_cluster(client, module)
- try:
- ec2 = module.client('ec2')
- vpc_id = ec2.describe_subnets(SubnetIds=[subnets[0]])['Subnets'][0]['VpcId']
- groups = get_ec2_security_group_ids_from_names(groups, ec2, vpc_id)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't lookup security groups")
-
- if cluster:
- if set(cluster['resourcesVpcConfig']['subnetIds']) != set(subnets):
- module.fail_json(msg="Cannot modify subnets of existing cluster")
- if set(cluster['resourcesVpcConfig']['securityGroupIds']) != set(groups):
- module.fail_json(msg="Cannot modify security groups of existing cluster")
- if module.params.get('version') and module.params.get('version') != cluster['version']:
- module.fail_json(msg="Cannot modify version of existing cluster")
-
- if wait:
- wait_until(client, module, 'cluster_active')
- # Ensure that fields that are only available for active clusters are
- # included in the returned value
- cluster = get_cluster(client, module)
-
- module.exit_json(changed=False, **camel_dict_to_snake_dict(cluster))
-
- if module.check_mode:
- module.exit_json(changed=True)
- try:
- params = dict(name=name,
- roleArn=module.params['role_arn'],
- resourcesVpcConfig=dict(
- subnetIds=subnets,
- securityGroupIds=groups),
- clientRequestToken='ansible-create-%s' % name)
- if module.params['version']:
- params['version'] = module.params['version']
- cluster = client.create_cluster(**params)['cluster']
- except botocore.exceptions.EndpointConnectionError as e:
- module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't create cluster %s" % name)
-
- if wait:
- wait_until(client, module, 'cluster_active')
- # Ensure that fields that are only available for active clusters are
- # included in the returned value
- cluster = get_cluster(client, module)
-
- module.exit_json(changed=True, **camel_dict_to_snake_dict(cluster))
-
-
-def ensure_absent(client, module):
- name = module.params.get('name')
- existing = get_cluster(client, module)
- wait = module.params.get('wait')
- if not existing:
- module.exit_json(changed=False)
- if not module.check_mode:
- try:
- client.delete_cluster(name=module.params['name'])
- except botocore.exceptions.EndpointConnectionError as e:
- module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't delete cluster %s" % name)
-
- if wait:
- wait_until(client, module, 'cluster_deleted')
-
- module.exit_json(changed=True)
-
-
-def get_cluster(client, module):
- name = module.params.get('name')
- try:
- return client.describe_cluster(name=name)['cluster']
- except is_boto3_error_code('ResourceNotFoundException'):
- return None
- except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except
- module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't get cluster %s" % name)
-
-
-def wait_until(client, module, waiter_name='cluster_active'):
- name = module.params.get('name')
- wait_timeout = module.params.get('wait_timeout')
-
- waiter = get_waiter(client, waiter_name)
- attempts = 1 + int(wait_timeout / waiter.config.delay)
- waiter.wait(name=name, WaiterConfig={'MaxAttempts': attempts})
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- version=dict(),
- role_arn=dict(),
- subnets=dict(type='list'),
- security_groups=dict(type='list'),
- state=dict(choices=['absent', 'present'], default='present'),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=1200, type='int')
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- required_if=[['state', 'present', ['role_arn', 'subnets', 'security_groups']]],
- supports_check_mode=True,
- )
-
- if not module.botocore_at_least("1.10.32"):
- module.fail_json(msg='aws_eks_cluster module requires botocore >= 1.10.32')
-
- if (not module.botocore_at_least("1.12.38") and
- module.params.get('state') == 'absent' and
- module.params.get('wait')):
- module.fail_json(msg='aws_eks_cluster: wait=yes when state=absent requires botocore >= 1.12.38')
-
- client = module.client('eks')
-
- if module.params.get('state') == 'present':
- ensure_present(client, module)
- else:
- ensure_absent(client, module)
-
-
-if __name__ == '__main__':
- main()
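wait_until() above converts a timeout in seconds into a botocore waiter attempt budget. A rough standalone equivalent using boto3 directly, assuming a botocore version that ships the EKS cluster_active waiter (cluster name and region below are placeholders):

import boto3

def wait_for_cluster_active(name, wait_timeout=1200, region='us-west-2'):
    client = boto3.client('eks', region_name=region)
    waiter = client.get_waiter('cluster_active')
    # Waiters poll every waiter.config.delay seconds, so the attempt budget
    # is the timeout divided by the polling interval, plus the first attempt.
    attempts = 1 + int(wait_timeout / waiter.config.delay)
    waiter.wait(name=name, WaiterConfig={'MaxAttempts': attempts})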
diff --git a/lib/ansible/modules/cloud/amazon/aws_elasticbeanstalk_app.py b/lib/ansible/modules/cloud/amazon/aws_elasticbeanstalk_app.py
deleted file mode 100644
index ec22f2c873..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_elasticbeanstalk_app.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
- }
-
-DOCUMENTATION = '''
----
-module: aws_elasticbeanstalk_app
-
-short_description: Create, update, and delete an elastic beanstalk application
-
-version_added: "2.5"
-
-description:
-  - Creates, updates, or deletes Elastic Beanstalk applications if I(app_name) is provided.
-
-options:
- app_name:
- description:
- - Name of the beanstalk application you wish to manage.
- aliases: [ 'name' ]
- type: str
- description:
- description:
- - The description of the application.
- type: str
- state:
- description:
- - Whether to ensure the application is present or absent.
- default: present
- choices: ['absent','present']
- type: str
- terminate_by_force:
- description:
- - When I(terminate_by_force=true), running environments will be terminated before deleting the application.
- default: false
- type: bool
-author:
- - Harpreet Singh (@hsingh)
- - Stephen Granger (@viper233)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Create or update an application
-- aws_elasticbeanstalk_app:
- app_name: Sample_App
- description: "Hello World App"
- state: present
-
-# Delete application
-- aws_elasticbeanstalk_app:
- app_name: Sample_App
- state: absent
-
-'''
-
-RETURN = '''
-app:
- description: Beanstalk application.
- returned: always
- type: dict
- sample: {
- "ApplicationName": "app-name",
- "ConfigurationTemplates": [],
- "DateCreated": "2016-12-28T14:50:03.185000+00:00",
- "DateUpdated": "2016-12-28T14:50:03.185000+00:00",
- "Description": "description",
- "Versions": [
- "1.0.0",
- "1.0.1"
- ]
- }
-output:
- description: Message indicating what change will occur.
- returned: in check mode
- type: str
- sample: App is up-to-date
-'''
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-
-
-def describe_app(ebs, app_name, module):
- apps = list_apps(ebs, app_name, module)
-
- return None if len(apps) != 1 else apps[0]
-
-
-def list_apps(ebs, app_name, module):
- try:
- if app_name is not None:
- apps = ebs.describe_applications(ApplicationNames=[app_name])
- else:
- apps = ebs.describe_applications()
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not describe application")
-
- return apps.get("Applications", [])
-
-
-def check_app(ebs, app, module):
- app_name = module.params['app_name']
- description = module.params['description']
- state = module.params['state']
- terminate_by_force = module.params['terminate_by_force']
-
- result = {}
-
- if state == 'present' and app is None:
- result = dict(changed=True, output="App would be created")
- elif state == 'present' and app.get("Description", None) != description:
- result = dict(changed=True, output="App would be updated", app=app)
- elif state == 'present' and app.get("Description", None) == description:
- result = dict(changed=False, output="App is up-to-date", app=app)
- elif state == 'absent' and app is None:
- result = dict(changed=False, output="App does not exist", app={})
-    elif state == 'absent' and app is not None and terminate_by_force is True:
-        result = dict(changed=True, output="Running environments will be terminated before the App is deleted", app=app)
-    elif state == 'absent' and app is not None:
-        result = dict(changed=True, output="App will be deleted", app=app)
-
- module.exit_json(**result)
-
-
-def filter_empty(**kwargs):
- retval = {}
- for k, v in kwargs.items():
- if v:
- retval[k] = v
- return retval
-
-
-def main():
- argument_spec = dict(
- app_name=dict(aliases=['name'], type='str', required=False),
- description=dict(),
- state=dict(choices=['present', 'absent'], default='present'),
- terminate_by_force=dict(type='bool', default=False, required=False)
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
- app_name = module.params['app_name']
- description = module.params['description']
- state = module.params['state']
- terminate_by_force = module.params['terminate_by_force']
-
- if app_name is None:
- module.fail_json(msg='Module parameter "app_name" is required')
-
- result = {}
-
- ebs = module.client('elasticbeanstalk')
-
- app = describe_app(ebs, app_name, module)
-
- if module.check_mode:
- check_app(ebs, app, module)
- module.fail_json(msg='ASSERTION FAILURE: check_app() should not return control.')
-
- if state == 'present':
- if app is None:
- try:
-                ebs.create_application(**filter_empty(ApplicationName=app_name,
-                                                      Description=description))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not create application")
-
- app = describe_app(ebs, app_name, module)
-
- result = dict(changed=True, app=app)
- else:
- if app.get("Description", None) != description:
- try:
- if not description:
- ebs.update_application(ApplicationName=app_name)
- else:
- ebs.update_application(ApplicationName=app_name, Description=description)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not update application")
-
- app = describe_app(ebs, app_name, module)
-
- result = dict(changed=True, app=app)
- else:
- result = dict(changed=False, app=app)
-
- else:
- if app is None:
- result = dict(changed=False, output='Application not found', app={})
- else:
- try:
- if terminate_by_force:
- # Running environments will be terminated before deleting the application
- ebs.delete_application(ApplicationName=app_name, TerminateEnvByForce=terminate_by_force)
- else:
- ebs.delete_application(ApplicationName=app_name)
- changed = True
- except BotoCoreError as e:
- module.fail_json_aws(e, msg="Cannot terminate app")
- except ClientError as e:
- if 'It is currently pending deletion.' not in e.response['Error']['Message']:
- module.fail_json_aws(e, msg="Cannot terminate app")
- else:
- changed = False
-
- result = dict(changed=changed, app=app)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
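The state=present branch above is a classic describe-then-create-or-update pattern. A condensed boto3 sketch of the same flow (illustrative only, with error handling omitted; the function name is ours, not the module's):

import boto3

def ensure_application(app_name, description=None):
    # Returns True when the application was created or updated, False when
    # it already matched; mirrors the state='present' logic above.
    ebs = boto3.client('elasticbeanstalk')
    apps = ebs.describe_applications(ApplicationNames=[app_name]).get('Applications', [])
    app = apps[0] if len(apps) == 1 else None
    if app is None:
        kwargs = {'ApplicationName': app_name}
        if description:
            kwargs['Description'] = description
        ebs.create_application(**kwargs)
        return True
    if app.get('Description') != description:
        if description:
            ebs.update_application(ApplicationName=app_name, Description=description)
        else:
            ebs.update_application(ApplicationName=app_name)
        return True
    return False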
diff --git a/lib/ansible/modules/cloud/amazon/aws_glue_connection.py b/lib/ansible/modules/cloud/amazon/aws_glue_connection.py
deleted file mode 100644
index 4909abb240..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_glue_connection.py
+++ /dev/null
@@ -1,337 +0,0 @@
-#!/usr/bin/python
-# Copyright: (c) 2018, Rob White (@wimnat)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: aws_glue_connection
-short_description: Manage an AWS Glue connection
-description:
- - Manage an AWS Glue connection. See U(https://aws.amazon.com/glue/) for details.
-version_added: "2.6"
-requirements: [ boto3 ]
-author: "Rob White (@wimnat)"
-options:
- catalog_id:
- description:
- - The ID of the Data Catalog in which to create the connection. If none is supplied,
- the AWS account ID is used by default.
- type: str
- connection_properties:
- description:
- - A dict of key-value pairs used as parameters for this connection.
- - Required when I(state=present).
- type: dict
- connection_type:
- description:
- - The type of the connection. Currently, only JDBC is supported; SFTP is not supported.
- default: JDBC
- choices: [ 'JDBC', 'SFTP' ]
- type: str
- description:
- description:
- - The description of the connection.
- type: str
- match_criteria:
- description:
- - A list of UTF-8 strings that specify the criteria that you can use in selecting this connection.
- type: list
- elements: str
- name:
- description:
- - The name of the connection.
- required: true
- type: str
- security_groups:
- description:
- - A list of security groups to be used by the connection. Use either security group name or ID.
- type: list
- elements: str
- state:
- description:
- - Create or delete the AWS Glue connection.
- required: true
- choices: [ 'present', 'absent' ]
- type: str
- subnet_id:
- description:
- - The subnet ID used by the connection.
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create an AWS Glue connection
-- aws_glue_connection:
- name: my-glue-connection
- connection_properties:
- JDBC_CONNECTION_URL: jdbc:mysql://mydb:3306/databasename
- USERNAME: my-username
- PASSWORD: my-password
- state: present
-
-# Delete an AWS Glue connection
-- aws_glue_connection:
- name: my-glue-connection
- state: absent
-
-'''
-
-RETURN = '''
-connection_properties:
- description: A dict of key-value pairs used as parameters for this connection.
- returned: when state is present
- type: dict
- sample: {'JDBC_CONNECTION_URL':'jdbc:mysql://mydb:3306/databasename','USERNAME':'x','PASSWORD':'y'}
-connection_type:
- description: The type of the connection.
- returned: when state is present
- type: str
- sample: JDBC
-creation_time:
- description: The time this connection definition was created.
- returned: when state is present
- type: str
- sample: "2018-04-21T05:19:58.326000+00:00"
-description:
- description: Description of the job being defined.
- returned: when state is present
- type: str
- sample: My first Glue job
-last_updated_time:
- description: The last time this connection definition was updated.
- returned: when state is present
- type: str
- sample: "2018-04-21T05:19:58.326000+00:00"
-match_criteria:
- description: A list of criteria that can be used in selecting this connection.
- returned: when state is present
- type: list
- sample: []
-name:
- description: The name of the connection definition.
- returned: when state is present
- type: str
- sample: my-glue-connection
-physical_connection_requirements:
- description: A dict of physical connection requirements, such as VPC and SecurityGroup,
- needed for making this connection successfully.
- returned: when state is present
- type: dict
- sample: {'subnet-id':'subnet-aabbccddee'}
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names
-
-# Non-ansible imports
-import copy
-import time
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass
-
-
-def _get_glue_connection(connection, module):
- """
- Get an AWS Glue connection based on name. If not found, return None.
-
- :param connection: AWS boto3 glue connection
- :param module: Ansible module
- :return: boto3 Glue connection dict or None if not found
- """
-
- connection_name = module.params.get("name")
- connection_catalog_id = module.params.get("catalog_id")
-
- params = {'Name': connection_name}
- if connection_catalog_id is not None:
- params['CatalogId'] = connection_catalog_id
-
- try:
- return connection.get_connection(**params)['Connection']
-    except ClientError as e:
-        if e.response['Error']['Code'] == 'EntityNotFoundException':
-            return None
-        raise
-
-
-def _compare_glue_connection_params(user_params, current_params):
- """
- Compare Glue connection params. If there is a difference, return True immediately else return False
-
- :param user_params: the Glue connection parameters passed by the user
- :param current_params: the Glue connection parameters currently configured
- :return: True if any parameter is mismatched else False
- """
-
- # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description
- # To counter this, add the key if it's missing with a blank value
-
- if 'Description' not in current_params:
- current_params['Description'] = ""
- if 'MatchCriteria' not in current_params:
- current_params['MatchCriteria'] = list()
- if 'PhysicalConnectionRequirements' not in current_params:
- current_params['PhysicalConnectionRequirements'] = dict()
- current_params['PhysicalConnectionRequirements']['SecurityGroupIdList'] = []
- current_params['PhysicalConnectionRequirements']['SubnetId'] = ""
-
- if 'ConnectionProperties' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionProperties'] \
- != current_params['ConnectionProperties']:
- return True
- if 'ConnectionType' in user_params['ConnectionInput'] and user_params['ConnectionInput']['ConnectionType'] \
- != current_params['ConnectionType']:
- return True
- if 'Description' in user_params['ConnectionInput'] and user_params['ConnectionInput']['Description'] != current_params['Description']:
- return True
- if 'MatchCriteria' in user_params['ConnectionInput'] and set(user_params['ConnectionInput']['MatchCriteria']) != set(current_params['MatchCriteria']):
- return True
- if 'PhysicalConnectionRequirements' in user_params['ConnectionInput']:
- if 'SecurityGroupIdList' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
- set(user_params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList']) \
- != set(current_params['PhysicalConnectionRequirements']['SecurityGroupIdList']):
- return True
- if 'SubnetId' in user_params['ConnectionInput']['PhysicalConnectionRequirements'] and \
- user_params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] \
- != current_params['PhysicalConnectionRequirements']['SubnetId']:
- return True
-
- return False
-
-
-def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection):
- """
- Create or update an AWS Glue connection
-
- :param connection: AWS boto3 glue connection
- :param module: Ansible module
- :param glue_connection: a dict of AWS Glue connection parameters or None
- :return:
- """
-
- changed = False
- params = dict()
- params['ConnectionInput'] = dict()
- params['ConnectionInput']['Name'] = module.params.get("name")
- params['ConnectionInput']['ConnectionType'] = module.params.get("connection_type")
- params['ConnectionInput']['ConnectionProperties'] = module.params.get("connection_properties")
- if module.params.get("catalog_id") is not None:
- params['CatalogId'] = module.params.get("catalog_id")
- if module.params.get("description") is not None:
- params['ConnectionInput']['Description'] = module.params.get("description")
- if module.params.get("match_criteria") is not None:
- params['ConnectionInput']['MatchCriteria'] = module.params.get("match_criteria")
- if module.params.get("security_groups") is not None or module.params.get("subnet_id") is not None:
- params['ConnectionInput']['PhysicalConnectionRequirements'] = dict()
- if module.params.get("security_groups") is not None:
- # Get security group IDs from names
- security_group_ids = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection_ec2, boto3=True)
- params['ConnectionInput']['PhysicalConnectionRequirements']['SecurityGroupIdList'] = security_group_ids
- if module.params.get("subnet_id") is not None:
- params['ConnectionInput']['PhysicalConnectionRequirements']['SubnetId'] = module.params.get("subnet_id")
-
- # If glue_connection is not None then check if it needs to be modified, else create it
- if glue_connection:
- if _compare_glue_connection_params(params, glue_connection):
- try:
- # We need to slightly modify the params for an update
- update_params = copy.deepcopy(params)
- update_params['Name'] = update_params['ConnectionInput']['Name']
- connection.update_connection(**update_params)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
- else:
- try:
- connection.create_connection(**params)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
-
- # If changed, get the Glue connection again
- if changed:
- glue_connection = None
- for i in range(10):
- glue_connection = _get_glue_connection(connection, module)
- if glue_connection is not None:
- break
- time.sleep(10)
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_connection))
-
-
-def delete_glue_connection(connection, module, glue_connection):
- """
- Delete an AWS Glue connection
-
- :param connection: AWS boto3 glue connection
- :param module: Ansible module
- :param glue_connection: a dict of AWS Glue connection parameters or None
- :return:
- """
-
- changed = False
-
- params = {'ConnectionName': module.params.get("name")}
- if module.params.get("catalog_id") is not None:
- params['CatalogId'] = module.params.get("catalog_id")
-
- if glue_connection:
- try:
- connection.delete_connection(**params)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
-
- module.exit_json(changed=changed)
-
-
-def main():
-
- argument_spec = (
- dict(
- catalog_id=dict(type='str'),
- connection_properties=dict(type='dict'),
- connection_type=dict(type='str', default='JDBC', choices=['JDBC', 'SFTP']),
- description=dict(type='str'),
- match_criteria=dict(type='list'),
- name=dict(required=True, type='str'),
- security_groups=dict(type='list'),
- state=dict(required=True, choices=['present', 'absent'], type='str'),
- subnet_id=dict(type='str')
- )
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[
- ('state', 'present', ['connection_properties'])
- ]
- )
-
- connection_glue = module.client('glue')
- connection_ec2 = module.client('ec2')
-
- glue_connection = _get_glue_connection(connection_glue, module)
-
- if module.params.get("state") == 'present':
- create_or_update_glue_connection(connection_glue, connection_ec2, module, glue_connection)
- else:
- delete_glue_connection(connection_glue, module, glue_connection)
-
-
-if __name__ == '__main__':
- main()
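_compare_glue_connection_params() above works around get_connection() omitting empty fields (such as Description) by back-filling defaults before comparing, and by comparing list-valued fields as sets so member order is ignored. The same idea in isolation (a sketch, not the module's function):

def needs_update(desired, current):
    # Back-fill fields boto3 omits when empty, then report whether any
    # user-supplied field differs; list members compare order-insensitively.
    current = dict(current)
    current.setdefault('Description', '')
    current.setdefault('MatchCriteria', [])
    for key, value in desired.items():
        if isinstance(value, (list, set, tuple)):
            if set(value) != set(current.get(key, [])):
                return True
        elif value != current.get(key):
            return True
    return False

# Example: AWS omitted Description, the user supplied an empty one -> no change.
assert needs_update({'Description': ''}, {'Name': 'c1'}) is False
assert needs_update({'MatchCriteria': ['a', 'b']}, {'MatchCriteria': ['b', 'a']}) is False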
diff --git a/lib/ansible/modules/cloud/amazon/aws_glue_job.py b/lib/ansible/modules/cloud/amazon/aws_glue_job.py
deleted file mode 100644
index 9fa61ac1a4..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_glue_job.py
+++ /dev/null
@@ -1,373 +0,0 @@
-#!/usr/bin/python
-# Copyright: (c) 2018, Rob White (@wimnat)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: aws_glue_job
-short_description: Manage an AWS Glue job
-description:
- - Manage an AWS Glue job. See U(https://aws.amazon.com/glue/) for details.
-version_added: "2.6"
-requirements: [ boto3 ]
-author: "Rob White (@wimnat)"
-options:
- allocated_capacity:
- description:
- - The number of AWS Glue data processing units (DPUs) to allocate to this Job. From 2 to 100 DPUs
- can be allocated; the default is 10. A DPU is a relative measure of processing power that consists
- of 4 vCPUs of compute capacity and 16 GB of memory.
- type: int
- command_name:
- description:
- - The name of the job command. This must be 'glueetl'.
- default: glueetl
- type: str
- command_script_location:
- description:
- - The S3 path to a script that executes a job.
- - Required when I(state=present).
- type: str
- connections:
- description:
- - A list of Glue connections used for this job.
- type: list
- elements: str
- default_arguments:
- description:
- - A dict of default arguments for this job. You can specify arguments here that your own job-execution
- script consumes, as well as arguments that AWS Glue itself consumes.
- type: dict
- description:
- description:
- - Description of the job being defined.
- type: str
- max_concurrent_runs:
- description:
- - The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when
- this threshold is reached. The maximum value you can specify is controlled by a service limit.
- type: int
- max_retries:
- description:
- - The maximum number of times to retry this job if it fails.
- type: int
- name:
- description:
- - The name you assign to this job definition. It must be unique in your account.
- required: true
- type: str
- role:
- description:
- - The name or ARN of the IAM role associated with this job.
- - Required when I(state=present).
- type: str
- state:
- description:
- - Create or delete the AWS Glue job.
- required: true
- choices: [ 'present', 'absent' ]
- type: str
- timeout:
- description:
- - The job timeout in minutes.
- type: int
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create an AWS Glue job
-- aws_glue_job:
- command_script_location: s3bucket/script.py
- name: my-glue-job
- role: my-iam-role
- state: present
-
-# Delete an AWS Glue job
-- aws_glue_job:
- name: my-glue-job
- state: absent
-
-'''
-
-RETURN = '''
-allocated_capacity:
- description: The number of AWS Glue data processing units (DPUs) allocated to runs of this job. From 2 to
- 100 DPUs can be allocated; the default is 10. A DPU is a relative measure of processing power
- that consists of 4 vCPUs of compute capacity and 16 GB of memory.
- returned: when state is present
- type: int
- sample: 10
-command:
- description: The JobCommand that executes this job.
- returned: when state is present
- type: complex
- contains:
- name:
- description: The name of the job command.
- returned: when state is present
- type: str
- sample: glueetl
- script_location:
- description: Specifies the S3 path to a script that executes a job.
- returned: when state is present
- type: str
- sample: mybucket/myscript.py
-connections:
- description: The connections used for this job.
- returned: when state is present
- type: dict
- sample: "{ Connections: [ 'list', 'of', 'connections' ] }"
-created_on:
- description: The time and date that this job definition was created.
- returned: when state is present
- type: str
- sample: "2018-04-21T05:19:58.326000+00:00"
-default_arguments:
- description: The default arguments for this job, specified as name-value pairs.
- returned: when state is present
- type: dict
- sample: "{ 'mykey1': 'myvalue1' }"
-description:
- description: Description of the job being defined.
- returned: when state is present
- type: str
- sample: My first Glue job
-job_name:
- description: The name of the AWS Glue job.
- returned: always
- type: str
- sample: my-glue-job
-execution_property:
- description: An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job.
- returned: always
- type: complex
- contains:
- max_concurrent_runs:
- description: The maximum number of concurrent runs allowed for the job. The default is 1. An error is
- returned when this threshold is reached. The maximum value you can specify is controlled by
- a service limit.
- returned: when state is present
- type: int
- sample: 1
-last_modified_on:
- description: The last point in time when this job definition was modified.
- returned: when state is present
- type: str
- sample: "2018-04-21T05:19:58.326000+00:00"
-max_retries:
- description: The maximum number of times to retry this job after a JobRun fails.
- returned: when state is present
- type: int
- sample: 5
-name:
- description: The name assigned to this job definition.
- returned: when state is present
- type: str
- sample: my-glue-job
-role:
- description: The name or ARN of the IAM role associated with this job.
- returned: when state is present
- type: str
- sample: my-iam-role
-timeout:
- description: The job timeout in minutes.
- returned: when state is present
- type: int
- sample: 300
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-# Non-ansible imports
-import copy
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass
-
-
-def _get_glue_job(connection, module, glue_job_name):
- """
- Get an AWS Glue job based on name. If not found, return None.
-
- :param connection: AWS boto3 glue connection
- :param module: Ansible module
- :param glue_job_name: Name of Glue job to get
- :return: boto3 Glue job dict or None if not found
- """
-
- try:
- return connection.get_job(JobName=glue_job_name)['Job']
-    except ClientError as e:
-        if e.response['Error']['Code'] == 'EntityNotFoundException':
-            return None
-        module.fail_json_aws(e)
-    except BotoCoreError as e:
-        module.fail_json_aws(e)
-
-
-def _compare_glue_job_params(user_params, current_params):
- """
- Compare Glue job params. If there is a difference, return True immediately else return False
-
- :param user_params: the Glue job parameters passed by the user
- :param current_params: the Glue job parameters currently configured
- :return: True if any parameter is mismatched else False
- """
-
- # Weirdly, boto3 doesn't return some keys if the value is empty e.g. Description
- # To counter this, add the key if it's missing with a blank value
-
- if 'Description' not in current_params:
- current_params['Description'] = ""
- if 'DefaultArguments' not in current_params:
- current_params['DefaultArguments'] = dict()
-
- if 'AllocatedCapacity' in user_params and user_params['AllocatedCapacity'] != current_params['AllocatedCapacity']:
- return True
- if 'Command' in user_params and user_params['Command']['ScriptLocation'] != current_params['Command']['ScriptLocation']:
- return True
- if 'Connections' in user_params and set(user_params['Connections']) != set(current_params['Connections']):
- return True
- if 'DefaultArguments' in user_params and set(user_params['DefaultArguments']) != set(current_params['DefaultArguments']):
- return True
- if 'Description' in user_params and user_params['Description'] != current_params['Description']:
- return True
- if 'ExecutionProperty' in user_params and user_params['ExecutionProperty']['MaxConcurrentRuns'] != current_params['ExecutionProperty']['MaxConcurrentRuns']:
- return True
- if 'MaxRetries' in user_params and user_params['MaxRetries'] != current_params['MaxRetries']:
- return True
- if 'Timeout' in user_params and user_params['Timeout'] != current_params['Timeout']:
- return True
-
- return False
-
-
-def create_or_update_glue_job(connection, module, glue_job):
- """
- Create or update an AWS Glue job
-
- :param connection: AWS boto3 glue connection
- :param module: Ansible module
- :param glue_job: a dict of AWS Glue job parameters or None
- :return:
- """
-
- changed = False
- params = dict()
- params['Name'] = module.params.get("name")
- params['Role'] = module.params.get("role")
- if module.params.get("allocated_capacity") is not None:
- params['AllocatedCapacity'] = module.params.get("allocated_capacity")
- if module.params.get("command_script_location") is not None:
- params['Command'] = {'Name': module.params.get("command_name"), 'ScriptLocation': module.params.get("command_script_location")}
- if module.params.get("connections") is not None:
- params['Connections'] = {'Connections': module.params.get("connections")}
- if module.params.get("default_arguments") is not None:
- params['DefaultArguments'] = module.params.get("default_arguments")
- if module.params.get("description") is not None:
- params['Description'] = module.params.get("description")
- if module.params.get("max_concurrent_runs") is not None:
- params['ExecutionProperty'] = {'MaxConcurrentRuns': module.params.get("max_concurrent_runs")}
- if module.params.get("max_retries") is not None:
- params['MaxRetries'] = module.params.get("max_retries")
- if module.params.get("timeout") is not None:
- params['Timeout'] = module.params.get("timeout")
-
- # If glue_job is not None then check if it needs to be modified, else create it
- if glue_job:
- if _compare_glue_job_params(params, glue_job):
- try:
- # Update job needs slightly modified params
- update_params = {'JobName': params['Name'], 'JobUpdate': copy.deepcopy(params)}
- del update_params['JobUpdate']['Name']
- connection.update_job(**update_params)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
- else:
- try:
- connection.create_job(**params)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
-
- # If changed, get the Glue job again
- if changed:
- glue_job = _get_glue_job(connection, module, params['Name'])
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(glue_job))
-
-
-def delete_glue_job(connection, module, glue_job):
- """
- Delete an AWS Glue job
-
- :param connection: AWS boto3 glue connection
- :param module: Ansible module
- :param glue_job: a dict of AWS Glue job parameters or None
- :return:
- """
-
- changed = False
-
- if glue_job:
- try:
- connection.delete_job(JobName=glue_job['Name'])
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
-
- module.exit_json(changed=changed)
-
-
-def main():
-
- argument_spec = (
- dict(
- allocated_capacity=dict(type='int'),
- command_name=dict(type='str', default='glueetl'),
- command_script_location=dict(type='str'),
- connections=dict(type='list'),
- default_arguments=dict(type='dict'),
- description=dict(type='str'),
- max_concurrent_runs=dict(type='int'),
- max_retries=dict(type='int'),
- name=dict(required=True, type='str'),
- role=dict(type='str'),
- state=dict(required=True, choices=['present', 'absent'], type='str'),
- timeout=dict(type='int')
- )
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[
- ('state', 'present', ['role', 'command_script_location'])
- ]
- )
-
- connection = module.client('glue')
-
- state = module.params.get("state")
-
- glue_job = _get_glue_job(connection, module, module.params.get("name"))
-
- if state == 'present':
- create_or_update_glue_job(connection, module, glue_job)
- else:
- delete_glue_job(connection, module, glue_job)
-
-
-if __name__ == '__main__':
- main()
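create_or_update_glue_job() above reuses the create_job() kwargs for update_job(), whose signature differs: update_job() takes a top-level JobName plus a JobUpdate dict that must not repeat the name. The reshaping step in isolation:

import copy

def to_update_params(create_params):
    # Move the job name to a top-level JobName key and drop it from the
    # update body, as update_job() requires.
    update_params = {'JobName': create_params['Name'],
                     'JobUpdate': copy.deepcopy(create_params)}
    del update_params['JobUpdate']['Name']
    return update_params

params = {'Name': 'my-glue-job', 'Role': 'my-iam-role'}
assert to_update_params(params) == {'JobName': 'my-glue-job',
                                    'JobUpdate': {'Role': 'my-iam-role'}}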
diff --git a/lib/ansible/modules/cloud/amazon/aws_inspector_target.py b/lib/ansible/modules/cloud/amazon/aws_inspector_target.py
deleted file mode 100644
index a5607b765b..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_inspector_target.py
+++ /dev/null
@@ -1,248 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2018 Dennis Conrad for Sainsbury's
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: aws_inspector_target
-short_description: Create, update and delete Amazon Inspector assessment targets
-description: Creates, updates, or deletes Amazon Inspector Assessment Targets
- and manages the required Resource Groups.
-version_added: "2.6"
-author: "Dennis Conrad (@dennisconrad)"
-options:
- name:
- description:
- - The user-defined name that identifies the assessment target. The name
- must be unique within the AWS account.
- required: true
- type: str
- state:
- description:
- - The state of the assessment target.
- choices:
- - absent
- - present
- default: present
- type: str
- tags:
- description:
- - Tags of the EC2 instances to be added to the assessment target.
- - Required if C(state=present).
- type: dict
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - boto3
- - botocore
-'''
-
-EXAMPLES = '''
-- name: Create my_target Assessment Target
- aws_inspector_target:
- name: my_target
- tags:
- role: scan_target
-
-- name: Update Existing my_target Assessment Target with Additional Tags
- aws_inspector_target:
- name: my_target
- tags:
- env: dev
- role: scan_target
-
-- name: Delete my_target Assessment Target
- aws_inspector_target:
- name: my_target
- state: absent
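-
-# A minimal sketch of reusing the module output: register the result and
-# echo the returned ARN (see RETURN below). The variable name
-# my_target_result is illustrative.
-- name: Create my_target and Register the Result
- aws_inspector_target:
- name: my_target
- tags:
- role: scan_target
- register: my_target_result
-
-- debug:
- msg: "Assessment target ARN is {{ my_target_result.arn }}"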
-'''
-
-RETURN = '''
-arn:
- description: The ARN that specifies the Amazon Inspector assessment target.
- returned: success
- type: str
- sample: "arn:aws:inspector:eu-west-1:123456789012:target/0-O4LnL7n1"
-created_at:
- description: The time at which the assessment target was created.
- returned: success
- type: str
- sample: "2018-01-29T13:48:51.958000+00:00"
-name:
- description: The name of the Amazon Inspector assessment target.
- returned: success
- type: str
- sample: "my_target"
-resource_group_arn:
- description: The ARN that specifies the resource group that is associated
- with the assessment target.
- returned: success
- type: str
- sample: "arn:aws:inspector:eu-west-1:123456789012:resourcegroup/0-qY4gDel8"
-tags:
- description: The tags of the resource group that is associated with the
- assessment target.
- returned: success
- type: dict
- sample: {"role": "scan_target", "env": "dev"}
-updated_at:
- description: The time at which the assessment target was last updated.
- returned: success
- type: str
- sample: "2018-01-29T13:48:51.958000+00:00"
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import AWSRetry
-from ansible.module_utils.ec2 import (
- ansible_dict_to_boto3_tag_list,
- boto3_tag_list_to_ansible_dict,
- camel_dict_to_snake_dict,
- compare_aws_tags,
-)
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def main():
- argument_spec = dict(
- name=dict(required=True),
- state=dict(choices=['absent', 'present'], default='present'),
- tags=dict(type='dict'),
- )
-
- required_if = [['state', 'present', ['tags']]]
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=False,
- required_if=required_if,
- )
-
- name = module.params.get('name')
- state = module.params.get('state').lower()
- tags = module.params.get('tags')
- if tags:
- tags = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
-
- client = module.client('inspector')
-
- try:
- existing_target_arn = client.list_assessment_targets(
- filter={'assessmentTargetNamePattern': name},
- ).get('assessmentTargetArns')[0]
-
- existing_target = camel_dict_to_snake_dict(
- client.describe_assessment_targets(
- assessmentTargetArns=[existing_target_arn],
- ).get('assessmentTargets')[0]
- )
-
- existing_resource_group_arn = existing_target.get('resource_group_arn')
- existing_resource_group_tags = client.describe_resource_groups(
- resourceGroupArns=[existing_resource_group_arn],
- ).get('resourceGroups')[0].get('tags')
-
- target_exists = True
- except (
- botocore.exceptions.BotoCoreError,
- botocore.exceptions.ClientError,
- ) as e:
- module.fail_json_aws(e, msg="trying to retrieve targets")
- except IndexError:
- target_exists = False
-
- if state == 'present' and target_exists:
- ansible_dict_tags = boto3_tag_list_to_ansible_dict(tags)
- ansible_dict_existing_tags = boto3_tag_list_to_ansible_dict(
- existing_resource_group_tags
- )
- tags_to_add, tags_to_remove = compare_aws_tags(
- ansible_dict_tags,
- ansible_dict_existing_tags
- )
- if not (tags_to_add or tags_to_remove):
- existing_target.update({'tags': ansible_dict_existing_tags})
- module.exit_json(changed=False, **existing_target)
- else:
- try:
- updated_resource_group_arn = client.create_resource_group(
- resourceGroupTags=tags,
- ).get('resourceGroupArn')
-
- client.update_assessment_target(
- assessmentTargetArn=existing_target_arn,
- assessmentTargetName=name,
- resourceGroupArn=updated_resource_group_arn,
- )
-
- updated_target = camel_dict_to_snake_dict(
- client.describe_assessment_targets(
- assessmentTargetArns=[existing_target_arn],
- ).get('assessmentTargets')[0]
- )
-
- updated_target.update({'tags': ansible_dict_tags})
- module.exit_json(changed=True, **updated_target)
- except (
- botocore.exceptions.BotoCoreError,
- botocore.exceptions.ClientError,
- ) as e:
- module.fail_json_aws(e, msg="trying to update target")
-
- elif state == 'present' and not target_exists:
- try:
- new_resource_group_arn = client.create_resource_group(
- resourceGroupTags=tags,
- ).get('resourceGroupArn')
-
- new_target_arn = client.create_assessment_target(
- assessmentTargetName=name,
- resourceGroupArn=new_resource_group_arn,
- ).get('assessmentTargetArn')
-
- new_target = camel_dict_to_snake_dict(
- client.describe_assessment_targets(
- assessmentTargetArns=[new_target_arn],
- ).get('assessmentTargets')[0]
- )
-
- new_target.update({'tags': boto3_tag_list_to_ansible_dict(tags)})
- module.exit_json(changed=True, **new_target)
- except (
- botocore.exceptions.BotoCoreError,
- botocore.exceptions.ClientError,
- ) as e:
- module.fail_json_aws(e, msg="trying to create target")
-
- elif state == 'absent' and target_exists:
- try:
- client.delete_assessment_target(
- assessmentTargetArn=existing_target_arn,
- )
- module.exit_json(changed=True)
- except (
- botocore.exceptions.BotoCoreError,
- botocore.exceptions.ClientError,
- ) as e:
- module.fail_json_aws(e, msg="trying to delete target")
-
- elif state == 'absent' and not target_exists:
- module.exit_json(changed=False)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_kms.py b/lib/ansible/modules/cloud/amazon/aws_kms.py
deleted file mode 100644
index 8a906a9f3d..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_kms.py
+++ /dev/null
@@ -1,1072 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: aws_kms
-short_description: Perform various KMS management tasks.
-description:
- - Manage role/user access to a KMS key. Not designed for encrypting/decrypting.
-version_added: "2.3"
-options:
- alias:
- description: An alias for a key. For safety, even though KMS does not require keys
- to have an alias, this module expects all new keys to be given an alias
- to make them easier to manage. Existing keys without an alias may be
- referred to by I(key_id). Use M(aws_kms_info) to find key ids. Required
- if I(key_id) is not given. Note that passing both I(key_id) and I(alias)
- will only cause a new alias to be added; an alias will never be renamed.
- The 'alias/' prefix is optional.
- required: false
- aliases:
- - key_alias
- type: str
- key_id:
- description:
- - Key ID or ARN of the key.
- - One of I(alias) or I(key_id) is required.
- required: false
- aliases:
- - key_arn
- type: str
- enable_key_rotation:
- description:
- - Whether the key should be automatically rotated every year.
- required: false
- type: bool
- version_added: '2.10'
- policy_mode:
- description:
- - (deprecated) Grant or deny access.
- - Used for modifying the Key Policy rather than modifying a grant and only
- works on the default policy created through the AWS Console.
- - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
- default: grant
- choices: [ grant, deny ]
- aliases:
- - mode
- type: str
- policy_role_name:
- description:
- - (deprecated) Role to allow/deny access.
- - One of I(policy_role_name) or I(policy_role_arn) is required.
- - Used for modifying the Key Policy rather than modifying a grant and only
- works on the default policy created through the AWS Console.
- - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
- required: false
- aliases:
- - role_name
- type: str
- policy_role_arn:
- description:
- - (deprecated) ARN of role to allow/deny access.
- - One of I(policy_role_name) or I(policy_role_arn) are required.
- - Used for modifying the Key Policy rather than modifying a grant and only
- works on the default policy created through the AWS Console.
- - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
- type: str
- required: false
- aliases:
- - role_arn
- policy_grant_types:
- description:
- - (deprecated) List of grants to give to user/role. Likely "role,role grant" or "role,role grant,admin".
- - Required when I(policy_mode=grant).
- - Used for modifying the Key Policy rather than modifying a grant and only
- works on the default policy created through the AWS Console.
- - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
- required: false
- aliases:
- - grant_types
- type: list
- elements: str
- policy_clean_invalid_entries:
- description:
- - (deprecated) If adding/removing a role and invalid grantees are found, remove them. These entries will cause an update to fail in all known cases.
- - Only cleans if changes are being made.
- - Used for modifying the Key Policy rather than modifying a grant and only
- works on the default policy created through the AWS Console.
- - This option has been deprecated, and will be removed in 2.13. Use I(policy) instead.
- type: bool
- default: true
- aliases:
- - clean_invalid_entries
- state:
- description: Whether a key should be present or absent. Note that making an
- existing key absent only schedules a key for deletion. Passing a key that
- is scheduled for deletion with state present will cancel key deletion.
- required: False
- choices:
- - present
- - absent
- default: present
- version_added: 2.8
- type: str
- enabled:
- description: Whether the key is enabled.
- default: True
- version_added: 2.8
- type: bool
- description:
- description:
- A description of the CMK. Use a description that helps you decide
- whether the CMK is appropriate for a task.
- version_added: 2.8
- type: str
- tags:
- description: A dictionary of tags to apply to a key.
- version_added: 2.8
- type: dict
- purge_tags:
- description: Whether the I(tags) argument should cause tags not in the list to
- be removed
- version_added: 2.8
- default: False
- type: bool
- purge_grants:
- description: Whether the I(grants) argument should cause grants not in the list to
- be removed
- default: False
- version_added: 2.8
- type: bool
- grants:
- description:
- - A list of grants to apply to the key. Each item must contain I(grantee_principal).
- Each item can optionally contain I(retiring_principal), I(operations), I(constraints),
- I(name).
- - I(grantee_principal) and I(retiring_principal) must be ARNs
- - 'For full documentation of suboptions see the boto3 documentation:'
- - 'U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant)'
- version_added: 2.8
- type: list
- elements: dict
- suboptions:
- grantee_principal:
- description: The full ARN of the principal being granted permissions.
- required: true
- type: str
- retiring_principal:
- description: The full ARN of the principal permitted to revoke/retire the grant.
- type: str
- operations:
- type: list
- elements: str
- description:
- - A list of operations that the grantee may perform using the CMK.
- choices: ['Decrypt', 'Encrypt', 'GenerateDataKey', 'GenerateDataKeyWithoutPlaintext', 'ReEncryptFrom', 'ReEncryptTo',
- 'CreateGrant', 'RetireGrant', 'DescribeKey', 'Verify', 'Sign']
- constraints:
- description:
- - Constraints is a dict containing C(encryption_context_subset) or C(encryption_context_equals),
- either or both being a dict specifying an encryption context match.
- See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) or
- U(https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.create_grant)
- type: dict
- policy:
- description:
- - policy to apply to the KMS key
- - See U(https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)
- type: str
- version_added: 2.8
-author:
- - Ted Timmons (@tedder)
- - Will Thames (@willthames)
- - Mark Chappell (@tremble)
-extends_documentation_fragment:
-- aws
-- ec2
-'''
-
-EXAMPLES = '''
-# Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile
-# and has been deprecated in favour of the policy option.
-- name: grant user-style access to production secrets
- aws_kms:
- args:
- alias: "alias/my_production_secrets"
- policy_mode: grant
- policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L"
- policy_grant_types: "role,role grant"
-- name: remove access to production secrets from role
- aws_kms:
- args:
- alias: "alias/my_production_secrets"
- policy_mode: deny
- policy_role_name: "prod-appServerRole-1R5AQG2BSEL6L"
-
-# Create a new KMS key
-- aws_kms:
- alias: mykey
- tags:
- Name: myKey
- Purpose: protect_stuff
-
-# Update previous key with more tags
-- aws_kms:
- alias: mykey
- tags:
- Name: myKey
- Purpose: protect_stuff
- Owner: security_team
-
-# Update a known key with grants allowing an instance with the billing-prod IAM profile
-# to decrypt data encrypted with the environment: production, application: billing
-# encryption context
-- aws_kms:
- key_id: abcd1234-abcd-1234-5678-ef1234567890
- grants:
- - name: billing_prod
- grantee_principal: arn:aws:iam::1234567890123:role/billing_prod
- constraints:
- encryption_context_equals:
- environment: production
- application: billing
- operations:
- - Decrypt
- - RetireGrant
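-
-# A minimal sketch of managing the key policy via the policy option, the
-# replacement for the deprecated policy_mode/policy_grant_types options.
-# The file name kms_policy.json is illustrative.
-- aws_kms:
- alias: mykey
- policy: "{{ lookup('file', 'kms_policy.json') }}"
-
-# Enable automatic yearly key rotation using the enable_key_rotation
-# option described above.
-- aws_kms:
- alias: mykey
- enable_key_rotation: true
-
-# Schedule the key for deletion. Note that state=absent only schedules
-# deletion; running again with state=present cancels it.
-- aws_kms:
- alias: mykey
- state: absent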
-'''
-
-RETURN = '''
-key_id:
- description: ID of key
- type: str
- returned: always
- sample: abcd1234-abcd-1234-5678-ef1234567890
-key_arn:
- description: ARN of key
- type: str
- returned: always
- sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
-key_state:
- description: The state of the key
- type: str
- returned: always
- sample: PendingDeletion
-key_usage:
- description: The cryptographic operations for which you can use the key.
- type: str
- returned: always
- sample: ENCRYPT_DECRYPT
-origin:
- description: The source of the key's key material. When this value is C(AWS_KMS),
- AWS KMS created the key material. When this value is C(EXTERNAL), the
- key material was imported or the CMK lacks key material.
- type: str
- returned: always
- sample: AWS_KMS
-aws_account_id:
- description: The AWS Account ID that the key belongs to
- type: str
- returned: always
- sample: 1234567890123
-creation_date:
- description: Date of creation of the key
- type: str
- returned: always
- sample: "2017-04-18T15:12:08.551000+10:00"
-description:
- description: Description of the key
- type: str
- returned: always
- sample: "My Key for Protecting important stuff"
-enabled:
- description: Whether the key is enabled. True if C(KeyState) is C(Enabled).
- type: bool
- returned: always
- sample: false
-aliases:
- description: list of aliases associated with the key
- type: list
- returned: always
- sample:
- - aws/acm
- - aws/ebs
-policies:
- description: list of policy documents for the keys. Empty when access is denied even if there are policies.
- type: list
- returned: always
- sample:
- Version: "2012-10-17"
- Id: "auto-ebs-2"
- Statement:
- - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
- Effect: "Allow"
- Principal:
- AWS: "*"
- Action:
- - "kms:Encrypt"
- - "kms:Decrypt"
- - "kms:ReEncrypt*"
- - "kms:GenerateDataKey*"
- - "kms:CreateGrant"
- - "kms:DescribeKey"
- Resource: "*"
- Condition:
- StringEquals:
- kms:CallerAccount: "111111111111"
- kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
- - Sid: "Allow direct access to key metadata to the account"
- Effect: "Allow"
- Principal:
- AWS: "arn:aws:iam::111111111111:root"
- Action:
- - "kms:Describe*"
- - "kms:Get*"
- - "kms:List*"
- - "kms:RevokeGrant"
- Resource: "*"
-tags:
- description: dictionary of tags applied to the key
- type: dict
- returned: always
- sample:
- Name: myKey
- Purpose: protecting_stuff
-grants:
- description: list of grants associated with a key
- type: complex
- returned: always
- contains:
- constraints:
- description: Constraints on the encryption context that the grant allows.
- See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details
- type: dict
- returned: always
- sample:
- encryption_context_equals:
- "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz"
- creation_date:
- description: Date of creation of the grant
- type: str
- returned: always
- sample: "2017-04-18T15:12:08+10:00"
- grant_id:
- description: The unique ID for the grant
- type: str
- returned: always
- sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
- grantee_principal:
- description: The principal that receives the grant's permissions
- type: str
- returned: always
- sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
- issuing_account:
- description: The AWS account under which the grant was issued
- type: str
- returned: always
- sample: arn:aws:iam::01234567890:root
- key_id:
- description: The key ARN to which the grant applies.
- type: str
- returned: always
- sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
- name:
- description: The friendly name that identifies the grant
- type: str
- returned: always
- sample: xyz
- operations:
- description: The list of operations permitted by the grant
- type: list
- returned: always
- sample:
- - Decrypt
- - RetireGrant
- retiring_principal:
- description: The principal that can retire the grant
- type: str
- returned: always
- sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
-changes_needed:
- description: grant types that would be changed/were changed.
- type: dict
- returned: always
- sample: { "role": "add", "role grant": "add" }
-had_invalid_entries:
- description: Whether there are invalid (non-ARN) entries in the KMS key policy. These don't count as a change, but will be removed if any changes are being made.
- type: bool
- returned: always
-'''
-
-# these mappings are used to go from simple labels to the actual 'Sid' values returned
-# by get_policy. They seem to be magic values.
-statement_label = {
- 'role': 'Allow use of the key',
- 'role grant': 'Allow attachment of persistent resources',
- 'admin': 'Allow access for Key Administrators'
-}
-
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
-from ansible.module_utils.ec2 import compare_aws_tags, compare_policies
-from ansible.module_utils.six import string_types
-
-import json
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_iam_roles_with_backoff(connection):
- paginator = connection.get_paginator('list_roles')
- return paginator.paginate().build_full_result()
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_kms_keys_with_backoff(connection):
- paginator = connection.get_paginator('list_keys')
- return paginator.paginate().build_full_result()
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_kms_aliases_with_backoff(connection):
- paginator = connection.get_paginator('list_aliases')
- return paginator.paginate().build_full_result()
-
-
-def get_kms_aliases_lookup(connection):
- _aliases = dict()
- for alias in get_kms_aliases_with_backoff(connection)['Aliases']:
- # Not all aliases are actually associated with a key
- if 'TargetKeyId' in alias:
- # strip off leading 'alias/' and add it to key's aliases
- if alias['TargetKeyId'] in _aliases:
- _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:])
- else:
- _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]]
- return _aliases
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_kms_tags_with_backoff(connection, key_id, **kwargs):
- return connection.list_resource_tags(KeyId=key_id, **kwargs)
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_kms_grants_with_backoff(connection, key_id):
- params = dict(KeyId=key_id)
- paginator = connection.get_paginator('list_grants')
- return paginator.paginate(**params).build_full_result()
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_kms_metadata_with_backoff(connection, key_id):
- return connection.describe_key(KeyId=key_id)
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def list_key_policies_with_backoff(connection, key_id):
- paginator = connection.get_paginator('list_key_policies')
- return paginator.paginate(KeyId=key_id).build_full_result()
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_key_policy_with_backoff(connection, key_id, policy_name):
- return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
-
-
-def get_kms_tags(connection, module, key_id):
- # Handle pagination here as list_resource_tags does not have
- # a paginator
- kwargs = {}
- tags = []
- more = True
- while more:
- try:
- tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
- tags.extend(tag_response['Tags'])
- except is_boto3_error_code('AccessDeniedException'):
- tag_response = {}
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Failed to obtain key tags")
- if tag_response.get('NextMarker'):
- kwargs['Marker'] = tag_response['NextMarker']
- else:
- more = False
- return tags
-
-
-def get_kms_policies(connection, module, key_id):
- try:
- policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
- return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for
- policy in policies]
- except is_boto3_error_code('AccessDeniedException'):
- return []
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Failed to obtain key policies")
-
-
-def camel_to_snake_grant(grant):
- ''' camel_to_snake_grant snakifies everything except the encryption context '''
- constraints = grant.get('Constraints', {})
- result = camel_dict_to_snake_dict(grant)
- if 'EncryptionContextEquals' in constraints:
- result['constraints']['encryption_context_equals'] = constraints['EncryptionContextEquals']
- if 'EncryptionContextSubset' in constraints:
- result['constraints']['encryption_context_subset'] = constraints['EncryptionContextSubset']
- return result
-
-
-def get_key_details(connection, module, key_id):
- try:
- result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to obtain key metadata")
- result['KeyArn'] = result.pop('Arn')
-
- try:
- aliases = get_kms_aliases_lookup(connection)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to obtain aliases")
-
- current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
- result['enable_key_rotation'] = current_rotation_status.get('KeyRotationEnabled')
- result['aliases'] = aliases.get(result['KeyId'], [])
-
- result = camel_dict_to_snake_dict(result)
-
- # grants and tags get snakified differently
- try:
- result['grants'] = [camel_to_snake_grant(grant) for grant in
- get_kms_grants_with_backoff(connection, key_id)['Grants']]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to obtain key grants")
- tags = get_kms_tags(connection, module, key_id)
- result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
- result['policies'] = get_kms_policies(connection, module, key_id)
- return result
-
-
-def get_kms_facts(connection, module):
- try:
- keys = get_kms_keys_with_backoff(connection)['Keys']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to obtain keys")
-
- return [get_key_details(connection, module, key['KeyId']) for key in keys]
-
-
-def convert_grant_params(grant, key):
- grant_params = dict(KeyId=key['key_arn'],
- GranteePrincipal=grant['grantee_principal'])
- if grant.get('operations'):
- grant_params['Operations'] = grant['operations']
- if grant.get('retiring_principal'):
- grant_params['RetiringPrincipal'] = grant['retiring_principal']
- if grant.get('name'):
- grant_params['Name'] = grant['name']
- if grant.get('constraints'):
- grant_params['Constraints'] = dict()
- if grant['constraints'].get('encryption_context_subset'):
- grant_params['Constraints']['EncryptionContextSubset'] = grant['constraints']['encryption_context_subset']
- if grant['constraints'].get('encryption_context_equals'):
- grant_params['Constraints']['EncryptionContextEquals'] = grant['constraints']['encryption_context_equals']
- return grant_params
-
-
-def different_grant(existing_grant, desired_grant):
- if existing_grant.get('grantee_principal') != desired_grant.get('grantee_principal'):
- return True
- if existing_grant.get('retiring_principal') != desired_grant.get('retiring_principal'):
- return True
- if set(existing_grant.get('operations', [])) != set(desired_grant.get('operations', [])):
- return True
- if existing_grant.get('constraints') != desired_grant.get('constraints'):
- return True
- return False
-
-
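-# Note: grants cannot be modified in place through the KMS API, so a grant
-# whose settings differ from the desired state ends up in both to_add and
-# to_remove, i.e. update_grants() retires it and creates a replacement.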
-def compare_grants(existing_grants, desired_grants, purge_grants=False):
- existing_dict = dict((eg['name'], eg) for eg in existing_grants)
- desired_dict = dict((dg['name'], dg) for dg in desired_grants)
- to_add_keys = set(desired_dict.keys()) - set(existing_dict.keys())
- if purge_grants:
- to_remove_keys = set(existing_dict.keys()) - set(desired_dict.keys())
- else:
- to_remove_keys = set()
- to_change_candidates = set(existing_dict.keys()) & set(desired_dict.keys())
- for candidate in to_change_candidates:
- if different_grant(existing_dict[candidate], desired_dict[candidate]):
- to_add_keys.add(candidate)
- to_remove_keys.add(candidate)
-
- to_add = []
- to_remove = []
- for key in to_add_keys:
- grant = desired_dict[key]
- to_add.append(grant)
- for key in to_remove_keys:
- grant = existing_dict[key]
- to_remove.append(grant)
- return to_add, to_remove
-
-
-def start_key_deletion(connection, module, key_metadata):
- if key_metadata['KeyState'] == 'PendingDeletion':
- return False
-
- if module.check_mode:
- return True
-
- try:
- connection.schedule_key_deletion(KeyId=key_metadata['Arn'])
- return True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to schedule key for deletion")
-
-
-def cancel_key_deletion(connection, module, key):
- key_id = key['key_arn']
- if key['key_state'] != 'PendingDeletion':
- return False
-
- if module.check_mode:
- return True
-
- try:
- connection.cancel_key_deletion(KeyId=key_id)
- # key is disabled after deletion cancellation
- # set this so that ensure_enabled_disabled works correctly
- key['key_state'] = 'Disabled'
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to cancel key deletion")
-
- return True
-
-
-def ensure_enabled_disabled(connection, module, key, enabled):
- desired_state = 'Enabled'
- if not enabled:
- desired_state = 'Disabled'
-
- if key['key_state'] == desired_state:
- return False
-
- key_id = key['key_arn']
- if not module.check_mode:
- if enabled:
- try:
- connection.enable_key(KeyId=key_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to enable key")
- else:
- try:
- connection.disable_key(KeyId=key_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to disable key")
-
- return True
-
-
-def update_alias(connection, module, key, alias):
- alias = canonicalize_alias_name(alias)
-
- if alias is None:
- return False
-
- key_id = key['key_arn']
- aliases = get_kms_aliases_with_backoff(connection)['Aliases']
- # We will only add new aliases, not rename existing ones
- if alias in [_alias['AliasName'] for _alias in aliases]:
- return False
-
- if not module.check_mode:
- try:
- connection.create_alias(TargetKeyId=key_id, AliasName=alias)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed create key alias")
-
- return True
-
-
-def update_description(connection, module, key, description):
- if description is None:
- return False
- if key['description'] == description:
- return False
-
- key_id = key['key_arn']
- if not module.check_mode:
- try:
- connection.update_key_description(KeyId=key_id, Description=description)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to update key description")
-
- return True
-
-
-def update_tags(connection, module, key, desired_tags, purge_tags):
- # purge_tags needs to be explicitly set, so an empty tags list means remove
- # all tags
-
- to_add, to_remove = compare_aws_tags(key['tags'], desired_tags, purge_tags)
- if not (bool(to_add) or bool(to_remove)):
- return False
-
- key_id = key['key_arn']
- if not module.check_mode:
- if to_remove:
- try:
- connection.untag_resource(KeyId=key_id, TagKeys=to_remove)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to remove tag")
- if to_add:
- try:
- tags = ansible_dict_to_boto3_tag_list(desired_tags, tag_name_key_name='TagKey', tag_value_key_name='TagValue')
- connection.tag_resource(KeyId=key_id, Tags=tags)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to add tag to key")
-
- return True
-
-
-def update_policy(connection, module, key, policy):
- if policy is None:
- return False
- try:
- new_policy = json.loads(policy)
- except ValueError as e:
- module.fail_json_aws(e, msg="Unable to parse new policy as JSON")
-
- key_id = key['key_arn']
- try:
- keyret = connection.get_key_policy(KeyId=key_id, PolicyName='default')
- original_policy = json.loads(keyret['Policy'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
- # If we can't fetch the current policy assume we're making a change
- # Could occur if we have PutKeyPolicy without GetKeyPolicy
- original_policy = {}
-
- if not compare_policies(original_policy, new_policy):
- return False
-
- if not module.check_mode:
- try:
- connection.put_key_policy(KeyId=key_id, PolicyName='default', Policy=policy)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to update key policy")
-
- return True
-
-
-def update_key_rotation(connection, module, key, enable_key_rotation):
- if enable_key_rotation is None:
- return False
- key_id = key['key_arn']
- current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
- if current_rotation_status.get('KeyRotationEnabled') == enable_key_rotation:
- return False
-
- if enable_key_rotation:
- connection.enable_key_rotation(KeyId=key_id)
- else:
- connection.disable_key_rotation(KeyId=key_id)
- return True
-
-
-def update_grants(connection, module, key, desired_grants, purge_grants):
- existing_grants = key['grants']
-
- to_add, to_remove = compare_grants(existing_grants, desired_grants, purge_grants)
- if not (bool(to_add) or bool(to_remove)):
- return False
-
- key_id = key['key_arn']
- if not module.check_mode:
- for grant in to_remove:
- try:
- connection.retire_grant(KeyId=key_id, GrantId=grant['grant_id'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to retire grant")
- for grant in to_add:
- grant_params = convert_grant_params(grant, key)
- try:
- connection.create_grant(**grant_params)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to create grant")
-
- return True
-
-
-def update_key(connection, module, key):
- changed = False
-
- changed |= cancel_key_deletion(connection, module, key)
- changed |= ensure_enabled_disabled(connection, module, key, module.params['enabled'])
- changed |= update_alias(connection, module, key, module.params['alias'])
- changed |= update_description(connection, module, key, module.params['description'])
- changed |= update_tags(connection, module, key, module.params['tags'], module.params.get('purge_tags'))
- changed |= update_policy(connection, module, key, module.params.get('policy'))
- changed |= update_grants(connection, module, key, module.params.get('grants'), module.params.get('purge_grants'))
- changed |= update_key_rotation(connection, module, key, module.params.get('enable_key_rotation'))
-
- # make results consistent with kms_facts before returning
- result = get_key_details(connection, module, key['key_arn'])
- result['changed'] = changed
- return result
-
-
-def create_key(connection, module):
- params = dict(BypassPolicyLockoutSafetyCheck=False,
- Tags=ansible_dict_to_boto3_tag_list(module.params['tags'], tag_name_key_name='TagKey', tag_value_key_name='TagValue'),
- KeyUsage='ENCRYPT_DECRYPT',
- Origin='AWS_KMS')
- if module.params.get('description'):
- params['Description'] = module.params['description']
- if module.params.get('policy'):
- params['Policy'] = module.params['policy']
-
- try:
- result = connection.create_key(**params)['KeyMetadata']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to create initial key")
- key = get_key_details(connection, module, result['KeyId'])
-
- update_alias(connection, module, key, module.params['alias'])
- update_key_rotation(connection, module, key, module.params.get('enable_key_rotation'))
-
- ensure_enabled_disabled(connection, module, key, module.params.get('enabled'))
- update_grants(connection, module, key, module.params.get('grants'), False)
-
- # make results consistent with kms_facts
- result = get_key_details(connection, module, key['key_id'])
- result['changed'] = True
- return result
-
-
-def delete_key(connection, module, key_metadata):
- changed = False
-
- changed |= start_key_deletion(connection, module, key_metadata)
-
- result = get_key_details(connection, module, key_metadata['Arn'])
- result['changed'] = changed
- return result
-
-
-def get_arn_from_role_name(iam, rolename):
- ret = iam.get_role(RoleName=rolename)
- if ret.get('Role') and ret['Role'].get('Arn'):
- return ret['Role']['Arn']
- raise Exception('could not find arn for name {0}.'.format(rolename))
-
-
-def _clean_statement_principals(statement, clean_invalid_entries):
-
- # create Principal and 'AWS' so we can safely use them later.
- if not isinstance(statement.get('Principal'), dict):
- statement['Principal'] = dict()
-
- # If we have a single AWS Principal, ensure we still have a list (to manipulate)
- if 'AWS' in statement['Principal'] and isinstance(statement['Principal']['AWS'], string_types):
- statement['Principal']['AWS'] = [statement['Principal']['AWS']]
- if not isinstance(statement['Principal'].get('AWS'), list):
- statement['Principal']['AWS'] = list()
-
- invalid_entries = [item for item in statement['Principal']['AWS'] if not item.startswith('arn:aws:iam::')]
- valid_entries = [item for item in statement['Principal']['AWS'] if item.startswith('arn:aws:iam::')]
-
- if bool(invalid_entries) and clean_invalid_entries:
- statement['Principal']['AWS'] = valid_entries
- return True
-
- return False
-
-
-def _do_statement_grant(statement, role_arn, grant_types, mode, grant_type):
-
- if mode == 'grant':
- if grant_type in grant_types:
- if role_arn not in statement['Principal']['AWS']: # needs to be added.
- statement['Principal']['AWS'].append(role_arn)
- return 'add'
- elif role_arn in statement['Principal']['AWS']: # not one of the places the role should be
- statement['Principal']['AWS'].remove(role_arn)
- return 'remove'
- return None
-
- if mode == 'deny' and role_arn in statement['Principal']['AWS']:
- # We don't selectively deny; a selective deny is just a grant with a
- # smaller list. Deny therefore means removing every occurrence of this ARN.
- statement['Principal']['AWS'].remove(role_arn)
- return 'remove'
- return None
-
-
-def do_policy_grant(module, kms, keyarn, role_arn, grant_types, mode='grant', dry_run=True, clean_invalid_entries=True):
- ret = {}
- policy = json.loads(get_key_policy_with_backoff(kms, keyarn, 'default')['Policy'])
-
- changes_needed = {}
- assert_policy_shape(module, policy)
- had_invalid_entries = False
- for statement in policy['Statement']:
- # We already tested that these are the only types in the statements
- for grant_type in statement_label:
- # Are we on this grant type's statement?
- if statement['Sid'] != statement_label[grant_type]:
- continue
-
- had_invalid_entries |= _clean_statement_principals(statement, clean_invalid_entries)
- change = _do_statement_grant(statement, role_arn, grant_types, mode, grant_type)
- if change:
- changes_needed[grant_type] = change
-
- ret['changes_needed'] = changes_needed
- ret['had_invalid_entries'] = had_invalid_entries
- ret['new_policy'] = policy
- ret['changed'] = bool(changes_needed)
-
- if dry_run or not ret['changed']:
- return ret
-
- try:
- policy_json_string = json.dumps(policy)
- kms.put_key_policy(KeyId=keyarn, PolicyName='default', Policy=policy_json_string)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not update key_policy', new_policy=policy_json_string)
-
- return ret
-
-
-def assert_policy_shape(module, policy):
- '''Since the policy seems a little, uh, fragile, make sure we know approximately what we're looking at.'''
- errors = []
- if policy['Version'] != "2012-10-17":
- errors.append('Unknown version/date ({0}) of policy. Things are probably different than we assumed they were.'.format(policy['Version']))
-
- found_statement_type = {}
- for statement in policy['Statement']:
- for label, sidlabel in statement_label.items():
- if statement['Sid'] == sidlabel:
- found_statement_type[label] = True
-
- for statementtype in statement_label:
- if not found_statement_type.get(statementtype):
- errors.append('Policy is missing {0}.'.format(statementtype))
-
- if errors:
- module.fail_json(msg='Problems asserting policy shape. Cowardly refusing to modify it', errors=errors, policy=policy)
-
-
-def canonicalize_alias_name(alias):
- if alias is None:
- return None
- if alias.startswith('alias/'):
- return alias
- return 'alias/' + alias
-
-
-def fetch_key_metadata(connection, module, key_id, alias):
-
- alias = canonicalize_alias_name(alias)
-
- try:
- # Fetch by key_id where possible
- if key_id:
- return get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
- # Or try alias as a backup
- return get_kms_metadata_with_backoff(connection, alias)['KeyMetadata']
-
- except connection.exceptions.NotFoundException:
- return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, 'Failed to fetch key metadata.')
-
-
-def update_policy_grants(connection, module, key_metadata, mode):
- iam = module.client('iam')
- key_id = key_metadata['Arn']
-
- if module.params.get('policy_role_name') and not module.params.get('policy_role_arn'):
- module.params['policy_role_arn'] = get_arn_from_role_name(iam, module.params['policy_role_name'])
- if not module.params.get('policy_role_arn'):
- module.fail_json(msg='policy_role_arn or policy_role_name is required to {0}'.format(module.params['policy_mode']))
-
- # check the grant types for 'grant' only.
- if mode == 'grant':
- for grant_type in module.params['policy_grant_types']:
- if grant_type not in statement_label:
- module.fail_json(msg='{0} is an unknown grant type.'.format(grant_type))
-
- return do_policy_grant(module, connection,
- key_id,
- module.params['policy_role_arn'],
- module.params['policy_grant_types'],
- mode=mode,
- dry_run=module.check_mode,
- clean_invalid_entries=module.params['policy_clean_invalid_entries'])
-
-
-def main():
- argument_spec = dict(
- alias=dict(aliases=['key_alias']),
- policy_mode=dict(aliases=['mode'], choices=['grant', 'deny'], default='grant'),
- policy_role_name=dict(aliases=['role_name']),
- policy_role_arn=dict(aliases=['role_arn']),
- policy_grant_types=dict(aliases=['grant_types'], type='list'),
- policy_clean_invalid_entries=dict(aliases=['clean_invalid_entries'], type='bool', default=True),
- key_id=dict(aliases=['key_arn']),
- description=dict(),
- enabled=dict(type='bool', default=True),
- tags=dict(type='dict', default={}),
- purge_tags=dict(type='bool', default=False),
- grants=dict(type='list', default=[]),
- policy=dict(),
- purge_grants=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent']),
- enable_key_rotation=dict(type='bool')
- )
-
- module = AnsibleAWSModule(
- supports_check_mode=True,
- argument_spec=argument_spec,
- required_one_of=[['alias', 'key_id']],
- )
-
- mode = module.params['policy_mode']
-
- kms = module.client('kms')
-
- key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'), module.params.get('alias'))
- # We can't create keys with a specific ID, if we can't access the key we'll have to fail
- if module.params.get('state') == 'present' and module.params.get('key_id') and not key_metadata:
- module.fail_json(msg="Could not find key with id %s to update")
-
- if module.params.get('policy_grant_types') or mode == 'deny':
- module.deprecate('Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile'
- ' and has been deprecated in favour of the policy option.', version='2.13')
- result = update_policy_grants(kms, module, key_metadata, mode)
- module.exit_json(**result)
-
- if module.params.get('state') == 'absent':
- if key_metadata is None:
- module.exit_json(changed=False)
- result = delete_key(kms, module, key_metadata)
- module.exit_json(**result)
-
- if key_metadata:
- key_details = get_key_details(kms, module, key_metadata['Arn'])
- result = update_key(kms, module, key_details)
- module.exit_json(**result)
-
- result = create_key(kms, module)
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_kms_info.py b/lib/ansible/modules/cloud/amazon/aws_kms_info.py
deleted file mode 100644
index e8988b45b1..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_kms_info.py
+++ /dev/null
@@ -1,433 +0,0 @@
-#!/usr/bin/python
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: aws_kms_info
-short_description: Gather information about AWS KMS keys
-description:
- - Gather information about AWS KMS keys including tags and grants
- - This module was called C(aws_kms_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.5"
-author: "Will Thames (@willthames)"
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- The filters aren't natively supported by boto3, but are supported to provide similar
- functionality to other modules. Standard tag filters (C(tag-key), C(tag-value) and
- C(tag:tagName)) are available, as are C(key-id) and C(alias)
- type: dict
- pending_deletion:
- description: Whether to get full details (tags, grants etc.) of keys pending deletion
- default: False
- type: bool
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all KMS keys
-- aws_kms_info:
-
-# Gather information about all keys with a Name tag
-- aws_kms_info:
- filters:
- tag-key: Name
-
-# Gather information about all keys with a specific name
-- aws_kms_info:
- filters:
- "tag:Name": Example
-'''
-
-RETURN = '''
-keys:
- description: list of keys
- type: complex
- returned: always
- contains:
- key_id:
- description: ID of key
- type: str
- returned: always
- sample: abcd1234-abcd-1234-5678-ef1234567890
- key_arn:
- description: ARN of key
- type: str
- returned: always
- sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
- key_state:
- description: The state of the key
- type: str
- returned: always
- sample: PendingDeletion
- key_usage:
- description: The cryptographic operations for which you can use the key.
- type: str
- returned: always
- sample: ENCRYPT_DECRYPT
- origin:
- description:
- The source of the key's key material. When this value is C(AWS_KMS),
- AWS KMS created the key material. When this value is C(EXTERNAL), the
- key material was imported or the CMK lacks key material.
- type: str
- returned: always
- sample: AWS_KMS
- aws_account_id:
- description: The AWS Account ID that the key belongs to
- type: str
- returned: always
- sample: 1234567890123
- creation_date:
- description: Date of creation of the key
- type: str
- returned: always
- sample: "2017-04-18T15:12:08.551000+10:00"
- description:
- description: Description of the key
- type: str
- returned: always
- sample: "My Key for Protecting important stuff"
- enabled:
- description: Whether the key is enabled. True if C(KeyState) is C(Enabled).
- type: bool
- returned: always
- sample: false
- enable_key_rotation:
- description: Whether automatic yearly key rotation is enabled.
- type: bool
- returned: always
- sample: false
- aliases:
- description: list of aliases associated with the key
- type: list
- returned: always
- sample:
- - aws/acm
- - aws/ebs
- tags:
- description: dictionary of tags applied to the key. Empty when access is denied even if there are tags.
- type: dict
- returned: always
- sample:
- Name: myKey
- Purpose: protecting_stuff
- policies:
- description: list of policy documents for the keys. Empty when access is denied even if there are policies.
- type: list
- returned: always
- sample:
- Version: "2012-10-17"
- Id: "auto-ebs-2"
- Statement:
- - Sid: "Allow access through EBS for all principals in the account that are authorized to use EBS"
- Effect: "Allow"
- Principal:
- AWS: "*"
- Action:
- - "kms:Encrypt"
- - "kms:Decrypt"
- - "kms:ReEncrypt*"
- - "kms:GenerateDataKey*"
- - "kms:CreateGrant"
- - "kms:DescribeKey"
- Resource: "*"
- Condition:
- StringEquals:
- kms:CallerAccount: "111111111111"
- kms:ViaService: "ec2.ap-southeast-2.amazonaws.com"
- - Sid: "Allow direct access to key metadata to the account"
- Effect: "Allow"
- Principal:
- AWS: "arn:aws:iam::111111111111:root"
- Action:
- - "kms:Describe*"
- - "kms:Get*"
- - "kms:List*"
- - "kms:RevokeGrant"
- Resource: "*"
- grants:
- description: list of grants associated with a key
- type: complex
- returned: always
- contains:
- constraints:
- description: Constraints on the encryption context that the grant allows.
- See U(https://docs.aws.amazon.com/kms/latest/APIReference/API_GrantConstraints.html) for further details
- type: dict
- returned: always
- sample:
- encryption_context_equals:
- "aws:lambda:_function_arn": "arn:aws:lambda:ap-southeast-2:012345678912:function:xyz"
- creation_date:
- description: Date of creation of the grant
- type: str
- returned: always
- sample: "2017-04-18T15:12:08+10:00"
- grant_id:
- description: The unique ID for the grant
- type: str
- returned: always
- sample: abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234abcd1234
- grantee_principal:
- description: The principal that receives the grant's permissions
- type: str
- returned: always
- sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
- issuing_account:
- description: The AWS account under which the grant was issued
- type: str
- returned: always
- sample: arn:aws:iam::01234567890:root
- key_id:
- description: The key ARN to which the grant applies.
- type: str
- returned: always
- sample: arn:aws:kms:ap-southeast-2:123456789012:key/abcd1234-abcd-1234-5678-ef1234567890
- name:
- description: The friendly name that identifies the grant
- type: str
- returned: always
- sample: xyz
- operations:
- description: The list of operations permitted by the grant
- type: list
- returned: always
- sample:
- - Decrypt
- - RetireGrant
- retiring_principal:
- description: The principal that can retire the grant
- type: str
- returned: always
- sample: arn:aws:sts::0123456789012:assumed-role/lambda_xyz/xyz
-'''
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
-from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, HAS_BOTO3
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-
-import traceback
-
-try:
- import botocore
-except ImportError:
- pass # caught by imported HAS_BOTO3
-
-# Caching lookup for aliases
-_aliases = dict()
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_kms_keys_with_backoff(connection):
- paginator = connection.get_paginator('list_keys')
- return paginator.paginate().build_full_result()
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_kms_aliases_with_backoff(connection):
- paginator = connection.get_paginator('list_aliases')
- return paginator.paginate().build_full_result()
-
-
-def get_kms_aliases_lookup(connection):
- if not _aliases:
- for alias in get_kms_aliases_with_backoff(connection)['Aliases']:
- # Not all aliases are actually associated with a key
- if 'TargetKeyId' in alias:
- # strip off leading 'alias/' and add it to key's aliases
- if alias['TargetKeyId'] in _aliases:
- _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:])
- else:
- _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]]
- return _aliases
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_kms_tags_with_backoff(connection, key_id, **kwargs):
- return connection.list_resource_tags(KeyId=key_id, **kwargs)
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_kms_grants_with_backoff(connection, key_id, **kwargs):
- params = dict(KeyId=key_id)
- if kwargs.get('tokens'):
- params['GrantTokens'] = kwargs['tokens']
- paginator = connection.get_paginator('list_grants')
- return paginator.paginate(**params).build_full_result()
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_kms_metadata_with_backoff(connection, key_id):
- return connection.describe_key(KeyId=key_id)
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def list_key_policies_with_backoff(connection, key_id):
- paginator = connection.get_paginator('list_key_policies')
- return paginator.paginate(KeyId=key_id).build_full_result()
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_key_policy_with_backoff(connection, key_id, policy_name):
- return connection.get_key_policy(KeyId=key_id, PolicyName=policy_name)
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def get_enable_key_rotation_with_backoff(connection, key_id):
- current_rotation_status = connection.get_key_rotation_status(KeyId=key_id)
- return current_rotation_status.get('KeyRotationEnabled')
-
-
-def get_kms_tags(connection, module, key_id):
- # Handle pagination here as list_resource_tags does not have
- # a paginator
- kwargs = {}
- tags = []
- more = True
- while more:
- try:
- tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
- tags.extend(tag_response['Tags'])
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] != 'AccessDeniedException':
- module.fail_json(msg="Failed to obtain key tags",
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- else:
- tag_response = {}
- if tag_response.get('NextMarker'):
- kwargs['Marker'] = tag_response['NextMarker']
- else:
- more = False
- return tags
-
-
-def get_kms_policies(connection, module, key_id):
- try:
- policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
- return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for
- policy in policies]
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] != 'AccessDeniedException':
- module.fail_json(msg="Failed to obtain key policies",
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- else:
- return []
-
-
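-# Note: a filter name matching none of the branches below makes this
-# function return None, which all() treats as False, so keys are excluded
-# by unrecognised filter names.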
-def key_matches_filter(key, filtr):
- if filtr[0] == 'key-id':
- return filtr[1] == key['key_id']
- if filtr[0] == 'tag-key':
- return filtr[1] in key['tags']
- if filtr[0] == 'tag-value':
- return filtr[1] in key['tags'].values()
- if filtr[0] == 'alias':
- return filtr[1] in key['aliases']
- if filtr[0].startswith('tag:'):
- return key['tags'].get(filtr[0][4:]) == filtr[1]
-
-
-def key_matches_filters(key, filters):
- if not filters:
- return True
- else:
- return all([key_matches_filter(key, filtr) for filtr in filters.items()])
-
-
-def get_key_details(connection, module, key_id, tokens=None):
- if not tokens:
- tokens = []
- try:
- result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata']
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to obtain key metadata",
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- result['KeyArn'] = result.pop('Arn')
-
- try:
- aliases = get_kms_aliases_lookup(connection)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to obtain aliases",
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- result['aliases'] = aliases.get(result['KeyId'], [])
- result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id)
-
- if module.params.get('pending_deletion'):
- return camel_dict_to_snake_dict(result)
-
- try:
- result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants']
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to obtain key grants",
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- tags = get_kms_tags(connection, module, key_id)
-
- result = camel_dict_to_snake_dict(result)
- result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue')
- result['policies'] = get_kms_policies(connection, module, key_id)
- return result
-
-
-def get_kms_info(connection, module):
- try:
- keys = get_kms_keys_with_backoff(connection)['Keys']
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to obtain keys",
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- return [get_key_details(connection, module, key['KeyId']) for key in keys]
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- filters=dict(type='dict'),
- pending_deletion=dict(type='bool', default=False)
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
- if module._name == 'aws_kms_facts':
- module.deprecate("The 'aws_kms_facts' module has been renamed to 'aws_kms_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 and botocore are required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-
- if region:
- connection = boto3_conn(module, conn_type='client', resource='kms', region=region, endpoint=ec2_url, **aws_connect_params)
- else:
- module.fail_json(msg="region must be specified")
-
- all_keys = get_kms_info(connection, module)
- module.exit_json(keys=[key for key in all_keys if key_matches_filters(key, module.params['filters'])])
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_region_info.py b/lib/ansible/modules/cloud/amazon/aws_region_info.py
deleted file mode 100644
index 8e1ae21681..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_region_info.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'supported_by': 'community',
- 'status': ['preview']
-}
-
-DOCUMENTATION = '''
-module: aws_region_info
-short_description: Gather information about AWS regions.
-description:
- - Gather information about AWS regions.
- - This module was called C(aws_region_facts) before Ansible 2.9. The usage did not change.
-version_added: '2.5'
-author: 'Henrique Rodrigues (@Sodki)'
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
- U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for
- possible filters. Filter names and values are case sensitive. You can also use underscores
- instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
- default: {}
- type: dict
-extends_documentation_fragment:
- - aws
- - ec2
-requirements: [botocore, boto3]
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all regions
-- aws_region_info:
-
-# Gather information about a single region
-- aws_region_info:
- filters:
- region-name: eu-west-1
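-
-# Filter keys may also use underscores instead of dashes (equivalent to the above)
-- aws_region_info:
-    filters:
-      region_name: eu-west-1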
-'''
-
-RETURN = '''
-regions:
- returned: on success
- description: >
- Regions that match the provided filters. Each element consists of a dict with all the information related
- to that region.
- type: list
- sample: "[{
- 'endpoint': 'ec2.us-west-1.amazonaws.com',
- 'region_name': 'us-west-1'
- }]"
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-
-def main():
- argument_spec = dict(
- filters=dict(default={}, type='dict')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
- if module._name == 'aws_region_facts':
- module.deprecate("The 'aws_region_facts' module has been renamed to 'aws_region_info'", version='2.13')
-
- connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
-
- # Replace filter key underscores with dashes, for compatibility
- sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items())
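-    # e.g. a hypothetical {'region_name': 'eu-west-1'} becomes {'region-name': 'eu-west-1'}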
-
- try:
- regions = connection.describe_regions(
- Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
- )
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to describe regions.")
-
- module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions['Regions']])
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_s3_bucket_info.py b/lib/ansible/modules/cloud/amazon/aws_s3_bucket_info.py
deleted file mode 100644
index 8b5c63f9f1..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_s3_bucket_info.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: aws_s3_bucket_info
-short_description: Lists S3 buckets in AWS
-requirements:
- - boto3 >= 1.4.4
- - python >= 2.6
-description:
- - Lists S3 buckets in AWS
- - This module was called C(aws_s3_bucket_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(aws_s3_bucket_info) module no longer returns C(ansible_facts)!
-version_added: "2.4"
-author: "Gerben Geijteman (@hyperized)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Note: Only AWS S3 is currently supported
-
-# List all S3 buckets
-- aws_s3_bucket_info:
- register: result
-
-- name: List buckets
- debug:
- msg: "{{ result['buckets'] }}"
-'''
-
-RETURN = '''
-buckets:
- description: "List of buckets"
- returned: always
- sample:
- - creation_date: 2017-07-06 15:05:12 +00:00
- name: my_bucket
- type: list
-'''
-
-import traceback
-
-try:
- import botocore
-except ImportError:
- pass # will be detected by imported HAS_BOTO3
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_native
-from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, HAS_BOTO3, camel_dict_to_snake_dict,
- get_aws_connection_info)
-
-
-def get_bucket_list(module, connection):
- """
- Return result of list_buckets json encoded
- :param module:
- :param connection:
- :return:
- """
- try:
- buckets = camel_dict_to_snake_dict(connection.list_buckets())['buckets']
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
- return buckets
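-
-# Illustrative shape of one entry in the returned list (values assumed):
-#   {'name': 'my_bucket', 'creation_date': datetime.datetime(2017, 7, 6, 15, 5, 12, tzinfo=tzutc())}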
-
-
-def main():
- """
- Get list of S3 buckets
- :return:
- """
-
- # Ensure we have an empty dict
- result = {}
-
- # Including ec2 argument spec
- module = AnsibleModule(argument_spec=ec2_argument_spec(), supports_check_mode=True)
- is_old_facts = module._name == 'aws_s3_bucket_facts'
- if is_old_facts:
- module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', "
- "and the renamed one no longer returns ansible_facts", version='2.13')
-
- # Verify Boto3 is used
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- # Set up connection
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=HAS_BOTO3)
- connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url,
- **aws_connect_params)
-
- # Gather results
- result['buckets'] = get_bucket_list(module, connection)
-
- # Send exit
- if is_old_facts:
- module.exit_json(msg="Retrieved s3 facts.", ansible_facts=result)
- else:
- module.exit_json(msg="Retrieved s3 info.", **result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_s3_cors.py b/lib/ansible/modules/cloud/amazon/aws_s3_cors.py
deleted file mode 100644
index 451fdd9e3e..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_s3_cors.py
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: aws_s3_cors
-short_description: Manage CORS for S3 buckets in AWS
-description:
- - Manage CORS for S3 buckets in AWS
-version_added: "2.5"
-author: "Oyvind Saltvik (@fivethreeo)"
-options:
- name:
- description:
-      - Name of the S3 bucket.
- required: true
- type: str
- rules:
- description:
-      - CORS rules to put on the S3 bucket.
- type: list
- state:
- description:
-      - Create or remove CORS rules on the S3 bucket.
- required: true
- choices: [ 'present', 'absent' ]
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create a simple CORS configuration for an S3 bucket
-- aws_s3_cors:
- name: mys3bucket
- state: present
- rules:
- - allowed_origins:
- - http://www.example.com/
- allowed_methods:
- - GET
- - POST
- allowed_headers:
- - Authorization
- expose_headers:
- - x-amz-server-side-encryption
- - x-amz-request-id
- max_age_seconds: 30000
-
-# Remove the CORS configuration from an S3 bucket
-- aws_s3_cors:
- name: mys3bucket
- state: absent
-'''
-
-RETURN = '''
-changed:
-  description: Whether a change was made to the rules.
- returned: always
- type: bool
- sample: true
-name:
-  description: Name of the bucket.
- returned: always
- type: str
- sample: 'bucket-name'
-rules:
-  description: List of current rules.
- returned: always
- type: list
- sample: [
- {
- "allowed_headers": [
- "Authorization"
- ],
- "allowed_methods": [
- "GET"
- ],
- "allowed_origins": [
- "*"
- ],
- "max_age_seconds": 30000
- }
- ]
-'''
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except Exception:
- pass # Handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import snake_dict_to_camel_dict, compare_policies
-
-
-def create_or_update_bucket_cors(connection, module):
-
- name = module.params.get("name")
- rules = module.params.get("rules", [])
- changed = False
-
- try:
- current_camel_rules = connection.get_bucket_cors(Bucket=name)['CORSRules']
- except ClientError:
- current_camel_rules = []
-
- new_camel_rules = snake_dict_to_camel_dict(rules, capitalize_first=True)
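-    # e.g. a hypothetical [{'allowed_methods': ['GET']}] becomes [{'AllowedMethods': ['GET']}]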
-    # compare_policies() returns True when the two policies differ
- if compare_policies(new_camel_rules, current_camel_rules):
- changed = True
-
- if changed:
- try:
-            connection.put_bucket_cors(Bucket=name, CORSConfiguration={'CORSRules': new_camel_rules})
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name))
-
- module.exit_json(changed=changed, name=name, rules=rules)
-
-
-def destroy_bucket_cors(connection, module):
-
- name = module.params.get("name")
- changed = False
-
- try:
-        connection.delete_bucket_cors(Bucket=name)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to delete CORS for bucket {0}".format(name))
-
- module.exit_json(changed=changed)
-
-
-def main():
-
- argument_spec = dict(
- name=dict(required=True, type='str'),
- rules=dict(type='list'),
- state=dict(type='str', choices=['present', 'absent'], required=True)
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
-
- client = module.client('s3')
-
- state = module.params.get("state")
-
- if state == 'present':
- create_or_update_bucket_cors(client, module)
- elif state == 'absent':
- destroy_bucket_cors(client, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_secret.py b/lib/ansible/modules/cloud/amazon/aws_secret.py
deleted file mode 100644
index 91a0bef471..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_secret.py
+++ /dev/null
@@ -1,404 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2018, REY Remi
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = r'''
----
-module: aws_secret
-short_description: Manage secrets stored in AWS Secrets Manager.
-description:
- - Create, update, and delete secrets stored in AWS Secrets Manager.
-author: "REY Remi (@rrey)"
-version_added: "2.8"
-requirements: [ 'botocore>=1.10.0', 'boto3' ]
-options:
- name:
- description:
- - Friendly name for the secret you are creating.
- required: true
- type: str
- state:
- description:
-      - Whether the secret should exist or not.
- default: 'present'
- choices: ['present', 'absent']
- type: str
- recovery_window:
- description:
- - Only used if state is absent.
- - Specifies the number of days that Secrets Manager waits before it can delete the secret.
- - If set to 0, the deletion is forced without recovery.
- default: 30
- type: int
- description:
- description:
- - Specifies a user-provided description of the secret.
- type: str
- kms_key_id:
- description:
-      - Specifies the ARN or alias of the AWS KMS customer master key (CMK) to be
-        used to encrypt the I(secret) value in the versions stored in this secret.
- type: str
- secret_type:
- description:
- - Specifies the type of data that you want to encrypt.
- choices: ['binary', 'string']
- default: 'string'
- type: str
- secret:
- description:
- - Specifies string or binary data that you want to encrypt and store in the new version of the secret.
- default: ""
- type: str
- tags:
- description:
-      - Specifies a dict of user-defined tags that are attached to the secret.
- type: dict
- rotation_lambda:
- description:
- - Specifies the ARN of the Lambda function that can rotate the secret.
- type: str
- rotation_interval:
- description:
- - Specifies the number of days between automatic scheduled rotations of the secret.
- default: 30
- type: int
-extends_documentation_fragment:
- - ec2
- - aws
-'''
-
-
-EXAMPLES = r'''
-- name: Add string to AWS Secrets Manager
- aws_secret:
- name: 'test_secret_string'
- state: present
- secret_type: 'string'
- secret: "{{ super_secret_string }}"
-
-- name: Remove string from AWS Secrets Manager
- aws_secret:
- name: 'test_secret_string'
- state: absent
- secret_type: 'string'
- secret: "{{ super_secret_string }}"
-'''
-
-
-RETURN = r'''
-secret:
- description: The secret information
- returned: always
- type: complex
- contains:
- arn:
- description: The ARN of the secret
- returned: always
- type: str
- sample: arn:aws:secretsmanager:eu-west-1:xxxxxxxxxx:secret:xxxxxxxxxxx
- last_accessed_date:
- description: The date the secret was last accessed
- returned: always
- type: str
- sample: '2018-11-20T01:00:00+01:00'
- last_changed_date:
- description: The date the secret was last modified.
- returned: always
- type: str
- sample: '2018-11-20T12:16:38.433000+01:00'
- name:
- description: The secret name.
- returned: always
- type: str
- sample: my_secret
- rotation_enabled:
- description: The secret rotation status.
- returned: always
- type: bool
- sample: false
- version_ids_to_stages:
-      description: Maps the secret version IDs to the associated secret stages.
- returned: always
- type: dict
- sample: { "dc1ed59b-6d8e-4450-8b41-536dfe4600a9": [ "AWSCURRENT" ] }
-'''
-
-from ansible.module_utils._text import to_bytes
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-
-class Secret(object):
- """An object representation of the Secret described by the self.module args"""
- def __init__(self, name, secret_type, secret, description="", kms_key_id=None,
- tags=None, lambda_arn=None, rotation_interval=None):
- self.name = name
- self.description = description
- self.kms_key_id = kms_key_id
- if secret_type == "binary":
- self.secret_type = "SecretBinary"
- else:
- self.secret_type = "SecretString"
- self.secret = secret
- self.tags = tags or {}
- self.rotation_enabled = False
- if lambda_arn:
- self.rotation_enabled = True
- self.rotation_lambda_arn = lambda_arn
- self.rotation_rules = {"AutomaticallyAfterDays": int(rotation_interval)}
-
- @property
- def create_args(self):
- args = {
- "Name": self.name
- }
- if self.description:
- args["Description"] = self.description
- if self.kms_key_id:
- args["KmsKeyId"] = self.kms_key_id
- if self.tags:
- args["Tags"] = ansible_dict_to_boto3_tag_list(self.tags)
- args[self.secret_type] = self.secret
- return args
-
- @property
- def update_args(self):
- args = {
- "SecretId": self.name
- }
- if self.description:
- args["Description"] = self.description
- if self.kms_key_id:
- args["KmsKeyId"] = self.kms_key_id
- args[self.secret_type] = self.secret
- return args
-
- @property
- def boto3_tags(self):
-        return ansible_dict_to_boto3_tag_list(self.tags)
-
- def as_dict(self):
-        result = self.__dict__.copy()
- result.pop("tags")
- return snake_dict_to_camel_dict(result)
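-
-    # For illustration (hypothetical values), create_args for a simple string secret:
-    #   Secret('db-pass', 'string', 's3cr3t', description='DB password').create_args
-    #   -> {'Name': 'db-pass', 'Description': 'DB password', 'SecretString': 's3cr3t'}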
-
-
-class SecretsManagerInterface(object):
- """An interface with SecretsManager"""
-
- def __init__(self, module):
- self.module = module
- self.client = self.module.client('secretsmanager')
-
- def get_secret(self, name):
- try:
- secret = self.client.describe_secret(SecretId=name)
- except self.client.exceptions.ResourceNotFoundException:
- secret = None
- except Exception as e:
- self.module.fail_json_aws(e, msg="Failed to describe secret")
- return secret
-
- def create_secret(self, secret):
- if self.module.check_mode:
- self.module.exit_json(changed=True)
- try:
- created_secret = self.client.create_secret(**secret.create_args)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Failed to create secret")
-
- if secret.rotation_enabled:
- response = self.update_rotation(secret)
- created_secret["VersionId"] = response.get("VersionId")
- return created_secret
-
- def update_secret(self, secret):
- if self.module.check_mode:
- self.module.exit_json(changed=True)
-
- try:
- response = self.client.update_secret(**secret.update_args)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Failed to update secret")
- return response
-
- def restore_secret(self, name):
- if self.module.check_mode:
- self.module.exit_json(changed=True)
- try:
- response = self.client.restore_secret(SecretId=name)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Failed to restore secret")
- return response
-
- def delete_secret(self, name, recovery_window):
- if self.module.check_mode:
- self.module.exit_json(changed=True)
- try:
- if recovery_window == 0:
- response = self.client.delete_secret(SecretId=name, ForceDeleteWithoutRecovery=True)
- else:
- response = self.client.delete_secret(SecretId=name, RecoveryWindowInDays=recovery_window)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Failed to delete secret")
- return response
-
- def update_rotation(self, secret):
- if secret.rotation_enabled:
- try:
- response = self.client.rotate_secret(
- SecretId=secret.name,
- RotationLambdaARN=secret.rotation_lambda_arn,
- RotationRules=secret.rotation_rules)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Failed to rotate secret secret")
- else:
- try:
- response = self.client.cancel_rotate_secret(SecretId=secret.name)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Failed to cancel rotation")
- return response
-
- def tag_secret(self, secret_name, tags):
- try:
- self.client.tag_resource(SecretId=secret_name, Tags=tags)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Failed to add tag(s) to secret")
-
- def untag_secret(self, secret_name, tag_keys):
- try:
- self.client.untag_resource(SecretId=secret_name, TagKeys=tag_keys)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Failed to remove tag(s) from secret")
-
- def secrets_match(self, desired_secret, current_secret):
- """Compare secrets except tags and rotation
-
- Args:
-            desired_secret: Secret object representing the desired secret state.
- current_secret: secret reference as returned by the secretsmanager api.
-
- Returns: bool
- """
- if desired_secret.description != current_secret.get("Description", ""):
- return False
- if desired_secret.kms_key_id != current_secret.get("KmsKeyId"):
- return False
- current_secret_value = self.client.get_secret_value(SecretId=current_secret.get("Name"))
- if desired_secret.secret_type == 'SecretBinary':
- desired_value = to_bytes(desired_secret.secret)
- else:
- desired_value = desired_secret.secret
- if desired_value != current_secret_value.get(desired_secret.secret_type):
- return False
- return True
-
-
-def rotation_match(desired_secret, current_secret):
- """Compare secrets rotation configuration
-
- Args:
-        desired_secret: Secret object representing the desired secret state.
- current_secret: secret reference as returned by the secretsmanager api.
-
- Returns: bool
- """
- if desired_secret.rotation_enabled != current_secret.get("RotationEnabled", False):
- return False
- if desired_secret.rotation_enabled:
- if desired_secret.rotation_lambda_arn != current_secret.get("RotationLambdaARN"):
- return False
- if desired_secret.rotation_rules != current_secret.get("RotationRules"):
- return False
- return True
-
-
-def main():
- module = AnsibleAWSModule(
- argument_spec={
- 'name': dict(required=True),
- 'state': dict(choices=['present', 'absent'], default='present'),
- 'description': dict(default=""),
- 'kms_key_id': dict(),
- 'secret_type': dict(choices=['binary', 'string'], default="string"),
- 'secret': dict(default=""),
- 'tags': dict(type='dict', default={}),
- 'rotation_lambda': dict(),
- 'rotation_interval': dict(type='int', default=30),
- 'recovery_window': dict(type='int', default=30),
- },
- supports_check_mode=True,
- )
-
- changed = False
- state = module.params.get('state')
- secrets_mgr = SecretsManagerInterface(module)
- recovery_window = module.params.get('recovery_window')
- secret = Secret(
- module.params.get('name'),
- module.params.get('secret_type'),
- module.params.get('secret'),
- description=module.params.get('description'),
- kms_key_id=module.params.get('kms_key_id'),
- tags=module.params.get('tags'),
- lambda_arn=module.params.get('rotation_lambda'),
- rotation_interval=module.params.get('rotation_interval')
- )
-
- current_secret = secrets_mgr.get_secret(secret.name)
-
- if state == 'absent':
- if current_secret:
- if not current_secret.get("DeletedDate"):
- result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window))
- changed = True
- elif current_secret.get("DeletedDate") and recovery_window == 0:
- result = camel_dict_to_snake_dict(secrets_mgr.delete_secret(secret.name, recovery_window=recovery_window))
- changed = True
- else:
- result = "secret does not exist"
- if state == 'present':
- if current_secret is None:
- result = secrets_mgr.create_secret(secret)
- changed = True
- else:
- if current_secret.get("DeletedDate"):
- secrets_mgr.restore_secret(secret.name)
- changed = True
- if not secrets_mgr.secrets_match(secret, current_secret):
- result = secrets_mgr.update_secret(secret)
- changed = True
- if not rotation_match(secret, current_secret):
- result = secrets_mgr.update_rotation(secret)
- changed = True
- current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', []))
- tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags)
- if tags_to_add:
- secrets_mgr.tag_secret(secret.name, ansible_dict_to_boto3_tag_list(tags_to_add))
- changed = True
- if tags_to_remove:
- secrets_mgr.untag_secret(secret.name, tags_to_remove)
- changed = True
- result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name))
- result.pop("response_metadata")
- module.exit_json(changed=changed, secret=result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_ses_identity.py b/lib/ansible/modules/cloud/amazon/aws_ses_identity.py
deleted file mode 100644
index 50ffef74de..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_ses_identity.py
+++ /dev/null
@@ -1,546 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = '''
----
-module: aws_ses_identity
-short_description: Manages SES email and domain identity
-description:
- - This module allows the user to manage verified email and domain identity for SES.
- - This covers verifying and removing identities as well as setting up complaint, bounce
- and delivery notification settings.
-version_added: "2.5"
-author: Ed Costello (@orthanc)
-
-options:
- identity:
- description:
- - This is the email address or domain to verify / delete.
- - If this contains an '@' then it will be considered an email. Otherwise it will be considered a domain.
- required: true
- type: str
- state:
-    description: Whether to create (or update) or delete the identity.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- bounce_notifications:
- description:
-      - Set up the SNS topic used to report bounce notifications.
-      - If omitted, bounce notifications will not be delivered to an SNS topic.
-      - If bounce notifications are not delivered to an SNS topic, I(feedback_forwarding) must be enabled.
- suboptions:
- topic:
- description:
- - The ARN of the topic to send notifications to.
-          - If omitted, notifications will not be delivered to an SNS topic.
- include_headers:
- description:
- - Whether or not to include headers when delivering to the SNS topic.
- - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
- type: bool
- default: No
- type: dict
- complaint_notifications:
- description:
-      - Set up the SNS topic used to report complaint notifications.
-      - If omitted, complaint notifications will not be delivered to an SNS topic.
-      - If complaint notifications are not delivered to an SNS topic, I(feedback_forwarding) must be enabled.
- suboptions:
- topic:
- description:
- - The ARN of the topic to send notifications to.
-          - If omitted, notifications will not be delivered to an SNS topic.
- include_headers:
- description:
- - Whether or not to include headers when delivering to the SNS topic.
- - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
- type: bool
- default: No
- type: dict
- delivery_notifications:
- description:
-      - Set up the SNS topic used to report delivery notifications.
-      - If omitted, delivery notifications will not be delivered to an SNS topic.
- suboptions:
- topic:
- description:
- - The ARN of the topic to send notifications to.
-          - If omitted, notifications will not be delivered to an SNS topic.
- include_headers:
- description:
- - Whether or not to include headers when delivering to the SNS topic.
- - If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
- type: bool
- default: No
- type: dict
- feedback_forwarding:
- description:
- - Whether or not to enable feedback forwarding.
- - This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics.
- type: 'bool'
- default: True
-requirements: [ 'botocore', 'boto3' ]
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Ensure example@example.com email identity exists
- aws_ses_identity:
- identity: example@example.com
- state: present
-
-- name: Delete example@example.com email identity
- aws_ses_identity:
-    identity: example@example.com
- state: absent
-
-- name: Ensure example.com domain identity exists
- aws_ses_identity:
- identity: example.com
- state: present
-
-# Create an SNS topic and send bounce and complaint notifications to it
-# instead of emailing the identity owner
-- name: Ensure complaints-topic exists
- sns_topic:
- name: "complaints-topic"
- state: present
- purge_subscriptions: False
- register: topic_info
-
-- name: Deliver feedback to topic instead of owner email
- aws_ses_identity:
- identity: example@example.com
- state: present
- complaint_notifications:
- topic: "{{ topic_info.sns_arn }}"
- include_headers: True
- bounce_notifications:
- topic: "{{ topic_info.sns_arn }}"
- include_headers: False
- feedback_forwarding: False
-
-# Create an SNS topic for delivery notifications and leave complaints
-# being forwarded to the identity owner email
-- name: Ensure delivery-notifications-topic exists
- sns_topic:
- name: "delivery-notifications-topic"
- state: present
- purge_subscriptions: False
- register: topic_info
-
-- name: Deliver notifications to topic
- aws_ses_identity:
- identity: example@example.com
- state: present
- delivery_notifications:
- topic: "{{ topic_info.sns_arn }}"
-'''
-
-RETURN = '''
-identity:
- description: The identity being modified.
- returned: success
- type: str
- sample: example@example.com
-identity_arn:
- description: The arn of the identity being modified.
- returned: success
- type: str
- sample: arn:aws:ses:us-east-1:12345678:identity/example@example.com
-verification_attributes:
- description: The verification information for the identity.
- returned: success
- type: complex
- sample: {
- "verification_status": "Pending",
- "verification_token": "...."
- }
- contains:
- verification_status:
- description: The verification status of the identity.
- type: str
- sample: "Pending"
- verification_token:
- description: The verification token for a domain identity.
- type: str
-notification_attributes:
- description: The notification setup for the identity.
- returned: success
- type: complex
- sample: {
- "bounce_topic": "arn:aws:sns:....",
- "complaint_topic": "arn:aws:sns:....",
- "delivery_topic": "arn:aws:sns:....",
- "forwarding_enabled": false,
- "headers_in_bounce_notifications_enabled": true,
- "headers_in_complaint_notifications_enabled": true,
- "headers_in_delivery_notifications_enabled": true
- }
- contains:
- bounce_topic:
- description:
- - The ARN of the topic bounce notifications are delivered to.
- - Omitted if bounce notifications are not delivered to a topic.
- type: str
- complaint_topic:
- description:
- - The ARN of the topic complaint notifications are delivered to.
- - Omitted if complaint notifications are not delivered to a topic.
- type: str
- delivery_topic:
- description:
- - The ARN of the topic delivery notifications are delivered to.
- - Omitted if delivery notifications are not delivered to a topic.
- type: str
- forwarding_enabled:
- description: Whether or not feedback forwarding is enabled.
- type: bool
- headers_in_bounce_notifications_enabled:
- description: Whether or not headers are included in messages delivered to the bounce topic.
- type: bool
- headers_in_complaint_notifications_enabled:
- description: Whether or not headers are included in messages delivered to the complaint topic.
- type: bool
- headers_in_delivery_notifications_enabled:
- description: Whether or not headers are included in messages delivered to the delivery topic.
- type: bool
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, get_aws_connection_info
-
-import time
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def get_verification_attributes(connection, module, identity, retries=0, retry_delay=10):
-    # Unpredictably, get_identity_verification_attributes doesn't include the identity even when we've
-    # just registered it. Suspect this is an eventual consistency issue on the AWS side.
-    # We don't want this complexity exposed to users of the module, as they'd have to retry to ensure
-    # a consistent return from the module.
-    # To avoid this we have an internal retry that we use only after registering the identity.
- for attempt in range(0, retries + 1):
- try:
- response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to retrieve identity verification attributes for {identity}'.format(identity=identity))
- identity_verification = response['VerificationAttributes']
- if identity in identity_verification:
- break
-        time.sleep(retry_delay)
- if identity not in identity_verification:
- return None
- return identity_verification[identity]
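-
-# Illustrative verification attributes entry as returned above (token value assumed):
-#   {'VerificationStatus': 'Pending', 'VerificationToken': '....'}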
-
-
-def get_identity_notifications(connection, module, identity, retries=0, retry_delay=10):
-    # Unpredictably, get_identity_notifications doesn't include the notifications when we've
-    # just registered the identity.
-    # We don't want this complexity exposed to users of the module, as they'd have to retry to ensure
-    # a consistent return from the module.
-    # To avoid this we have an internal retry that we use only when getting the current notification
-    # status for return.
- for attempt in range(0, retries + 1):
- try:
- response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to retrieve identity notification attributes for {identity}'.format(identity=identity))
- notification_attributes = response['NotificationAttributes']
-
-        # No clear AWS docs on when this happens, but it appears sometimes identities are not included
-        # in the notification attributes when the identity is first registered. Suspect that this is caused by
-        # eventual consistency within the AWS services. It's been observed in builds, so we need to handle it.
- #
- # When this occurs, just return None and we'll assume no identity notification settings have been changed
- # from the default which is reasonable if this is just eventual consistency on creation.
- # See: https://github.com/ansible/ansible/issues/36065
- if identity in notification_attributes:
- break
- else:
- # Paranoia check for coding errors, we only requested one identity, so if we get a different one
- # something has gone very wrong.
- if len(notification_attributes) != 0:
- module.fail_json(
- msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format(
- identity,
- notification_attributes.keys(),
- )
- )
-        time.sleep(retry_delay)
- if identity not in notification_attributes:
- return None
- return notification_attributes[identity]
-
-
-def desired_topic(module, notification_type):
- arg_dict = module.params.get(notification_type.lower() + '_notifications')
- if arg_dict:
- return arg_dict.get('topic', None)
- else:
- return None
-
-
-def update_notification_topic(connection, module, identity, identity_notifications, notification_type):
- topic_key = notification_type + 'Topic'
- if identity_notifications is None:
-        # If there is no notification configuration at all, notifications cannot currently
-        # be sent to topics, hence assume None as the current state.
- current = None
- elif topic_key in identity_notifications:
- current = identity_notifications[topic_key]
- else:
- # If there is information on the notifications setup but no information on the
- # particular notification topic it's pretty safe to assume there's no topic for
- # this notification. AWS API docs suggest this information will always be
- # included but best to be defensive
- current = None
-
- required = desired_topic(module, notification_type)
-
- if current != required:
- try:
- if not module.check_mode:
- connection.set_identity_notification_topic(Identity=identity, NotificationType=notification_type, SnsTopic=required, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to set identity notification topic for {identity} {notification_type}'.format(
- identity=identity,
- notification_type=notification_type,
- ))
- return True
- return False
-
-
-def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type):
- arg_dict = module.params.get(notification_type.lower() + '_notifications')
- header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
- if identity_notifications is None:
-        # If there is no notification configuration at all, headers cannot currently be
-        # included in notifications, hence assume false.
- current = False
- elif header_key in identity_notifications:
- current = identity_notifications[header_key]
- else:
-        # The AWS API docs indicate that the 'HeadersIn...' fields are optional. Unfortunately
-        # it's not clear what that means. But it's a pretty safe assumption that it means
-        # headers are not included, since most API consumers would interpret absence as false.
- current = False
-
- if arg_dict is not None and 'include_headers' in arg_dict:
- required = arg_dict['include_headers']
- else:
- required = False
-
- if current != required:
- try:
- if not module.check_mode:
- connection.set_identity_headers_in_notifications_enabled(Identity=identity, NotificationType=notification_type, Enabled=required,
- aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to set identity headers in notification for {identity} {notification_type}'.format(
- identity=identity,
- notification_type=notification_type,
- ))
- return True
- return False
-
-
-def update_feedback_forwarding(connection, module, identity, identity_notifications):
- if identity_notifications is None:
- # AWS requires feedback forwarding to be enabled unless bounces and complaints
- # are being handled by SNS topics. So in the absence of identity_notifications
- # information existing feedback forwarding must be on.
- current = True
- elif 'ForwardingEnabled' in identity_notifications:
- current = identity_notifications['ForwardingEnabled']
- else:
- # If there is information on the notifications setup but no information on the
- # forwarding state it's pretty safe to assume forwarding is off. AWS API docs
- # suggest this information will always be included but best to be defensive
- current = False
-
- required = module.params.get('feedback_forwarding')
-
- if current != required:
- try:
- if not module.check_mode:
- connection.set_identity_feedback_forwarding_enabled(Identity=identity, ForwardingEnabled=required, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to set identity feedback forwarding for {identity}'.format(identity=identity))
- return True
- return False
-
-
-def create_mock_notifications_response(module):
- resp = {
- "ForwardingEnabled": module.params.get('feedback_forwarding'),
- }
- for notification_type in ('Bounce', 'Complaint', 'Delivery'):
- arg_dict = module.params.get(notification_type.lower() + '_notifications')
- if arg_dict is not None and 'topic' in arg_dict:
- resp[notification_type + 'Topic'] = arg_dict['topic']
-
- header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
- if arg_dict is not None and 'include_headers' in arg_dict:
- resp[header_key] = arg_dict['include_headers']
- else:
- resp[header_key] = False
- return resp
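-
-# Illustrative mock response when only a bounce topic was requested and
-# feedback_forwarding left at its default (the ARN is a hypothetical placeholder):
-#   {'ForwardingEnabled': True,
-#    'BounceTopic': 'arn:aws:sns:us-east-1:123456789012:complaints-topic',
-#    'HeadersInBounceNotificationsEnabled': False,
-#    'HeadersInComplaintNotificationsEnabled': False,
-#    'HeadersInDeliveryNotificationsEnabled': False}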
-
-
-def update_identity_notifications(connection, module):
- identity = module.params.get('identity')
- changed = False
- identity_notifications = get_identity_notifications(connection, module, identity)
-
- for notification_type in ('Bounce', 'Complaint', 'Delivery'):
- changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type)
- changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type)
-
- changed |= update_feedback_forwarding(connection, module, identity, identity_notifications)
-
- if changed or identity_notifications is None:
- if module.check_mode:
- identity_notifications = create_mock_notifications_response(module)
- else:
- identity_notifications = get_identity_notifications(connection, module, identity, retries=4)
- return changed, identity_notifications
-
-
-def validate_params_for_identity_present(module):
- if module.params.get('feedback_forwarding') is False:
- if not (desired_topic(module, 'Bounce') and desired_topic(module, 'Complaint')):
- module.fail_json(msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires "
- "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics")
-
-
-def create_or_update_identity(connection, module, region, account_id):
- identity = module.params.get('identity')
- changed = False
- verification_attributes = get_verification_attributes(connection, module, identity)
- if verification_attributes is None:
- try:
- if not module.check_mode:
- if '@' in identity:
- connection.verify_email_identity(EmailAddress=identity, aws_retry=True)
- else:
- connection.verify_domain_identity(Domain=identity, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to verify identity {identity}'.format(identity=identity))
- if module.check_mode:
- verification_attributes = {
- "VerificationStatus": "Pending",
- }
- else:
- verification_attributes = get_verification_attributes(connection, module, identity, retries=4)
- changed = True
- elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'):
- module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'],
- verification_attributes=camel_dict_to_snake_dict(verification_attributes))
-
- if verification_attributes is None:
- module.fail_json(msg='Unable to load identity verification attributes after registering identity.')
-
- notifications_changed, notification_attributes = update_identity_notifications(connection, module)
- changed |= notifications_changed
-
- if notification_attributes is None:
- module.fail_json(msg='Unable to load identity notification attributes.')
-
- identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity
-
- module.exit_json(
- changed=changed,
- identity=identity,
- identity_arn=identity_arn,
- verification_attributes=camel_dict_to_snake_dict(verification_attributes),
- notification_attributes=camel_dict_to_snake_dict(notification_attributes),
- )
-
-
-def destroy_identity(connection, module):
- identity = module.params.get('identity')
- changed = False
- verification_attributes = get_verification_attributes(connection, module, identity)
- if verification_attributes is not None:
- try:
- if not module.check_mode:
- connection.delete_identity(Identity=identity, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to delete identity {identity}'.format(identity=identity))
- changed = True
-
- module.exit_json(
- changed=changed,
- identity=identity,
- )
-
-
-def get_account_id(module):
- sts = module.client('sts')
- try:
- caller_identity = sts.get_caller_identity()
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to retrieve caller identity')
- return caller_identity['Account']
-
-
-def main():
- module = AnsibleAWSModule(
- argument_spec={
- "identity": dict(required=True, type='str'),
- "state": dict(default='present', choices=['present', 'absent']),
- "bounce_notifications": dict(type='dict'),
- "complaint_notifications": dict(type='dict'),
- "delivery_notifications": dict(type='dict'),
- "feedback_forwarding": dict(default=True, type='bool'),
- },
- supports_check_mode=True,
- )
-
- for notification_type in ('bounce', 'complaint', 'delivery'):
- param_name = notification_type + '_notifications'
- arg_dict = module.params.get(param_name)
- if arg_dict:
- extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
- if extra_keys:
- module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers')
-
- # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
- # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
-    # the Ansible build runs multiple instances of the tests in parallel, which has caused throttling
-    # failures, so apply a jittered backoff to SES calls.
- connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
-
- state = module.params.get("state")
-
- if state == 'present':
- region = get_aws_connection_info(module, boto3=True)[0]
- account_id = get_account_id(module)
- validate_params_for_identity_present(module)
- create_or_update_identity(connection, module, region, account_id)
- else:
- destroy_identity(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_ses_identity_policy.py b/lib/ansible/modules/cloud/amazon/aws_ses_identity_policy.py
deleted file mode 100644
index 49e950e71c..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_ses_identity_policy.py
+++ /dev/null
@@ -1,201 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = '''
----
-module: aws_ses_identity_policy
-short_description: Manages SES sending authorization policies
-description:
- - This module allows the user to manage sending authorization policies associated with an SES identity (email or domain).
- - SES authorization sending policies can be used to control what actors are able to send email
- on behalf of the validated identity and what conditions must be met by the sent emails.
-version_added: "2.6"
-author: Ed Costello (@orthanc)
-
-options:
- identity:
- description: |
- The SES identity to attach or remove a policy from. This can be either the full ARN or just
- the verified email or domain.
- required: true
- type: str
- policy_name:
- description: The name used to identify the policy within the scope of the identity it's attached to.
- required: true
- type: str
- policy:
- description: A properly formatted JSON sending authorization policy. Required when I(state=present).
- type: json
- state:
-    description: Whether to create (or update) or delete the authorization policy on the identity.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
-requirements: [ 'botocore', 'boto3' ]
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: add sending authorization policy to domain identity
- aws_ses_identity_policy:
- identity: example.com
- policy_name: ExamplePolicy
- policy: "{{ lookup('template', 'policy.json.j2') }}"
- state: present
-
-- name: add sending authorization policy to email identity
- aws_ses_identity_policy:
- identity: example@example.com
- policy_name: ExamplePolicy
- policy: "{{ lookup('template', 'policy.json.j2') }}"
- state: present
-
-- name: add sending authorization policy to identity using ARN
- aws_ses_identity_policy:
- identity: "arn:aws:ses:us-east-1:12345678:identity/example.com"
- policy_name: ExamplePolicy
- policy: "{{ lookup('template', 'policy.json.j2') }}"
- state: present
-
-- name: remove sending authorization policy
- aws_ses_identity_policy:
- identity: example.com
- policy_name: ExamplePolicy
- state: absent
-'''
-
-RETURN = '''
-policies:
- description: A list of all policies present on the identity after the operation.
- returned: success
- type: list
- sample: [ExamplePolicy]
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import compare_policies, AWSRetry
-
-import json
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def get_identity_policy(connection, module, identity, policy_name):
- try:
- response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name))
- policies = response['Policies']
- if policy_name in policies:
- return policies[policy_name]
- return None
-
-
-def create_or_update_identity_policy(connection, module):
- identity = module.params.get('identity')
- policy_name = module.params.get('policy_name')
- required_policy = module.params.get('policy')
- required_policy_dict = json.loads(required_policy)
-
- changed = False
- policy = get_identity_policy(connection, module, identity, policy_name)
- policy_dict = json.loads(policy) if policy else None
- if compare_policies(policy_dict, required_policy_dict):
- changed = True
- try:
- if not module.check_mode:
- connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name))
-
- # Load the list of applied policies to include in the response.
- # In principle we should be able to just return the response, but given
- # eventual consistency behaviours in AWS it's plausible that we could
- # end up with a list that doesn't contain the policy we just added.
- # So out of paranoia check for this case and if we're missing the policy
- # just make sure it's present.
- #
- # As a nice side benefit this also means the return is correct in check mode
- try:
- policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to list identity policies')
- if policy_name is not None and policy_name not in policies_present:
- policies_present = list(policies_present)
- policies_present.append(policy_name)
- module.exit_json(
- changed=changed,
- policies=policies_present,
- )
-
-
-def delete_identity_policy(connection, module):
- identity = module.params.get('identity')
- policy_name = module.params.get('policy_name')
-
- changed = False
- try:
- policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to list identity policies')
- if policy_name in policies_present:
- try:
- if not module.check_mode:
- connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name))
- changed = True
- policies_present = list(policies_present)
- policies_present.remove(policy_name)
-
- module.exit_json(
- changed=changed,
- policies=policies_present,
- )
-
-
-def main():
- module = AnsibleAWSModule(
- argument_spec={
- 'identity': dict(required=True, type='str'),
- 'state': dict(default='present', choices=['present', 'absent']),
- 'policy_name': dict(required=True, type='str'),
- 'policy': dict(type='json', default=None),
- },
- required_if=[['state', 'present', ['policy']]],
- supports_check_mode=True,
- )
-
- # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
- # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
-    # the Ansible build runs multiple instances of the tests in parallel, which has caused throttling
-    # failures, so apply a jittered backoff to SES calls.
- connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
-
- state = module.params.get("state")
-
- if state == 'present':
- create_or_update_identity_policy(connection, module)
- else:
- delete_identity_policy(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_ses_rule_set.py b/lib/ansible/modules/cloud/amazon/aws_ses_rule_set.py
deleted file mode 100644
index a22a4136f6..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_ses_rule_set.py
+++ /dev/null
@@ -1,254 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017, Ben Tomasik <ben@tomasik.io>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: aws_ses_rule_set
-short_description: Manages SES inbound receipt rule sets
-description:
-    - The M(aws_ses_rule_set) module allows you to create, delete, and manage SES receipt rule sets.
-version_added: 2.8
-author:
- - "Ben Tomasik (@tomislacker)"
- - "Ed Costello (@orthanc)"
-requirements: [ boto3, botocore ]
-options:
- name:
- description:
- - The name of the receipt rule set.
- required: True
- type: str
- state:
- description:
- - Whether to create (or update) or destroy the receipt rule set.
- required: False
- default: present
- choices: ["absent", "present"]
- type: str
- active:
- description:
- - Whether or not this rule set should be the active rule set. Only has an impact if I(state) is C(present).
- - If omitted, the active rule set will not be changed.
- - If C(True) then this rule set will be made active and all others inactive.
-      - If C(False) then this rule set will be deactivated. Be careful with this, as you can end up with no active rule set.
- type: bool
- required: False
- force:
- description:
- - When deleting a rule set, deactivate it first (AWS prevents deletion of the active rule set).
- type: bool
- required: False
- default: False
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-EXAMPLES = """
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
----
-- name: Create default rule set and activate it if not already
- aws_ses_rule_set:
- name: default-rule-set
- state: present
- active: yes
-
-- name: Create some arbitrary rule set but do not activate it
- aws_ses_rule_set:
- name: arbitrary-rule-set
- state: present
-
-- name: Explicitly deactivate the default rule set leaving no active rule set
- aws_ses_rule_set:
- name: default-rule-set
- state: present
- active: no
-
-- name: Remove an arbitrary inactive rule set
- aws_ses_rule_set:
- name: arbitrary-rule-set
- state: absent
-
-- name: Remove a rule set even if we have to first deactivate it to remove it
- aws_ses_rule_set:
- name: default-rule-set
- state: absent
- force: yes
-"""
-
-RETURN = """
-active:
-  description: Whether the SES rule set is active.
- returned: success if I(state) is C(present)
- type: bool
- sample: true
-rule_sets:
- description: The list of SES receipt rule sets that exist after any changes.
- returned: success
- type: list
- sample: [{
- "created_timestamp": "2018-02-25T01:20:32.690000+00:00",
- "name": "default-rule-set"
- }]
-"""
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-
-def list_rule_sets(client, module):
- try:
- response = client.list_receipt_rule_sets(aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't list rule sets.")
- return response['RuleSets']
-
-
-def rule_set_in(name, rule_sets):
-    return any(s['Name'] == name for s in rule_sets)
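-
-# e.g. rule_set_in('default-rule-set', [{'Name': 'default-rule-set'}]) -> True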
-
-
-def ruleset_active(client, module, name):
- try:
- active_rule_set = client.describe_active_receipt_rule_set(aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't get the active rule set.")
- if active_rule_set is not None and 'Metadata' in active_rule_set:
- return name == active_rule_set['Metadata']['Name']
- else:
- # Metadata was not set meaning there is no active rule set
- return False
-
-
-def deactivate_rule_set(client, module):
- try:
-        # Calling set_active_receipt_rule_set with no rule set name deactivates all rule sets
- client.set_active_receipt_rule_set(aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't set active rule set to None.")
-
-
-def update_active_rule_set(client, module, name, desired_active):
- check_mode = module.check_mode
-
- active = ruleset_active(client, module, name)
-
- changed = False
- if desired_active is not None:
- if desired_active and not active:
- if not check_mode:
- try:
- client.set_active_receipt_rule_set(RuleSetName=name, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't set active rule set to {0}.".format(name))
- changed = True
- active = True
- elif not desired_active and active:
- if not check_mode:
- deactivate_rule_set(client, module)
- changed = True
- active = False
- return changed, active
-
-
-def create_or_update_rule_set(client, module):
- name = module.params.get('name')
- check_mode = module.check_mode
- changed = False
-
- rule_sets = list_rule_sets(client, module)
- if not rule_set_in(name, rule_sets):
- if not check_mode:
- try:
- client.create_receipt_rule_set(RuleSetName=name, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name))
- changed = True
- rule_sets = list(rule_sets)
- rule_sets.append({
- 'Name': name,
- })
-
- (active_changed, active) = update_active_rule_set(client, module, name, module.params.get('active'))
- changed |= active_changed
-
- module.exit_json(
- changed=changed,
- active=active,
- rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets],
- )
-
-
-def remove_rule_set(client, module):
- name = module.params.get('name')
- check_mode = module.check_mode
- changed = False
-
- rule_sets = list_rule_sets(client, module)
- if rule_set_in(name, rule_sets):
- active = ruleset_active(client, module, name)
- if active and not module.params.get('force'):
- module.fail_json(
- msg="Couldn't delete rule set {0} because it is currently active. Set force=true to delete an active ruleset.".format(name),
- error={
- "code": "CannotDelete",
- "message": "Cannot delete active rule set: {0}".format(name),
- }
- )
- if not check_mode:
- if active and module.params.get('force'):
- deactivate_rule_set(client, module)
- try:
- client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name))
- changed = True
- rule_sets = [x for x in rule_sets if x['Name'] != name]
-
- module.exit_json(
- changed=changed,
- rule_sets=[camel_dict_to_snake_dict(x) for x in rule_sets],
- )
-
-
-def main():
- argument_spec = dict(
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- active=dict(type='bool'),
- force=dict(type='bool', default=False),
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
- state = module.params.get('state')
-
- # SES APIs seem to have a much lower throttling threshold than most other AWS APIs.
- # The docs say 1 call per second. That is rarely a problem in normal usage, but the
- # Ansible CI runs multiple instances of these tests in parallel, which has caused
- # throttling failures, so apply a jittered backoff to all SES calls.
- client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())
-
- if state == 'absent':
- remove_rule_set(client, module)
- else:
- create_or_update_rule_set(client, module)
-
-
-if __name__ == '__main__':
- main()
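The pattern above, pairing a retry_decorator on module.client() with aws_retry=True on each call, is what shields the module from SES's one-call-per-second throttle. A minimal sketch of tuning the backoff (the retries/delay values here are illustrative, not the module's defaults):

    from ansible.module_utils.ec2 import AWSRetry

    # Retry up to 10 times with jittered exponential backoff starting at 3s.
    client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff(retries=10, delay=3))
    client.list_receipt_rule_sets(aws_retry=True)  # retried only because aws_retry=True is passed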
diff --git a/lib/ansible/modules/cloud/amazon/aws_sgw_info.py b/lib/ansible/modules/cloud/amazon/aws_sgw_info.py
deleted file mode 100644
index fdb0a6dede..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_sgw_info.py
+++ /dev/null
@@ -1,361 +0,0 @@
-#!/usr/bin/python
-# Copyright: (c) 2018, Loic BLOT (@nerzhul) <loic.blot@unix-experience.fr>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# This module is sponsored by E.T.A.I. (www.etai.fr)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: aws_sgw_info
-short_description: Fetch AWS Storage Gateway information
-description:
- - Fetch AWS Storage Gateway information
- - This module was called C(aws_sgw_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.6"
-requirements: [ boto3 ]
-author: Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
-options:
- gather_local_disks:
- description:
- - Gather local disks attached to the storage gateway.
- type: bool
- required: false
- default: true
- gather_tapes:
- description:
- - Gather tape information for storage gateways in tape mode.
- type: bool
- required: false
- default: true
- gather_file_shares:
- description:
- - Gather file share information for storage gateways in s3 mode.
- type: bool
- required: false
- default: true
- gather_volumes:
- description:
- - Gather volume information for storage gateways in iSCSI (cached & stored) modes.
- type: bool
- required: false
- default: true
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-RETURN = '''
-gateways:
- description: list of gateway objects
- returned: always
- type: complex
- contains:
- gateway_arn:
- description: "Storage Gateway ARN"
- returned: always
- type: str
- sample: "arn:aws:storagegateway:eu-west-1:367709993819:gateway/sgw-9999F888"
- gateway_id:
- description: "Storage Gateway ID"
- returned: always
- type: str
- sample: "sgw-9999F888"
- gateway_name:
- description: "Storage Gateway friendly name"
- returned: always
- type: str
- sample: "my-sgw-01"
- gateway_operational_state:
- description: "Storage Gateway operational state"
- returned: always
- type: str
- sample: "ACTIVE"
- gateway_type:
- description: "Storage Gateway type"
- returned: always
- type: str
- sample: "FILE_S3"
- file_shares:
- description: "Storage gateway file shares"
- returned: when gateway_type == "FILE_S3"
- type: complex
- contains:
- file_share_arn:
- description: "File share ARN"
- returned: always
- type: str
- sample: "arn:aws:storagegateway:eu-west-1:399805793479:share/share-AF999C88"
- file_share_id:
- description: "File share ID"
- returned: always
- type: str
- sample: "share-AF999C88"
- file_share_status:
- description: "File share status"
- returned: always
- type: str
- sample: "AVAILABLE"
- tapes:
- description: "Storage Gateway tapes"
- returned: when gateway_type == "VTL"
- type: complex
- contains:
- tape_arn:
- description: "Tape ARN"
- returned: always
- type: str
- sample: "arn:aws:storagegateway:eu-west-1:399805793479:tape/tape-AF999C88"
- tape_barcode:
- description: "Tape ARN"
- returned: always
- type: str
- sample: "tape-AF999C88"
- tape_size_in_bytes:
- description: "Tape ARN"
- returned: always
- type: int
- sample: 555887569
- tape_status:
- description: "Tape ARN"
- returned: always
- type: str
- sample: "AVAILABLE"
- local_disks:
- description: "Storage gateway local disks"
- returned: always
- type: complex
- contains:
- disk_allocation_type:
- description: "Disk allocation type"
- returned: always
- type: str
- sample: "CACHE STORAGE"
- disk_id:
- description: "Disk ID on the system"
- returned: always
- type: str
- sample: "pci-0000:00:1f.0"
- disk_node:
- description: "Disk parent block device"
- returned: always
- type: str
- sample: "/dev/sdb"
- disk_path:
- description: "Disk path used for the cache"
- returned: always
- type: str
- sample: "/dev/nvme1n1"
- disk_size_in_bytes:
- description: "Disk size in bytes"
- returned: always
- type: int
- sample: 107374182400
- disk_status:
- description: "Disk status"
- returned: always
- type: str
- sample: "present"
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details; see the AWS Guide for details.
-
-- name: "Get AWS storage gateway information"
- aws_sgw_info:
-
-- name: "Get AWS storage gateway information for region eu-west-3"
- aws_sgw_info:
- region: eu-west-3
-'''
-
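Since the gather_* options above all default to true, a hypothetical example of trimming the response (option names taken from the documentation block above):

    - name: "Get gateway information, skipping tape and volume enumeration"
      aws_sgw_info:
        gather_tapes: false
        gather_volumes: false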
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-class SGWInformationManager(object):
- def __init__(self, client, module):
- self.client = client
- self.module = module
- self.name = self.module.params.get('name')
-
- def fetch(self):
- gateways = self.list_gateways()
- for gateway in gateways:
- if self.module.params.get('gather_local_disks'):
- self.list_local_disks(gateway)
- # File share gateway
- if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'):
- self.list_gateway_file_shares(gateway)
- # Virtual tape library gateway
- elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'):
- self.list_gateway_vtl(gateway)
- # iSCSI gateway
- elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'):
- self.list_gateway_volumes(gateway)
-
- self.module.exit_json(gateways=gateways)
-
- """
- List all storage gateways for the AWS endpoint.
- """
- def list_gateways(self):
- try:
- paginator = self.client.get_paginator('list_gateways')
- response = paginator.paginate(
- PaginationConfig={
- 'PageSize': 100,
- }
- ).build_full_result()
-
- gateways = []
- for gw in response["Gateways"]:
- gateways.append(camel_dict_to_snake_dict(gw))
-
- return gateways
-
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Couldn't list storage gateways")
-
- """
- Read file share objects from AWS API response.
- Drop the gateway_arn attribute from response, as it will be duplicate with parent object.
- """
- @staticmethod
- def _read_gateway_fileshare_response(fileshares, aws_reponse):
- for share in aws_reponse["FileShareInfoList"]:
- share_obj = camel_dict_to_snake_dict(share)
- if "gateway_arn" in share_obj:
- del share_obj["gateway_arn"]
- fileshares.append(share_obj)
-
- return aws_reponse["NextMarker"] if "NextMarker" in aws_reponse else None
-
- """
- List file shares attached to AWS storage gateway when in S3 mode.
- """
- def list_gateway_file_shares(self, gateway):
- try:
- response = self.client.list_file_shares(
- GatewayARN=gateway["gateway_arn"],
- Limit=100
- )
-
- gateway["file_shares"] = []
- marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
-
- while marker is not None:
- response = self.client.list_file_shares(
- GatewayARN=gateway["gateway_arn"],
- Marker=marker,
- Limit=100
- )
-
- marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Couldn't list gateway file shares")
-
- """
- List storage gateway local disks
- """
- def list_local_disks(self, gateway):
- try:
- gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in
- self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']]
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks")
-
- """
- Read tape objects from AWS API response.
- Drop the gateway_arn attribute from response, as it will be duplicate with parent object.
- """
- @staticmethod
- def _read_gateway_tape_response(tapes, aws_response):
- for tape in aws_response["TapeInfos"]:
- tape_obj = camel_dict_to_snake_dict(tape)
- if "gateway_arn" in tape_obj:
- del tape_obj["gateway_arn"]
- tapes.append(tape_obj)
-
- return aws_response["Marker"] if "Marker" in aws_response else None
-
- """
- List VTL & VTS attached to AWS storage gateway in VTL mode
- """
- def list_gateway_vtl(self, gateway):
- try:
- response = self.client.list_tapes(
- Limit=100
- )
-
- gateway["tapes"] = []
- marker = self._read_gateway_tape_response(gateway["tapes"], response)
-
- while marker is not None:
- response = self.client.list_tapes(
- Marker=marker,
- Limit=100
- )
-
- marker = self._read_gateway_tape_response(gateway["tapes"], response)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Couldn't list storage gateway tapes")
-
- """
- List volumes attached to AWS storage gateway in CACHED or STORAGE mode
- """
- def list_gateway_volumes(self, gateway):
- try:
- paginator = self.client.get_paginator('list_volumes')
- response = paginator.paginate(
- GatewayARN=gateway["gateway_arn"],
- PaginationConfig={
- 'PageSize': 100,
- }
- ).build_full_result()
-
- gateway["volumes"] = []
- for volume in response["VolumeInfos"]:
- volume_obj = camel_dict_to_snake_dict(volume)
- if "gateway_arn" in volume_obj:
- del volume_obj["gateway_arn"]
- if "gateway_id" in volume_obj:
- del volume_obj["gateway_id"]
-
- gateway["volumes"].append(volume_obj)
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Couldn't list storage gateway volumes")
-
-
-def main():
- argument_spec = dict(
- gather_local_disks=dict(type='bool', default=True),
- gather_tapes=dict(type='bool', default=True),
- gather_file_shares=dict(type='bool', default=True),
- gather_volumes=dict(type='bool', default=True)
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
- if module._name == 'aws_sgw_facts':
- module.deprecate("The 'aws_sgw_facts' module has been renamed to 'aws_sgw_info'", version='2.13')
- client = module.client('storagegateway')
-
- if client is None: # this should never happen
- module.fail_json(msg='Unknown error, failed to create storagegateway client, no information from boto.')
-
- SGWInformationManager(client, module).fetch()
-
-
-if __name__ == '__main__':
- main()
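As elsewhere in these modules, every boto3 response here is run through camel_dict_to_snake_dict before being returned to the playbook. A minimal illustration (values invented):

    from ansible.module_utils.ec2 import camel_dict_to_snake_dict

    camel_dict_to_snake_dict({'GatewayARN': 'arn:aws:storagegateway:eu-west-1:123456789012:gateway/sgw-9999F888',
                              'GatewayType': 'FILE_S3'})
    # -> {'gateway_arn': 'arn:aws:storagegateway:...:gateway/sgw-9999F888', 'gateway_type': 'FILE_S3'}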
diff --git a/lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py b/lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py
deleted file mode 100644
index e63071d094..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py
+++ /dev/null
@@ -1,262 +0,0 @@
-#!/usr/bin/python
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'status': ['preview'],
- 'supported_by': 'community',
- 'metadata_version': '1.1'}
-
-DOCUMENTATION = '''
----
-module: aws_ssm_parameter_store
-short_description: Manage key-value pairs in AWS SSM Parameter Store
-description:
- - Manage key-value pairs in AWS SSM Parameter Store.
-version_added: "2.5"
-options:
- name:
- description:
- - Parameter key name.
- required: true
- type: str
- description:
- description:
- - Parameter key description.
- required: false
- type: str
- value:
- description:
- - Parameter value.
- required: false
- type: str
- state:
- description:
- - Whether the parameter should be created/updated (C(present)) or deleted (C(absent)).
- required: false
- choices: ['present', 'absent']
- default: present
- type: str
- string_type:
- description:
- - Parameter String type.
- required: false
- choices: ['String', 'StringList', 'SecureString']
- default: String
- type: str
- decryption:
- description:
- - Whether to decrypt C(SecureString) values and return the plain text secret.
- type: bool
- required: false
- default: true
- key_id:
- description:
- - AWS KMS key to decrypt the secrets.
- - The default key (C(alias/aws/ssm)) is automatically generated the first
- time it's requested.
- required: false
- default: alias/aws/ssm
- type: str
- overwrite_value:
- description:
- - Option to overwrite an existing value if it already exists.
- required: false
- version_added: "2.6"
- choices: ['never', 'changed', 'always']
- default: changed
- type: str
-author:
- - Nathan Webster (@nathanwebsterdotme)
- - Bill Wang (@ozbillwang) <ozbillwang@gmail.com>
- - Michael De La Rue (@mikedlr)
-extends_documentation_fragment:
- - aws
- - ec2
-requirements: [ botocore, boto3 ]
-'''
-
-EXAMPLES = '''
-- name: Create or update key/value pair in aws parameter store
- aws_ssm_parameter_store:
- name: "Hello"
- description: "This is your first key"
- value: "World"
-
-- name: Delete the key
- aws_ssm_parameter_store:
- name: "Hello"
- state: absent
-
-- name: Create or update secure key/value pair with default kms key (aws/ssm)
- aws_ssm_parameter_store:
- name: "Hello"
- description: "This is your first key"
- string_type: "SecureString"
- value: "World"
-
-- name: Create or update secure key/value pair with nominated kms key
- aws_ssm_parameter_store:
- name: "Hello"
- description: "This is your first key"
- string_type: "SecureString"
- key_id: "alias/demo"
- value: "World"
-
-- name: Always update a parameter store value and create a new version
- aws_ssm_parameter_store:
- name: "overwrite_example"
- description: "This example will always overwrite the value"
- string_type: "String"
- value: "Test1234"
- overwrite_value: "always"
-
-- name: Reading a value back is best done with the aws_ssm lookup plugin
- debug: msg="{{ lookup('aws_ssm', 'hello') }}"
-'''
-
-RETURN = '''
-put_parameter:
- description: Response from the C(put_parameter) API call.
- returned: success
- type: dict
-delete_parameter:
- description: Response from the C(delete_parameter) API call.
- returned: success
- type: dict
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-
-try:
- from botocore.exceptions import ClientError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-
-def update_parameter(client, module, args):
- changed = False
- response = {}
-
- try:
- response = client.put_parameter(**args)
- changed = True
- except ClientError as e:
- module.fail_json_aws(e, msg="setting parameter")
-
- return changed, response
-
-
-def create_update_parameter(client, module):
- changed = False
- existing_parameter = None
- response = {}
-
- args = dict(
- Name=module.params.get('name'),
- Value=module.params.get('value'),
- Type=module.params.get('string_type')
- )
-
- if (module.params.get('overwrite_value') in ("always", "changed")):
- args.update(Overwrite=True)
- else:
- args.update(Overwrite=False)
-
- if module.params.get('description'):
- args.update(Description=module.params.get('description'))
-
- if module.params.get('string_type') == 'SecureString':
- args.update(KeyId=module.params.get('key_id'))
-
- try:
- existing_parameter = client.get_parameter(Name=args['Name'], WithDecryption=True)
- except ClientError:
- # the parameter does not exist yet
- pass
-
- if existing_parameter:
- if (module.params.get('overwrite_value') == 'always'):
-
- (changed, response) = update_parameter(client, module, args)
-
- elif (module.params.get('overwrite_value') == 'changed'):
- if existing_parameter['Parameter']['Type'] != args['Type']:
- (changed, response) = update_parameter(client, module, args)
-
- if existing_parameter['Parameter']['Value'] != args['Value']:
- (changed, response) = update_parameter(client, module, args)
-
- if args.get('Description'):
- # Description field not available from get_parameter function so get it from describe_parameters
- describe_existing_parameter = None
- try:
- describe_existing_parameter_paginator = client.get_paginator('describe_parameters')
- describe_existing_parameter = describe_existing_parameter_paginator.paginate(
- Filters=[{"Key": "Name", "Values": [args['Name']]}]).build_full_result()
-
- except ClientError as e:
- module.fail_json_aws(e, msg="getting description value")
-
- if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']:
- (changed, response) = update_parameter(client, module, args)
- else:
- (changed, response) = update_parameter(client, module, args)
-
- return changed, response
-
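For reference, the get_parameter response consulted above has roughly this shape; only the keys the comparison logic reads are shown (abridged boto3 response, values invented):

    # Abridged; create_update_parameter compares Parameter.Type and Parameter.Value.
    existing_parameter = {
        'Parameter': {
            'Name': 'Hello',
            'Type': 'String',
            'Value': 'World',
        }
    }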
-
-def delete_parameter(client, module):
- response = {}
-
- try:
- response = client.delete_parameter(
- Name=module.params.get('name')
- )
- except ClientError as e:
- if e.response['Error']['Code'] == 'ParameterNotFound':
- return False, {}
- module.fail_json_aws(e, msg="deleting parameter")
-
- return True, response
-
-
-def setup_client(module):
- connection = module.client('ssm')
- return connection
-
-
-def setup_module_object():
- argument_spec = dict(
- name=dict(required=True),
- description=dict(),
- value=dict(required=False, no_log=True),
- state=dict(default='present', choices=['present', 'absent']),
- string_type=dict(default='String', choices=['String', 'StringList', 'SecureString']),
- decryption=dict(default=True, type='bool'),
- key_id=dict(default="alias/aws/ssm"),
- overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']),
- )
-
- return AnsibleAWSModule(
- argument_spec=argument_spec,
- )
-
-
-def main():
- module = setup_module_object()
- state = module.params.get('state')
- client = setup_client(module)
-
- invocations = {
- "present": create_update_parameter,
- "absent": delete_parameter,
- }
- (changed, response) = invocations[state](client, module)
- module.exit_json(changed=changed, response=response)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_step_functions_state_machine.py b/lib/ansible/modules/cloud/amazon/aws_step_functions_state_machine.py
deleted file mode 100644
index 329ee4283d..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_step_functions_state_machine.py
+++ /dev/null
@@ -1,232 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2019, Tom De Keyser (@tdekeyser)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = '''
----
-module: aws_step_functions_state_machine
-
-short_description: Manage AWS Step Functions state machines
-
-version_added: "2.10"
-
-description:
- - Create, update and delete state machines in AWS Step Functions.
- - Calling the module in C(state=present) for an existing AWS Step Functions state machine
- will attempt to update the state machine definition, IAM Role, or tags with the provided data.
-
-options:
- name:
- description:
- - Name of the state machine
- required: true
- type: str
- definition:
- description:
- - The Amazon States Language definition of the state machine. See
- U(https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) for more
- information on the Amazon States Language.
- - "This parameter is required when C(state=present)."
- type: json
- role_arn:
- description:
- - The ARN of the IAM Role that will be used by the state machine for its executions.
- - "This parameter is required when C(state=present)."
- type: str
- state:
- description:
- - Desired state for the state machine
- default: present
- choices: [ present, absent ]
- type: str
- tags:
- description:
- - A hash/dictionary of tags to add to the new state machine or to add/remove from an existing one.
- type: dict
- purge_tags:
- description:
- - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter.
- If the I(tags) parameter is not set then tags will not be modified.
- default: yes
- type: bool
-
-extends_documentation_fragment:
- - aws
- - ec2
-
-author:
- - Tom De Keyser (@tdekeyser)
-'''
-
-EXAMPLES = '''
-# Create a new AWS Step Functions state machine
-- name: Setup HelloWorld state machine
- aws_step_functions_state_machine:
- name: "HelloWorldStateMachine"
- definition: "{{ lookup('file','state_machine.json') }}"
- role_arn: arn:aws:iam::987654321012:role/service-role/invokeLambdaStepFunctionsRole
- tags:
- project: helloWorld
-
-# Update an existing state machine
-- name: Change IAM Role and tags of HelloWorld state machine
- aws_step_functions_state_machine:
- name: HelloWorldStateMachine
- definition: "{{ lookup('file','state_machine.json') }}"
- role_arn: arn:aws:iam::987654321012:role/service-role/anotherStepFunctionsRole
- tags:
- otherTag: aDifferentTag
-
-# Remove the AWS Step Functions state machine
-- name: Delete HelloWorld state machine
- aws_step_functions_state_machine:
- name: HelloWorldStateMachine
- state: absent
-'''
-
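The examples load their definition from state_machine.json without showing its contents. A minimal Amazon States Language document that would satisfy the lookup (a sketch with a single Pass state):

    {
      "Comment": "Minimal HelloWorld definition",
      "StartAt": "HelloWorld",
      "States": {
        "HelloWorld": {
          "Type": "Pass",
          "Result": "Hello World!",
          "End": true
        }
      }
    }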
-RETURN = '''
-state_machine_arn:
- description: ARN of the AWS Step Functions state machine
- type: str
- returned: always
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, compare_aws_tags, boto3_tag_list_to_ansible_dict
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def manage_state_machine(state, sfn_client, module):
- state_machine_arn = get_state_machine_arn(sfn_client, module)
-
- if state == 'present':
- if state_machine_arn is None:
- create(sfn_client, module)
- else:
- update(state_machine_arn, sfn_client, module)
- elif state == 'absent':
- if state_machine_arn is not None:
- remove(state_machine_arn, sfn_client, module)
-
- check_mode(module, msg='State is up-to-date.')
- module.exit_json(changed=False)
-
-
-def create(sfn_client, module):
- check_mode(module, msg='State machine would be created.', changed=True)
-
- tags = module.params.get('tags')
- sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') if tags else []
-
- state_machine = sfn_client.create_state_machine(
- name=module.params.get('name'),
- definition=module.params.get('definition'),
- roleArn=module.params.get('role_arn'),
- tags=sfn_tags
- )
- module.exit_json(changed=True, state_machine_arn=state_machine.get('stateMachineArn'))
-
-
-def remove(state_machine_arn, sfn_client, module):
- check_mode(module, msg='State machine would be deleted: {0}'.format(state_machine_arn), changed=True)
-
- sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
- module.exit_json(changed=True, state_machine_arn=state_machine_arn)
-
-
-def update(state_machine_arn, sfn_client, module):
- tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module)
-
- if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove:
- check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True)
-
- sfn_client.update_state_machine(
- stateMachineArn=state_machine_arn,
- definition=module.params.get('definition'),
- roleArn=module.params.get('role_arn')
- )
- sfn_client.untag_resource(
- resourceArn=state_machine_arn,
- tagKeys=tags_to_remove
- )
- sfn_client.tag_resource(
- resourceArn=state_machine_arn,
- tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value')
- )
-
- module.exit_json(changed=True, state_machine_arn=state_machine_arn)
-
-
-def compare_tags(state_machine_arn, sfn_client, module):
- new_tags = module.params.get('tags')
- current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags')
- return compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get('purge_tags'))
-
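compare_aws_tags (from module_utils.ec2) returns a pair: a dict of tags to set and a list of tag keys to unset. An illustrative call with invented values:

    # With purge_tags=True, keys missing from the desired dict are slated for removal.
    compare_aws_tags({'project': 'helloWorld', 'owner': 'ops'},   # current
                     {'project': 'helloWorld', 'env': 'dev'},     # desired
                     purge_tags=True)
    # -> ({'env': 'dev'}, ['owner'])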
-
-def params_changed(state_machine_arn, sfn_client, module):
- """
- Check whether the state machine definition or IAM Role ARN is different
- from the existing state machine parameters.
- """
- current = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn)
- return current.get('definition') != module.params.get('definition') or current.get('roleArn') != module.params.get('role_arn')
-
-
-def get_state_machine_arn(sfn_client, module):
- """
- Finds the state machine ARN based on the name parameter. Returns None if
- there is no state machine with this name.
- """
- target_name = module.params.get('name')
- all_state_machines = sfn_client.list_state_machines(aws_retry=True).get('stateMachines')
-
- for state_machine in all_state_machines:
- if state_machine.get('name') == target_name:
- return state_machine.get('stateMachineArn')
-
-
-def check_mode(module, msg='', changed=False):
- if module.check_mode:
- module.exit_json(changed=changed, output=msg)
-
-
-def main():
- module_args = dict(
- name=dict(type='str', required=True),
- definition=dict(type='json'),
- role_arn=dict(type='str'),
- state=dict(choices=['present', 'absent'], default='present'),
- tags=dict(default=None, type='dict'),
- purge_tags=dict(default=True, type='bool'),
- )
- module = AnsibleAWSModule(
- argument_spec=module_args,
- required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])],
- supports_check_mode=True
- )
-
- sfn_client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5))
- state = module.params.get('state')
-
- try:
- manage_state_machine(state, sfn_client, module)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to manage state machine')
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_step_functions_state_machine_execution.py b/lib/ansible/modules/cloud/amazon/aws_step_functions_state_machine_execution.py
deleted file mode 100644
index a6e0d7182d..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_step_functions_state_machine_execution.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2019, Prasad Katti (@prasadkatti)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = '''
----
-module: aws_step_functions_state_machine_execution
-
-short_description: Start or stop execution of an AWS Step Functions state machine.
-
-version_added: "2.10"
-
-description:
- - Start or stop execution of a state machine in AWS Step Functions.
-
-options:
- action:
- description: Desired action (start or stop) for a state machine execution.
- default: start
- choices: [ start, stop ]
- type: str
- name:
- description: Name of the execution.
- type: str
- execution_input:
- description: The JSON input data for the execution.
- type: json
- default: {}
- state_machine_arn:
- description: The ARN of the state machine that will be executed.
- type: str
- execution_arn:
- description: The ARN of the execution you wish to stop.
- type: str
- cause:
- description: A detailed explanation of the cause for stopping the execution.
- type: str
- default: ''
- error:
- description: The error code of the failure to pass in when stopping the execution.
- type: str
- default: ''
-
-extends_documentation_fragment:
- - aws
- - ec2
-
-author:
- - Prasad Katti (@prasadkatti)
-'''
-
-EXAMPLES = '''
-- name: Start an execution of a state machine
- aws_step_functions_state_machine_execution:
- name: an_execution_name
- execution_input: '{ "IsHelloWorldExample": true }'
- state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine"
-
-- name: Stop an execution of a state machine
- aws_step_functions_state_machine_execution:
- action: stop
- execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
- cause: "cause of task failure"
- error: "error code of the failure"
-'''
-
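A hypothetical follow-up showing how the return values documented below can be consumed (resource names invented):

    - name: Start an execution and report where it went
      aws_step_functions_state_machine_execution:
        name: nightly_run
        state_machine_arn: "arn:aws:states:us-west-2:123456789012:stateMachine:Nightly"
      register: sfn_exec

    - debug:
        msg: "Started {{ sfn_exec.execution_arn }} at {{ sfn_exec.start_date }}"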
-RETURN = '''
-execution_arn:
- description: ARN of the AWS Step Functions state machine execution.
- type: str
- returned: if action == start and changed == True
- sample: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
-start_date:
- description: The date the execution is started.
- type: str
- returned: if action == start and changed == True
- sample: "2019-11-02T22:39:49.071000-07:00"
-stop_date:
- description: The date the execution is stopped.
- type: str
- returned: if action == stop
- sample: "2019-11-02T22:39:49.071000-07:00"
-'''
-
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def start_execution(module, sfn_client):
- '''
- start_execution uses the execution name to determine whether a previous execution already exists.
- If an execution with the provided name exists, client.start_execution is not called.
- '''
-
- state_machine_arn = module.params.get('state_machine_arn')
- name = module.params.get('name')
- execution_input = module.params.get('execution_input')
-
- try:
- # list_executions is eventually consistent
- page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn)
-
- for execution in page_iterators.build_full_result()['executions']:
- if name == execution['name']:
- check_mode(module, msg='State machine execution already exists.', changed=False)
- module.exit_json(changed=False)
-
- check_mode(module, msg='State machine execution would be started.', changed=True)
- res_execution = sfn_client.start_execution(
- stateMachineArn=state_machine_arn,
- name=name,
- input=execution_input
- )
- except (ClientError, BotoCoreError) as e:
- # Only ClientError carries a response dict; BotoCoreError does not.
- if isinstance(e, ClientError) and e.response['Error']['Code'] == 'ExecutionAlreadyExists':
- # Safety net: list_executions above is eventually consistent, so a
- # just-started execution may only surface here.
- module.exit_json(changed=False)
- module.fail_json_aws(e, msg="Failed to start execution.")
-
- module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution))
-
-
-def stop_execution(module, sfn_client):
-
- cause = module.params.get('cause')
- error = module.params.get('error')
- execution_arn = module.params.get('execution_arn')
-
- try:
- # describe_execution is eventually consistent
- execution_status = sfn_client.describe_execution(executionArn=execution_arn)['status']
- if execution_status != 'RUNNING':
- check_mode(module, msg='State machine execution is not running.', changed=False)
- module.exit_json(changed=False)
-
- check_mode(module, msg='State machine execution would be stopped.', changed=True)
- res = sfn_client.stop_execution(
- executionArn=execution_arn,
- cause=cause,
- error=error
- )
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to stop execution.")
-
- module.exit_json(changed=True, **camel_dict_to_snake_dict(res))
-
-
-def check_mode(module, msg='', changed=False):
- if module.check_mode:
- module.exit_json(changed=changed, output=msg)
-
-
-def main():
- module_args = dict(
- action=dict(choices=['start', 'stop'], default='start'),
- name=dict(type='str'),
- execution_input=dict(type='json', default={}),
- state_machine_arn=dict(type='str'),
- cause=dict(type='str', default=''),
- error=dict(type='str', default=''),
- execution_arn=dict(type='str')
- )
- module = AnsibleAWSModule(
- argument_spec=module_args,
- required_if=[('action', 'start', ['name', 'state_machine_arn']),
- ('action', 'stop', ['execution_arn']),
- ],
- supports_check_mode=True
- )
-
- sfn_client = module.client('stepfunctions')
-
- action = module.params.get('action')
- if action == "start":
- start_execution(module, sfn_client)
- else:
- stop_execution(module, sfn_client)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_waf_condition.py b/lib/ansible/modules/cloud/amazon/aws_waf_condition.py
deleted file mode 100644
index 79f891af8d..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_waf_condition.py
+++ /dev/null
@@ -1,736 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Will Thames
-# Copyright (c) 2015 Mike Mochan
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: aws_waf_condition
-short_description: Create and delete WAF Conditions
-description:
- - Read the AWS documentation for WAF
- U(https://aws.amazon.com/documentation/waf/)
-version_added: "2.5"
-
-author:
- - Will Thames (@willthames)
- - Mike Mochan (@mmochan)
-extends_documentation_fragment:
- - aws
- - ec2
-options:
- name:
- description: Name of the Web Application Firewall condition to manage.
- required: true
- type: str
- type:
- description: The type of matching to perform.
- choices:
- - byte
- - geo
- - ip
- - regex
- - size
- - sql
- - xss
- type: str
- required: true
- filters:
- description:
- - A list of the filters against which to match.
- - For I(type=byte), valid keys are I(field_to_match), I(position), I(header), I(transformation) and I(target_string).
- - For I(type=geo), the only valid key is I(country).
- - For I(type=ip), the only valid key is I(ip_address).
- - For I(type=regex), valid keys are I(field_to_match), I(transformation) and I(regex_pattern).
- - For I(type=size), valid keys are I(field_to_match), I(transformation), I(comparison) and I(size).
- - For I(type=sql), valid keys are I(field_to_match) and I(transformation).
- - For I(type=xss), valid keys are I(field_to_match) and I(transformation).
- - Required when I(state=present).
- type: list
- elements: dict
- suboptions:
- field_to_match:
- description:
- - The field upon which to perform the match.
- - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss).
- type: str
- choices: ['uri', 'query_string', 'header', 'method', 'body']
- position:
- description:
- - Where in the field the match needs to occur.
- - Only valid when I(type=byte).
- type: str
- choices: ['exactly', 'starts_with', 'ends_with', 'contains', 'contains_word']
- header:
- description:
- - Which specific header should be matched.
- - Required when I(field_to_match=header).
- - Valid when I(type=byte).
- type: str
- transformation:
- description:
- - A transform to apply on the field prior to performing the match.
- - Valid when I(type=byte), I(type=regex), I(type=sql) or I(type=xss).
- type: str
- choices: ['none', 'compress_white_space', 'html_entity_decode', 'lowercase', 'cmd_line', 'url_decode']
- country:
- description:
- - Value of geo constraint (typically a two letter country code).
- - The only valid key when I(type=geo).
- type: str
- ip_address:
- description:
- - An IP Address or CIDR to match.
- - The only valid key when I(type=ip).
- type: str
- regex_pattern:
- description:
- - A dict describing the regular expressions used to perform the match.
- - Only valid when I(type=regex).
- type: dict
- suboptions:
- name:
- description: A name to describe the set of patterns.
- type: str
- regex_strings:
- description: A list of regular expressions to match.
- type: list
- elements: str
- comparison:
- description:
- - What type of comparison to perform.
- - Only valid key when I(type=size).
- type: str
- choices: ['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']
- size:
- description:
- - The size of the field (in bytes).
- - Only valid key when I(type=size).
- type: int
- target_string:
- description:
- - The string to search for.
- - May be up to 50 bytes.
- - Valid when I(type=byte).
- type: str
- purge_filters:
- description:
- - Whether to remove existing filters from a condition if not passed in I(filters).
- default: false
- type: bool
- waf_regional:
- description: Whether to use the waf-regional module.
- default: false
- required: no
- type: bool
- version_added: "2.9"
- state:
- description: Whether the condition should be C(present) or C(absent).
- choices:
- - present
- - absent
- default: present
- type: str
-
-'''
-
-EXAMPLES = '''
- - name: create WAF byte condition
- aws_waf_condition:
- name: my_byte_condition
- filters:
- - field_to_match: header
- position: STARTS_WITH
- target_string: Hello
- header: Content-type
- type: byte
-
- - name: create WAF geo condition
- aws_waf_condition:
- name: my_geo_condition
- filters:
- - country: US
- - country: AU
- - country: AT
- type: geo
-
- - name: create IP address condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- filters:
- - ip_address: "10.0.0.0/8"
- - ip_address: "192.168.0.0/24"
- type: ip
-
- - name: create WAF regex condition
- aws_waf_condition:
- name: my_regex_condition
- filters:
- - field_to_match: query_string
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
-
- - name: create WAF size condition
- aws_waf_condition:
- name: my_size_condition
- filters:
- - field_to_match: query_string
- size: 300
- comparison: GT
- type: size
-
- - name: create WAF sql injection condition
- aws_waf_condition:
- name: my_sql_condition
- filters:
- - field_to_match: query_string
- transformation: url_decode
- type: sql
-
- - name: create WAF xss condition
- aws_waf_condition:
- name: my_xss_condition
- filters:
- - field_to_match: query_string
- transformation: url_decode
- type: xss
-
-'''
-
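main() below also copies a type-agnostic ConditionId into the result for consumption by aws_waf_rule; a sketch of capturing it (names invented, rule wiring omitted):

    - name: create condition and capture its type-agnostic ID
      aws_waf_condition:
        name: my_geo_condition
        type: geo
        filters:
          - country: US
      register: geo_condition

    - debug:
        msg: "Condition ID is {{ geo_condition.condition.condition_id }}"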
-RETURN = '''
-condition:
- description: Condition returned by operation.
- returned: always
- type: complex
- contains:
- condition_id:
- description: Type-agnostic ID for the condition.
- returned: when state is present
- type: str
- sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
- byte_match_set_id:
- description: ID for byte match set.
- returned: always
- type: str
- sample: c4882c96-837b-44a2-a762-4ea87dbf812b
- byte_match_tuples:
- description: List of byte match tuples.
- returned: always
- type: complex
- contains:
- field_to_match:
- description: Field to match.
- returned: always
- type: complex
- contains:
- data:
- description: Which specific header (if type is header).
- type: str
- sample: content-type
- type:
- description: Type of field
- type: str
- sample: HEADER
- positional_constraint:
- description: Position in the field to match.
- type: str
- sample: STARTS_WITH
- target_string:
- description: String to look for.
- type: str
- sample: Hello
- text_transformation:
- description: Transformation to apply to the field before matching.
- type: str
- sample: NONE
- geo_match_constraints:
- description: List of geographical constraints.
- returned: when type is geo and state is present
- type: complex
- contains:
- type:
- description: Type of geo constraint.
- type: str
- sample: Country
- value:
- description: Value of geo constraint (typically a country code).
- type: str
- sample: AT
- geo_match_set_id:
- description: ID of the geo match set.
- returned: when type is geo and state is present
- type: str
- sample: dd74b1ff-8c06-4a4f-897a-6b23605de413
- ip_set_descriptors:
- description: List of IP address filters.
- returned: when type is ip and state is present
- type: complex
- contains:
- type:
- description: Type of IP address (IPV4 or IPV6).
- returned: always
- type: str
- sample: IPV4
- value:
- description: IP address.
- returned: always
- type: str
- sample: 10.0.0.0/8
- ip_set_id:
- description: ID of condition.
- returned: when type is ip and state is present
- type: str
- sample: 78ad334a-3535-4036-85e6-8e11e745217b
- name:
- description: Name of condition.
- returned: when state is present
- type: str
- sample: my_waf_condition
- regex_match_set_id:
- description: ID of the regex match set.
- returned: when type is regex and state is present
- type: str
- sample: 5ea3f6a8-3cd3-488b-b637-17b79ce7089c
- regex_match_tuples:
- description: List of regex matches.
- returned: when type is regex and state is present
- type: complex
- contains:
- field_to_match:
- description: Field on which the regex match is applied.
- type: complex
- contains:
- type:
- description: The field name.
- returned: when type is regex and state is present
- type: str
- sample: QUERY_STRING
- regex_pattern_set_id:
- description: ID of the regex pattern.
- type: str
- sample: 6fdf7f2d-9091-445c-aef2-98f3c051ac9e
- text_transformation:
- description: Transformation applied to the text before matching.
- type: str
- sample: NONE
- size_constraint_set_id:
- description: ID of the size constraint set.
- returned: when type is size and state is present
- type: str
- sample: de84b4b3-578b-447e-a9a0-0db35c995656
- size_constraints:
- description: List of size constraints to apply.
- returned: when type is size and state is present
- type: complex
- contains:
- comparison_operator:
- description: Comparison operator to apply.
- type: str
- sample: GT
- field_to_match:
- description: Field on which the size constraint is applied.
- type: complex
- contains:
- type:
- description: Field name.
- type: str
- sample: QUERY_STRING
- size:
- description: Size to compare against the field.
- type: int
- sample: 300
- text_transformation:
- description: Transformation applied to the text before matching.
- type: str
- sample: NONE
- sql_injection_match_set_id:
- description: ID of the SQL injection match set.
- returned: when type is sql and state is present
- type: str
- sample: de84b4b3-578b-447e-a9a0-0db35c995656
- sql_injection_match_tuples:
- description: List of SQL injection match sets.
- returned: when type is sql and state is present
- type: complex
- contains:
- field_to_match:
- description: Field on which the SQL injection match is applied.
- type: complex
- contains:
- type:
- description: Field name.
- type: str
- sample: QUERY_STRING
- text_transformation:
- description: Transformation applied to the text before matching.
- type: str
- sample: URL_DECODE
- xss_match_set_id:
- description: ID of the XSS match set.
- returned: when type is xss and state is present
- type: str
- sample: de84b4b3-578b-447e-a9a0-0db35c995656
- xss_match_tuples:
- description: List of XSS match sets.
- returned: when type is xss and state is present
- type: complex
- contains:
- field_to_match:
- description: Field on which the XSS match is applied.
- type: complex
- contains:
- type:
- description: Field name
- type: str
- sample: QUERY_STRING
- text_transformation:
- description: Transformation applied to the text before matching.
- type: str
- sample: URL_DECODE
-'''
-
-try:
- import botocore
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_policies
-from ansible.module_utils.aws.waf import run_func_with_change_token_backoff, MATCH_LOOKUP
-from ansible.module_utils.aws.waf import get_rule_with_backoff, list_rules_with_backoff, list_regional_rules_with_backoff
-
-
-class Condition(object):
-
- def __init__(self, client, module):
- self.client = client
- self.module = module
- self.type = module.params['type']
- self.method_suffix = MATCH_LOOKUP[self.type]['method']
- self.conditionset = MATCH_LOOKUP[self.type]['conditionset']
- self.conditionsets = MATCH_LOOKUP[self.type]['conditionset'] + 's'
- self.conditionsetid = MATCH_LOOKUP[self.type]['conditionset'] + 'Id'
- self.conditiontuple = MATCH_LOOKUP[self.type]['conditiontuple']
- self.conditiontuples = MATCH_LOOKUP[self.type]['conditiontuple'] + 's'
- self.conditiontype = MATCH_LOOKUP[self.type]['type']
-
- def format_for_update(self, condition_set_id):
- # Prep kwargs
- kwargs = dict()
- kwargs['Updates'] = list()
-
- for filtr in self.module.params.get('filters'):
- # Only for ip_set
- if self.type == 'ip':
- # there might be a better way of detecting an IPv6 address
- if ':' in filtr.get('ip_address'):
- ip_type = 'IPV6'
- else:
- ip_type = 'IPV4'
- condition_insert = {'Type': ip_type, 'Value': filtr.get('ip_address')}
-
- # Specific for geo_match_set
- if self.type == 'geo':
- condition_insert = dict(Type='Country', Value=filtr.get('country'))
-
- # Common For everything but ip_set and geo_match_set
- if self.type not in ('ip', 'geo'):
-
- condition_insert = dict(FieldToMatch=dict(Type=filtr.get('field_to_match').upper()),
- TextTransformation=filtr.get('transformation', 'none').upper())
-
- if filtr.get('field_to_match').upper() == "HEADER":
- if filtr.get('header'):
- condition_insert['FieldToMatch']['Data'] = filtr.get('header').lower()
- else:
- self.module.fail_json(msg="The 'header' filter key is required when field_to_match is 'header'")
-
- # Specific for byte_match_set
- if self.type == 'byte':
- condition_insert['TargetString'] = filtr.get('target_string')
- condition_insert['PositionalConstraint'] = filtr.get('position')
-
- # Specific for size_constraint_set
- if self.type == 'size':
- condition_insert['ComparisonOperator'] = filtr.get('comparison')
- condition_insert['Size'] = filtr.get('size')
-
- # Specific for regex_match_set
- if self.type == 'regex':
- condition_insert['RegexPatternSetId'] = self.ensure_regex_pattern_present(filtr.get('regex_pattern'))['RegexPatternSetId']
-
- kwargs['Updates'].append({'Action': 'INSERT', self.conditiontuple: condition_insert})
-
- kwargs[self.conditionsetid] = condition_set_id
- return kwargs
-
- def format_for_deletion(self, condition):
- return {'Updates': [{'Action': 'DELETE', self.conditiontuple: current_condition_tuple}
- for current_condition_tuple in condition[self.conditiontuples]],
- self.conditionsetid: condition[self.conditionsetid]}
-
- @AWSRetry.exponential_backoff()
- def list_regex_patterns_with_backoff(self, **params):
- return self.client.list_regex_pattern_sets(**params)
-
- @AWSRetry.exponential_backoff()
- def get_regex_pattern_set_with_backoff(self, regex_pattern_set_id):
- return self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)
-
- def list_regex_patterns(self):
- # at time of writing (2017-11-20) no regex pattern paginator exists
- regex_patterns = []
- params = {}
- while True:
- try:
- response = self.list_regex_patterns_with_backoff(**params)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not list regex patterns')
- regex_patterns.extend(response['RegexPatternSets'])
- if 'NextMarker' in response:
- params['NextMarker'] = response['NextMarker']
- else:
- break
- return regex_patterns
-
- def get_regex_pattern_by_name(self, name):
- existing_regex_patterns = self.list_regex_patterns()
- regex_lookup = dict((item['Name'], item['RegexPatternSetId']) for item in existing_regex_patterns)
- if name in regex_lookup:
- return self.get_regex_pattern_set_with_backoff(regex_lookup[name])['RegexPatternSet']
- else:
- return None
-
- def ensure_regex_pattern_present(self, regex_pattern):
- name = regex_pattern['name']
-
- pattern_set = self.get_regex_pattern_by_name(name)
- if not pattern_set:
- pattern_set = run_func_with_change_token_backoff(self.client, self.module, {'Name': name},
- self.client.create_regex_pattern_set)['RegexPatternSet']
- missing = set(regex_pattern['regex_strings']) - set(pattern_set['RegexPatternStrings'])
- extra = set(pattern_set['RegexPatternStrings']) - set(regex_pattern['regex_strings'])
- if not missing and not extra:
- return pattern_set
- updates = [{'Action': 'INSERT', 'RegexPatternString': pattern} for pattern in missing]
- updates.extend([{'Action': 'DELETE', 'RegexPatternString': pattern} for pattern in extra])
- run_func_with_change_token_backoff(self.client, self.module,
- {'RegexPatternSetId': pattern_set['RegexPatternSetId'], 'Updates': updates},
- self.client.update_regex_pattern_set, wait=True)
- return self.get_regex_pattern_set_with_backoff(pattern_set['RegexPatternSetId'])['RegexPatternSet']
-
- def delete_unused_regex_pattern(self, regex_pattern_set_id):
- try:
- regex_pattern_set = self.client.get_regex_pattern_set(RegexPatternSetId=regex_pattern_set_id)['RegexPatternSet']
- updates = list()
- for regex_pattern_string in regex_pattern_set['RegexPatternStrings']:
- updates.append({'Action': 'DELETE', 'RegexPatternString': regex_pattern_string})
- run_func_with_change_token_backoff(self.client, self.module,
- {'RegexPatternSetId': regex_pattern_set_id, 'Updates': updates},
- self.client.update_regex_pattern_set)
-
- run_func_with_change_token_backoff(self.client, self.module,
- {'RegexPatternSetId': regex_pattern_set_id},
- self.client.delete_regex_pattern_set, wait=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- if e.response['Error']['Code'] == 'WAFNonexistentItemException':
- return
- self.module.fail_json_aws(e, msg='Could not delete regex pattern')
-
- def get_condition_by_name(self, name):
- all_conditions = [d for d in self.list_conditions() if d['Name'] == name]
- if all_conditions:
- return all_conditions[0][self.conditionsetid]
-
- @AWSRetry.exponential_backoff()
- def get_condition_by_id_with_backoff(self, condition_set_id):
- params = dict()
- params[self.conditionsetid] = condition_set_id
- func = getattr(self.client, 'get_' + self.method_suffix)
- return func(**params)[self.conditionset]
-
- def get_condition_by_id(self, condition_set_id):
- try:
- return self.get_condition_by_id_with_backoff(condition_set_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not get condition')
-
- def list_conditions(self):
- method = 'list_' + self.method_suffix + 's'
- try:
- paginator = self.client.get_paginator(method)
- func = paginator.paginate().build_full_result
- except botocore.exceptions.OperationNotPageableError:
- # list_geo_match_sets and list_regex_match_sets do not have a paginator
- func = getattr(self.client, method)
- try:
- return func()[self.conditionsets]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not list %s conditions' % self.type)
-
- def tidy_up_regex_patterns(self, regex_match_set):
- all_regex_match_sets = self.list_conditions()
- all_match_set_patterns = list()
- for rms in all_regex_match_sets:
- all_match_set_patterns.extend(conditiontuple['RegexPatternSetId']
- for conditiontuple in self.get_condition_by_id(rms[self.conditionsetid])[self.conditiontuples])
- for filtr in regex_match_set[self.conditiontuples]:
- if filtr['RegexPatternSetId'] not in all_match_set_patterns:
- self.delete_unused_regex_pattern(filtr['RegexPatternSetId'])
-
- def find_condition_in_rules(self, condition_set_id):
- rules_in_use = []
- try:
- if self.client.__class__.__name__ == 'WAF':
- all_rules = list_rules_with_backoff(self.client)
- elif self.client.__class__.__name__ == 'WAFRegional':
- all_rules = list_regional_rules_with_backoff(self.client)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not list rules')
- for rule in all_rules:
- try:
- rule_details = get_rule_with_backoff(self.client, rule['RuleId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not get rule details')
- if condition_set_id in [predicate['DataId'] for predicate in rule_details['Predicates']]:
- rules_in_use.append(rule_details['Name'])
- return rules_in_use
-
- def find_and_delete_condition(self, condition_set_id):
- current_condition = self.get_condition_by_id(condition_set_id)
- in_use_rules = self.find_condition_in_rules(condition_set_id)
- if in_use_rules:
- rulenames = ', '.join(in_use_rules)
- self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition['Name'], rulenames))
- if current_condition[self.conditiontuples]:
- # Filters are deleted using update with the DELETE action
- func = getattr(self.client, 'update_' + self.method_suffix)
- params = self.format_for_deletion(current_condition)
- try:
- # We do not need to wait for the conditiontuple delete because we wait later for the delete_* call
- run_func_with_change_token_backoff(self.client, self.module, params, func)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not delete filters from condition')
- func = getattr(self.client, 'delete_' + self.method_suffix)
- params = dict()
- params[self.conditionsetid] = condition_set_id
- try:
- run_func_with_change_token_backoff(self.client, self.module, params, func, wait=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not delete condition')
- # tidy up regex patterns
- if self.type == 'regex':
- self.tidy_up_regex_patterns(current_condition)
- return True, {}
-
- def find_missing(self, update, current_condition):
- missing = []
- for desired in update['Updates']:
- found = False
- desired_condition = desired[self.conditiontuple]
- current_conditions = current_condition[self.conditiontuples]
- for condition in current_conditions:
- if not compare_policies(condition, desired_condition):
- found = True
- if not found:
- missing.append(desired)
- return missing
-
- def find_and_update_condition(self, condition_set_id):
- current_condition = self.get_condition_by_id(condition_set_id)
- update = self.format_for_update(condition_set_id)
- missing = self.find_missing(update, current_condition)
- if self.module.params.get('purge_filters'):
- extra = [{'Action': 'DELETE', self.conditiontuple: current_tuple}
- for current_tuple in current_condition[self.conditiontuples]
- if current_tuple not in [desired[self.conditiontuple] for desired in update['Updates']]]
- else:
- extra = []
- changed = bool(missing or extra)
- if changed:
- update['Updates'] = missing + extra
- func = getattr(self.client, 'update_' + self.method_suffix)
- try:
- run_func_with_change_token_backoff(self.client, self.module, update, func, wait=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not update condition')
- return changed, self.get_condition_by_id(condition_set_id)
-
- def ensure_condition_present(self):
- name = self.module.params['name']
- condition_set_id = self.get_condition_by_name(name)
- if condition_set_id:
- return self.find_and_update_condition(condition_set_id)
- else:
- params = dict()
- params['Name'] = name
- func = getattr(self.client, 'create_' + self.method_suffix)
- try:
- condition = run_func_with_change_token_backoff(self.client, self.module, params, func)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg='Could not create condition')
- return self.find_and_update_condition(condition[self.conditionset][self.conditionsetid])
-
- def ensure_condition_absent(self):
- condition_set_id = self.get_condition_by_name(self.module.params['name'])
- if condition_set_id:
- return self.find_and_delete_condition(condition_set_id)
- return False, {}
-
-
-def main():
- filters_subspec = dict(
- country=dict(),
- field_to_match=dict(choices=['uri', 'query_string', 'header', 'method', 'body']),
- header=dict(),
- transformation=dict(choices=['none', 'compress_white_space',
- 'html_entity_decode', 'lowercase',
- 'cmd_line', 'url_decode']),
- position=dict(choices=['exactly', 'starts_with', 'ends_with',
- 'contains', 'contains_word']),
- comparison=dict(choices=['EQ', 'NE', 'LE', 'LT', 'GE', 'GT']),
- target_string=dict(), # Bytes
- size=dict(type='int'),
- ip_address=dict(),
- regex_pattern=dict(),
- )
- argument_spec = dict(
- name=dict(required=True),
- type=dict(required=True, choices=['byte', 'geo', 'ip', 'regex', 'size', 'sql', 'xss']),
- filters=dict(type='list'),
- purge_filters=dict(type='bool', default=False),
- waf_regional=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent']),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[['state', 'present', ['filters']]])
- state = module.params.get('state')
-
- resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
- client = module.client(resource)
-
- condition = Condition(client, module)
-
- if state == 'present':
- (changed, results) = condition.ensure_condition_present()
- # return a condition agnostic ID for use by aws_waf_rule
- results['ConditionId'] = results[condition.conditionsetid]
- else:
- (changed, results) = condition.ensure_condition_absent()
-
- module.exit_json(changed=changed, condition=camel_dict_to_snake_dict(results))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_waf_info.py b/lib/ansible/modules/cloud/amazon/aws_waf_info.py
deleted file mode 100644
index 428c077e01..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_waf_info.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: aws_waf_info
-short_description: Retrieve information for WAF ACLs, Rules, Conditions and Filters.
-description:
-  - Retrieve information for WAF ACLs, Rules, Conditions and Filters.
- - This module was called C(aws_waf_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.4"
-requirements: [ boto3 ]
-options:
- name:
- description:
- - The name of a Web Application Firewall.
- type: str
- waf_regional:
- description: Whether to use the waf-regional module.
- default: false
- required: no
- type: bool
- version_added: "2.9"
-
-author:
- - Mike Mochan (@mmochan)
- - Will Thames (@willthames)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: obtain all WAF information
- aws_waf_info:
-
-- name: obtain all information for a single WAF
- aws_waf_info:
- name: test_waf
-
-- name: obtain all information for a single WAF Regional
- aws_waf_info:
- name: test_waf
- waf_regional: true
-'''
-
-RETURN = '''
-wafs:
- description: The WAFs that match the passed arguments.
- returned: success
- type: complex
- contains:
- name:
- description: A friendly name or description of the WebACL.
- returned: always
- type: str
- sample: test_waf
- default_action:
- description: The action to perform if none of the Rules contained in the WebACL match.
- returned: always
-      type: str
- sample: BLOCK
- metric_name:
- description: A friendly name or description for the metrics for this WebACL.
- returned: always
- type: str
- sample: test_waf_metric
- rules:
-      description: An array that contains the action for each Rule in a WebACL, and the priority of the Rule.
- returned: always
- type: complex
- contains:
- action:
- description: The action to perform if the Rule matches.
- returned: always
- type: str
- sample: BLOCK
- metric_name:
- description: A friendly name or description for the metrics for this Rule.
- returned: always
- type: str
- sample: ipblockrule
- name:
- description: A friendly name or description of the Rule.
- returned: always
- type: str
- sample: ip_block_rule
- predicates:
- description: The Predicates list contains a Predicate for each
- ByteMatchSet, IPSet, SizeConstraintSet, SqlInjectionMatchSet or XssMatchSet
- object in a Rule.
- returned: always
- type: list
- sample:
- [
- {
- "byte_match_set_id": "47b822b5-abcd-1234-faaf-1234567890",
- "byte_match_tuples": [
- {
- "field_to_match": {
- "type": "QUERY_STRING"
- },
- "positional_constraint": "STARTS_WITH",
- "target_string": "bobbins",
- "text_transformation": "NONE"
- }
- ],
- "name": "bobbins",
- "negated": false,
- "type": "ByteMatch"
- }
- ]
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.waf import list_web_acls, get_web_acl
-
-
-def main():
- argument_spec = dict(
- name=dict(required=False),
- waf_regional=dict(type='bool', default=False)
- )
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._name == 'aws_waf_facts':
- module.deprecate("The 'aws_waf_facts' module has been renamed to 'aws_waf_info'", version='2.13')
-
- resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
- client = module.client(resource)
- web_acls = list_web_acls(client, module)
- name = module.params['name']
- if name:
- web_acls = [web_acl for web_acl in web_acls if
- web_acl['Name'] == name]
- if not web_acls:
- module.fail_json(msg="WAF named %s not found" % name)
- module.exit_json(wafs=[get_web_acl(client, module, web_acl['WebACLId'])
- for web_acl in web_acls])
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_waf_rule.py b/lib/ansible/modules/cloud/amazon/aws_waf_rule.py
deleted file mode 100644
index 358c42c696..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_waf_rule.py
+++ /dev/null
@@ -1,355 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Will Thames
-# Copyright (c) 2015 Mike Mochan
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: aws_waf_rule
-short_description: Create and delete WAF Rules
-description:
- - Read the AWS documentation for WAF
- U(https://aws.amazon.com/documentation/waf/).
-version_added: "2.5"
-
-author:
- - Mike Mochan (@mmochan)
- - Will Thames (@willthames)
-extends_documentation_fragment:
- - aws
- - ec2
-options:
- name:
- description: Name of the Web Application Firewall rule.
- required: yes
- type: str
- metric_name:
- description:
- - A friendly name or description for the metrics for the rule.
- - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace.
- - You can't change I(metric_name) after you create the rule.
- - Defaults to the same as I(name) with disallowed characters removed.
- type: str
- state:
- description: Whether the rule should be present or absent.
- choices:
- - present
- - absent
- default: present
- type: str
- conditions:
- description: >
- List of conditions used in the rule. M(aws_waf_condition) can be used to
- create new conditions.
- type: list
- elements: dict
- suboptions:
- type:
- required: true
- type: str
-        choices: ['byte','geo','ip','regex','size','sql','xss']
- description: The type of rule to match.
- negated:
- required: true
- type: bool
- description: Whether the condition should be negated.
- condition:
- required: true
- type: str
- description: The name of the condition. The condition must already exist.
- purge_conditions:
- description:
-      - Whether or not to remove conditions that are not passed when updating I(conditions).
- default: false
- type: bool
- waf_regional:
-    description: Whether to use the waf-regional module.
- default: false
- required: false
- type: bool
- version_added: "2.9"
-'''
-
-EXAMPLES = '''
-
- - name: create WAF rule
- aws_waf_rule:
- name: my_waf_rule
- conditions:
- - name: my_regex_condition
- type: regex
- negated: no
- - name: my_geo_condition
- type: geo
- negated: no
- - name: my_byte_condition
- type: byte
- negated: yes
-
- - name: remove WAF rule
- aws_waf_rule:
- name: "my_waf_rule"
- state: absent
-
-'''
-
-RETURN = '''
-rule:
- description: WAF rule contents
- returned: always
- type: complex
- contains:
- metric_name:
- description: Metric name for the rule.
- returned: always
- type: str
- sample: ansibletest1234rule
- name:
- description: Friendly name for the rule.
- returned: always
- type: str
- sample: ansible-test-1234_rule
- predicates:
- description: List of conditions used in the rule.
- returned: always
- type: complex
- contains:
- data_id:
- description: ID of the condition.
- returned: always
- type: str
- sample: 8251acdb-526c-42a8-92bc-d3d13e584166
- negated:
- description: Whether the sense of the condition is negated.
- returned: always
- type: bool
- sample: false
- type:
-          description: Type of the condition.
- returned: always
- type: str
- sample: ByteMatch
- rule_id:
- description: ID of the WAF rule.
- returned: always
- type: str
- sample: 15de0cbc-9204-4e1f-90e6-69b2f415c261
-'''
-
-import re
-
-try:
- import botocore
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible.module_utils.aws.waf import run_func_with_change_token_backoff, list_rules_with_backoff, list_regional_rules_with_backoff, MATCH_LOOKUP
-from ansible.module_utils.aws.waf import get_web_acl_with_backoff, list_web_acls_with_backoff, list_regional_web_acls_with_backoff
-
-
-def get_rule_by_name(client, module, name):
- rules = [d['RuleId'] for d in list_rules(client, module) if d['Name'] == name]
- if rules:
- return rules[0]
-
-
-def get_rule(client, module, rule_id):
- try:
- return client.get_rule(RuleId=rule_id)['Rule']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not get WAF rule')
-
-
-def list_rules(client, module):
- if client.__class__.__name__ == 'WAF':
- try:
- return list_rules_with_backoff(client)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list WAF rules')
- elif client.__class__.__name__ == 'WAFRegional':
- try:
- return list_regional_rules_with_backoff(client)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list WAF Regional rules')
-
-
-def list_regional_rules(client, module):
- try:
- return list_regional_rules_with_backoff(client)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list WAF rules')
-
-
-def find_and_update_rule(client, module, rule_id):
- rule = get_rule(client, module, rule_id)
- rule_id = rule['RuleId']
-
- existing_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
- desired_conditions = dict((condition_type, dict()) for condition_type in MATCH_LOOKUP)
- all_conditions = dict()
-
- for condition_type in MATCH_LOOKUP:
- method = 'list_' + MATCH_LOOKUP[condition_type]['method'] + 's'
- all_conditions[condition_type] = dict()
- try:
- paginator = client.get_paginator(method)
- func = paginator.paginate().build_full_result
- except (KeyError, botocore.exceptions.OperationNotPageableError):
- # list_geo_match_sets and list_regex_match_sets do not have a paginator
- # and throw different exceptions
- func = getattr(client, method)
- try:
- pred_results = func()[MATCH_LOOKUP[condition_type]['conditionset'] + 's']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list %s conditions' % condition_type)
- for pred in pred_results:
- pred['DataId'] = pred[MATCH_LOOKUP[condition_type]['conditionset'] + 'Id']
- all_conditions[condition_type][pred['Name']] = camel_dict_to_snake_dict(pred)
- all_conditions[condition_type][pred['DataId']] = camel_dict_to_snake_dict(pred)
-
- for condition in module.params['conditions']:
- desired_conditions[condition['type']][condition['name']] = condition
-
- reverse_condition_types = dict((v['type'], k) for (k, v) in MATCH_LOOKUP.items())
- for condition in rule['Predicates']:
- existing_conditions[reverse_condition_types[condition['Type']]][condition['DataId']] = camel_dict_to_snake_dict(condition)
-
- insertions = list()
- deletions = list()
-
- for condition_type in desired_conditions:
- for (condition_name, condition) in desired_conditions[condition_type].items():
- if condition_name not in all_conditions[condition_type]:
- module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type))
- condition['data_id'] = all_conditions[condition_type][condition_name]['data_id']
- if condition['data_id'] not in existing_conditions[condition_type]:
- insertions.append(format_for_insertion(condition))
-
- if module.params['purge_conditions']:
- for condition_type in existing_conditions:
- deletions.extend([format_for_deletion(condition) for condition in existing_conditions[condition_type].values()
- if not all_conditions[condition_type][condition['data_id']]['name'] in desired_conditions[condition_type]])
-
- changed = bool(insertions or deletions)
- update = {
- 'RuleId': rule_id,
- 'Updates': insertions + deletions
- }
- if changed:
- try:
- run_func_with_change_token_backoff(client, module, update, client.update_rule, wait=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not update rule conditions')
-
- return changed, get_rule(client, module, rule_id)
-
-
-def format_for_insertion(condition):
- return dict(Action='INSERT',
- Predicate=dict(Negated=condition['negated'],
- Type=MATCH_LOOKUP[condition['type']]['type'],
- DataId=condition['data_id']))
-
-
-def format_for_deletion(condition):
- return dict(Action='DELETE',
- Predicate=dict(Negated=condition['negated'],
- Type=condition['type'],
- DataId=condition['data_id']))
-
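format_for_insertion() and format_for_deletion() produce the entries of the Updates list that update_rule expects. An illustrative payload, reusing sample IDs from the RETURN blocks in this commit (shape only, not captured output):

    update = {
        'RuleId': '15de0cbc-9204-4e1f-90e6-69b2f415c261',
        'Updates': [
            {'Action': 'INSERT',
             'Predicate': {'Negated': False,
                           'Type': 'ByteMatch',
                           'DataId': '8251acdb-526c-42a8-92bc-d3d13e584166'}},
            {'Action': 'DELETE',
             'Predicate': {'Negated': True,
                           'Type': 'GeoMatch',
                           'DataId': '47b822b5-abcd-1234-faaf-1234567890'}},
        ],
    }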
-
-def remove_rule_conditions(client, module, rule_id):
- conditions = get_rule(client, module, rule_id)['Predicates']
- updates = [format_for_deletion(camel_dict_to_snake_dict(condition)) for condition in conditions]
- try:
- run_func_with_change_token_backoff(client, module, {'RuleId': rule_id, 'Updates': updates}, client.update_rule)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not remove rule conditions')
-
-
-def ensure_rule_present(client, module):
- name = module.params['name']
- rule_id = get_rule_by_name(client, module, name)
- params = dict()
- if rule_id:
- return find_and_update_rule(client, module, rule_id)
- else:
- params['Name'] = module.params['name']
- metric_name = module.params['metric_name']
- if not metric_name:
- metric_name = re.sub(r'[^a-zA-Z0-9]', '', module.params['name'])
- params['MetricName'] = metric_name
- try:
- new_rule = run_func_with_change_token_backoff(client, module, params, client.create_rule)['Rule']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not create rule')
- return find_and_update_rule(client, module, new_rule['RuleId'])
-
-
-def find_rule_in_web_acls(client, module, rule_id):
- web_acls_in_use = []
- try:
- if client.__class__.__name__ == 'WAF':
- all_web_acls = list_web_acls_with_backoff(client)
- elif client.__class__.__name__ == 'WAFRegional':
- all_web_acls = list_regional_web_acls_with_backoff(client)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list Web ACLs')
- for web_acl in all_web_acls:
- try:
- web_acl_details = get_web_acl_with_backoff(client, web_acl['WebACLId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not get Web ACL details')
- if rule_id in [rule['RuleId'] for rule in web_acl_details['Rules']]:
- web_acls_in_use.append(web_acl_details['Name'])
- return web_acls_in_use
-
-
-def ensure_rule_absent(client, module):
- rule_id = get_rule_by_name(client, module, module.params['name'])
- in_use_web_acls = find_rule_in_web_acls(client, module, rule_id)
- if in_use_web_acls:
- web_acl_names = ', '.join(in_use_web_acls)
- module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" %
- (module.params['name'], web_acl_names))
- if rule_id:
- remove_rule_conditions(client, module, rule_id)
- try:
- return True, run_func_with_change_token_backoff(client, module, {'RuleId': rule_id}, client.delete_rule, wait=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not delete rule')
- return False, {}
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- metric_name=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- conditions=dict(type='list'),
- purge_conditions=dict(type='bool', default=False),
- waf_regional=dict(type='bool', default=False),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec)
- state = module.params.get('state')
-
- resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
- client = module.client(resource)
- if state == 'present':
- (changed, results) = ensure_rule_present(client, module)
- else:
- (changed, results) = ensure_rule_absent(client, module)
-
- module.exit_json(changed=changed, rule=camel_dict_to_snake_dict(results))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/aws_waf_web_acl.py b/lib/ansible/modules/cloud/amazon/aws_waf_web_acl.py
deleted file mode 100644
index d6cd4dea26..0000000000
--- a/lib/ansible/modules/cloud/amazon/aws_waf_web_acl.py
+++ /dev/null
@@ -1,359 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: aws_waf_web_acl
-short_description: Create and delete WAF Web ACLs.
-description:
- - Read the AWS documentation for WAF
- U(https://aws.amazon.com/documentation/waf/).
-version_added: "2.5"
-
-author:
- - Mike Mochan (@mmochan)
- - Will Thames (@willthames)
-extends_documentation_fragment:
- - aws
- - ec2
-options:
- name:
- description: Name of the Web Application Firewall ACL to manage.
- required: yes
- type: str
- default_action:
- description: The action that you want AWS WAF to take when a request doesn't
- match the criteria specified in any of the Rule objects that are associated with the WebACL.
- choices:
- - block
- - allow
- - count
- type: str
- state:
- description: Whether the Web ACL should be present or absent.
- choices:
- - present
- - absent
- default: present
- type: str
- metric_name:
- description:
- - A friendly name or description for the metrics for this WebACL.
- - The name can contain only alphanumeric characters (A-Z, a-z, 0-9); the name can't contain whitespace.
- - You can't change I(metric_name) after you create the WebACL.
- - Metric name will default to I(name) with disallowed characters stripped out.
- type: str
- rules:
- description:
- - A list of rules that the Web ACL will enforce.
- type: list
- elements: dict
- suboptions:
- name:
- description: Name of the rule.
- type: str
- required: true
- action:
- description: The action to perform.
- type: str
- required: true
- priority:
- description: The priority of the action. Priorities must be unique. Lower numbered priorities are evaluated first.
- type: int
- required: true
- type:
- description: The type of rule.
- choices:
- - rate_based
- - regular
- type: str
- purge_rules:
- description:
- - Whether to remove rules that aren't passed with I(rules).
- default: False
- type: bool
- waf_regional:
-    description: Whether to use the waf-regional module.
- default: false
- required: no
- type: bool
- version_added: "2.9"
-'''
-
-EXAMPLES = '''
- - name: create web ACL
- aws_waf_web_acl:
- name: my_web_acl
- rules:
- - name: my_rule
- priority: 1
- action: block
- default_action: block
- purge_rules: yes
- state: present
-
- - name: delete the web acl
- aws_waf_web_acl:
- name: my_web_acl
- state: absent
-'''
-
-RETURN = '''
-web_acl:
-  description: Contents of the Web ACL.
- returned: always
- type: complex
- contains:
- default_action:
- description: Default action taken by the Web ACL if no rules match.
- returned: always
- type: dict
- sample:
- type: BLOCK
- metric_name:
- description: Metric name used as an identifier.
- returned: always
- type: str
- sample: mywebacl
- name:
- description: Friendly name of the Web ACL.
- returned: always
- type: str
- sample: my web acl
- rules:
- description: List of rules.
- returned: always
- type: complex
- contains:
- action:
- description: Action taken by the WAF when the rule matches.
- returned: always
- type: complex
- sample:
- type: ALLOW
- priority:
-          description: Priority number of the rule (lower numbers are run first).
- returned: always
- type: int
- sample: 2
- rule_id:
- description: Rule ID.
- returned: always
- type: str
- sample: a6fc7ab5-287b-479f-8004-7fd0399daf75
- type:
- description: Type of rule (either REGULAR or RATE_BASED).
- returned: always
- type: str
- sample: REGULAR
- web_acl_id:
- description: Unique identifier of Web ACL.
- returned: always
- type: str
- sample: 10fff965-4b6b-46e2-9d78-24f6d2e2d21c
-'''
-
-try:
- import botocore
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-import re
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.waiters import get_waiter
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible.module_utils.aws.waf import list_rules_with_backoff, list_web_acls_with_backoff, list_regional_web_acls_with_backoff, \
- run_func_with_change_token_backoff, list_regional_rules_with_backoff
-
-
-def get_web_acl_by_name(client, module, name):
- acls = [d['WebACLId'] for d in list_web_acls(client, module) if d['Name'] == name]
- if acls:
- return acls[0]
- else:
- return acls
-
-
-def create_rule_lookup(client, module):
- if client.__class__.__name__ == 'WAF':
- try:
- rules = list_rules_with_backoff(client)
- return dict((rule['Name'], rule) for rule in rules)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list rules')
- elif client.__class__.__name__ == 'WAFRegional':
- try:
- rules = list_regional_rules_with_backoff(client)
- return dict((rule['Name'], rule) for rule in rules)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not list regional rules')
-
-
-def get_web_acl(client, module, web_acl_id):
- try:
- return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not get Web ACL with id %s' % web_acl_id)
-
-
-def list_web_acls(client, module):
- if client.__class__.__name__ == 'WAF':
- try:
- return list_web_acls_with_backoff(client)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not get Web ACLs')
- elif client.__class__.__name__ == 'WAFRegional':
- try:
- return list_regional_web_acls_with_backoff(client)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not get Web ACLs')
-
-
-def find_and_update_web_acl(client, module, web_acl_id):
- acl = get_web_acl(client, module, web_acl_id)
- rule_lookup = create_rule_lookup(client, module)
- existing_rules = acl['Rules']
- desired_rules = [{'RuleId': rule_lookup[rule['name']]['RuleId'],
- 'Priority': rule['priority'],
- 'Action': {'Type': rule['action'].upper()},
- 'Type': rule.get('type', 'regular').upper()}
- for rule in module.params['rules']]
- missing = [rule for rule in desired_rules if rule not in existing_rules]
- extras = []
- if module.params['purge_rules']:
- extras = [rule for rule in existing_rules if rule not in desired_rules]
-
- insertions = [format_for_update(rule, 'INSERT') for rule in missing]
- deletions = [format_for_update(rule, 'DELETE') for rule in extras]
- changed = bool(insertions + deletions)
-
- # Purge rules before adding new ones in case a deletion shares the same
- # priority as an insertion.
- params = {
- 'WebACLId': acl['WebACLId'],
- 'DefaultAction': acl['DefaultAction']
- }
- change_tokens = []
- if deletions:
- try:
- params['Updates'] = deletions
- result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
- change_tokens.append(result['ChangeToken'])
- get_waiter(
- client, 'change_token_in_sync',
- ).wait(
- ChangeToken=result['ChangeToken']
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not update Web ACL')
- if insertions:
- try:
- params['Updates'] = insertions
- result = run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
- change_tokens.append(result['ChangeToken'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not update Web ACL')
- if change_tokens:
- for token in change_tokens:
- get_waiter(
- client, 'change_token_in_sync',
- ).wait(
- ChangeToken=token
- )
- if changed:
- acl = get_web_acl(client, module, web_acl_id)
- return changed, acl
-
-
-def format_for_update(rule, action):
- return dict(
- Action=action,
- ActivatedRule=dict(
- Priority=rule['Priority'],
- RuleId=rule['RuleId'],
- Action=dict(
- Type=rule['Action']['Type']
- )
- )
- )
-
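For the Web ACL case, format_for_update() wraps each rule in an ActivatedRule entry for update_web_acl. An illustrative INSERT entry, reusing the sample rule ID from the RETURN documentation above (shape only):

    {'Action': 'INSERT',
     'ActivatedRule': {'Priority': 1,
                       'RuleId': 'a6fc7ab5-287b-479f-8004-7fd0399daf75',
                       'Action': {'Type': 'BLOCK'}}}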
-
-def remove_rules_from_web_acl(client, module, web_acl_id):
- acl = get_web_acl(client, module, web_acl_id)
- deletions = [format_for_update(rule, 'DELETE') for rule in acl['Rules']]
- try:
- params = {'WebACLId': acl['WebACLId'], 'DefaultAction': acl['DefaultAction'], 'Updates': deletions}
- run_func_with_change_token_backoff(client, module, params, client.update_web_acl)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not remove rule')
-
-
-def ensure_web_acl_present(client, module):
- changed = False
- result = None
- name = module.params['name']
- web_acl_id = get_web_acl_by_name(client, module, name)
- if web_acl_id:
- (changed, result) = find_and_update_web_acl(client, module, web_acl_id)
- else:
- metric_name = module.params['metric_name']
- if not metric_name:
- metric_name = re.sub(r'[^A-Za-z0-9]', '', module.params['name'])
- default_action = module.params['default_action'].upper()
- try:
- params = {'Name': name, 'MetricName': metric_name, 'DefaultAction': {'Type': default_action}}
- new_web_acl = run_func_with_change_token_backoff(client, module, params, client.create_web_acl)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not create Web ACL')
- (changed, result) = find_and_update_web_acl(client, module, new_web_acl['WebACL']['WebACLId'])
- return changed, result
-
-
-def ensure_web_acl_absent(client, module):
- web_acl_id = get_web_acl_by_name(client, module, module.params['name'])
- if web_acl_id:
- web_acl = get_web_acl(client, module, web_acl_id)
- if web_acl['Rules']:
- remove_rules_from_web_acl(client, module, web_acl_id)
- try:
- run_func_with_change_token_backoff(client, module, {'WebACLId': web_acl_id}, client.delete_web_acl, wait=True)
- return True, {}
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Could not delete Web ACL')
- return False, {}
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- default_action=dict(choices=['block', 'allow', 'count']),
- metric_name=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- rules=dict(type='list'),
- purge_rules=dict(type='bool', default=False),
- waf_regional=dict(type='bool', default=False)
- )
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[['state', 'present', ['default_action', 'rules']]])
- state = module.params.get('state')
-
- resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
- client = module.client(resource)
- if state == 'present':
- (changed, results) = ensure_web_acl_present(client, module)
- else:
- (changed, results) = ensure_web_acl_absent(client, module)
-
- module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/cloudformation_exports_info.py b/lib/ansible/modules/cloud/amazon/cloudformation_exports_info.py
deleted file mode 100644
index a8f79774f0..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudformation_exports_info.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: cloudformation_exports_info
-short_description: Read values from CloudFormation Exports
-description:
-  - This module retrieves values from CloudFormation Exports.
-requirements: ['boto3 >= 1.11.15']
-version_added: "2.10"
-author:
- - "Michael Moyle (@mmoyle)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Get Exports
- cloudformation_exports_info:
- profile: 'my_aws_profile'
- region: 'my_region'
- register: cf_exports
-- debug:
- msg: "{{ cf_exports }}"
-'''
-
-RETURN = '''
-export_items:
-  description: A dictionary of export item names and values.
-  returned: always
- type: dict
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import AWSRetry
-
-try:
- from botocore.exceptions import ClientError
- from botocore.exceptions import BotoCoreError
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-
-@AWSRetry.exponential_backoff()
-def list_exports(cloudformation_client):
- '''Get Exports Names and Values and return in dictionary '''
- list_exports_paginator = cloudformation_client.get_paginator('list_exports')
- exports = list_exports_paginator.paginate().build_full_result()['Exports']
- export_items = dict()
-
- for item in exports:
- export_items[item['Name']] = item['Value']
-
- return export_items
-
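The same paginate-and-flatten pattern can be exercised directly against boto3 outside of AnsibleAWSModule; a minimal standalone sketch (the region name is a placeholder):

    import boto3

    client = boto3.client('cloudformation', region_name='us-east-1')
    # Build {export name: export value} across all pages of ListExports.
    exports = {item['Name']: item['Value']
               for page in client.get_paginator('list_exports').paginate()
               for item in page['Exports']}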
-
-def main():
- argument_spec = dict()
-    result = dict(
-        changed=False
-    )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
- cloudformation_client = module.client('cloudformation')
-
- try:
- result['export_items'] = list_exports(cloudformation_client)
-
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/cloudformation_stack_set.py b/lib/ansible/modules/cloud/amazon/cloudformation_stack_set.py
deleted file mode 100644
index ba6c2576f4..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudformation_stack_set.py
+++ /dev/null
@@ -1,724 +0,0 @@
-#!/usr/bin/python
-# Copyright: (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: cloudformation_stack_set
-short_description: Manage groups of CloudFormation stacks
-description:
- - Launches/updates/deletes AWS CloudFormation Stack Sets.
-notes:
- - To make an individual stack, you want the M(cloudformation) module.
-version_added: "2.7"
-options:
- name:
- description:
- - Name of the CloudFormation stack set.
- required: true
- type: str
- description:
- description:
- - A description of what this stack set creates.
- type: str
- parameters:
- description:
-      - A dictionary of all the template variables for the stack. The value can be a string or a dict.
-      - A dict value can be used to set additional template parameter attributes like UsePreviousValue (see example).
- default: {}
- type: dict
- state:
- description:
-      - If I(state=present), the stack set will be created. If I(state=present) and the stack set exists and the template has changed, it will be updated.
-        If I(state=absent), the stack set will be removed.
- default: present
- choices: [ present, absent ]
- type: str
- template:
- description:
- - The local path of the CloudFormation template.
- - This must be the full path to the file, relative to the working directory. If using roles this may look
- like C(roles/cloudformation/files/cloudformation-example.json).
- - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
- must be specified (but only one of them).
- - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
- are specified, the previous template will be reused.
- type: path
- template_body:
- description:
- - Template body. Use this to pass in the actual body of the CloudFormation template.
- - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
- must be specified (but only one of them).
- - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
- are specified, the previous template will be reused.
- type: str
- template_url:
- description:
- - Location of file containing the template body.
- - The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region
- as the stack.
- - If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
- must be specified (but only one of them).
- - If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
- are specified, the previous template will be reused.
- type: str
- purge_stacks:
- description:
- - Only applicable when I(state=absent). Sets whether, when deleting a stack set, the stack instances should also be deleted.
- - By default, instances will be deleted. To keep stacks when stack set is deleted set I(purge_stacks=false).
- type: bool
- default: true
- wait:
- description:
- - Whether or not to wait for stack operation to complete. This includes waiting for stack instances to reach UPDATE_COMPLETE status.
- - If you choose not to wait, this module will not notify when stack operations fail because it will not wait for them to finish.
- type: bool
- default: false
- wait_timeout:
- description:
- - How long to wait (in seconds) for stacks to complete create/update/delete operations.
- default: 900
- type: int
- capabilities:
- description:
- - Capabilities allow stacks to create and modify IAM resources, which may include adding users or roles.
- - Currently the only available values are 'CAPABILITY_IAM' and 'CAPABILITY_NAMED_IAM'. Either or both may be provided.
- - >
- The following resources require that one or both of these parameters is specified: AWS::IAM::AccessKey,
- AWS::IAM::Group, AWS::IAM::InstanceProfile, AWS::IAM::Policy, AWS::IAM::Role, AWS::IAM::User, AWS::IAM::UserToGroupAddition
- type: list
- elements: str
- choices:
- - 'CAPABILITY_IAM'
- - 'CAPABILITY_NAMED_IAM'
- regions:
- description:
- - A list of AWS regions to create instances of a stack in. The I(region) parameter chooses where the Stack Set is created, and I(regions)
- specifies the region for stack instances.
- - At least one region must be specified to create a stack set. On updates, if fewer regions are specified only the specified regions will
- have their stack instances updated.
- type: list
- elements: str
- accounts:
- description:
- - A list of AWS accounts in which to create instance of CloudFormation stacks.
-      - At least one account must be specified to create a stack set. On updates, if fewer accounts are specified only the specified accounts will
-        have their stack instances updated.
- type: list
- elements: str
- administration_role_arn:
- description:
- - ARN of the administration role, meaning the role that CloudFormation Stack Sets use to assume the roles in your child accounts.
- - This defaults to C(arn:aws:iam::{{ account ID }}:role/AWSCloudFormationStackSetAdministrationRole) where C({{ account ID }}) is replaced with the
- account number of the current IAM role/user/STS credentials.
- aliases:
- - admin_role_arn
- - admin_role
- - administration_role
- type: str
- execution_role_name:
- description:
-      - Name of the execution role, meaning the role that CloudFormation Stack Sets assumes in your child accounts.
-      - This MUST NOT be an ARN, and the roles must exist in each child account specified.
-      - The default name for the execution role is C(AWSCloudFormationStackSetExecutionRole).
- aliases:
- - exec_role_name
- - exec_role
- - execution_role
- type: str
- tags:
- description:
- - Dictionary of tags to associate with stack and its resources during stack creation.
- - Can be updated later, updating tags removes previous entries.
- type: dict
- failure_tolerance:
- description:
- - Settings to change what is considered "failed" when running stack instance updates, and how many to do at a time.
- type: dict
- suboptions:
- fail_count:
- description:
- - The number of accounts, per region, for which this operation can fail before CloudFormation
- stops the operation in that region.
- - You must specify one of I(fail_count) and I(fail_percentage).
- type: int
- fail_percentage:
- type: int
- description:
- - The percentage of accounts, per region, for which this stack operation can fail before CloudFormation
- stops the operation in that region.
- - You must specify one of I(fail_count) and I(fail_percentage).
- parallel_percentage:
- type: int
- description:
- - The maximum percentage of accounts in which to perform this operation at one time.
- - You must specify one of I(parallel_count) and I(parallel_percentage).
- - Note that this setting lets you specify the maximum for operations.
- For large deployments, under certain circumstances the actual percentage may be lower.
- parallel_count:
- type: int
- description:
- - The maximum number of accounts in which to perform this operation at one time.
- - I(parallel_count) may be at most one more than the I(fail_count).
- - You must specify one of I(parallel_count) and I(parallel_percentage).
- - Note that this setting lets you specify the maximum for operations.
- For large deployments, under certain circumstances the actual count may be lower.
-
-author: "Ryan Scott Brown (@ryansb)"
-extends_documentation_fragment:
-- aws
-- ec2
-requirements: [ boto3>=1.6, botocore>=1.10.26 ]
-'''
-
-EXAMPLES = '''
-- name: Create a stack set with instances in two accounts
- cloudformation_stack_set:
- name: my-stack
- description: Test stack in two accounts
- state: present
- template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
- accounts: [1234567890, 2345678901]
- regions:
- - us-east-1
-
-- name: on subsequent calls, templates are optional but parameters and tags can be altered
- cloudformation_stack_set:
- name: my-stack
- state: present
- parameters:
- InstanceName: my_stacked_instance
- tags:
- foo: bar
- test: stack
- accounts: [1234567890, 2345678901]
- regions:
- - us-east-1
-
-- name: The same type of update, but wait for the update to complete in all stacks
- cloudformation_stack_set:
- name: my-stack
- state: present
- wait: true
- parameters:
- InstanceName: my_restacked_instance
- tags:
- foo: bar
- test: stack
- accounts: [1234567890, 2345678901]
- regions:
- - us-east-1
-'''
-
-RETURN = '''
-operations_log:
- type: list
- description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
- returned: always
- sample:
- - action: CREATE
- creation_timestamp: '2018-06-18T17:40:46.372000+00:00'
- end_timestamp: '2018-06-18T17:41:24.560000+00:00'
- operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8
- status: FAILED
- stack_instances:
- - account: '1234567890'
- region: us-east-1
- stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
- status: OUTDATED
- status_reason: Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service.
-
-operations:
- description: All operations initiated by this run of the cloudformation_stack_set module
- returned: always
- type: list
- sample:
- - action: CREATE
- administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole
- creation_timestamp: '2018-06-18T17:40:46.372000+00:00'
- end_timestamp: '2018-06-18T17:41:24.560000+00:00'
- execution_role_name: AWSCloudFormationStackSetExecutionRole
- operation_id: Ansible-StackInstance-Create-0ff2af5b-251d-4fdb-8b89-1ee444eba8b8
- operation_preferences:
- region_order:
- - us-east-1
- - us-east-2
- stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
- status: FAILED
-stack_instances:
- description: CloudFormation stack instances that are members of this stack set. This will also include their region and account ID.
- returned: state == present
- type: list
- sample:
- - account: '1234567890'
- region: us-east-1
- stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
- status: OUTDATED
- status_reason: >
- Account 1234567890 should have 'AWSCloudFormationStackSetAdministrationRole' role with trust relationship to CloudFormation service.
- - account: '1234567890'
- region: us-east-2
- stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
- status: OUTDATED
- status_reason: Cancelled since failure tolerance has exceeded
-stack_set:
- type: dict
- description: Facts about the currently deployed stack set, its parameters, and its tags
- returned: state == present
- sample:
- administration_role_arn: arn:aws:iam::1234567890:role/AWSCloudFormationStackSetAdministrationRole
- capabilities: []
- description: test stack PRIME
- execution_role_name: AWSCloudFormationStackSetExecutionRole
- parameters: []
-    stack_set_arn: arn:aws:cloudformation:us-east-1:1234567890:stackset/TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
- stack_set_id: TestStackPrime:19f3f684-aae9-4e67-ba36-e09f92cf5929
- stack_set_name: TestStackPrime
- status: ACTIVE
- tags:
- Some: Thing
- an: other
- template_body: |
- AWSTemplateFormatVersion: "2010-09-09"
- Parameters: {}
- Resources:
- Bukkit:
- Type: "AWS::S3::Bucket"
- Properties: {}
- other:
- Type: "AWS::SNS::Topic"
- Properties: {}
-
-''' # NOQA
-
-import time
-import datetime
-import uuid
-import itertools
-
-try:
- import boto3
- import botocore.exceptions
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- # handled by AnsibleAWSModule
- pass
-
-from ansible.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, camel_dict_to_snake_dict
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils._text import to_native
-
-
-def create_stack_set(module, stack_params, cfn):
- try:
- cfn.create_stack_set(aws_retry=True, **stack_params)
- return await_stack_set_exists(cfn, stack_params['StackSetName'])
- except (ClientError, BotoCoreError) as err:
- module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get('StackSetName')))
-
-
-def update_stack_set(module, stack_params, cfn):
- # if the state is present and the stack already exists, we try to update it.
- # AWS will tell us if the stack template and parameters are the same and
- # don't need to be updated.
- try:
- cfn.update_stack_set(**stack_params)
- except is_boto3_error_code('StackSetNotFound') as err: # pylint: disable=duplicate-except
- module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.")
- except is_boto3_error_code('StackInstanceNotFound') as err: # pylint: disable=duplicate-except
- module.fail_json_aws(err, msg="One or more stack instances were not found for this stack set. Double check "
- "the `accounts` and `regions` parameters.")
- except is_boto3_error_code('OperationInProgressException') as err: # pylint: disable=duplicate-except
- module.fail_json_aws(
- err, msg="Another operation is already in progress on this stack set - please try again later. When making "
- "multiple cloudformation_stack_set calls, it's best to enable `wait: yes` to avoid unfinished op errors.")
- except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except
- module.fail_json_aws(err, msg="Could not update stack set.")
- if module.params.get('wait'):
- await_stack_set_operation(
- module, cfn, operation_id=stack_params['OperationId'],
- stack_set_name=stack_params['StackSetName'],
- max_wait=module.params.get('wait_timeout'),
- )
-
- return True
-
-
-def compare_stack_instances(cfn, stack_set_name, accounts, regions):
- instance_list = cfn.list_stack_instances(
- aws_retry=True,
- StackSetName=stack_set_name,
- )['Summaries']
- desired_stack_instances = set(itertools.product(accounts, regions))
- existing_stack_instances = set((i['Account'], i['Region']) for i in instance_list)
- # new stacks, existing stacks, unspecified stacks
- return (desired_stack_instances - existing_stack_instances), existing_stack_instances, (existing_stack_instances - desired_stack_instances)
-
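compare_stack_instances() reduces the desired and existing deployments to sets of (account, region) pairs, so the new/unspecified split is plain set arithmetic; the middle return value is the existing set as-is. A hedged, self-contained illustration with made-up account IDs:

    import itertools

    accounts = ['111111111111', '222222222222']
    regions = ['us-east-1']
    desired = set(itertools.product(accounts, regions))
    existing = {('111111111111', 'us-east-1'), ('333333333333', 'us-east-1')}

    new = desired - existing          # {('222222222222', 'us-east-1')}
    unspecified = existing - desired  # {('333333333333', 'us-east-1')}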
-
-@AWSRetry.backoff(tries=3, delay=4)
-def stack_set_facts(cfn, stack_set_name):
- try:
- ss = cfn.describe_stack_set(StackSetName=stack_set_name)['StackSet']
- ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags'])
- return ss
- except cfn.exceptions.from_code('StackSetNotFound'):
- # Return None if the stack doesn't exist
- return
-
-
-def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wait):
- wait_start = datetime.datetime.now()
- operation = None
- for i in range(max_wait // 15):
- try:
- operation = cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id)
- if operation['StackSetOperation']['Status'] not in ('RUNNING', 'STOPPING'):
- # Stack set has completed operation
- break
- except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except
- pass
- except is_boto3_error_code('OperationNotFound'): # pylint: disable=duplicate-except
- pass
- time.sleep(15)
-
- if operation and operation['StackSetOperation']['Status'] not in ('FAILED', 'STOPPED'):
- await_stack_instance_completion(
- module, cfn,
- stack_set_name=stack_set_name,
- # subtract however long we waited already
- max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()),
- )
- elif operation and operation['StackSetOperation']['Status'] in ('FAILED', 'STOPPED'):
- pass
- else:
- module.warn(
- "Timed out waiting for operation {0} on stack set {1} after {2} seconds. Returning unfinished operation".format(
- operation_id, stack_set_name, max_wait
- )
- )
-
-
-def await_stack_instance_completion(module, cfn, stack_set_name, max_wait):
- to_await = None
- for i in range(max_wait // 15):
- try:
- stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name)
- to_await = [inst for inst in stack_instances['Summaries']
- if inst['Status'] != 'CURRENT']
- if not to_await:
- return stack_instances['Summaries']
- except is_boto3_error_code('StackSetNotFound'): # pylint: disable=duplicate-except
- # this means the deletion beat us, or the stack set is not yet propagated
- pass
- time.sleep(15)
-
- module.warn(
- "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format(
- stack_set_name, ', '.join(s['StackId'] for s in to_await), max_wait
- )
- )
-
-
-def await_stack_set_exists(cfn, stack_set_name):
- # AWSRetry will retry on `StackSetNotFound` errors for us
- ss = cfn.describe_stack_set(StackSetName=stack_set_name, aws_retry=True)['StackSet']
- ss['Tags'] = boto3_tag_list_to_ansible_dict(ss['Tags'])
- return camel_dict_to_snake_dict(ss, ignore_list=('Tags',))
-
-
-def describe_stack_tree(module, stack_set_name, operation_ids=None):
- jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5, catch_extra_error_codes=['StackSetNotFound'])
- cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
- result = dict()
- result['stack_set'] = camel_dict_to_snake_dict(
- cfn.describe_stack_set(
- StackSetName=stack_set_name,
- aws_retry=True,
- )['StackSet']
- )
- result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags'])
- result['operations_log'] = sorted(
- camel_dict_to_snake_dict(
- cfn.list_stack_set_operations(
- StackSetName=stack_set_name,
- aws_retry=True,
- )
- )['summaries'],
- key=lambda x: x['creation_timestamp']
- )
- result['stack_instances'] = sorted(
- [
- camel_dict_to_snake_dict(i) for i in
- cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries']
- ],
- key=lambda i: i['region'] + i['account']
- )
-
- if operation_ids:
- result['operations'] = []
- for op_id in operation_ids:
- try:
- result['operations'].append(camel_dict_to_snake_dict(
- cfn.describe_stack_set_operation(
- StackSetName=stack_set_name,
- OperationId=op_id,
- )['StackSetOperation']
- ))
- except is_boto3_error_code('OperationNotFoundException'): # pylint: disable=duplicate-except
- pass
- return result
-
-
-def get_operation_preferences(module):
- params = dict()
- if module.params.get('regions'):
- params['RegionOrder'] = list(module.params['regions'])
- for param, api_name in {
- 'fail_count': 'FailureToleranceCount',
- 'fail_percentage': 'FailureTolerancePercentage',
- 'parallel_percentage': 'MaxConcurrentPercentage',
- 'parallel_count': 'MaxConcurrentCount',
- }.items():
- if module.params.get('failure_tolerance', {}).get(param):
- params[api_name] = module.params.get('failure_tolerance', {}).get(param)
- return params
-
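get_operation_preferences() simply renames the failure_tolerance suboptions into the CloudFormation OperationPreferences keys. A hypothetical input/output pair (values invented for illustration):

    # module.params = {'regions': ['us-east-1', 'us-east-2'],
    #                  'failure_tolerance': {'fail_count': 1, 'parallel_count': 2}}
    #
    # get_operation_preferences(module) would then return:
    # {'RegionOrder': ['us-east-1', 'us-east-2'],
    #  'FailureToleranceCount': 1,
    #  'MaxConcurrentCount': 2}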
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- description=dict(),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=900),
- state=dict(default='present', choices=['present', 'absent']),
- purge_stacks=dict(type='bool', default=True),
- parameters=dict(type='dict', default={}),
- template=dict(type='path'),
- template_url=dict(),
- template_body=dict(),
- capabilities=dict(type='list', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
- regions=dict(type='list'),
- accounts=dict(type='list'),
- failure_tolerance=dict(
- type='dict',
- default={},
- options=dict(
- fail_count=dict(type='int'),
- fail_percentage=dict(type='int'),
- parallel_percentage=dict(type='int'),
- parallel_count=dict(type='int'),
- ),
- mutually_exclusive=[
- ['fail_count', 'fail_percentage'],
- ['parallel_count', 'parallel_percentage'],
- ],
- ),
- administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']),
- execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']),
- tags=dict(type='dict'),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- mutually_exclusive=[['template_url', 'template', 'template_body']],
- supports_check_mode=True
- )
- if not (module.boto3_at_least('1.6.0') and module.botocore_at_least('1.10.26')):
- module.fail_json(msg="Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26")
-
- # Wrap the cloudformation client methods that this module uses with
- # automatic backoff / retry for throttling error codes
- jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30, catch_extra_error_codes=['StackSetNotFound'])
- cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
- existing_stack_set = stack_set_facts(cfn, module.params['name'])
-
- operation_uuid = to_native(uuid.uuid4())
- operation_ids = []
- # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
- stack_params = {}
- state = module.params['state']
- if state == 'present' and not module.params['accounts']:
- module.fail_json(
- msg="Can't create a stack set without choosing at least one account. "
- "To get the ID of the current account, use the aws_caller_info module."
- )
-
-    if module.params['accounts']:
-        module.params['accounts'] = [to_native(a) for a in module.params['accounts']]
-
- stack_params['StackSetName'] = module.params['name']
- if module.params.get('description'):
- stack_params['Description'] = module.params['description']
-
- if module.params.get('capabilities'):
- stack_params['Capabilities'] = module.params['capabilities']
-
- if module.params['template'] is not None:
- with open(module.params['template'], 'r') as tpl:
- stack_params['TemplateBody'] = tpl.read()
- elif module.params['template_body'] is not None:
- stack_params['TemplateBody'] = module.params['template_body']
- elif module.params['template_url'] is not None:
- stack_params['TemplateURL'] = module.params['template_url']
- else:
- # no template is provided, but if the stack set exists already, we can use the existing one.
- if existing_stack_set:
- stack_params['UsePreviousTemplate'] = True
- else:
- module.fail_json(
- msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
- "`template_body`, or `template_url`".format(module.params['name'])
- )
-
- stack_params['Parameters'] = []
- for k, v in module.params['parameters'].items():
- if isinstance(v, dict):
- # set parameter based on a dict to allow additional CFN Parameter Attributes
- param = dict(ParameterKey=k)
-
- if 'value' in v:
- param['ParameterValue'] = to_native(v['value'])
-
- if 'use_previous_value' in v and bool(v['use_previous_value']):
- param['UsePreviousValue'] = True
- param.pop('ParameterValue', None)
-
- stack_params['Parameters'].append(param)
- else:
- # allow default k/v configuration to set a template parameter
- stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
-
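The loop above accepts both plain scalars and the dict form documented for I(parameters). A hedged example of the mapping it performs (parameter names invented):

    # parameters:
    #   InstanceName: my_stacked_instance
    #   KeepMe: {use_previous_value: true}
    #
    # becomes:
    # [{'ParameterKey': 'InstanceName', 'ParameterValue': 'my_stacked_instance'},
    #  {'ParameterKey': 'KeepMe', 'UsePreviousValue': True}]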
- if module.params.get('tags') and isinstance(module.params.get('tags'), dict):
- stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
-
- if module.params.get('administration_role_arn'):
- # TODO loosen the semantics here to autodetect the account ID and build the ARN
- stack_params['AdministrationRoleARN'] = module.params['administration_role_arn']
- if module.params.get('execution_role_name'):
- stack_params['ExecutionRoleName'] = module.params['execution_role_name']
-
- result = {}
-
- if module.check_mode:
- if state == 'absent' and existing_stack_set:
- module.exit_json(changed=True, msg='Stack set would be deleted', meta=[])
- elif state == 'absent' and not existing_stack_set:
- module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[])
- elif state == 'present' and not existing_stack_set:
- module.exit_json(changed=True, msg='New stack set would be created', meta=[])
- elif state == 'present' and existing_stack_set:
- new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
- cfn,
- module.params['name'],
- module.params['accounts'],
- module.params['regions'],
- )
- if new_stacks:
- module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[])
-            elif unspecified_stacks and module.params.get('purge_stacks'):
- module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[])
- else:
- # TODO: need to check the template and other settings for correct check mode
- module.exit_json(changed=False, msg='No changes detected', meta=[])
-
- changed = False
- if state == 'present':
- if not existing_stack_set:
- # on create this parameter has a different name, and cannot be referenced later in the job log
- stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
- changed = True
- create_stack_set(module, stack_params, cfn)
- else:
- stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid)
- operation_ids.append(stack_params['OperationId'])
- if module.params.get('regions'):
- stack_params['OperationPreferences'] = get_operation_preferences(module)
- changed |= update_stack_set(module, stack_params, cfn)
-
- # now create/update any appropriate stack instances
- new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
- cfn,
- module.params['name'],
- module.params['accounts'],
- module.params['regions'],
- )
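- # compare_stack_instances returns three sets of (account_id, region) tuples:
- # pairs requested but not yet deployed, pairs requested and already deployed,
- # and deployed pairs that were not requested (candidates for purging)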
- if new_stack_instances:
- operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid))
- changed = True
- cfn.create_stack_instances(
- StackSetName=module.params['name'],
- Accounts=list(set(acct for acct, region in new_stack_instances)),
- Regions=list(set(region for acct, region in new_stack_instances)),
- OperationPreferences=get_operation_preferences(module),
- OperationId=operation_ids[-1],
- )
- else:
- operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid))
- cfn.update_stack_instances(
- StackSetName=module.params['name'],
- Accounts=list(set(acct for acct, region in existing_stack_instances)),
- Regions=list(set(region for acct, region in existing_stack_instances)),
- OperationPreferences=get_operation_preferences(module),
- OperationId=operation_ids[-1],
- )
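- # operation IDs were queued in submission order; await each one in turn,
- # bounding every wait separately by wait_timeout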
- for op in operation_ids:
- await_stack_set_operation(
- module, cfn, operation_id=op,
- stack_set_name=module.params['name'],
- max_wait=module.params.get('wait_timeout'),
- )
-
- elif state == 'absent':
- if not existing_stack_set:
- module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name']))
- # no explicit purge here: if stack instances remain, delete_stack_set below
- # raises StackSetNotEmptyException and the handler removes them, retaining
- # the underlying stacks unless purge_stacks is set
- try:
- cfn.delete_stack_set(
- StackSetName=module.params['name'],
- )
- module.exit_json(msg='Stack set {0} deleted'.format(module.params['name']))
- except is_boto3_error_code('OperationInProgressException') as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name']))
- except is_boto3_error_code('StackSetNotEmptyException'): # pylint: disable=duplicate-except
- delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid)
- cfn.delete_stack_instances(
- StackSetName=module.params['name'],
- Accounts=module.params['accounts'],
- Regions=module.params['regions'],
- RetainStacks=(not module.params.get('purge_stacks')),
- OperationId=delete_instances_op
- )
- await_stack_set_operation(
- module, cfn, operation_id=delete_instances_op,
- stack_set_name=stack_params['StackSetName'],
- max_wait=module.params.get('wait_timeout'),
- )
- try:
- cfn.delete_stack_set(
- StackSetName=module.params['name'],
- )
- except is_boto3_error_code('StackSetNotEmptyException') as exc: # pylint: disable=duplicate-except
- # this time, it is likely that either the delete failed or there are more stacks.
- instances = cfn.list_stack_instances(
- StackSetName=module.params['name'],
- )
- stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i) for i in instances['Summaries'])
- module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states)
- module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))
-
- result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
- if any(o['status'] == 'FAILED' for o in result['operations']):
- module.fail_json(msg="One or more operations failed to execute", **result)
- module.exit_json(changed=changed, **result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/cloudfront_distribution.py b/lib/ansible/modules/cloud/amazon/cloudfront_distribution.py
deleted file mode 100644
index a2d2514a35..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudfront_distribution.py
+++ /dev/null
@@ -1,2264 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-
-module: cloudfront_distribution
-
-short_description: Create, update and delete AWS CloudFront distributions.
-
-description:
- - Allows for easy creation, updating and deletion of CloudFront distributions.
-
-requirements:
- - boto3 >= 1.0.0
- - python >= 2.6
-
-version_added: "2.5"
-
-author:
- - Willem van Ketwich (@wilvk)
- - Will Thames (@willthames)
-
-extends_documentation_fragment:
- - aws
- - ec2
-
-options:
-
- state:
- description:
- - The desired state of the distribution.
- - I(state=present) creates a new distribution or updates an existing distribution.
- - I(state=absent) deletes an existing distribution.
- choices: ['present', 'absent']
- default: 'present'
- type: str
-
- distribution_id:
- description:
- - The ID of the CloudFront distribution.
- - Can be used instead of I(alias) or I(caller_reference), and is used in conjunction with I(e_tag).
- type: str
-
- e_tag:
- description:
- - A unique identifier of a modified or existing distribution. Used in conjunction with I(distribution_id).
- - Is determined automatically if not specified.
- type: str
-
- caller_reference:
- description:
- - A unique identifier for creating and updating CloudFront distributions.
- - Each caller reference must be unique across all distributions, e.g. a caller reference used in a web
- distribution cannot be reused in a streaming distribution. This parameter can be used instead of I(distribution_id)
- to reference an existing distribution. If not specified, this defaults to a datetime stamp of the format
- C(YYYY-MM-DDTHH:MM:SS.ffffff).
- type: str
-
- tags:
- description:
- - Should be input as a dict of key-value pairs.
- - Note that numeric keys or values must be wrapped in quotes, e.g. C(Priority: '1').
- type: dict
-
- purge_tags:
- description:
- - Specifies whether existing tags will be removed before adding new tags.
- - When I(purge_tags=yes), existing tags are removed and I(tags) are added, if specified.
- If no tags are specified, it removes all existing tags for the distribution.
- - When I(purge_tags=no), existing tags are kept and I(tags) are added, if specified.
- default: false
- type: bool
-
- alias:
- description:
- - The name of an alias (CNAME) that is used in a distribution. This is used to effectively reference a distribution by its alias,
- as an alias can only be used by one distribution per AWS account. This variable avoids having to provide the I(distribution_id),
- I(e_tag), or I(caller_reference) of an existing distribution.
- type: str
-
- aliases:
- description:
- - A list of domain name aliases (CNAMEs) as strings to be used for the distribution.
- - Each alias must be unique across all distributions for the AWS account.
- type: list
- elements: str
-
- purge_aliases:
- description:
- - Specifies whether existing aliases will be removed before adding new aliases.
- - When I(purge_aliases=yes), existing aliases are removed and I(aliases) are added.
- default: false
- type: bool
-
- default_root_object:
- description:
- - A config element that specifies the path to request when the user requests the origin.
- - e.g. if specified as 'index.html', this maps to www.example.com/index.html when www.example.com is called by the user.
- - This prevents the entire distribution origin from being exposed at the root.
- type: str
-
- default_origin_domain_name:
- description:
- - The domain name to use for an origin if no I(origins) have been specified.
- - Should only be used on a first run of generating a distribution and not on
- subsequent runs.
- - Should not be used in conjunction with I(distribution_id), I(caller_reference) or I(alias).
- type: str
-
- default_origin_path:
- description:
- - The default origin path to specify for an origin if no I(origins) have been specified. Defaults to empty if not specified.
- type: str
-
- origins:
- type: list
- elements: dict
- description:
- - A config element that is a list of complex origin objects to be specified for the distribution. Used for creating and updating distributions.
- suboptions:
- id:
- description: A unique identifier for the origin or origin group. I(id) must be unique within the distribution.
- type: str
- domain_name:
- description:
- - The domain name which CloudFront will query as the origin.
- - For more information see the CloudFront documentation
- at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesDomainName)
- type: str
- origin_path:
- description: Tells CloudFront to request your content from a directory in your Amazon S3 bucket or your custom origin.
- type: str
- custom_headers:
- description:
- - Custom headers you wish to add to the request before passing it to the origin.
- - For more information see the CloudFront documentation
- at U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/forward-custom-headers.html)
- type: list
- elements: dict
- suboptions:
- header_name:
- description: The name of a header that you want CloudFront to forward to your origin.
- type: str
- header_value:
- description: The value for the header that you specified in the I(header_name) field.
- type: str
- s3_origin_access_identity_enabled:
- description:
- - Use an origin access identity to configure the origin so that viewers can only access objects in an Amazon S3 bucket through CloudFront.
- - Will automatically create an Identity for you.
- - See also U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html).
- type: bool
- custom_origin_config:
- description: Connection information about the origin.
- type: dict
- suboptions:
- http_port:
- description: The HTTP port the custom origin listens on.
- type: int
- https_port:
- description: The HTTPS port the custom origin listens on.
- type: int
- origin_protocol_policy:
- description: The origin protocol policy to apply to your origin.
- type: str
- origin_ssl_protocols:
- description: A list of SSL/TLS protocols that you want CloudFront to use when communicating to the origin over HTTPS.
- type: list
- elements: str
- origin_read_timeout:
- description: A timeout (in seconds) when reading from your origin.
- type: int
- origin_keepalive_timeout:
- description: A keep-alive timeout (in seconds).
- type: int
-
- purge_origins:
- description: Whether to remove any origins that aren't listed in I(origins).
- default: false
- type: bool
-
- default_cache_behavior:
- type: dict
- description:
- - A dict specifying the default cache behavior of the distribution.
- - If not specified, the I(target_origin_id) is defined as the I(target_origin_id) of the first valid
- I(cache_behavior) in I(cache_behaviors) with defaults.
- suboptions:
- target_origin_id:
- description:
- - The ID of the origin that you want CloudFront to route requests to
- by default.
- type: str
- forwarded_values:
- description:
- - A dict that specifies how CloudFront handles query strings and cookies.
- type: dict
- suboptions:
- query_string:
- description:
- - Indicates whether you want CloudFront to forward query strings
- to the origin that is associated with this cache behavior.
- type: bool
- cookies:
- description: A dict that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones.
- type: dict
- suboptions:
- forward:
- description:
- - Specifies which cookies to forward to the origin for this cache behavior.
- - Valid values are C(all), C(none), or C(whitelist).
- type: str
- whitelisted_names:
- type: list
- elements: str
- description: A list of cookies to forward to the origin for this cache behavior.
- headers:
- description:
- - A list of headers to forward to the origin for this cache behavior.
- - To forward all headers use a list containing a single element '*' (C(['*']))
- type: list
- elements: str
- query_string_cache_keys:
- description:
- - A list that contains the query string parameters you want CloudFront to use as a basis for caching for a cache behavior.
- type: list
- elements: str
- trusted_signers:
- description:
- - A dict that specifies the AWS accounts that you want to allow to create signed URLs for private content.
- type: dict
- suboptions:
- enabled:
- description: Whether you want to require viewers to use signed URLs to access the files specified by I(target_origin_id)
- type: bool
- items:
- description: A list of trusted signers for this cache behavior.
- elements: str
- type: list
- viewer_protocol_policy:
- description:
- - The protocol that viewers can use to access the files in the origin specified by I(target_origin_id).
- - Valid values are C(allow-all), C(redirect-to-https) and C(https-only).
- type: str
- default_ttl:
- description: The default amount of time that you want objects to stay in CloudFront caches.
- type: int
- max_ttl:
- description: The maximum amount of time that you want objects to stay in CloudFront caches.
- type: int
- min_ttl:
- description: The minimum amount of time that you want objects to stay in CloudFront caches.
- type: int
- allowed_methods:
- description: A dict that controls which HTTP methods CloudFront processes and forwards.
- type: dict
- suboptions:
- items:
- description: A list of HTTP methods that you want CloudFront to process and forward.
- type: list
- elements: str
- cached_methods:
- description:
- - A list of HTTP methods that you want CloudFront to apply caching to.
- - This can either be C([GET,HEAD]), or C([GET,HEAD,OPTIONS]).
- type: list
- elements: str
- smooth_streaming:
- description:
- - Whether you want to distribute media files in the Microsoft Smooth Streaming format.
- type: bool
- compress:
- description:
- - Whether you want CloudFront to automatically compress files.
- type: bool
- lambda_function_associations:
- description:
- - A list of Lambda function associations to use for this cache behavior.
- type: list
- elements: dict
- suboptions:
- lambda_function_arn:
- description: The ARN of the Lambda function.
- type: str
- event_type:
- description:
- - Specifies the event type that triggers a Lambda function invocation.
- - This can be C(viewer-request), C(origin-request), C(origin-response) or C(viewer-response).
- type: str
- field_level_encryption_id:
- description:
- - The field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data.
- type: str
-
- cache_behaviors:
- type: list
- elements: dict
- description:
- - A list of dictionaries describing the cache behaviors for the distribution.
- - The order of the list is preserved across runs unless I(purge_cache_behaviors) is enabled.
- suboptions:
- path_pattern:
- description:
- - The pattern that specifies which requests to apply the behavior to.
- type: str
- target_origin_id:
- description:
- - The ID of the origin that you want CloudFront to route requests to
- by default.
- type: str
- forwarded_values:
- description:
- - A dict that specifies how CloudFront handles query strings and cookies.
- type: dict
- suboptions:
- query_string:
- description:
- - Indicates whether you want CloudFront to forward query strings
- to the origin that is associated with this cache behavior.
- type: bool
- cookies:
- description: A dict that specifies whether you want CloudFront to forward cookies to the origin and, if so, which ones.
- type: dict
- suboptions:
- forward:
- description:
- - Specifies which cookies to forward to the origin for this cache behavior.
- - Valid values are C(all), C(none), or C(whitelist).
- type: str
- whitelisted_names:
- type: list
- elements: str
- description: A list of cookies to forward to the origin for this cache behavior.
- headers:
- description:
- - A list of headers to forward to the origin for this cache behavior.
- - To forward all headers use a list containing a single element '*' (C(['*']))
- type: list
- elements: str
- query_string_cache_keys:
- description:
- - A list that contains the query string parameters you want CloudFront to use as a basis for caching for a cache behavior.
- type: list
- elements: str
- trusted_signers:
- description:
- - A dict that specifies the AWS accounts that you want to allow to create signed URLs for private content.
- type: dict
- suboptions:
- enabled:
- description: Whether you want to require viewers to use signed URLs to access the files specified by I(path_pattern) and I(target_origin_id)
- type: bool
- items:
- description: A list of trusted signers for this cache behavior.
- elements: str
- type: list
- viewer_protocol_policy:
- description:
- - The protocol that viewers can use to access the files in the origin specified by I(target_origin_id) when a request matches I(path_pattern).
- - Valid values are C(allow-all), C(redirect-to-https) and C(https-only).
- type: str
- default_ttl:
- description: The default amount of time that you want objects to stay in CloudFront caches.
- type: int
- max_ttl:
- description: The maximum amount of time that you want objects to stay in CloudFront caches.
- type: int
- min_ttl:
- description: The minimum amount of time that you want objects to stay in CloudFront caches.
- type: int
- allowed_methods:
- description: A dict that controls which HTTP methods CloudFront processes and forwards.
- type: dict
- suboptions:
- items:
- description: A list of HTTP methods that you want CloudFront to process and forward.
- type: list
- elements: str
- cached_methods:
- description:
- - A list of HTTP methods that you want CloudFront to apply caching to.
- - This can either be C([GET,HEAD]), or C([GET,HEAD,OPTIONS]).
- type: list
- elements: str
- smooth_streaming:
- description:
- - Whether you want to distribute media files in the Microsoft Smooth Streaming format.
- type: bool
- compress:
- description:
- - Whether you want CloudFront to automatically compress files.
- type: bool
- lambda_function_associations:
- description:
- - A list of Lambda function associations to use for this cache behavior.
- type: list
- elements: dict
- suboptions:
- lambda_function_arn:
- description: The ARN of the Lambda function.
- type: str
- event_type:
- description:
- - Specifies the event type that triggers a Lambda function invocation.
- - This can be C(viewer-request), C(origin-request), C(origin-response) or C(viewer-response).
- type: str
- field_level_encryption_id:
- description:
- - The field-level encryption configuration that you want CloudFront to use for encrypting specific fields of data.
- type: str
-
-
- purge_cache_behaviors:
- description:
- - Whether to remove any cache behaviors that aren't listed in I(cache_behaviors).
- - This switch also allows the reordering of I(cache_behaviors).
- default: false
- type: bool
-
- custom_error_responses:
- type: list
- elements: dict
- description:
- - A config element that is a list of complex custom error responses to be specified for the distribution.
- - This attribute configures custom HTTP error messages returned to the user.
- suboptions:
- error_code:
- type: int
- description: The error code the custom error page is for.
- error_caching_min_ttl:
- type: int
- description: The length of time (in seconds) that CloudFront will cache status codes for.
- response_code:
- type: int
- description:
- - The HTTP status code that CloudFront should return to a user when the origin returns the HTTP status code specified by I(error_code).
- response_page_path:
- type: str
- description:
- - The path to the custom error page that you want CloudFront to return to a viewer when your origin returns
- the HTTP status code specified by I(error_code).
-
- purge_custom_error_responses:
- description: Whether to remove any custom error responses that aren't listed in I(custom_error_responses).
- default: false
- type: bool
-
- comment:
- description:
- - A comment that describes the CloudFront distribution.
- - If not specified, it defaults to a generic message that it has been created with Ansible, and a datetime stamp.
- type: str
-
- logging:
- description:
- - A config element that is a complex object that defines logging for the distribution.
- suboptions:
- enabled:
- description: When I(enabled=true) CloudFront will log access to an S3 bucket.
- type: bool
- include_cookies:
- description: When I(include_cookies=true) CloudFront will include cookies in the logs.
- type: bool
- bucket:
- description: The S3 bucket to store the log in.
- type: str
- prefix:
- description: A prefix to include in the S3 object names.
- type: str
- type: dict
-
- price_class:
- description:
- - A string that specifies the pricing class of the distribution. As per
- U(https://aws.amazon.com/cloudfront/pricing/)
- - I(price_class=PriceClass_100) consists of the areas United States, Canada and Europe.
- - I(price_class=PriceClass_200) consists of the areas United States, Canada, Europe, Japan, India,
- Hong Kong, Philippines, S. Korea, Singapore & Taiwan.
- - I(price_class=PriceClass_All) consists of the areas United States, Canada, Europe, Japan, India,
- South America, Australia, Hong Kong, Philippines, S. Korea, Singapore & Taiwan.
- - AWS defaults this to C(PriceClass_All).
- - Valid values are C(PriceClass_100), C(PriceClass_200) and C(PriceClass_All)
- type: str
-
- enabled:
- description:
- - A boolean value that specifies whether the distribution is enabled or disabled.
- default: false
- type: bool
-
- viewer_certificate:
- type: dict
- description:
- - A dict that specifies the encryption details of the distribution.
- suboptions:
- cloudfront_default_certificate:
- type: bool
- description:
- - If you're using the CloudFront domain name for your distribution, such as C(123456789abcde.cloudfront.net)
- you should set I(cloudfront_default_certificate=true)
- - If I(cloudfront_default_certificate=true) do not set I(ssl_support_method).
- iam_certificate_id:
- type: str
- description:
- - The ID of a certificate stored in IAM to use for HTTPS connections.
- - If I(iam_certificate_id) is set then you must also specify I(ssl_support_method).
- acm_certificate_arn:
- type: str
- description:
- - The ARN of a certificate stored in ACM to use for HTTPS connections.
- - If I(acm_certificate_arn) is set then you must also specify I(ssl_support_method).
- ssl_support_method:
- type: str
- description:
- - How CloudFront should serve SSL certificates.
- - Valid values are C(sni-only) for SNI, and C(vip) if CloudFront is configured to use a dedicated IP for your content.
- minimum_protocol_version:
- type: str
- description:
- - The security policy that you want CloudFront to use for HTTPS connections.
- - See U(https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html)
- for supported security policies.
-
- restrictions:
- type: dict
- description:
- - A config element that is a complex object that describes how a distribution should restrict its content.
- suboptions:
- geo_restriction:
- description: Apply a restriction based on the location of the requester.
- type: dict
- suboptions:
- restriction_type:
- type: str
- description:
- - The method that you want to use to restrict distribution of your content by country.
- - Valid values are C(none), C(whitelist), C(blacklist)
- items:
- description:
- - A list of ISO 3166-1 two letter (Alpha 2) country codes that the
- restriction should apply to.
- - 'See the ISO website for a full list of codes U(https://www.iso.org/obp/ui/#search/code/)'
- type: list
-
- web_acl_id:
- description:
- - The ID of a Web Application Firewall (WAF) Access Control List (ACL).
- type: str
-
- http_version:
- description:
- - The version of the HTTP protocol to use for the distribution.
- - AWS defaults this to C(http2).
- - Valid values are C(http1.1) and C(http2).
- type: str
-
- ipv6_enabled:
- description:
- - Determines whether IPv6 support is enabled or not.
- type: bool
- default: false
-
- wait:
- description:
- - Specifies whether the module waits until the distribution has completed processing the creation or update.
- type: bool
- default: false
-
- wait_timeout:
- description:
- - Specifies the duration in seconds to wait for a timeout of a CloudFront create or update.
- default: 1800
- type: int
-
-'''
-
-EXAMPLES = '''
-
-# create a basic distribution with defaults and tags
-
-- cloudfront_distribution:
- state: present
- default_origin_domain_name: www.my-cloudfront-origin.com
- tags:
- Name: example distribution
- Project: example project
- Priority: '1'
-
-# update a distribution comment by distribution_id
-
-- cloudfront_distribution:
- state: present
- distribution_id: E1RP5A2MJ8073O
- comment: modified by ansible cloudfront.py
-
-# update a distribution comment by caller_reference
-
-- cloudfront_distribution:
- state: present
- caller_reference: my cloudfront distribution 001
- comment: modified by ansible cloudfront.py
-
-# update a distribution's aliases and comment using the distribution_id as a reference
-
-- cloudfront_distribution:
- state: present
- distribution_id: E1RP5A2MJ8073O
- comment: modified by cloudfront.py again
- aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ]
-
-# update a distribution's aliases and comment using an alias as a reference
-
-- cloudfront_distribution:
- state: present
- caller_reference: my test distribution
- comment: modified by cloudfront.py again
- aliases:
- - www.my-distribution-source.com
- - zzz.aaa.io
-
-# update a distribution's comment and aliases and tags and remove existing tags
-
-- cloudfront_distribution:
- state: present
- distribution_id: E15BU8SDCGSG57
- comment: modified by cloudfront.py again
- aliases:
- - tested.com
- tags:
- Project: distribution 1.2
- purge_tags: yes
-
-# create a distribution with an origin, logging and default cache behavior
-
-- cloudfront_distribution:
- state: present
- caller_reference: unique test distribution ID
- origins:
- - id: 'my test origin-000111'
- domain_name: www.example.com
- origin_path: /production
- custom_headers:
- - header_name: MyCustomHeaderName
- header_value: MyCustomHeaderValue
- default_cache_behavior:
- target_origin_id: 'my test origin-000111'
- forwarded_values:
- query_string: true
- cookies:
- forward: all
- headers:
- - '*'
- viewer_protocol_policy: allow-all
- smooth_streaming: true
- compress: true
- allowed_methods:
- items:
- - GET
- - HEAD
- cached_methods:
- - GET
- - HEAD
- logging:
- enabled: true
- include_cookies: false
- bucket: mylogbucket.s3.amazonaws.com
- prefix: myprefix/
- enabled: false
- comment: this is a CloudFront distribution with logging
-
-# delete a distribution
-
-- cloudfront_distribution:
- state: absent
- caller_reference: replaceable distribution
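-
-# create a distribution with an ACM certificate and a geo restriction
-# (illustrative values)
-
-- cloudfront_distribution:
- state: present
- caller_reference: distribution with certificate and geo restriction
- viewer_certificate:
- acm_certificate_arn: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
- ssl_support_method: sni-only
- restrictions:
- geo_restriction:
- restriction_type: whitelist
- items:
- - GB
- - US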
-'''
-
-RETURN = '''
-active_trusted_signers:
- description: Key pair IDs that CloudFront is aware of for each trusted signer.
- returned: always
- type: complex
- contains:
- enabled:
- description: Whether trusted signers are in use.
- returned: always
- type: bool
- sample: false
- quantity:
- description: Number of trusted signers.
- returned: always
- type: int
- sample: 1
- items:
- description: List of trusted signers' key pair IDs.
- returned: when there are trusted signers
- type: list
- sample:
- - key_pair_id
-aliases:
- description: Aliases that refer to the distribution.
- returned: always
- type: complex
- contains:
- items:
- description: List of aliases.
- returned: always
- type: list
- sample:
- - test.example.com
- quantity:
- description: Number of aliases.
- returned: always
- type: int
- sample: 1
-arn:
- description: Amazon Resource Name of the distribution.
- returned: always
- type: str
- sample: arn:aws:cloudfront::123456789012:distribution/E1234ABCDEFGHI
-cache_behaviors:
- description: CloudFront cache behaviors.
- returned: always
- type: complex
- contains:
- items:
- description: List of cache behaviors.
- returned: always
- type: complex
- contains:
- allowed_methods:
- description: Methods allowed by the cache behavior.
- returned: always
- type: complex
- contains:
- cached_methods:
- description: Methods cached by the cache behavior.
- returned: always
- type: complex
- contains:
- items:
- description: List of cached methods.
- returned: always
- type: list
- sample:
- - HEAD
- - GET
- quantity:
- description: Count of cached methods.
- returned: always
- type: int
- sample: 2
- items:
- description: List of methods allowed by the cache behavior.
- returned: always
- type: list
- sample:
- - HEAD
- - GET
- quantity:
- description: Count of methods allowed by the cache behavior.
- returned: always
- type: int
- sample: 2
- compress:
- description: Whether compression is turned on for the cache behavior.
- returned: always
- type: bool
- sample: false
- default_ttl:
- description: Default Time to Live of the cache behavior.
- returned: always
- type: int
- sample: 86400
- forwarded_values:
- description: Values forwarded to the origin for this cache behavior.
- returned: always
- type: complex
- contains:
- cookies:
- description: Cookies to forward to the origin.
- returned: always
- type: complex
- contains:
- forward:
- description: Which cookies to forward to the origin for this cache behavior.
- returned: always
- type: str
- sample: none
- whitelisted_names:
- description: The names of the cookies to forward to the origin for this cache behavior.
- returned: when I(forward=whitelist)
- type: complex
- contains:
- quantity:
- description: Count of cookies to forward.
- returned: always
- type: int
- sample: 1
- items:
- description: List of cookies to forward.
- returned: when list is not empty
- type: list
- sample: my_cookie
- headers:
- description: Which headers are used to vary on cache retrievals.
- returned: always
- type: complex
- contains:
- quantity:
- description: Count of headers to vary on.
- returned: always
- type: int
- sample: 1
- items:
- description: List of headers to vary on.
- returned: when list is not empty
- type: list
- sample:
- - Host
- query_string:
- description: Whether the query string is used in cache lookups.
- returned: always
- type: bool
- sample: false
- query_string_cache_keys:
- description: Which query string keys to use in cache lookups.
- returned: always
- type: complex
- contains:
- quantity:
- description: Count of query string cache keys to use in cache lookups.
- returned: always
- type: int
- sample: 1
- items:
- description: List of query string cache keys to use in cache lookups.
- returned: when list is not empty
- type: list
- sample:
- lambda_function_associations:
- description: Lambda function associations for a cache behavior.
- returned: always
- type: complex
- contains:
- quantity:
- description: Count of lambda function associations.
- returned: always
- type: int
- sample: 1
- items:
- description: List of lambda function associations.
- returned: when list is not empty
- type: list
- sample:
- - lambda_function_arn: arn:aws:lambda:us-east-1:123456789012:function:lambda-function
- event_type: viewer-response
- max_ttl:
- description: Maximum Time to Live.
- returned: always
- type: int
- sample: 31536000
- min_ttl:
- description: Minimum Time to Live.
- returned: always
- type: int
- sample: 0
- path_pattern:
- description: Path pattern that determines this cache behavior.
- returned: always
- type: str
- sample: /path/to/files/*
- smooth_streaming:
- description: Whether smooth streaming is enabled.
- returned: always
- type: bool
- sample: false
- target_origin_id:
- description: ID of the origin referenced by this cache behavior.
- returned: always
- type: str
- sample: origin_abcd
- trusted_signers:
- description: Trusted signers.
- returned: always
- type: complex
- contains:
- enabled:
- description: Whether trusted signers are enabled for this cache behavior.
- returned: always
- type: bool
- sample: false
- quantity:
- description: Count of trusted signers.
- returned: always
- type: int
- sample: 1
- viewer_protocol_policy:
- description: Policy of how to handle http/https.
- returned: always
- type: str
- sample: redirect-to-https
- quantity:
- description: Count of cache behaviors.
- returned: always
- type: int
- sample: 1
-
-caller_reference:
- description: Idempotency reference given when creating CloudFront distribution.
- returned: always
- type: str
- sample: '1484796016700'
-comment:
- description: Any comments you want to include about the distribution.
- returned: always
- type: str
- sample: 'my first CloudFront distribution'
-custom_error_responses:
- description: Custom error responses to use for error handling.
- returned: always
- type: complex
- contains:
- items:
- description: List of custom error responses.
- returned: always
- type: complex
- contains:
- error_caching_min_ttl:
- description: Minimum time to cache this error response.
- returned: always
- type: int
- sample: 300
- error_code:
- description: Origin response code that triggers this error response.
- returned: always
- type: int
- sample: 500
- response_code:
- description: Response code to return to the requester.
- returned: always
- type: str
- sample: '500'
- response_page_path:
- description: Path that contains the error page to display.
- returned: always
- type: str
- sample: /errors/5xx.html
- quantity:
- description: Count of custom error response items.
- returned: always
- type: int
- sample: 1
-default_cache_behavior:
- description: Default cache behavior.
- returned: always
- type: complex
- contains:
- allowed_methods:
- description: Methods allowed by the cache behavior.
- returned: always
- type: complex
- contains:
- cached_methods:
- description: Methods cached by the cache behavior.
- returned: always
- type: complex
- contains:
- items:
- description: List of cached methods.
- returned: always
- type: list
- sample:
- - HEAD
- - GET
- quantity:
- description: Count of cached methods.
- returned: always
- type: int
- sample: 2
- items:
- description: List of methods allowed by the cache behavior.
- returned: always
- type: list
- sample:
- - HEAD
- - GET
- quantity:
- description: Count of methods allowed by the cache behavior.
- returned: always
- type: int
- sample: 2
- compress:
- description: Whether compression is turned on for the cache behavior.
- returned: always
- type: bool
- sample: false
- default_ttl:
- description: Default Time to Live of the cache behavior.
- returned: always
- type: int
- sample: 86400
- forwarded_values:
- description: Values forwarded to the origin for this cache behavior.
- returned: always
- type: complex
- contains:
- cookies:
- description: Cookies to forward to the origin.
- returned: always
- type: complex
- contains:
- forward:
- description: Which cookies to forward to the origin for this cache behavior.
- returned: always
- type: str
- sample: none
- whitelisted_names:
- description: The names of the cookies to forward to the origin for this cache behavior.
- returned: when I(forward=whitelist)
- type: complex
- contains:
- quantity:
- description: Count of cookies to forward.
- returned: always
- type: int
- sample: 1
- items:
- description: List of cookies to forward.
- returned: when list is not empty
- type: list
- sample: my_cookie
- headers:
- description: Which headers are used to vary on cache retrievals.
- returned: always
- type: complex
- contains:
- quantity:
- description: Count of headers to vary on.
- returned: always
- type: int
- sample: 1
- items:
- description: List of headers to vary on.
- returned: when list is not empty
- type: list
- sample:
- - Host
- query_string:
- description: Whether the query string is used in cache lookups.
- returned: always
- type: bool
- sample: false
- query_string_cache_keys:
- description: Which query string keys to use in cache lookups.
- returned: always
- type: complex
- contains:
- quantity:
- description: Count of query string cache keys to use in cache lookups.
- returned: always
- type: int
- sample: 1
- items:
- description: List of query string cache keys to use in cache lookups.
- returned: when list is not empty
- type: list
- sample:
- lambda_function_associations:
- description: Lambda function associations for a cache behavior.
- returned: always
- type: complex
- contains:
- quantity:
- description: Count of lambda function associations.
- returned: always
- type: int
- sample: 1
- items:
- description: List of lambda function associations.
- returned: when list is not empty
- type: list
- sample:
- - lambda_function_arn: arn:aws:lambda:us-east-1:123456789012:function:lambda-function
- event_type: viewer-response
- max_ttl:
- description: Maximum Time to Live.
- returned: always
- type: int
- sample: 31536000
- min_ttl:
- description: Minimum Time to Live.
- returned: always
- type: int
- sample: 0
- path_pattern:
- description: Path pattern that determines this cache behavior.
- returned: always
- type: str
- sample: /path/to/files/*
- smooth_streaming:
- description: Whether smooth streaming is enabled.
- returned: always
- type: bool
- sample: false
- target_origin_id:
- description: ID of the origin referenced by this cache behavior.
- returned: always
- type: str
- sample: origin_abcd
- trusted_signers:
- description: Trusted signers.
- returned: always
- type: complex
- contains:
- enabled:
- description: Whether trusted signers are enabled for this cache behavior.
- returned: always
- type: bool
- sample: false
- quantity:
- description: Count of trusted signers.
- returned: always
- type: int
- sample: 1
- viewer_protocol_policy:
- description: Policy of how to handle http/https.
- returned: always
- type: str
- sample: redirect-to-https
-default_root_object:
- description: The object that you want CloudFront to request from your origin (for example, index.html)
- when a viewer requests the root URL for your distribution.
- returned: always
- type: str
- sample: ''
-diff:
- description: Difference between previous configuration and new configuration.
- returned: always
- type: dict
- sample: {}
-domain_name:
- description: Domain name of CloudFront distribution.
- returned: always
- type: str
- sample: d1vz8pzgurxosf.cloudfront.net
-enabled:
- description: Whether the CloudFront distribution is enabled or not.
- returned: always
- type: bool
- sample: true
-http_version:
- description: Version of HTTP supported by the distribution.
- returned: always
- type: str
- sample: http2
-id:
- description: CloudFront distribution ID.
- returned: always
- type: str
- sample: E123456ABCDEFG
-in_progress_invalidation_batches:
- description: The number of invalidation batches currently in progress.
- returned: always
- type: int
- sample: 0
-is_ipv6_enabled:
- description: Whether IPv6 is enabled.
- returned: always
- type: bool
- sample: true
-last_modified_time:
- description: Date and time distribution was last modified.
- returned: always
- type: str
- sample: '2017-10-13T01:51:12.656000+00:00'
-logging:
- description: Logging information.
- returned: always
- type: complex
- contains:
- bucket:
- description: S3 bucket logging destination.
- returned: always
- type: str
- sample: logs-example-com.s3.amazonaws.com
- enabled:
- description: Whether logging is enabled.
- returned: always
- type: bool
- sample: true
- include_cookies:
- description: Whether to log cookies.
- returned: always
- type: bool
- sample: false
- prefix:
- description: Prefix added to logging object names.
- returned: always
- type: str
- sample: cloudfront/test
-origins:
- description: Origins in the CloudFront distribution.
- returned: always
- type: complex
- contains:
- items:
- description: List of origins.
- returned: always
- type: complex
- contains:
- custom_headers:
- description: Custom headers passed to the origin.
- returned: always
- type: complex
- contains:
- quantity:
- description: Count of headers.
- returned: always
- type: int
- sample: 1
- custom_origin_config:
- description: Configuration of the origin.
- returned: always
- type: complex
- contains:
- http_port:
- description: Port on which HTTP is listening.
- returned: always
- type: int
- sample: 80
- https_port:
- description: Port on which HTTPS is listening.
- returned: always
- type: int
- sample: 443
- origin_keepalive_timeout:
- description: Keep-alive timeout.
- returned: always
- type: int
- sample: 5
- origin_protocol_policy:
- description: Policy of which protocols are supported.
- returned: always
- type: str
- sample: https-only
- origin_read_timeout:
- description: Timeout for reads to the origin.
- returned: always
- type: int
- sample: 30
- origin_ssl_protocols:
- description: SSL protocols allowed by the origin.
- returned: always
- type: complex
- contains:
- items:
- description: List of SSL protocols.
- returned: always
- type: list
- sample:
- - TLSv1
- - TLSv1.1
- - TLSv1.2
- quantity:
- description: Count of SSL protocols.
- returned: always
- type: int
- sample: 3
- domain_name:
- description: Domain name of the origin.
- returned: always
- type: str
- sample: test-origin.example.com
- id:
- description: ID of the origin.
- returned: always
- type: str
- sample: test-origin.example.com
- origin_path:
- description: Subdirectory to prefix the request from the S3 or HTTP origin.
- returned: always
- type: str
- sample: ''
- quantity:
- description: Count of origins.
- returned: always
- type: int
- sample: 1
-price_class:
- description: Price class of CloudFront distribution.
- returned: always
- type: str
- sample: PriceClass_All
-restrictions:
- description: Restrictions in use by CloudFront.
- returned: always
- type: complex
- contains:
- geo_restriction:
- description: Controls the countries in which your content is distributed.
- returned: always
- type: complex
- contains:
- quantity:
- description: Count of restrictions.
- returned: always
- type: int
- sample: 1
- items:
- description: List of country codes allowed or disallowed.
- returned: always
- type: list
- sample: xy
- restriction_type:
- description: Type of restriction.
- returned: always
- type: str
- sample: blacklist
-status:
- description: Status of the CloudFront distribution.
- returned: always
- type: str
- sample: InProgress
-tags:
- description: Distribution tags.
- returned: always
- type: dict
- sample:
- Hello: World
-viewer_certificate:
- description: Certificate used by CloudFront distribution.
- returned: always
- type: complex
- contains:
- acm_certificate_arn:
- description: ARN of ACM certificate.
- returned: when certificate comes from ACM
- type: str
- sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
- certificate:
- description: Reference to certificate.
- returned: always
- type: str
- sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef
- certificate_source:
- description: Where certificate comes from.
- returned: always
- type: str
- sample: acm
- minimum_protocol_version:
- description: Minimum SSL/TLS protocol supported by this distribution.
- returned: always
- type: str
- sample: TLSv1
- ssl_support_method:
- description: Support for pre-SNI browsers or not.
- returned: always
- type: str
- sample: sni-only
-web_acl_id:
- description: ID of Web Access Control List (from WAF service).
- returned: always
- type: str
- sample: abcd1234-1234-abcd-abcd-abcd12345678
-'''
-
-from ansible.module_utils._text import to_text, to_native
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager
-from ansible.module_utils.common.dict_transformations import recursive_diff
-from ansible.module_utils.ec2 import compare_aws_tags, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
-import datetime
-
-try:
- from collections import OrderedDict
-except ImportError:
- try:
- from ordereddict import OrderedDict
- except ImportError:
- pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed)
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def change_dict_key_name(dictionary, old_key, new_key):
- if old_key in dictionary:
- dictionary[new_key] = dictionary.get(old_key)
- dictionary.pop(old_key, None)
- return dictionary
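- # e.g. change_dict_key_name({'http_port': 80}, 'http_port', 'h_t_t_p_port')
- # returns {'h_t_t_p_port': 80}; the dict is modified in place and also returned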
-
-
-def merge_validation_into_config(config, validated_node, node_name):
- if validated_node is not None:
- if isinstance(validated_node, dict):
- config_node = config.get(node_name)
- if config_node is not None:
- config_node_items = list(config_node.items())
- else:
- config_node_items = []
- config[node_name] = dict(config_node_items + list(validated_node.items()))
- if isinstance(validated_node, list):
- config[node_name] = list(set(config.get(node_name) + validated_node))
- return config
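- # dict nodes are shallow-merged with validated keys taking precedence;
- # list nodes are unioned via set(), so duplicates are dropped and ordering
- # is not preserved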
-
-
-def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True):
- if list_items is None:
- list_items = []
- if not isinstance(list_items, list):
- raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items)))
- result = {}
- if include_quantity:
- result['quantity'] = len(list_items)
- if len(list_items) > 0:
- result['items'] = list_items
- return result
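- # e.g. ansible_list_to_cloudfront_list(['GET', 'HEAD']) returns
- # {'quantity': 2, 'items': ['GET', 'HEAD']}, the list shape the CloudFront
- # API expects; an empty input yields just {'quantity': 0}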
-
-
-def create_distribution(client, module, config, tags):
- try:
- if not tags:
- return client.create_distribution(DistributionConfig=config)['Distribution']
- else:
- distribution_config_with_tags = {
- 'DistributionConfig': config,
- 'Tags': {
- 'Items': tags
- }
- }
- return client.create_distribution_with_tags(DistributionConfigWithTags=distribution_config_with_tags)['Distribution']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error creating distribution")
-
-
-def delete_distribution(client, module, distribution):
- try:
- return client.delete_distribution(Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution']))
-
-
-def update_distribution(client, module, config, distribution_id, e_tag):
- try:
- return client.update_distribution(DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config))
-
-
-def tag_resource(client, module, arn, tags):
- try:
- return client.tag_resource(Resource=arn, Tags=dict(Items=tags))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error tagging resource")
-
-
-def untag_resource(client, module, arn, tag_keys):
- try:
- return client.untag_resource(Resource=arn, TagKeys=dict(Items=tag_keys))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error untagging resource")
-
-
-def list_tags_for_resource(client, module, arn):
- try:
- response = client.list_tags_for_resource(Resource=arn)
- return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items'))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error listing tags for resource")
-
-
-def update_tags(client, module, existing_tags, valid_tags, purge_tags, arn):
- changed = False
- to_add, to_remove = compare_aws_tags(existing_tags, valid_tags, purge_tags)
- if to_remove:
- untag_resource(client, module, arn, to_remove)
- changed = True
- if to_add:
- tag_resource(client, module, arn, ansible_dict_to_boto3_tag_list(to_add))
- changed = True
- return changed
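- # e.g. existing_tags={'Env': 'dev', 'Team': 'a'}, valid_tags={'Env': 'prod'},
- # purge_tags=True yields to_add={'Env': 'prod'} and to_remove=['Team']
- # (illustrative values)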
-
-
-class CloudFrontValidationManager(object):
- """
- Manages CloudFront validations
- """
-
- def __init__(self, module):
- self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
- self.module = module
- self.__default_distribution_enabled = True
- self.__default_http_port = 80
- self.__default_https_port = 443
- self.__default_ipv6_enabled = False
- self.__default_origin_ssl_protocols = [
- 'TLSv1',
- 'TLSv1.1',
- 'TLSv1.2'
- ]
- self.__default_custom_origin_protocol_policy = 'match-viewer'
- self.__default_custom_origin_read_timeout = 30
- self.__default_custom_origin_keepalive_timeout = 5
- self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
- self.__default_cache_behavior_min_ttl = 0
- self.__default_cache_behavior_max_ttl = 31536000
- self.__default_cache_behavior_default_ttl = 86400
- self.__default_cache_behavior_compress = False
- self.__default_cache_behavior_viewer_protocol_policy = 'allow-all'
- self.__default_cache_behavior_smooth_streaming = False
- self.__default_cache_behavior_forwarded_values_forward_cookies = 'none'
- self.__default_cache_behavior_forwarded_values_query_string = True
- self.__default_trusted_signers_enabled = False
- self.__valid_price_classes = set([
- 'PriceClass_100',
- 'PriceClass_200',
- 'PriceClass_All'
- ])
- self.__valid_origin_protocol_policies = set([
- 'http-only',
- 'match-viewer',
- 'https-only'
- ])
- self.__valid_origin_ssl_protocols = set([
- 'SSLv3',
- 'TLSv1',
- 'TLSv1.1',
- 'TLSv1.2'
- ])
- self.__valid_cookie_forwarding = set([
- 'none',
- 'whitelist',
- 'all'
- ])
- self.__valid_viewer_protocol_policies = set([
- 'allow-all',
- 'https-only',
- 'redirect-to-https'
- ])
- self.__valid_methods = set([
- 'GET',
- 'HEAD',
- 'POST',
- 'PUT',
- 'PATCH',
- 'OPTIONS',
- 'DELETE'
- ])
- self.__valid_methods_cached_methods = [
- set([
- 'GET',
- 'HEAD'
- ]),
- set([
- 'GET',
- 'HEAD',
- 'OPTIONS'
- ])
- ]
- self.__valid_methods_allowed_methods = [
- self.__valid_methods_cached_methods[0],
- self.__valid_methods_cached_methods[1],
- self.__valid_methods
- ]
- self.__valid_lambda_function_association_event_types = set([
- 'viewer-request',
- 'viewer-response',
- 'origin-request',
- 'origin-response'
- ])
- self.__valid_viewer_certificate_ssl_support_methods = set([
- 'sni-only',
- 'vip'
- ])
- self.__valid_viewer_certificate_minimum_protocol_versions = set([
- 'SSLv3',
- 'TLSv1',
- 'TLSv1_2016',
- 'TLSv1.1_2016',
- 'TLSv1.2_2018'
- ])
- self.__valid_viewer_certificate_certificate_sources = set([
- 'cloudfront',
- 'iam',
- 'acm'
- ])
- self.__valid_http_versions = set([
- 'http1.1',
- 'http2'
- ])
- self.__s3_bucket_domain_identifier = '.s3.amazonaws.com'
-
- def add_missing_key(self, dict_object, key_to_set, value_to_set):
- if key_to_set not in dict_object and value_to_set is not None:
- dict_object[key_to_set] = value_to_set
- return dict_object
-
- def add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_set):
- if old_key not in dict_object and value_to_set is not None:
- dict_object[new_key] = value_to_set
- else:
- dict_object = change_dict_key_name(dict_object, old_key, new_key)
- return dict_object
-
- def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False):
- if key_name in dict_object:
- self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values)
- else:
- if to_aws_list:
- dict_object[key_name] = ansible_list_to_cloudfront_list(value_to_set)
- elif value_to_set is not None:
- dict_object[key_name] = value_to_set
- return dict_object
-
- def validate_logging(self, logging):
- try:
- if logging is None:
- return None
- valid_logging = {}
- if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging):
- self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.")
- valid_logging['include_cookies'] = logging.get('include_cookies')
- valid_logging['enabled'] = logging.get('enabled')
- valid_logging['bucket'] = logging.get('bucket')
- valid_logging['prefix'] = logging.get('prefix')
- return valid_logging
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating distribution logging")
-
- def validate_is_list(self, list_to_validate, list_name):
- if not isinstance(list_to_validate, list):
- self.module.fail_json(msg='%s is of type %s. Must be a list.' % (list_name, type(list_to_validate).__name__))
-
- def validate_required_key(self, key_name, full_key_name, dict_object):
- if key_name not in dict_object:
- self.module.fail_json(msg="%s must be specified." % full_key_name)
-
- def validate_origins(self, client, config, origins, default_origin_domain_name,
- default_origin_path, create_distribution, purge_origins=False):
- try:
- if origins is None:
- if default_origin_domain_name is None and not create_distribution:
- if purge_origins:
- return None
- else:
- return ansible_list_to_cloudfront_list(config)
- if default_origin_domain_name is not None:
- origins = [{
- 'domain_name': default_origin_domain_name,
- 'origin_path': default_origin_path or ''
- }]
- else:
- origins = []
- self.validate_is_list(origins, 'origins')
- if not origins and default_origin_domain_name is None and create_distribution:
- self.module.fail_json(msg="Both origins[] and default_origin_domain_name have not been specified. Please specify at least one.")
- all_origins = OrderedDict()
- new_domains = list()
- for origin in config:
- all_origins[origin.get('domain_name')] = origin
- for origin in origins:
- origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path)
- all_origins[origin['domain_name']] = origin
- new_domains.append(origin['domain_name'])
- if purge_origins:
- for domain in list(all_origins.keys()):
- if domain not in new_domains:
- del(all_origins[domain])
- return ansible_list_to_cloudfront_list(list(all_origins.values()))
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating distribution origins")
-
- def validate_s3_origin_configuration(self, client, existing_config, origin):
- if origin['s3_origin_access_identity_enabled'] and existing_config.get('s3_origin_config', {}).get('origin_access_identity'):
- return existing_config['s3_origin_config']['origin_access_identity']
- if not origin['s3_origin_access_identity_enabled']:
- return None
- try:
- comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
- caller_reference = "%s-%s" % (origin.get('domain_name'), self.__default_datetime_string)
- cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference,
- Comment=comment))
- oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id']
- except Exception as e:
- self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id'])
- return "origin-access-identity/cloudfront/%s" % oai
-
- def validate_origin(self, client, existing_config, origin, default_origin_path):
- try:
- origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or ''))
- self.validate_required_key('origin_path', 'origins[].origin_path', origin)
- origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string))
- if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0:
- for custom_header in origin.get('custom_headers'):
- if 'header_name' not in custom_header or 'header_value' not in custom_header:
- self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.")
- origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers'))
- else:
- origin['custom_headers'] = ansible_list_to_cloudfront_list()
- if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower():
- if origin.get("s3_origin_access_identity_enabled") is not None:
- s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin)
- if s3_origin_config:
- oai = s3_origin_config
- else:
- oai = ""
- origin["s3_origin_config"] = dict(origin_access_identity=oai)
- del(origin["s3_origin_access_identity_enabled"])
- if 'custom_origin_config' in origin:
- self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive")
- else:
- origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {}))
- custom_origin_config = origin.get('custom_origin_config')
- custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy',
- 'origins[].custom_origin_config.origin_protocol_policy',
- self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies)
- custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout)
- custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout)
- custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port)
- custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port)
- if custom_origin_config.get('origin_ssl_protocols', {}).get('items'):
- custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items']
- if custom_origin_config.get('origin_ssl_protocols'):
- self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols',
- self.__valid_origin_ssl_protocols)
- else:
- custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols
- custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols'])
- return origin
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error validating distribution origin")
-
- def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge_cache_behaviors=False):
- try:
- if cache_behaviors is None and valid_origins is not None and purge_cache_behaviors is False:
- return ansible_list_to_cloudfront_list(config)
- all_cache_behaviors = OrderedDict()
- # Cache behaviors are order dependent. When purge_cache_behaviors is true the
- # user-supplied list defines the full ordering; otherwise the existing ordering
- # is kept and new behaviors are appended, since the intended overall order
- # cannot be known from a partial list.
- if not purge_cache_behaviors:
- for behavior in config:
- all_cache_behaviors[behavior['path_pattern']] = behavior
- for cache_behavior in cache_behaviors:
- valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}),
- cache_behavior, valid_origins)
- all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior
- if purge_cache_behaviors:
- for path_pattern in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]):
- del all_cache_behaviors[path_pattern]
- return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values()))
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors")
-
- def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_default_cache=False):
- if is_default_cache and cache_behavior is None:
- cache_behavior = {}
- if cache_behavior is None and valid_origins is not None:
- return config
- cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache)
- cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior)
- cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior)
- cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior)
- cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior)
- cache_behavior = self.validate_field_level_encryption_id(config, cache_behavior.get('field_level_encryption_id'), cache_behavior)
- return cache_behavior
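-
- # validate_cache_behavior above funnels each behavior through a fixed pipeline:
- # first-level keys -> forwarded_values -> allowed_methods ->
- # lambda_function_associations -> trusted_signers -> field_level_encryption_id,
- # with each step layering defaults from the existing config onto the user input.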
-
- def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache):
- try:
- cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l',
- config.get('min_t_t_l', self.__default_cache_behavior_min_ttl))
- cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l',
- config.get('max_t_t_l', self.__default_cache_behavior_max_ttl))
- cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l',
- config.get('default_t_t_l', self.__default_cache_behavior_default_ttl))
- cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress))
- target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id'))
- if not target_origin_id:
- target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins)
- if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]:
- if is_default_cache:
- cache_behavior_name = 'Default cache behavior'
- else:
- cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern']
- self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." %
- cache_behavior_name)
- cache_behavior['target_origin_id'] = target_origin_id
- cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy',
- config.get('viewer_protocol_policy',
- self.__default_cache_behavior_viewer_protocol_policy),
- self.__valid_viewer_protocol_policies)
- cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming',
- config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming))
- return cache_behavior
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys")
-
- def validate_forwarded_values(self, config, forwarded_values, cache_behavior):
- try:
- if not forwarded_values:
- forwarded_values = dict()
- existing_config = config.get('forwarded_values', {})
- headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items'))
- if headers:
- headers.sort()
- forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers)
- if 'cookies' not in forwarded_values:
- forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies)
- forwarded_values['cookies'] = {'forward': forward}
- else:
- existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items')
- whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist)
- if whitelist:
- self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names')
- forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist)
- cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward'))
- self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward',
- self.__valid_cookie_forwarding)
- forwarded_values['cookies']['forward'] = cookie_forwarding
- query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', []))
- self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys')
- forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys)
- forwarded_values = self.add_missing_key(forwarded_values, 'query_string',
- existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string))
- cache_behavior['forwarded_values'] = forwarded_values
- return cache_behavior
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating forwarded values")
-
- def validate_lambda_function_associations(self, config, lambda_function_associations, cache_behavior):
- try:
- if lambda_function_associations is not None:
- self.validate_is_list(lambda_function_associations, 'lambda_function_associations')
- for association in lambda_function_associations:
- association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n')
- self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type',
- self.__valid_lambda_function_association_event_types)
- cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations)
- else:
- if 'lambda_function_associations' in config:
- cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations')
- else:
- cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([])
- return cache_behavior
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating lambda function associations")
-
- def validate_field_level_encryption_id(self, config, field_level_encryption_id, cache_behavior):
- if field_level_encryption_id is not None:
- cache_behavior['field_level_encryption_id'] = field_level_encryption_id
- elif 'field_level_encryption_id' in config:
- cache_behavior['field_level_encryption_id'] = config.get('field_level_encryption_id')
- else:
- cache_behavior['field_level_encryption_id'] = ""
- return cache_behavior
-
- def validate_allowed_methods(self, config, allowed_methods, cache_behavior):
- try:
- if allowed_methods is not None:
- self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods)
- temp_allowed_items = allowed_methods.get('items')
- self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items')
- self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]',
- self.__valid_methods_allowed_methods)
- cached_items = allowed_methods.get('cached_methods')
- if 'cached_methods' in allowed_methods:
- self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods')
- self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_methods.cached_methods[]',
- self.__valid_methods_cached_methods)
- # we don't care if the order of how cloudfront stores the methods differs - preserving existing
- # order reduces likelihood of making unnecessary changes
- if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items):
- cache_behavior['allowed_methods'] = config['allowed_methods']
- else:
- cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items)
-
- if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])):
- cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods']
- else:
- cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items)
- else:
- if 'allowed_methods' in config:
- cache_behavior['allowed_methods'] = config.get('allowed_methods')
- return cache_behavior
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating allowed methods")
-
- def validate_trusted_signers(self, config, trusted_signers, cache_behavior):
- try:
- if trusted_signers is None:
- trusted_signers = {}
- if 'items' in trusted_signers:
- valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items'))
- else:
- valid_trusted_signers = dict(quantity=config.get('quantity', 0))
- if 'items' in config:
- valid_trusted_signers['items'] = config['items']
- valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled))
- cache_behavior['trusted_signers'] = valid_trusted_signers
- return cache_behavior
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating trusted signers")
-
- def validate_viewer_certificate(self, viewer_certificate):
- try:
- if viewer_certificate is None:
- return None
- if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None:
- self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" +
- "_certificate set to true.")
- self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method',
- self.__valid_viewer_certificate_ssl_support_methods)
- self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version',
- self.__valid_viewer_certificate_minimum_protocol_versions)
- self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source',
- self.__valid_viewer_certificate_certificate_sources)
- viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate')
- viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method')
- viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id')
- viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn')
- return viewer_certificate
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating viewer certificate")
-
- def validate_custom_error_responses(self, config, custom_error_responses, purge_custom_error_responses):
- try:
- if custom_error_responses is None and not purge_custom_error_responses:
- return ansible_list_to_cloudfront_list(config)
- self.validate_is_list(custom_error_responses, 'custom_error_responses')
- result = list()
- existing_responses = dict((response['error_code'], response) for response in config)
- for custom_error_response in custom_error_responses:
- self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response)
- custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l')
- if 'response_code' in custom_error_response:
- custom_error_response['response_code'] = str(custom_error_response['response_code'])
- if custom_error_response['error_code'] in existing_responses:
- del existing_responses[custom_error_response['error_code']]
- result.append(custom_error_response)
- if not purge_custom_error_responses:
- result.extend(existing_responses.values())
-
- return ansible_list_to_cloudfront_list(result)
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating custom error responses")
-
- def validate_restrictions(self, config, restrictions, purge_restrictions=False):
- try:
- if restrictions is None:
- if purge_restrictions:
- return None
- else:
- return config
- self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions)
- geo_restriction = restrictions.get('geo_restriction')
- self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction)
- existing_restrictions = config.get('geo_restriction', {}).get('items', [])
- geo_restriction_items = geo_restriction.get('items') or []
- if not purge_restrictions:
- geo_restriction_items.extend([rest for rest in existing_restrictions if
- rest not in geo_restriction_items])
- valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items)
- valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type')
- return {'geo_restriction': valid_restrictions}
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating restrictions")
-
- def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id):
- try:
- config['default_root_object'] = default_root_object or config.get('default_root_object', '')
- config['is_i_p_v_6_enabled'] = ipv6_enabled if ipv6_enabled is not None else config.get('is_i_p_v_6_enabled', self.__default_ipv6_enabled)
- if http_version is not None or config.get('http_version'):
- self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions)
- config['http_version'] = http_version or config.get('http_version')
- if web_acl_id or config.get('web_a_c_l_id'):
- config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id')
- return config
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating distribution config parameters")
-
- def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False):
- try:
- if config is None:
- config = {}
- if aliases is not None:
- if not purge_aliases:
- aliases.extend([alias for alias in config.get('aliases', {}).get('items', [])
- if alias not in aliases])
- config['aliases'] = ansible_list_to_cloudfront_list(aliases)
- if logging is not None:
- config['logging'] = self.validate_logging(logging)
- config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled)
- if price_class is not None:
- self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes)
- config['price_class'] = price_class
- return config
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating common distribution parameters")
-
- def validate_comment(self, config, comment):
- config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string)
- return config
-
- def validate_caller_reference(self, caller_reference):
- return caller_reference or self.__default_datetime_string
-
- def get_first_origin_id_for_default_cache_behavior(self, valid_origins):
- try:
- if valid_origins is not None:
- valid_origins_list = valid_origins.get('items')
- if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0:
- return str(valid_origins_list[0].get('id'))
- self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.")
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior")
-
- def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list):
- try:
- self.validate_is_list(attribute_list, attribute_list_name)
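- # allowed_list comes in one of two shapes: a list of allowed sets (e.g. valid
- # HTTP method combinations), where the attribute set must equal one of them,
- # or a set of allowed scalar values, which must contain every attribute.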
- if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or
- isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)):
- self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list)))
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list")
-
- def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list):
- if attribute is not None and attribute not in allowed_list:
- self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list)))
-
- def validate_distribution_from_caller_reference(self, caller_reference):
- try:
- distributions = self.__cloudfront_facts_mgr.list_distributions(False)
- distribution_name = 'Distribution'
- distribution_config_name = 'DistributionConfig'
- distribution_ids = [dist.get('Id') for dist in distributions]
- for distribution_id in distribution_ids:
- distribution = self.__cloudfront_facts_mgr.get_distribution(distribution_id)
- if distribution is not None:
- distribution_config = distribution[distribution_name].get(distribution_config_name)
- if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference:
- distribution[distribution_name][distribution_config_name] = distribution_config
- return distribution
-
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating distribution from caller reference")
-
- def validate_distribution_from_aliases_caller_reference(self, distribution_id, aliases, caller_reference):
- try:
- if caller_reference is not None:
- return self.validate_distribution_from_caller_reference(caller_reference)
- else:
- if aliases:
- distribution_id = self.validate_distribution_id_from_alias(aliases)
- if distribution_id:
- return self.__cloudfront_facts_mgr.get_distribution(distribution_id)
- return None
- except Exception as e:
- self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference")
-
- def validate_distribution_id_from_alias(self, aliases):
- distributions = self.__cloudfront_facts_mgr.list_distributions(False)
- if distributions:
- for distribution in distributions:
- distribution_aliases = distribution.get('Aliases', {}).get('Items', [])
- if set(aliases) & set(distribution_aliases):
- return distribution['Id']
- return None
-
- def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference):
- if distribution_id is None:
- distribution_id = self.validate_distribution_from_caller_reference(caller_reference=caller_reference)['Distribution']['Id']
-
- try:
- waiter = client.get_waiter('distribution_deployed')
- attempts = 1 + int(wait_timeout / 60)
- waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts})
- except botocore.exceptions.WaiterError as e:
- self.module.fail_json_aws(e, msg="Timeout waiting for CloudFront action."
- " Waited for {0} seconds before timeout.".format(to_text(wait_timeout)))
-
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id))
-
-
-def main():
- argument_spec = dict(
- state=dict(choices=['present', 'absent'], default='present'),
- caller_reference=dict(),
- comment=dict(),
- distribution_id=dict(),
- e_tag=dict(),
- tags=dict(type='dict', default={}),
- purge_tags=dict(type='bool', default=False),
- alias=dict(),
- aliases=dict(type='list', default=[]),
- purge_aliases=dict(type='bool', default=False),
- default_root_object=dict(),
- origins=dict(type='list'),
- purge_origins=dict(type='bool', default=False),
- default_cache_behavior=dict(type='dict'),
- cache_behaviors=dict(type='list'),
- purge_cache_behaviors=dict(type='bool', default=False),
- custom_error_responses=dict(type='list'),
- purge_custom_error_responses=dict(type='bool', default=False),
- logging=dict(type='dict'),
- price_class=dict(),
- enabled=dict(type='bool'),
- viewer_certificate=dict(type='dict'),
- restrictions=dict(type='dict'),
- purge_restrictions=dict(type='bool', default=False),
- web_acl_id=dict(),
- http_version=dict(),
- ipv6_enabled=dict(type='bool'),
- default_origin_domain_name=dict(),
- default_origin_path=dict(),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=1800, type='int')
- )
-
- result = {}
- changed = True
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=False,
- mutually_exclusive=[
- ['distribution_id', 'alias'],
- ['default_origin_domain_name', 'distribution_id'],
- ['default_origin_domain_name', 'alias'],
- ]
- )
-
- client = module.client('cloudfront')
-
- validation_mgr = CloudFrontValidationManager(module)
-
- state = module.params.get('state')
- caller_reference = module.params.get('caller_reference')
- comment = module.params.get('comment')
- e_tag = module.params.get('e_tag')
- tags = module.params.get('tags')
- purge_tags = module.params.get('purge_tags')
- distribution_id = module.params.get('distribution_id')
- alias = module.params.get('alias')
- aliases = module.params.get('aliases')
- purge_aliases = module.params.get('purge_aliases')
- default_root_object = module.params.get('default_root_object')
- origins = module.params.get('origins')
- purge_origins = module.params.get('purge_origins')
- default_cache_behavior = module.params.get('default_cache_behavior')
- cache_behaviors = module.params.get('cache_behaviors')
- purge_cache_behaviors = module.params.get('purge_cache_behaviors')
- custom_error_responses = module.params.get('custom_error_responses')
- purge_custom_error_responses = module.params.get('purge_custom_error_responses')
- logging = module.params.get('logging')
- price_class = module.params.get('price_class')
- enabled = module.params.get('enabled')
- viewer_certificate = module.params.get('viewer_certificate')
- restrictions = module.params.get('restrictions')
- purge_restrictions = module.params.get('purge_restrictions')
- web_acl_id = module.params.get('web_acl_id')
- http_version = module.params.get('http_version')
- ipv6_enabled = module.params.get('ipv6_enabled')
- default_origin_domain_name = module.params.get('default_origin_domain_name')
- default_origin_path = module.params.get('default_origin_path')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- if alias and alias not in aliases:
- aliases.append(alias)
-
- distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
-
- update = state == 'present' and distribution
- create = state == 'present' and not distribution
- delete = state == 'absent' and distribution
-
- if not (update or create or delete):
- module.exit_json(changed=False)
-
- if update or delete:
- config = distribution['Distribution']['DistributionConfig']
- e_tag = distribution['ETag']
- distribution_id = distribution['Distribution']['Id']
- else:
- config = dict()
- if update:
- config = camel_dict_to_snake_dict(config, reversible=True)
-
- if create or update:
- config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases)
- config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id)
- config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name,
- default_origin_path, create, purge_origins)
- config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []),
- cache_behaviors, config['origins'], purge_cache_behaviors)
- config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}),
- default_cache_behavior, config['origins'], True)
- config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []),
- custom_error_responses, purge_custom_error_responses)
- valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions)
- if valid_restrictions:
- config['restrictions'] = valid_restrictions
- valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate)
- config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate')
- config = validation_mgr.validate_comment(config, comment)
- config = snake_dict_to_camel_dict(config, capitalize_first=True)
-
- if create:
- config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference)
- result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags))
- result = camel_dict_to_snake_dict(result)
- result['tags'] = list_tags_for_resource(client, module, result['arn'])
-
- if delete:
- if config['Enabled']:
- config['Enabled'] = False
- result = update_distribution(client, module, config, distribution_id, e_tag)
- validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
- distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference)
- # e_tag = distribution['ETag']
- result = delete_distribution(client, module, distribution)
-
- if update:
- changed = config != distribution['Distribution']['DistributionConfig']
- if changed:
- result = update_distribution(client, module, config, distribution_id, e_tag)
- else:
- result = distribution['Distribution']
- existing_tags = list_tags_for_resource(client, module, result['ARN'])
- distribution['Distribution']['DistributionConfig']['tags'] = existing_tags
- changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN'])
- result = camel_dict_to_snake_dict(result)
- result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn'])
- result['diff'] = dict()
- diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config)
- if diff:
- result['diff']['before'] = diff[0]
- result['diff']['after'] = diff[1]
-
- if wait and (create or update):
- validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference'))
-
- if 'distribution_config' in result:
- result.update(result['distribution_config'])
- del result['distribution_config']
-
- module.exit_json(changed=changed, **result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/cloudfront_info.py b/lib/ansible/modules/cloud/amazon/cloudfront_info.py
deleted file mode 100644
index 4845cf779d..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudfront_info.py
+++ /dev/null
@@ -1,729 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: cloudfront_info
-short_description: Obtain facts about an AWS CloudFront distribution
-description:
- - Gets information about an AWS CloudFront distribution.
- - This module was called C(cloudfront_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(cloudfront_info) module no longer returns C(ansible_facts)!
-requirements:
- - boto3 >= 1.0.0
- - python >= 2.6
-version_added: "2.3"
-author: Willem van Ketwich (@wilvk)
-options:
- distribution_id:
- description:
- - The id of the CloudFront distribution. Used with I(distribution), I(distribution_config),
- I(invalidation), I(streaming_distribution), I(streaming_distribution_config), I(list_invalidations).
- required: false
- type: str
- invalidation_id:
- description:
- - The id of the invalidation to get information about.
- - Used with I(invalidation).
- required: false
- type: str
- origin_access_identity_id:
- description:
- - The id of the CloudFront origin access identity to get information about.
- required: false
- type: str
- web_acl_id:
- description:
- - The id of the web ACL to filter the list of distributions by.
- - Used with I(list_distributions_by_web_acl_id).
- required: false
- type: str
- domain_name_alias:
- description:
- - Can be used instead of I(distribution_id) - uses the aliased CNAME for the CloudFront
- distribution to get the distribution id where required.
- required: false
- type: str
- all_lists:
- description:
- - Get all CloudFront lists that do not require parameters.
- required: false
- default: false
- type: bool
- origin_access_identity:
- description:
- - Get information about an origin access identity.
- - Requires I(origin_access_identity_id) to be specified.
- required: false
- default: false
- type: bool
- origin_access_identity_config:
- description:
- - Get the configuration information about an origin access identity.
- - Requires I(origin_access_identity_id) to be specified.
- required: false
- default: false
- type: bool
- distribution:
- description:
- - Get information about a distribution.
- - Requires I(distribution_id) or I(domain_name_alias) to be specified.
- required: false
- default: false
- type: bool
- distribution_config:
- description:
- - Get the configuration information about a distribution.
- - Requires I(distribution_id) or I(domain_name_alias) to be specified.
- required: false
- default: false
- type: bool
- invalidation:
- description:
- - Get information about an invalidation.
- - Requires I(invalidation_id) to be specified.
- required: false
- default: false
- type: bool
- streaming_distribution:
- description:
- - Get information about a specified RTMP distribution.
- - Requires I(distribution_id) or I(domain_name_alias) to be specified.
- required: false
- default: false
- type: bool
- streaming_distribution_config:
- description:
- - Get the configuration information about a specified RTMP distribution.
- - Requires I(distribution_id) or I(domain_name_alias) to be specified.
- required: false
- default: false
- type: bool
- list_origin_access_identities:
- description:
- - Get a list of CloudFront origin access identities.
- required: false
- default: false
- type: bool
- list_distributions:
- description:
- - Get a list of CloudFront distributions.
- required: false
- default: false
- type: bool
- list_distributions_by_web_acl_id:
- description:
- - Get a list of distributions using web acl id as a filter.
- - Requires I(web_acl_id) to be set.
- required: false
- default: false
- type: bool
- list_invalidations:
- description:
- - Get a list of invalidations.
- - Requires I(distribution_id) or I(domain_name_alias) to be specified.
- required: false
- default: false
- type: bool
- list_streaming_distributions:
- description:
- - Get a list of streaming distributions.
- required: false
- default: false
- type: bool
- summary:
- description:
- - Returns a summary of all distributions, streaming distributions and origin_access_identities.
- - This is the default behaviour if no option is selected.
- required: false
- default: false
- type: bool
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Get a summary of distributions
-- cloudfront_info:
- summary: true
- register: result
-
-# Get information about a distribution
-- cloudfront_info:
- distribution: true
- distribution_id: my-cloudfront-distribution-id
- register: result_did
-- debug:
- msg: "{{ result_did['cloudfront']['my-cloudfront-distribution-id'] }}"
-
-# Get information about a distribution using the CNAME of the cloudfront distribution.
-- cloudfront_info:
- distribution: true
- domain_name_alias: www.my-website.com
- register: result_website
-- debug:
- msg: "{{ result_website['cloudfront']['www.my-website.com'] }}"
-
-# When the module is called as cloudfront_facts, return values are published
-# in ansible_facts['cloudfront'][<id>] and can be used as follows.
-# Note that this is deprecated and will stop working in Ansible 2.13.
-- cloudfront_facts:
- distribution: true
- distribution_id: my-cloudfront-distribution-id
-- debug:
- msg: "{{ ansible_facts['cloudfront']['my-cloudfront-distribution-id'] }}"
-
-- cloudfront_facts:
- distribution: true
- domain_name_alias: www.my-website.com
-- debug:
- msg: "{{ ansible_facts['cloudfront']['www.my-website.com'] }}"
-
-# Get all information about an invalidation for a distribution.
-- cloudfront_facts:
- invalidation: true
- distribution_id: my-cloudfront-distribution-id
- invalidation_id: my-cloudfront-invalidation-id
-
-# Get all information about a CloudFront origin access identity.
-- cloudfront_facts:
- origin_access_identity: true
- origin_access_identity_id: my-cloudfront-origin-access-identity-id
-
-# Get all information about lists not requiring parameters (i.e. list_origin_access_identities, list_distributions, list_streaming_distributions)
-- cloudfront_facts:
- all_lists: true
-'''
-
-RETURN = '''
-origin_access_identity:
- description: Describes the origin access identity information. Requires I(origin_access_identity_id) to be set.
- returned: only if I(origin_access_identity) is true
- type: dict
-origin_access_identity_configuration:
- description: Describes the origin access identity configuration information. Requires I(origin_access_identity_id) to be set.
- returned: only if I(origin_access_identity_config) is true
- type: dict
-distribution:
- description: >
- Facts about a CloudFront distribution. Requires I(distribution_id) or I(domain_name_alias)
- to be specified.
- returned: only if I(distribution) is true
- type: dict
-distribution_config:
- description: >
- Facts about a CloudFront distribution's config. Requires I(distribution_id) or I(domain_name_alias)
- to be specified.
- returned: only if I(distribution_config) is true
- type: dict
-invalidation:
- description: >
- Describes the invalidation information for the distribution. Requires
- I(invalidation_id) to be specified and either I(distribution_id) or I(domain_name_alias).
- returned: only if I(invalidation) is true
- type: dict
-streaming_distribution:
- description: >
- Describes the streaming information for the distribution. Requires
- I(distribution_id) or I(domain_name_alias) to be specified.
- returned: only if I(streaming_distribution) is true
- type: dict
-streaming_distribution_config:
- description: >
- Describes the streaming configuration information for the distribution.
- Requires I(distribution_id) or I(domain_name_alias) to be specified.
- returned: only if I(streaming_distribution_config) is true
- type: dict
-summary:
- description: Gives a summary of distributions, streaming distributions and origin access identities.
- returned: as default or if summary is true
- type: dict
-result:
- description: >
- Result dict that is not nested under the CloudFront ID, so the module's results can be
- accessed without knowing that ID, since discovering the DistributionId is usually the
- reason one uses this module in the first place.
- returned: always
- type: dict
-'''
-
-from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, boto3_conn, HAS_BOTO3
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
-from ansible.module_utils.basic import AnsibleModule
-from functools import partial
-import traceback
-
-try:
- import botocore
-except ImportError:
- pass # will be caught by imported HAS_BOTO3
-
-
-class CloudFrontServiceManager:
- """Handles CloudFront Services"""
-
- def __init__(self, module):
- self.module = module
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- self.client = boto3_conn(module, conn_type='client',
- resource='cloudfront', region=region,
- endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoRegionError:
- self.module.fail_json(msg="Region must be specified as a parameter, in AWS_DEFAULT_REGION "
- "environment variable or in boto configuration file")
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Can't establish connection - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def get_distribution(self, distribution_id):
- try:
- func = partial(self.client.get_distribution, Id=distribution_id)
- return self.paginated_response(func)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error describing distribution - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def get_distribution_config(self, distribution_id):
- try:
- func = partial(self.client.get_distribution_config, Id=distribution_id)
- return self.paginated_response(func)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error describing distribution configuration - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def get_origin_access_identity(self, origin_access_identity_id):
- try:
- func = partial(self.client.get_cloud_front_origin_access_identity, Id=origin_access_identity_id)
- return self.paginated_response(func)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error describing origin access identity - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def get_origin_access_identity_config(self, origin_access_identity_id):
- try:
- func = partial(self.client.get_cloud_front_origin_access_identity_config, Id=origin_access_identity_id)
- return self.paginated_response(func)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error describing origin access identity configuration - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def get_invalidation(self, distribution_id, invalidation_id):
- try:
- func = partial(self.client.get_invalidation, DistributionId=distribution_id, Id=invalidation_id)
- return self.paginated_response(func)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error describing invalidation - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def get_streaming_distribution(self, distribution_id):
- try:
- func = partial(self.client.get_streaming_distribution, Id=distribution_id)
- return self.paginated_response(func)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def get_streaming_distribution_config(self, distribution_id):
- try:
- func = partial(self.client.get_streaming_distribution_config, Id=distribution_id)
- return self.paginated_response(func)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error describing streaming distribution - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def list_origin_access_identities(self):
- try:
- func = partial(self.client.list_cloud_front_origin_access_identities)
- origin_access_identity_list = self.paginated_response(func, 'CloudFrontOriginAccessIdentityList')
- if origin_access_identity_list['Quantity'] > 0:
- return origin_access_identity_list['Items']
- return {}
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error listing cloud front origin access identities - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def list_distributions(self, keyed=True):
- try:
- func = partial(self.client.list_distributions)
- distribution_list = self.paginated_response(func, 'DistributionList')
- if distribution_list['Quantity'] == 0:
- return {}
- else:
- distribution_list = distribution_list['Items']
- if not keyed:
- return distribution_list
- return self.keyed_list_helper(distribution_list)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error listing distributions - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def list_distributions_by_web_acl_id(self, web_acl_id):
- try:
- func = partial(self.client.list_distributions_by_web_acl_id, WebACLId=web_acl_id)
- distribution_list = self.paginated_response(func, 'DistributionList')
- if distribution_list['Quantity'] == 0:
- return {}
- else:
- distribution_list = distribution_list['Items']
- return self.keyed_list_helper(distribution_list)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error listing distributions by web acl id - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def list_invalidations(self, distribution_id):
- try:
- func = partial(self.client.list_invalidations, DistributionId=distribution_id)
- invalidation_list = self.paginated_response(func, 'InvalidationList')
- if invalidation_list['Quantity'] > 0:
- return invalidation_list['Items']
- return {}
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error listing invalidations - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def list_streaming_distributions(self, keyed=True):
- try:
- func = partial(self.client.list_streaming_distributions)
- streaming_distribution_list = self.paginated_response(func, 'StreamingDistributionList')
- if streaming_distribution_list['Quantity'] == 0:
- return {}
- else:
- streaming_distribution_list = streaming_distribution_list['Items']
- if not keyed:
- return streaming_distribution_list
- return self.keyed_list_helper(streaming_distribution_list)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error listing streaming distributions - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def summary(self):
- summary_dict = {}
- summary_dict.update(self.summary_get_distribution_list(False))
- summary_dict.update(self.summary_get_distribution_list(True))
- summary_dict.update(self.summary_get_origin_access_identity_list())
- return summary_dict
-
- def summary_get_origin_access_identity_list(self):
- try:
- origin_access_identity_list = {'origin_access_identities': []}
- origin_access_identities = self.list_origin_access_identities()
- for origin_access_identity in origin_access_identities:
- oai_id = origin_access_identity['Id']
- oai_full_response = self.get_origin_access_identity(oai_id)
- oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
- origin_access_identity_list['origin_access_identities'].append(oai_summary)
- return origin_access_identity_list
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error generating summary of origin access identities - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def summary_get_distribution_list(self, streaming=False):
- try:
- list_name = 'streaming_distributions' if streaming else 'distributions'
- key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
- distribution_list = {list_name: []}
- distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
- for dist in distributions:
- temp_distribution = {}
- for key_name in key_list:
- temp_distribution[key_name] = dist[key_name]
- temp_distribution['Aliases'] = [alias for alias in dist['Aliases'].get('Items', [])]
- temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
- if not streaming:
- temp_distribution['WebACLId'] = dist['WebACLId']
- invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
- if invalidation_ids:
- temp_distribution['Invalidations'] = invalidation_ids
- resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'])
- temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
- distribution_list[list_name].append(temp_distribution)
- return distribution_list
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- except Exception as e:
- self.module.fail_json(msg="Error generating summary of distributions - " + str(e),
- exception=traceback.format_exc())
-
- def get_etag_from_distribution_id(self, distribution_id, streaming):
- distribution = {}
- if not streaming:
- distribution = self.get_distribution(distribution_id)
- else:
- distribution = self.get_streaming_distribution(distribution_id)
- return distribution['ETag']
-
- def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
- try:
- invalidation_ids = []
- invalidations = self.list_invalidations(distribution_id)
- for invalidation in invalidations:
- invalidation_ids.append(invalidation['Id'])
- return invalidation_ids
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error getting list of invalidation ids - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def get_distribution_id_from_domain_name(self, domain_name):
- try:
- distribution_id = ""
- distributions = self.list_distributions(False)
- distributions += self.list_streaming_distributions(False)
- for dist in distributions:
- if 'Items' in dist['Aliases']:
- for alias in dist['Aliases']['Items']:
- if str(alias).lower() == domain_name.lower():
- distribution_id = dist['Id']
- break
- return distribution_id
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error getting distribution id from domain name - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def get_aliases_from_distribution_id(self, distribution_id):
- aliases = []
- try:
- distributions = self.list_distributions(False)
- for dist in distributions:
- if dist['Id'] == distribution_id and 'Items' in dist['Aliases']:
- for alias in dist['Aliases']['Items']:
- aliases.append(alias)
- break
- return aliases
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg="Error getting list of aliases from distribution_id - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- def paginated_response(self, func, result_key=""):
- '''
- Returns expanded response for paginated operations.
- The 'result_key' selects the portion of each page that is merged into the combined result.
- '''
- args = dict()
- results = dict()
- loop = True
- while loop:
- response = func(**args)
- if result_key == "":
- result = response
- result.pop('ResponseMetadata', None)
- else:
- result = response.get(result_key)
- results.update(result)
- args['Marker'] = response.get('NextMarker')
- for key in response.keys():
- if key.endswith('List'):
- args['Marker'] = response[key].get('NextMarker')
- break
- loop = args['Marker'] is not None
- return results
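-
- # Condensed form of the Marker-driven loop above: call func(**args), merge the
- # page (or response[result_key]) into results, then look for a top-level key
- # ending in 'List' (e.g. 'DistributionList') and read its 'NextMarker' as the
- # cursor for the next call; stop once no NextMarker is returned.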
-
- def keyed_list_helper(self, list_to_key):
- keyed_list = dict()
- for item in list_to_key:
- distribution_id = item['Id']
- if 'Items' in item['Aliases']:
- aliases = item['Aliases']['Items']
- for alias in aliases:
- keyed_list.update({alias: item})
- keyed_list.update({distribution_id: item})
- return keyed_list
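-
- # Illustrative output shape: a distribution with Id 'E2EXAMPLE' and alias
- # 'www.example.com' appears in the returned dict under both keys, each pointing
- # at the same item, so callers can look results up by either identifier.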
-
-
-def set_facts_for_distribution_id_and_alias(details, facts, distribution_id, aliases):
- facts[distribution_id].update(details)
- # also have a fixed key for accessing results/details returned
- facts['result'] = details
- facts['result']['DistributionId'] = distribution_id
-
- for alias in aliases:
- facts[alias].update(details)
- return facts
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- distribution_id=dict(required=False, type='str'),
- invalidation_id=dict(required=False, type='str'),
- origin_access_identity_id=dict(required=False, type='str'),
- web_acl_id=dict(required=False, type='str'),
- domain_name_alias=dict(required=False, type='str'),
- all_lists=dict(required=False, default=False, type='bool'),
- distribution=dict(required=False, default=False, type='bool'),
- distribution_config=dict(required=False, default=False, type='bool'),
- origin_access_identity=dict(required=False, default=False, type='bool'),
- origin_access_identity_config=dict(required=False, default=False, type='bool'),
- invalidation=dict(required=False, default=False, type='bool'),
- streaming_distribution=dict(required=False, default=False, type='bool'),
- streaming_distribution_config=dict(required=False, default=False, type='bool'),
- list_origin_access_identities=dict(required=False, default=False, type='bool'),
- list_distributions=dict(required=False, default=False, type='bool'),
- list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'),
- list_invalidations=dict(required=False, default=False, type='bool'),
- list_streaming_distributions=dict(required=False, default=False, type='bool'),
- summary=dict(required=False, default=False, type='bool')
- ))
-
- module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
- is_old_facts = module._name == 'cloudfront_facts'
- if is_old_facts:
- module.deprecate("The 'cloudfront_facts' module has been renamed to 'cloudfront_info', "
- "and the renamed one no longer returns ansible_facts", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required.')
-
- service_mgr = CloudFrontServiceManager(module)
-
- distribution_id = module.params.get('distribution_id')
- invalidation_id = module.params.get('invalidation_id')
- origin_access_identity_id = module.params.get('origin_access_identity_id')
- web_acl_id = module.params.get('web_acl_id')
- domain_name_alias = module.params.get('domain_name_alias')
- all_lists = module.params.get('all_lists')
- distribution = module.params.get('distribution')
- distribution_config = module.params.get('distribution_config')
- origin_access_identity = module.params.get('origin_access_identity')
- origin_access_identity_config = module.params.get('origin_access_identity_config')
- invalidation = module.params.get('invalidation')
- streaming_distribution = module.params.get('streaming_distribution')
- streaming_distribution_config = module.params.get('streaming_distribution_config')
- list_origin_access_identities = module.params.get('list_origin_access_identities')
- list_distributions = module.params.get('list_distributions')
- list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id')
- list_invalidations = module.params.get('list_invalidations')
- list_streaming_distributions = module.params.get('list_streaming_distributions')
- summary = module.params.get('summary')
-
- aliases = []
- result = {'cloudfront': {}}
- facts = {}
-
- require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
- streaming_distribution_config or list_invalidations)
-
- # set default to summary if no option specified
- summary = summary or not (distribution or distribution_config or origin_access_identity or
- origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
- list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
- list_streaming_distributions or list_distributions)
-
- # validations
- if require_distribution_id and distribution_id is None and domain_name_alias is None:
- module.fail_json(msg='Error: either distribution_id or domain_name_alias must be specified.')
- if invalidation and invalidation_id is None:
- module.fail_json(msg='Error: invalidation_id must be specified.')
- if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None:
- module.fail_json(msg='Error: origin_access_identity_id must be specified.')
- if list_distributions_by_web_acl_id and web_acl_id is None:
- module.fail_json(msg='Error: web_acl_id must be specified.')
-
- # get distribution id from domain name alias
- if require_distribution_id and distribution_id is None:
- distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias)
- if not distribution_id:
- module.fail_json(msg='Error: unable to source a distribution id from domain_name_alias.')
-
- # set appropriate cloudfront id
- if distribution_id and not list_invalidations:
- facts = {distribution_id: {}}
- aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
- for alias in aliases:
- facts.update({alias: {}})
- if invalidation_id:
- facts.update({invalidation_id: {}})
- elif distribution_id and list_invalidations:
- facts = {distribution_id: {}}
- aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
- for alias in aliases:
- facts.update({alias: {}})
- elif origin_access_identity_id:
- facts = {origin_access_identity_id: {}}
- elif web_acl_id:
- facts = {web_acl_id: {}}
-
- # get details based on options
- if distribution:
- facts_to_set = service_mgr.get_distribution(distribution_id)
- if distribution_config:
- facts_to_set = service_mgr.get_distribution_config(distribution_id)
- if origin_access_identity:
- facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id))
- if origin_access_identity_config:
- facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id))
- if invalidation:
- facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id)
- facts[invalidation_id].update(facts_to_set)
- if streaming_distribution:
- facts_to_set = service_mgr.get_streaming_distribution(distribution_id)
- if streaming_distribution_config:
- facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
- if list_invalidations:
- facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
-    # facts_to_set is only defined when one of the single-resource options above ran
-    if 'facts_to_set' in vars():
-        facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)
-
- # get list based on options
- if all_lists or list_origin_access_identities:
- facts['origin_access_identities'] = service_mgr.list_origin_access_identities()
- if all_lists or list_distributions:
- facts['distributions'] = service_mgr.list_distributions()
- if all_lists or list_streaming_distributions:
- facts['streaming_distributions'] = service_mgr.list_streaming_distributions()
- if list_distributions_by_web_acl_id:
- facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id)
- if list_invalidations:
- facts['invalidations'] = service_mgr.list_invalidations(distribution_id)
-
- # default summary option
- if summary:
- facts['summary'] = service_mgr.summary()
-
- result['changed'] = False
- result['cloudfront'].update(facts)
- if is_old_facts:
- module.exit_json(msg="Retrieved CloudFront facts.", ansible_facts=result)
- else:
- module.exit_json(msg="Retrieved CloudFront info.", **result)
-
-
-if __name__ == '__main__':
- main()
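
The main() above defaults to the summary view when no lookup option is set, and resolves distribution_id from domain_name_alias before any per-distribution call. A minimal sketch of that alias resolution against the stock boto3 CloudFront API (the function name is illustrative; the module delegates this to CloudFrontFactsServiceManager):

```python
import boto3

def distribution_id_from_alias(alias):
    """Return the ID of the distribution carrying the given CNAME alias, or None."""
    client = boto3.client('cloudfront')
    paginator = client.get_paginator('list_distributions')
    for page in paginator.paginate():
        for dist in page.get('DistributionList', {}).get('Items', []):
            if alias in dist.get('Aliases', {}).get('Items', []):
                return dist['Id']
    return None
```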
diff --git a/lib/ansible/modules/cloud/amazon/cloudfront_invalidation.py b/lib/ansible/modules/cloud/amazon/cloudfront_invalidation.py
deleted file mode 100644
index 10265a0950..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudfront_invalidation.py
+++ /dev/null
@@ -1,276 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-
-module: cloudfront_invalidation
-
-short_description: create invalidations for AWS CloudFront distributions
-description:
- - Allows for invalidation of a batch of paths for a CloudFront distribution.
-
-requirements:
- - boto3 >= 1.0.0
- - python >= 2.6
-
-version_added: "2.5"
-
-author: Willem van Ketwich (@wilvk)
-
-extends_documentation_fragment:
- - aws
- - ec2
-
-options:
- distribution_id:
- description:
- - The ID of the CloudFront distribution to invalidate paths for. Can be specified instead of the alias.
- required: false
- type: str
- alias:
- description:
- - The alias of the CloudFront distribution to invalidate paths for. Can be specified instead of distribution_id.
- required: false
- type: str
- caller_reference:
- description:
- - A unique reference identifier for the invalidation paths.
- - Defaults to current datetime stamp.
- required: false
- default:
- type: str
- target_paths:
- description:
-      - A list of paths on the distribution to invalidate. Each path should begin with '/'. Wildcards are allowed, e.g. '/foo/bar/*'.
- required: true
- type: list
- elements: str
-
-notes:
-    - Does not support check mode.
-
-'''
-
-EXAMPLES = '''
-
-- name: create a batch of invalidations using a distribution_id for a reference
- cloudfront_invalidation:
- distribution_id: E15BU8SDCGSG57
- caller_reference: testing 123
- target_paths:
- - /testpathone/test1.css
- - /testpathtwo/test2.js
-      - /testpaththree/test3.css
-
-- name: create a batch of invalidations using an alias as a reference and one path using a wildcard match
- cloudfront_invalidation:
- alias: alias.test.com
- caller_reference: testing 123
- target_paths:
- - /testpathone/test4.css
- - /testpathtwo/test5.js
- - /testpaththree/*
-
-'''
-
-RETURN = '''
-invalidation:
- description: The invalidation's information.
- returned: always
- type: complex
- contains:
- create_time:
- description: The date and time the invalidation request was first made.
- returned: always
- type: str
- sample: '2018-02-01T15:50:41.159000+00:00'
- id:
- description: The identifier for the invalidation request.
- returned: always
- type: str
- sample: I2G9MOWJZFV612
- invalidation_batch:
- description: The current invalidation information for the batch request.
- returned: always
- type: complex
- contains:
- caller_reference:
- description: The value used to uniquely identify an invalidation request.
- returned: always
- type: str
- sample: testing 123
- paths:
- description: A dict that contains information about the objects that you want to invalidate.
- returned: always
- type: complex
- contains:
- items:
- description: A list of the paths that you want to invalidate.
- returned: always
- type: list
- sample:
- - /testpathtwo/test2.js
- - /testpathone/test1.css
-          - /testpaththree/test3.css
- quantity:
- description: The number of objects that you want to invalidate.
- returned: always
- type: int
- sample: 3
- status:
- description: The status of the invalidation request.
- returned: always
- type: str
- sample: Completed
-location:
- description: The fully qualified URI of the distribution and invalidation batch request.
- returned: always
- type: str
- sample: https://cloudfront.amazonaws.com/2017-03-25/distribution/E1ZID6KZJECZY7/invalidation/I2G9MOWJZFV622
-'''
-
-from ansible.module_utils.ec2 import snake_dict_to_camel_dict
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager
-import datetime
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # caught by imported AnsibleAWSModule
-
-
-class CloudFrontInvalidationServiceManager(object):
- """
- Handles CloudFront service calls to AWS for invalidations
- """
-
- def __init__(self, module):
- self.module = module
- self.client = module.client('cloudfront')
-
- def create_invalidation(self, distribution_id, invalidation_batch):
- current_invalidation_response = self.get_invalidation(distribution_id, invalidation_batch['CallerReference'])
- try:
- response = self.client.create_invalidation(DistributionId=distribution_id, InvalidationBatch=invalidation_batch)
- response.pop('ResponseMetadata', None)
- if current_invalidation_response:
- return response, False
- else:
- return response, True
- except BotoCoreError as e:
- self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")
- except ClientError as e:
- if ('Your request contains a caller reference that was used for a previous invalidation batch '
- 'for the same distribution.' in e.response['Error']['Message']):
- self.module.warn("InvalidationBatch target paths are not modifiable. "
- "To make a new invalidation please update caller_reference.")
- return current_invalidation_response, False
- else:
- self.module.fail_json_aws(e, msg="Error creating CloudFront invalidations.")
-
- def get_invalidation(self, distribution_id, caller_reference):
- current_invalidation = {}
- # find all invalidations for the distribution
- try:
- paginator = self.client.get_paginator('list_invalidations')
- invalidations = paginator.paginate(DistributionId=distribution_id).build_full_result().get('InvalidationList', {}).get('Items', [])
- invalidation_ids = [inv['Id'] for inv in invalidations]
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Error listing CloudFront invalidations.")
-
- # check if there is an invalidation with the same caller reference
- for inv_id in invalidation_ids:
- try:
- invalidation = self.client.get_invalidation(DistributionId=distribution_id, Id=inv_id)['Invalidation']
- caller_ref = invalidation.get('InvalidationBatch', {}).get('CallerReference')
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e, msg="Error getting CloudFront invalidation {0}".format(inv_id))
- if caller_ref == caller_reference:
- current_invalidation = invalidation
- break
-
- current_invalidation.pop('ResponseMetadata', None)
- return current_invalidation
-
-
-class CloudFrontInvalidationValidationManager(object):
- """
- Manages CloudFront validations for invalidation batches
- """
-
- def __init__(self, module):
- self.module = module
- self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
-
- def validate_distribution_id(self, distribution_id, alias):
- try:
- if distribution_id is None and alias is None:
- self.module.fail_json(msg="distribution_id or alias must be specified")
- if distribution_id is None:
- distribution_id = self.__cloudfront_facts_mgr.get_distribution_id_from_domain_name(alias)
- return distribution_id
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error validating parameters.")
-
- def create_aws_list(self, invalidation_batch):
- aws_list = {}
- aws_list["Quantity"] = len(invalidation_batch)
- aws_list["Items"] = invalidation_batch
- return aws_list
-
- def validate_invalidation_batch(self, invalidation_batch, caller_reference):
- try:
- if caller_reference is not None:
- valid_caller_reference = caller_reference
- else:
- valid_caller_reference = datetime.datetime.now().isoformat()
- valid_invalidation_batch = {
- 'paths': self.create_aws_list(invalidation_batch),
- 'caller_reference': valid_caller_reference
- }
- return valid_invalidation_batch
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error validating invalidation batch.")
-
-
-def main():
- argument_spec = dict(
- caller_reference=dict(),
- distribution_id=dict(),
- alias=dict(),
- target_paths=dict(required=True, type='list')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[['distribution_id', 'alias']])
-
- validation_mgr = CloudFrontInvalidationValidationManager(module)
- service_mgr = CloudFrontInvalidationServiceManager(module)
-
- caller_reference = module.params.get('caller_reference')
- distribution_id = module.params.get('distribution_id')
- alias = module.params.get('alias')
- target_paths = module.params.get('target_paths')
-
- result = {}
-
- distribution_id = validation_mgr.validate_distribution_id(distribution_id, alias)
- valid_target_paths = validation_mgr.validate_invalidation_batch(target_paths, caller_reference)
- valid_pascal_target_paths = snake_dict_to_camel_dict(valid_target_paths, True)
- result, changed = service_mgr.create_invalidation(distribution_id, valid_pascal_target_paths)
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
-
-
-if __name__ == '__main__':
- main()
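
create_invalidation() above relies on CloudFront treating CallerReference as an idempotency token: resubmitting a reference already used with a different path set is rejected, which the module downgrades to a warning plus the existing invalidation. A sketch of the underlying boto3 call and the CamelCase batch shape that snake_dict_to_camel_dict produces, reusing the illustrative values from the docs above:

```python
import boto3

client = boto3.client('cloudfront')
response = client.create_invalidation(
    DistributionId='E15BU8SDCGSG57',  # example distribution ID from the docs above
    InvalidationBatch={
        'Paths': {
            'Quantity': 2,
            'Items': ['/testpathone/test4.css', '/testpaththree/*'],
        },
        # Reusing this reference replays the original batch instead of creating a new one.
        'CallerReference': 'testing 123',
    },
)
print(response['Invalidation']['Status'])  # e.g. 'InProgress'
```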
diff --git a/lib/ansible/modules/cloud/amazon/cloudfront_origin_access_identity.py b/lib/ansible/modules/cloud/amazon/cloudfront_origin_access_identity.py
deleted file mode 100644
index 44381fcbfa..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudfront_origin_access_identity.py
+++ /dev/null
@@ -1,280 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-
-module: cloudfront_origin_access_identity
-
-short_description: Create, update and delete origin access identities for a
- CloudFront distribution
-
-description:
- - Allows for easy creation, updating and deletion of origin access
- identities.
-
-requirements:
- - boto3 >= 1.0.0
- - python >= 2.6
-
-version_added: "2.5"
-
-author: Willem van Ketwich (@wilvk)
-
-extends_documentation_fragment:
- - aws
- - ec2
-
-options:
- state:
-    description: Whether the named resource should exist.
- choices:
- - present
- - absent
- default: present
- type: str
- origin_access_identity_id:
- description:
- - The origin_access_identity_id of the CloudFront distribution.
- required: false
- type: str
- comment:
- description:
- - A comment to describe the CloudFront origin access identity.
- required: false
- type: str
- caller_reference:
- description:
- - A unique identifier to reference the origin access identity by.
- required: false
- type: str
-
-notes:
- - Does not support check mode.
-
-'''
-
-EXAMPLES = '''
-
-- name: create an origin access identity
- cloudfront_origin_access_identity:
- state: present
- caller_reference: this is an example reference
- comment: this is an example comment
-
-- name: update an existing origin access identity using caller_reference as an identifier
- cloudfront_origin_access_identity:
- origin_access_identity_id: E17DRN9XUOAHZX
- caller_reference: this is an example reference
- comment: this is a new comment
-
-- name: delete an existing origin access identity using caller_reference as an identifier
- cloudfront_origin_access_identity:
- state: absent
- caller_reference: this is an example reference
- comment: this is a new comment
-
-'''
-
-RETURN = '''
-cloud_front_origin_access_identity:
- description: The origin access identity's information.
- returned: always
- type: complex
- contains:
- cloud_front_origin_access_identity_config:
-      description: Describes a URL specifying the origin access identity.
- returned: always
- type: complex
- contains:
- caller_reference:
-          description: A caller reference for the OAI.
-          returned: always
-          type: str
-        comment:
-          description: A comment describing the OAI.
-          returned: always
-          type: str
-        id:
-          description: A unique identifier of the OAI.
-          returned: always
-          type: str
-        s3_canonical_user_id:
-          description: The canonical user ID of the user who created the OAI.
- returned: always
- type: str
-e_tag:
- description: The current version of the origin access identity created.
- returned: always
- type: str
-location:
- description: The fully qualified URI of the new origin access identity just created.
- returned: when initially created
- type: str
-
-'''
-
-from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible.module_utils.aws.core import AnsibleAWSModule
-import datetime
-from functools import partial
-import json
-import traceback
-
-try:
- import botocore
- from botocore.signers import CloudFrontSigner
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # caught by imported AnsibleAWSModule
-
-
-class CloudFrontOriginAccessIdentityServiceManager(object):
- """
-    Handles CloudFront origin access identity service calls to AWS
- """
-
- def __init__(self, module):
- self.module = module
- self.client = module.client('cloudfront')
-
- def create_origin_access_identity(self, caller_reference, comment):
- try:
- return self.client.create_cloud_front_origin_access_identity(
- CloudFrontOriginAccessIdentityConfig={
- 'CallerReference': caller_reference,
- 'Comment': comment
- }
- )
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error creating cloud front origin access identity.")
-
- def delete_origin_access_identity(self, origin_access_identity_id, e_tag):
- try:
- return self.client.delete_cloud_front_origin_access_identity(Id=origin_access_identity_id, IfMatch=e_tag)
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.")
-
- def update_origin_access_identity(self, caller_reference, comment, origin_access_identity_id, e_tag):
- changed = False
- new_config = {
- 'CallerReference': caller_reference,
- 'Comment': comment
- }
-
- try:
- current_config = self.client.get_cloud_front_origin_access_identity_config(
- Id=origin_access_identity_id)['CloudFrontOriginAccessIdentityConfig']
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error getting Origin Access Identity config.")
-
- if new_config != current_config:
- changed = True
-
- try:
- # If the CallerReference is a value already sent in a previous identity request
- # the returned value is that of the original request
- result = self.client.update_cloud_front_origin_access_identity(
- CloudFrontOriginAccessIdentityConfig=new_config,
- Id=origin_access_identity_id,
- IfMatch=e_tag,
- )
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error updating Origin Access Identity.")
-
- return result, changed
-
-
-class CloudFrontOriginAccessIdentityValidationManager(object):
- """
-    Manages validation of CloudFront origin access identity parameters
- """
-
- def __init__(self, module):
- self.module = module
- self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module)
-
- def validate_etag_from_origin_access_identity_id(self, origin_access_identity_id):
- try:
- if origin_access_identity_id is None:
- return
- oai = self.__cloudfront_facts_mgr.get_origin_access_identity(origin_access_identity_id)
- if oai is not None:
- return oai.get('ETag')
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error getting etag from origin_access_identity.")
-
- def validate_origin_access_identity_id_from_caller_reference(
- self, caller_reference):
- try:
- origin_access_identities = self.__cloudfront_facts_mgr.list_origin_access_identities()
-            origin_access_identity_ids = [oai.get('Id') for oai in origin_access_identities]
-            for origin_access_identity_id in origin_access_identity_ids:
-                oai_config = self.__cloudfront_facts_mgr.get_origin_access_identity_config(origin_access_identity_id)
- temp_caller_reference = oai_config.get('CloudFrontOriginAccessIdentityConfig').get('CallerReference')
- if temp_caller_reference == caller_reference:
- return origin_access_identity_id
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Error getting Origin Access Identity from caller_reference.")
-
- def validate_comment(self, comment):
- if comment is None:
- return "origin access identity created by Ansible with datetime " + datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
- return comment
-
-
-def main():
- argument_spec = dict(
- state=dict(choices=['present', 'absent'], default='present'),
- origin_access_identity_id=dict(),
- caller_reference=dict(),
- comment=dict(),
- )
-
- result = {}
- e_tag = None
- changed = False
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=False)
- service_mgr = CloudFrontOriginAccessIdentityServiceManager(module)
- validation_mgr = CloudFrontOriginAccessIdentityValidationManager(module)
-
- state = module.params.get('state')
- caller_reference = module.params.get('caller_reference')
-
- comment = module.params.get('comment')
- origin_access_identity_id = module.params.get('origin_access_identity_id')
-
- if origin_access_identity_id is None and caller_reference is not None:
- origin_access_identity_id = validation_mgr.validate_origin_access_identity_id_from_caller_reference(caller_reference)
-
- e_tag = validation_mgr.validate_etag_from_origin_access_identity_id(origin_access_identity_id)
- comment = validation_mgr.validate_comment(comment)
-
- if state == 'present':
- if origin_access_identity_id is not None and e_tag is not None:
- result, changed = service_mgr.update_origin_access_identity(caller_reference, comment, origin_access_identity_id, e_tag)
- else:
- result = service_mgr.create_origin_access_identity(caller_reference, comment)
- changed = True
-    elif state == 'absent' and origin_access_identity_id is not None and e_tag is not None:
- result = service_mgr.delete_origin_access_identity(origin_access_identity_id, e_tag)
- changed = True
-
- result.pop('ResponseMetadata', None)
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
-
-
-if __name__ == '__main__':
- main()
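
The update path above is a read-modify-write guarded by CloudFront's ETag: validate_etag_from_origin_access_identity_id() reads the current tag and update_origin_access_identity() sends it back as IfMatch, so a stale tag makes AWS reject the write instead of clobbering a concurrent change. A sketch of that handshake with plain boto3, using the example OAI ID from the docs above:

```python
import boto3

client = boto3.client('cloudfront')
oai_id = 'E17DRN9XUOAHZX'  # example ID from the docs above

# Read: the config and its current ETag come back together.
current = client.get_cloud_front_origin_access_identity_config(Id=oai_id)
config = current['CloudFrontOriginAccessIdentityConfig']
config['Comment'] = 'this is a new comment'

# Write: IfMatch must carry the ETag from the read, or AWS rejects the update.
client.update_cloud_front_origin_access_identity(
    CloudFrontOriginAccessIdentityConfig=config,
    Id=oai_id,
    IfMatch=current['ETag'],
)
```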
diff --git a/lib/ansible/modules/cloud/amazon/cloudtrail.py b/lib/ansible/modules/cloud/amazon/cloudtrail.py
deleted file mode 100644
index 382e8b15f0..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudtrail.py
+++ /dev/null
@@ -1,618 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: cloudtrail
-short_description: manage CloudTrail creation, deletion, and updates
-description:
- - Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled.
-version_added: "2.0"
-author:
- - Ansible Core Team
- - Ted Timmons (@tedder)
- - Daniel Shepherd (@shepdelacreme)
-requirements:
- - boto3
- - botocore
-options:
- state:
- description:
- - Add or remove CloudTrail configuration.
- - 'The following states have been preserved for backwards compatibility: I(state=enabled) and I(state=disabled).'
-    - I(state=enabled) is equivalent to I(state=present).
-    - I(state=disabled) is equivalent to I(state=absent).
- type: str
- choices: ['present', 'absent', 'enabled', 'disabled']
- default: present
- name:
- description:
- - Name for the CloudTrail.
- - Names are unique per-region unless the CloudTrail is a multi-region trail, in which case it is unique per-account.
- type: str
- default: default
- enable_logging:
- description:
-      - Start or stop the CloudTrail logging. If stopped, the trail will be paused and will not record events or deliver log files.
- default: true
- type: bool
- version_added: "2.4"
- s3_bucket_name:
- description:
- - An existing S3 bucket where CloudTrail will deliver log files.
- - This bucket should exist and have the proper policy.
- - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html).
- - Required when I(state=present).
- type: str
- version_added: "2.4"
- s3_key_prefix:
- description:
- - S3 Key prefix for delivered log files. A trailing slash is not necessary and will be removed.
- type: str
- is_multi_region_trail:
- description:
- - Specify whether the trail belongs only to one region or exists in all regions.
- default: false
- type: bool
- version_added: "2.4"
- enable_log_file_validation:
- description:
- - Specifies whether log file integrity validation is enabled.
-      - CloudTrail will create a hash for every log file delivered and produce a signed digest file that can be used to ensure log files have not been tampered with.
- version_added: "2.4"
- type: bool
- aliases: [ "log_file_validation_enabled" ]
- include_global_events:
- description:
- - Record API calls from global services such as IAM and STS.
- default: true
- type: bool
- aliases: [ "include_global_service_events" ]
- sns_topic_name:
- description:
- - SNS Topic name to send notifications to when a log file is delivered.
- version_added: "2.4"
- type: str
- cloudwatch_logs_role_arn:
- description:
- - Specifies a full ARN for an IAM role that assigns the proper permissions for CloudTrail to create and write to the log group.
- - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
-      - Required when C(cloudwatch_logs_log_group_arn) is specified.
- version_added: "2.4"
- type: str
- cloudwatch_logs_log_group_arn:
- description:
- - A full ARN specifying a valid CloudWatch log group to which CloudTrail logs will be delivered. The log group should already exist.
- - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html).
-      - Required when C(cloudwatch_logs_role_arn) is specified.
- type: str
- version_added: "2.4"
- kms_key_id:
- description:
- - Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. This also has the effect of enabling log file encryption.
- - The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
- - See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html).
- type: str
- version_added: "2.4"
- tags:
- description:
- - A hash/dictionary of tags to be applied to the CloudTrail resource.
- - Remove completely or specify an empty dictionary to remove all tags.
- default: {}
- version_added: "2.4"
- type: dict
-
-extends_documentation_fragment:
-- aws
-- ec2
-'''
-
-EXAMPLES = '''
-- name: create single region cloudtrail
- cloudtrail:
- state: present
- name: default
- s3_bucket_name: mylogbucket
- s3_key_prefix: cloudtrail
- region: us-east-1
-
-- name: create multi-region trail with validation and tags
- cloudtrail:
- state: present
- name: default
- s3_bucket_name: mylogbucket
- region: us-east-1
- is_multi_region_trail: true
- enable_log_file_validation: true
- cloudwatch_logs_role_arn: "arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role"
- cloudwatch_logs_log_group_arn: "arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*"
- kms_key_id: "alias/MyAliasName"
- tags:
- environment: dev
- Name: default
-
-- name: show another valid kms_key_id
- cloudtrail:
- state: present
- name: default
- s3_bucket_name: mylogbucket
- kms_key_id: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
- # simply "12345678-1234-1234-1234-123456789012" would be valid too.
-
-- name: pause logging the trail we just created
- cloudtrail:
- state: present
- name: default
- enable_logging: false
- s3_bucket_name: mylogbucket
- region: us-east-1
- is_multi_region_trail: true
- enable_log_file_validation: true
- tags:
- environment: dev
- Name: default
-
-- name: delete a trail
- cloudtrail:
- state: absent
- name: default
-'''
-
-RETURN = '''
-exists:
- description: whether the resource exists
- returned: always
- type: bool
- sample: true
-trail:
- description: CloudTrail resource details
- returned: always
- type: complex
- sample: hash/dictionary of values
- contains:
- trail_arn:
- description: Full ARN of the CloudTrail resource
- returned: success
- type: str
- sample: arn:aws:cloudtrail:us-east-1:123456789012:trail/default
- name:
- description: Name of the CloudTrail resource
- returned: success
- type: str
- sample: default
- is_logging:
- description: Whether logging is turned on or paused for the Trail
- returned: success
- type: bool
-      sample: true
- s3_bucket_name:
- description: S3 bucket name where log files are delivered
- returned: success
- type: str
- sample: myBucket
- s3_key_prefix:
- description: Key prefix in bucket where log files are delivered (if any)
- returned: success when present
- type: str
- sample: myKeyPrefix
- log_file_validation_enabled:
- description: Whether log file validation is enabled on the trail
- returned: success
- type: bool
- sample: true
- include_global_service_events:
- description: Whether global services (IAM, STS) are logged with this trail
- returned: success
- type: bool
- sample: true
- is_multi_region_trail:
- description: Whether the trail applies to all regions or just one
- returned: success
- type: bool
- sample: true
- has_custom_event_selectors:
- description: Whether any custom event selectors are used for this trail.
- returned: success
- type: bool
-      sample: false
- home_region:
- description: The home region where the trail was originally created and must be edited.
- returned: success
- type: str
- sample: us-east-1
- sns_topic_name:
- description: The SNS topic name where log delivery notifications are sent.
- returned: success when present
- type: str
- sample: myTopic
- sns_topic_arn:
- description: Full ARN of the SNS topic where log delivery notifications are sent.
- returned: success when present
- type: str
- sample: arn:aws:sns:us-east-1:123456789012:topic/myTopic
- cloud_watch_logs_log_group_arn:
- description: Full ARN of the CloudWatch Logs log group where events are delivered.
- returned: success when present
- type: str
- sample: arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*
- cloud_watch_logs_role_arn:
- description: Full ARN of the IAM role that CloudTrail assumes to deliver events.
- returned: success when present
- type: str
- sample: arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role
- kms_key_id:
- description: Full ARN of the KMS Key used to encrypt log files.
- returned: success when present
- type: str
- sample: arn:aws:kms::123456789012:key/12345678-1234-1234-1234-123456789012
- tags:
- description: hash/dictionary of tags applied to this resource
- returned: success
- type: dict
- sample: {'environment': 'dev', 'Name': 'default'}
-'''
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (camel_dict_to_snake_dict,
- ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict)
-
-
-def create_trail(module, client, ct_params):
- """
- Creates a CloudTrail
-
- module : AnsibleAWSModule object
- client : boto3 client connection object
- ct_params : The parameters for the Trail to create
- """
- resp = {}
- try:
- resp = client.create_trail(**ct_params)
- except (BotoCoreError, ClientError) as err:
- module.fail_json_aws(err, msg="Failed to create Trail")
-
- return resp
-
-
-def tag_trail(module, client, tags, trail_arn, curr_tags=None, dry_run=False):
- """
- Creates, updates, removes tags on a CloudTrail resource
-
- module : AnsibleAWSModule object
- client : boto3 client connection object
- tags : Dict of tags converted from ansible_dict to boto3 list of dicts
- trail_arn : The ARN of the CloudTrail to operate on
- curr_tags : Dict of the current tags on resource, if any
-    dry_run : when true, report whether changes would be made without actually making them
- """
- adds = []
- removes = []
- updates = []
- changed = False
-
- if curr_tags is None:
- # No current tags so just convert all to a tag list
- adds = ansible_dict_to_boto3_tag_list(tags)
- else:
- curr_keys = set(curr_tags.keys())
- new_keys = set(tags.keys())
- add_keys = new_keys - curr_keys
- remove_keys = curr_keys - new_keys
- update_keys = dict()
- for k in curr_keys.intersection(new_keys):
- if curr_tags[k] != tags[k]:
- update_keys.update({k: tags[k]})
-
- adds = get_tag_list(add_keys, tags)
- removes = get_tag_list(remove_keys, curr_tags)
- updates = get_tag_list(update_keys, tags)
-
- if removes or updates:
- changed = True
- if not dry_run:
- try:
- client.remove_tags(ResourceId=trail_arn, TagsList=removes + updates)
- except (BotoCoreError, ClientError) as err:
- module.fail_json_aws(err, msg="Failed to remove tags from Trail")
-
- if updates or adds:
- changed = True
- if not dry_run:
- try:
- client.add_tags(ResourceId=trail_arn, TagsList=updates + adds)
- except (BotoCoreError, ClientError) as err:
- module.fail_json_aws(err, msg="Failed to add tags to Trail")
-
- return changed
-
-
-def get_tag_list(keys, tags):
- """
- Returns a list of dicts with tags to act on
- keys : set of keys to get the values for
- tags : the dict of tags to turn into a list
- """
- tag_list = []
- for k in keys:
- tag_list.append({'Key': k, 'Value': tags[k]})
-
- return tag_list
-
-
-def set_logging(module, client, name, action):
- """
- Starts or stops logging based on given state
-
- module : AnsibleAWSModule object
- client : boto3 client connection object
- name : The name or ARN of the CloudTrail to operate on
- action : start or stop
- """
- if action == 'start':
- try:
- client.start_logging(Name=name)
- return client.get_trail_status(Name=name)
- except (BotoCoreError, ClientError) as err:
- module.fail_json_aws(err, msg="Failed to start logging")
- elif action == 'stop':
- try:
- client.stop_logging(Name=name)
- return client.get_trail_status(Name=name)
- except (BotoCoreError, ClientError) as err:
- module.fail_json_aws(err, msg="Failed to stop logging")
- else:
- module.fail_json(msg="Unsupported logging action")
-
-
-def get_trail_facts(module, client, name):
- """
- Describes existing trail in an account
-
- module : AnsibleAWSModule object
- client : boto3 client connection object
- name : Name of the trail
- """
- # get Trail info
- try:
- trail_resp = client.describe_trails(trailNameList=[name])
- except (BotoCoreError, ClientError) as err:
- module.fail_json_aws(err, msg="Failed to describe Trail")
-
- # Now check to see if our trail exists and get status and tags
- if len(trail_resp['trailList']):
- trail = trail_resp['trailList'][0]
- try:
- status_resp = client.get_trail_status(Name=trail['Name'])
- tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']])
- except (BotoCoreError, ClientError) as err:
- module.fail_json_aws(err, msg="Failed to describe Trail")
-
- trail['IsLogging'] = status_resp['IsLogging']
- trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList'])
- # Check for non-existent values and populate with None
- optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId'])
- for v in optional_vals - set(trail.keys()):
- trail[v] = None
- return trail
-
- else:
- # trail doesn't exist return None
- return None
-
-
-def delete_trail(module, client, trail_arn):
- """
- Delete a CloudTrail
-
- module : AnsibleAWSModule object
- client : boto3 client connection object
- trail_arn : Full CloudTrail ARN
- """
- try:
- client.delete_trail(Name=trail_arn)
- except (BotoCoreError, ClientError) as err:
- module.fail_json_aws(err, msg="Failed to delete Trail")
-
-
-def update_trail(module, client, ct_params):
- """
-    Updates a CloudTrail
-
- module : AnsibleAWSModule object
- client : boto3 client connection object
- ct_params : The parameters for the Trail to update
- """
- try:
- client.update_trail(**ct_params)
- except (BotoCoreError, ClientError) as err:
- module.fail_json_aws(err, msg="Failed to update Trail")
-
-
-def main():
- argument_spec = dict(
- state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
- name=dict(default='default'),
- enable_logging=dict(default=True, type='bool'),
- s3_bucket_name=dict(),
- s3_key_prefix=dict(),
- sns_topic_name=dict(),
- is_multi_region_trail=dict(default=False, type='bool'),
- enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']),
- include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']),
- cloudwatch_logs_role_arn=dict(),
- cloudwatch_logs_log_group_arn=dict(),
- kms_key_id=dict(),
- tags=dict(default={}, type='dict'),
- )
-
- required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
- required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if)
-
- # collect parameters
- if module.params['state'] in ('present', 'enabled'):
- state = 'present'
- elif module.params['state'] in ('absent', 'disabled'):
- state = 'absent'
- tags = module.params['tags']
- enable_logging = module.params['enable_logging']
- ct_params = dict(
- Name=module.params['name'],
- S3BucketName=module.params['s3_bucket_name'],
- IncludeGlobalServiceEvents=module.params['include_global_events'],
- IsMultiRegionTrail=module.params['is_multi_region_trail'],
- )
-
- if module.params['s3_key_prefix']:
- ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')
-
- if module.params['sns_topic_name']:
- ct_params['SnsTopicName'] = module.params['sns_topic_name']
-
- if module.params['cloudwatch_logs_role_arn']:
- ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']
-
- if module.params['cloudwatch_logs_log_group_arn']:
- ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']
-
- if module.params['enable_log_file_validation'] is not None:
- ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation']
-
- if module.params['kms_key_id']:
- ct_params['KmsKeyId'] = module.params['kms_key_id']
-
- client = module.client('cloudtrail')
- region = module.region
-
- results = dict(
- changed=False,
- exists=False
- )
-
- # Get existing trail facts
- trail = get_trail_facts(module, client, ct_params['Name'])
-
- # If the trail exists set the result exists variable
- if trail is not None:
- results['exists'] = True
-
- if state == 'absent' and results['exists']:
- # If Trail exists go ahead and delete
- results['changed'] = True
- results['exists'] = False
- results['trail'] = dict()
- if not module.check_mode:
- delete_trail(module, client, trail['TrailARN'])
-
- elif state == 'present' and results['exists']:
- # If Trail exists see if we need to update it
- do_update = False
- for key in ct_params:
- tkey = str(key)
- # boto3 has inconsistent parameter naming so we handle it here
- if key == 'EnableLogFileValidation':
- tkey = 'LogFileValidationEnabled'
- # We need to make an empty string equal None
- if ct_params.get(key) == '':
- val = None
- else:
- val = ct_params.get(key)
- if val != trail.get(tkey):
- do_update = True
- results['changed'] = True
- # If we are in check mode copy the changed values to the trail facts in result output to show what would change.
- if module.check_mode:
- trail.update({tkey: ct_params.get(key)})
-
- if not module.check_mode and do_update:
- update_trail(module, client, ct_params)
- trail = get_trail_facts(module, client, ct_params['Name'])
-
- # Check if we need to start/stop logging
- if enable_logging and not trail['IsLogging']:
- results['changed'] = True
- trail['IsLogging'] = True
- if not module.check_mode:
- set_logging(module, client, name=ct_params['Name'], action='start')
- if not enable_logging and trail['IsLogging']:
- results['changed'] = True
- trail['IsLogging'] = False
- if not module.check_mode:
- set_logging(module, client, name=ct_params['Name'], action='stop')
-
- # Check if we need to update tags on resource
- tag_dry_run = False
- if module.check_mode:
- tag_dry_run = True
- tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], dry_run=tag_dry_run)
- if tags_changed:
- results['changed'] = True
- trail['tags'] = tags
- # Populate trail facts in output
- results['trail'] = camel_dict_to_snake_dict(trail)
-
- elif state == 'present' and not results['exists']:
- # Trail doesn't exist just go create it
- results['changed'] = True
- if not module.check_mode:
- # If we aren't in check_mode then actually create it
- created_trail = create_trail(module, client, ct_params)
- # Apply tags
- tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
- # Get the trail status
- try:
- status_resp = client.get_trail_status(Name=created_trail['Name'])
- except (BotoCoreError, ClientError) as err:
- module.fail_json_aws(err, msg="Failed to fetch Trail statuc")
- # Set the logging state for the trail to desired value
- if enable_logging and not status_resp['IsLogging']:
- set_logging(module, client, name=ct_params['Name'], action='start')
- if not enable_logging and status_resp['IsLogging']:
- set_logging(module, client, name=ct_params['Name'], action='stop')
- # Get facts for newly created Trail
- trail = get_trail_facts(module, client, ct_params['Name'])
-
- # If we are in check mode create a fake return structure for the newly minted trail
- if module.check_mode:
- acct_id = '123456789012'
- try:
- sts_client = module.client('sts')
- acct_id = sts_client.get_caller_identity()['Account']
- except (BotoCoreError, ClientError):
- pass
- trail = dict()
- trail.update(ct_params)
-            if 'EnableLogFileValidation' not in ct_params:
-                ct_params['EnableLogFileValidation'] = False
-            # describe_trails reports this flag as LogFileValidationEnabled, so mirror that key
-            trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
-            trail.pop('EnableLogFileValidation', None)
- fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
- trail['HasCustomEventSelectors'] = False
- trail['HomeRegion'] = region
- trail['TrailARN'] = fake_arn
- trail['IsLogging'] = enable_logging
- trail['tags'] = tags
- # Populate trail facts in output
- results['trail'] = camel_dict_to_snake_dict(trail)
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
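
tag_trail() above reduces tag reconciliation to set arithmetic over the two key sets; only the resulting add/remove/update lists are sent to AWS. The same logic in isolation, as a pure-Python sketch with illustrative tag values:

```python
def diff_tags(current, desired):
    """Split two tag dicts into the adds, removes and updates tag_trail() would apply."""
    curr_keys, new_keys = set(current), set(desired)
    adds = {k: desired[k] for k in new_keys - curr_keys}
    removes = {k: current[k] for k in curr_keys - new_keys}
    updates = {k: desired[k] for k in curr_keys & new_keys if current[k] != desired[k]}
    return adds, removes, updates

adds, removes, updates = diff_tags(
    {'environment': 'dev', 'owner': 'ops'},
    {'environment': 'prod', 'Name': 'default'},
)
# adds == {'Name': 'default'}, removes == {'owner': 'ops'}, updates == {'environment': 'prod'}
```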
diff --git a/lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py b/lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
deleted file mode 100644
index 7939f8c5c8..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
+++ /dev/null
@@ -1,464 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = r'''
----
-module: cloudwatchevent_rule
-short_description: Manage CloudWatch Event rules and targets
-description:
- - This module creates and manages CloudWatch event rules and targets.
-version_added: "2.2"
-extends_documentation_fragment:
- - aws
- - ec2
-author: "Jim Dalton (@jsdalton) <jim.dalton@gmail.com>"
-requirements:
- - python >= 2.6
- - boto3
-notes:
- - A rule must contain at least an I(event_pattern) or I(schedule_expression). A
- rule can have both an I(event_pattern) and a I(schedule_expression), in which
- case the rule will trigger on matching events as well as on a schedule.
- - When specifying targets, I(input) and I(input_path) are mutually-exclusive
- and optional parameters.
-options:
- name:
- description:
- - The name of the rule you are creating, updating or deleting. No spaces
- or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+)).
- required: true
- type: str
- schedule_expression:
- description:
- - A cron or rate expression that defines the schedule the rule will
- trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes)).
- required: false
- type: str
- event_pattern:
- description:
- - A string pattern (in valid JSON format) that is used to match against
- incoming events to determine if the rule should be triggered.
- required: false
- type: str
- state:
- description:
- - Whether the rule is present (and enabled), disabled, or absent.
- choices: ["present", "disabled", "absent"]
- default: present
- required: false
- type: str
- description:
- description:
- - A description of the rule.
- required: false
- type: str
- role_arn:
- description:
- - The Amazon Resource Name (ARN) of the IAM role associated with the rule.
- required: false
- type: str
- targets:
- type: list
- elements: dict
- description:
- - A list of targets to add to or update for the rule.
- suboptions:
- id:
- type: str
- required: true
- description: The unique target assignment ID.
- arn:
- type: str
- required: true
- description: The ARN associated with the target.
- role_arn:
- type: str
- description: The ARN of the IAM role to be used for this target when the rule is triggered.
- input:
- type: str
- description:
- - A JSON object that will override the event data when passed to the target.
- - If neither I(input) nor I(input_path) is specified, then the entire
- event is passed to the target in JSON form.
- input_path:
- type: str
- description:
- - A JSONPath string (e.g. C($.detail)) that specifies the part of the event data to be
- passed to the target.
- - If neither I(input) nor I(input_path) is specified, then the entire
- event is passed to the target in JSON form.
- ecs_parameters:
- type: dict
- description:
- - Contains the ECS task definition and task count to be used, if the event target is an ECS task.
- suboptions:
- task_definition_arn:
- type: str
- description: The full ARN of the task definition.
- task_count:
- type: int
-              description: The number of tasks to create based on I(task_definition_arn).
- required: false
-'''
-
-EXAMPLES = '''
-- cloudwatchevent_rule:
- name: MyCronTask
- schedule_expression: "cron(0 20 * * ? *)"
- description: Run my scheduled task
- targets:
- - id: MyTargetId
- arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
-
-- cloudwatchevent_rule:
- name: MyDisabledCronTask
- schedule_expression: "rate(5 minutes)"
- description: Run my disabled scheduled task
- state: disabled
- targets:
- - id: MyOtherTargetId
- arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
- input: '{"foo": "bar"}'
-
-- cloudwatchevent_rule:
- name: MyCronTask
- state: absent
-'''
-
-RETURN = '''
-rule:
- description: CloudWatch Event rule data.
- returned: success
- type: dict
- sample:
- arn: 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask'
- description: 'Run my scheduled task'
- name: 'MyCronTask'
- schedule_expression: 'cron(0 20 * * ? *)'
- state: 'ENABLED'
-targets:
- description: CloudWatch Event target(s) assigned to the rule.
- returned: success
- type: list
- sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
-'''
-
-try:
- import botocore
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-
-class CloudWatchEventRule(object):
- def __init__(self, module, name, client, schedule_expression=None,
- event_pattern=None, description=None, role_arn=None):
- self.name = name
- self.client = client
- self.changed = False
- self.schedule_expression = schedule_expression
- self.event_pattern = event_pattern
- self.description = description
- self.role_arn = role_arn
- self.module = module
-
- def describe(self):
- """Returns the existing details of the rule in AWS"""
- try:
- rule_info = self.client.describe_rule(Name=self.name)
- except botocore.exceptions.ClientError as e:
- error_code = e.response.get('Error', {}).get('Code')
- if error_code == 'ResourceNotFoundException':
- return {}
- self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name)
- except botocore.exceptions.BotoCoreError as e:
- self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name)
- return self._snakify(rule_info)
-
- def put(self, enabled=True):
- """Creates or updates the rule in AWS"""
- request = {
- 'Name': self.name,
- 'State': "ENABLED" if enabled else "DISABLED",
- }
- if self.schedule_expression:
- request['ScheduleExpression'] = self.schedule_expression
- if self.event_pattern:
- request['EventPattern'] = self.event_pattern
- if self.description:
- request['Description'] = self.description
- if self.role_arn:
- request['RoleArn'] = self.role_arn
- try:
- response = self.client.put_rule(**request)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not create/update rule %s" % self.name)
- self.changed = True
- return response
-
- def delete(self):
- """Deletes the rule in AWS"""
- self.remove_all_targets()
-
- try:
- response = self.client.delete_rule(Name=self.name)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not delete rule %s" % self.name)
- self.changed = True
- return response
-
- def enable(self):
- """Enables the rule in AWS"""
- try:
- response = self.client.enable_rule(Name=self.name)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not enable rule %s" % self.name)
- self.changed = True
- return response
-
- def disable(self):
- """Disables the rule in AWS"""
- try:
- response = self.client.disable_rule(Name=self.name)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not disable rule %s" % self.name)
- self.changed = True
- return response
-
- def list_targets(self):
- """Lists the existing targets for the rule in AWS"""
- try:
- targets = self.client.list_targets_by_rule(Rule=self.name)
- except botocore.exceptions.ClientError as e:
- error_code = e.response.get('Error', {}).get('Code')
- if error_code == 'ResourceNotFoundException':
- return []
- self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name)
- except botocore.exceptions.BotoCoreError as e:
- self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name)
- return self._snakify(targets)['targets']
-
- def put_targets(self, targets):
- """Creates or updates the provided targets on the rule in AWS"""
- if not targets:
- return
- request = {
- 'Rule': self.name,
- 'Targets': self._targets_request(targets),
- }
- try:
- response = self.client.put_targets(**request)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not create/update rule targets for rule %s" % self.name)
- self.changed = True
- return response
-
- def remove_targets(self, target_ids):
- """Removes the provided targets from the rule in AWS"""
- if not target_ids:
- return
- request = {
- 'Rule': self.name,
- 'Ids': target_ids
- }
- try:
- response = self.client.remove_targets(**request)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not remove rule targets from rule %s" % self.name)
- self.changed = True
- return response
-
- def remove_all_targets(self):
- """Removes all targets on rule"""
- targets = self.list_targets()
- return self.remove_targets([t['id'] for t in targets])
-
- def _targets_request(self, targets):
- """Formats each target for the request"""
- targets_request = []
- for target in targets:
- target_request = {
- 'Id': target['id'],
- 'Arn': target['arn']
- }
- if 'input' in target:
- target_request['Input'] = target['input']
- if 'input_path' in target:
- target_request['InputPath'] = target['input_path']
- if 'role_arn' in target:
- target_request['RoleArn'] = target['role_arn']
- if 'ecs_parameters' in target:
- target_request['EcsParameters'] = {}
- ecs_parameters = target['ecs_parameters']
- if 'task_definition_arn' in target['ecs_parameters']:
- target_request['EcsParameters']['TaskDefinitionArn'] = ecs_parameters['task_definition_arn']
- if 'task_count' in target['ecs_parameters']:
- target_request['EcsParameters']['TaskCount'] = ecs_parameters['task_count']
- targets_request.append(target_request)
- return targets_request
-
-    def _snakify(self, data):
-        """Converts camel case keys to snake case"""
-        return camel_dict_to_snake_dict(data)
-
-
-class CloudWatchEventRuleManager(object):
- RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn']
-
- def __init__(self, rule, targets):
- self.rule = rule
- self.targets = targets
-
- def ensure_present(self, enabled=True):
- """Ensures the rule and targets are present and synced"""
- rule_description = self.rule.describe()
- if rule_description:
- # Rule exists so update rule, targets and state
- self._sync_rule(enabled)
- self._sync_targets()
- self._sync_state(enabled)
- else:
- # Rule does not exist, so create new rule and targets
- self._create(enabled)
-
- def ensure_disabled(self):
- """Ensures the rule and targets are present, but disabled, and synced"""
- self.ensure_present(enabled=False)
-
- def ensure_absent(self):
- """Ensures the rule and targets are absent"""
- rule_description = self.rule.describe()
- if not rule_description:
- # Rule doesn't exist so don't need to delete
- return
- self.rule.delete()
-
- def fetch_aws_state(self):
- """Retrieves rule and target state from AWS"""
- aws_state = {
- 'rule': {},
- 'targets': [],
- 'changed': self.rule.changed
- }
- rule_description = self.rule.describe()
- if not rule_description:
- return aws_state
-
- # Don't need to include response metadata noise in response
- del rule_description['response_metadata']
-
- aws_state['rule'] = rule_description
- aws_state['targets'].extend(self.rule.list_targets())
- return aws_state
-
- def _sync_rule(self, enabled=True):
- """Syncs local rule state with AWS"""
- if not self._rule_matches_aws():
- self.rule.put(enabled)
-
- def _sync_targets(self):
- """Syncs local targets with AWS"""
- # Identify and remove extraneous targets on AWS
- target_ids_to_remove = self._remote_target_ids_to_remove()
- if target_ids_to_remove:
- self.rule.remove_targets(target_ids_to_remove)
-
- # Identify targets that need to be added or updated on AWS
- targets_to_put = self._targets_to_put()
- if targets_to_put:
- self.rule.put_targets(targets_to_put)
-
- def _sync_state(self, enabled=True):
- """Syncs local rule state with AWS"""
- remote_state = self._remote_state()
- if enabled and remote_state != 'ENABLED':
- self.rule.enable()
- elif not enabled and remote_state != 'DISABLED':
- self.rule.disable()
-
- def _create(self, enabled=True):
- """Creates rule and targets on AWS"""
- self.rule.put(enabled)
- self.rule.put_targets(self.targets)
-
- def _rule_matches_aws(self):
- """Checks if the local rule data matches AWS"""
- aws_rule_data = self.rule.describe()
-
- # The rule matches AWS only if all rule data fields are equal
- # to their corresponding local value defined in the task
- return all([
- getattr(self.rule, field) == aws_rule_data.get(field, None)
- for field in self.RULE_FIELDS
- ])
-
- def _targets_to_put(self):
- """Returns a list of targets that need to be updated or added remotely"""
- remote_targets = self.rule.list_targets()
- return [t for t in self.targets if t not in remote_targets]
-
- def _remote_target_ids_to_remove(self):
- """Returns a list of targets that need to be removed remotely"""
- target_ids = [t['id'] for t in self.targets]
- remote_targets = self.rule.list_targets()
- return [
- rt['id'] for rt in remote_targets if rt['id'] not in target_ids
- ]
-
- def _remote_state(self):
- """Returns the remote state from AWS"""
- description = self.rule.describe()
- if not description:
- return
- return description['state']
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- schedule_expression=dict(),
- event_pattern=dict(),
- state=dict(choices=['present', 'disabled', 'absent'],
- default='present'),
- description=dict(),
- role_arn=dict(),
- targets=dict(type='list', default=[]),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec)
-
- rule_data = dict(
- [(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]
- )
- targets = module.params.get('targets')
- state = module.params.get('state')
- client = module.client('events')
-
- cwe_rule = CloudWatchEventRule(module, client=client, **rule_data)
- cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)
-
- if state == 'present':
- cwe_rule_manager.ensure_present()
- elif state == 'disabled':
- cwe_rule_manager.ensure_disabled()
- elif state == 'absent':
- cwe_rule_manager.ensure_absent()
- else:
- module.fail_json(msg="Invalid state '{0}' provided".format(state))
-
- module.exit_json(**cwe_rule_manager.fetch_aws_state())
-
-
-if __name__ == '__main__':
- main()
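
_sync_targets() above reconciles in two passes: first remove remote targets whose ids are no longer requested, then put every requested target that does not match a remote one verbatim (a changed target is simply re-put under the same id). The core of that diff on plain dicts, with illustrative ARNs:

```python
desired = [
    {'id': 'MyTargetId', 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction'},
]
remote = [
    {'id': 'MyTargetId', 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:OldFunction'},
    {'id': 'StaleTarget', 'arn': 'arn:aws:sns:us-east-1:123456789012:topic/retired'},
]

desired_ids = {t['id'] for t in desired}
ids_to_remove = [t['id'] for t in remote if t['id'] not in desired_ids]  # ['StaleTarget']
targets_to_put = [t for t in desired if t not in remote]  # MyTargetId, since its arn changed
```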
diff --git a/lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group.py b/lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group.py
deleted file mode 100644
index 7ffd6671a0..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group.py
+++ /dev/null
@@ -1,319 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: cloudwatchlogs_log_group
-short_description: create or delete log_group in CloudWatchLogs
-notes:
- - For details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html).
-description:
- - Create or delete log_group in CloudWatchLogs.
-version_added: "2.5"
-author:
- - Willian Ricardo (@willricardo) <willricardo@gmail.com>
-requirements: [ json, botocore, boto3 ]
-options:
- state:
- description:
-      - Whether the log group is present or absent.
- choices: ["present", "absent"]
- default: present
- required: false
- type: str
- log_group_name:
- description:
- - The name of the log group.
- required: true
- type: str
- kms_key_id:
- description:
- - The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
- required: false
- type: str
- tags:
- description:
- - The key-value pairs to use for the tags.
- required: false
- type: dict
- retention:
- description:
- - The number of days to retain the log events in the specified log group.
- - "Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]"
- - Mutually exclusive with I(purge_retention_policy).
- required: false
- type: int
- purge_retention_policy:
- description:
- - "Whether to purge the retention policy or not."
- - "Mutually exclusive with I(retention) and I(overwrite)."
- default: false
- required: false
- type: bool
- version_added: "2.10"
- overwrite:
- description:
- - Whether an existing log group should be overwritten on create.
- - Mutually exclusive with I(purge_retention_policy).
- default: false
- required: false
- type: bool
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- cloudwatchlogs_log_group:
- log_group_name: test-log-group
-
-- cloudwatchlogs_log_group:
- state: present
- log_group_name: test-log-group
- tags: { "Name": "test-log-group", "Env" : "QA" }
-
-- cloudwatchlogs_log_group:
- state: present
- log_group_name: test-log-group
- tags: { "Name": "test-log-group", "Env" : "QA" }
- kms_key_id: arn:aws:kms:region:account-id:key/key-id
-
-- cloudwatchlogs_log_group:
- state: absent
- log_group_name: test-log-group
-
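-# A hypothetical sketch (not part of the original examples): apply a 14-day
-# retention policy at creation time; the value must be one of the documented
-# valid retention values.
-- cloudwatchlogs_log_group:
-    state: present
-    log_group_name: test-log-group
-    retention: 14
-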
-'''
-
-RETURN = '''
-log_groups:
- description: The list of complex objects representing log groups.
- returned: success
- type: complex
- contains:
- log_group_name:
- description: The name of the log group.
- returned: always
- type: str
- creation_time:
- description: The creation time of the log group.
- returned: always
- type: int
- retention_in_days:
- description: The number of days to retain the log events in the specified log group.
- returned: always
- type: int
- metric_filter_count:
- description: The number of metric filters.
- returned: always
- type: int
- arn:
- description: The Amazon Resource Name (ARN) of the log group.
- returned: always
- type: str
- stored_bytes:
- description: The number of bytes stored.
- returned: always
- type: int
- kms_key_id:
- description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
- returned: always
- type: str
-'''
-
-import traceback
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
-
-try:
- import botocore
-except ImportError:
- pass # will be detected by imported HAS_BOTO3
-
-
-def create_log_group(client, log_group_name, kms_key_id, tags, retention, module):
- request = {'logGroupName': log_group_name}
- if kms_key_id:
- request['kmsKeyId'] = kms_key_id
- if tags:
- request['tags'] = tags
-
- try:
- client.create_log_group(**request)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
- exception=traceback.format_exc())
-
- if retention:
- input_retention_policy(client=client,
- log_group_name=log_group_name,
- retention=retention, module=module)
-
- desc_log_group = describe_log_group(client=client,
- log_group_name=log_group_name,
- module=module)
-
- if 'logGroups' in desc_log_group:
- for i in desc_log_group['logGroups']:
- if log_group_name == i['logGroupName']:
- return i
- module.fail_json(msg="The aws CloudWatchLogs log group was not created. \n please try again!")
-
-
-def input_retention_policy(client, log_group_name, retention, module):
- try:
- permitted_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
-
- if retention in permitted_values:
- response = client.put_retention_policy(logGroupName=log_group_name,
- retentionInDays=retention)
- else:
- delete_log_group(client=client, log_group_name=log_group_name, module=module)
- module.fail_json(msg="Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]")
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
- exception=traceback.format_exc())
-
-
-def delete_retention_policy(client, log_group_name, module):
- try:
- client.delete_retention_policy(logGroupName=log_group_name)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to delete retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
- exception=traceback.format_exc())
-
-
-def delete_log_group(client, log_group_name, module):
- desc_log_group = describe_log_group(client=client,
- log_group_name=log_group_name,
- module=module)
-
- try:
- if 'logGroups' in desc_log_group:
- for i in desc_log_group['logGroups']:
- if log_group_name == i['logGroupName']:
- client.delete_log_group(logGroupName=log_group_name)
-
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
- exception=traceback.format_exc())
-
-
-def describe_log_group(client, log_group_name, module):
- try:
- desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name)
- return desc_log_group
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
- exception=traceback.format_exc())
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- log_group_name=dict(required=True, type='str'),
- state=dict(choices=['present', 'absent'],
- default='present'),
- kms_key_id=dict(required=False, type='str'),
- tags=dict(required=False, type='dict'),
- retention=dict(required=False, type='int'),
- purge_retention_policy=dict(required=False, type='bool', default=False),
- overwrite=dict(required=False, type='bool', default=False)
- ))
-
- mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']]
- module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive)
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required.')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
-
- state = module.params.get('state')
- changed = False
-
- # Determine if the log group exists
- desc_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
- found_log_group = {}
- for i in desc_log_group.get('logGroups', []):
- if module.params['log_group_name'] == i['logGroupName']:
- found_log_group = i
- break
-
- if state == 'present':
- if found_log_group:
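- # Existing group: recreate it when overwrite is set, drop the retention
- # policy when purge_retention_policy is set, or bring the retention
- # setting in line with the requested value.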
- if module.params['overwrite'] is True:
- changed = True
- delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
- found_log_group = create_log_group(client=logs,
- log_group_name=module.params['log_group_name'],
- kms_key_id=module.params['kms_key_id'],
- tags=module.params['tags'],
- retention=module.params['retention'],
- module=module)
- elif module.params['purge_retention_policy']:
- if found_log_group.get('retentionInDays'):
- changed = True
- delete_retention_policy(client=logs,
- log_group_name=module.params['log_group_name'],
- module=module)
- elif module.params['retention'] != found_log_group.get('retentionInDays'):
- if module.params['retention'] is not None:
- changed = True
- input_retention_policy(client=logs,
- log_group_name=module.params['log_group_name'],
- retention=module.params['retention'],
- module=module)
- found_log_group['retentionInDays'] = module.params['retention']
-
- elif not found_log_group:
- changed = True
- found_log_group = create_log_group(client=logs,
- log_group_name=module.params['log_group_name'],
- kms_key_id=module.params['kms_key_id'],
- tags=module.params['tags'],
- retention=module.params['retention'],
- module=module)
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(found_log_group))
-
- elif state == 'absent':
- if found_log_group:
- changed = True
- delete_log_group(client=logs,
- log_group_name=module.params['log_group_name'],
- module=module)
-
- module.exit_json(changed=changed)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group_info.py b/lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group_info.py
deleted file mode 100644
index e098f28cc3..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group_info.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: cloudwatchlogs_log_group_info
-short_description: Get information about log_group in CloudWatchLogs
-description:
- - Lists the specified log groups. You can list all your log groups or filter the results by prefix.
- - This module was called C(cloudwatchlogs_log_group_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.5"
-author:
- - Willian Ricardo (@willricardo) <willricardo@gmail.com>
-requirements: [ botocore, boto3 ]
-options:
- log_group_name:
- description:
- - The name or prefix of the log group to filter by.
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-- cloudwatchlogs_log_group_info:
- log_group_name: test-log-group
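-
-# A hypothetical sketch: register the result and inspect the returned groups.
-- cloudwatchlogs_log_group_info:
-    log_group_name: test-log-group
-  register: log_groups_result
-
-- debug:
-    var: log_groups_result.log_groups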
-'''
-
-RETURN = '''
-log_groups:
- description: The list of complex objects representing log groups.
- returned: success
- type: complex
- contains:
- log_group_name:
- description: The name of the log group.
- returned: always
- type: str
- creation_time:
- description: The creation time of the log group.
- returned: always
- type: int
- retention_in_days:
- description: The number of days to retain the log events in the specified log group.
- returned: always
- type: int
- metric_filter_count:
- description: The number of metric filters.
- returned: always
- type: int
- arn:
- description: The Amazon Resource Name (ARN) of the log group.
- returned: always
- type: str
- stored_bytes:
- description: The number of bytes stored.
- returned: always
- type: int
- kms_key_id:
- description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
- returned: always
- type: str
-'''
-
-import traceback
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
-
-try:
- import botocore
-except ImportError:
- pass # will be detected by imported HAS_BOTO3
-
-
-def describe_log_group(client, log_group_name, module):
- params = {}
- if log_group_name:
- params['logGroupNamePrefix'] = log_group_name
- try:
- paginator = client.get_paginator('describe_log_groups')
- desc_log_group = paginator.paginate(**params).build_full_result()
- return desc_log_group
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
- exception=traceback.format_exc())
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- log_group_name=dict(),
- ))
-
- module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._name == 'cloudwatchlogs_log_group_facts':
- module.deprecate("The 'cloudwatchlogs_log_group_facts' module has been renamed to 'cloudwatchlogs_log_group_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required.')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
-
- desc_log_group = describe_log_group(client=logs,
- log_group_name=module.params['log_group_name'],
- module=module)
- final_log_group_snake = []
-
- for log_group in desc_log_group['logGroups']:
- final_log_group_snake.append(camel_dict_to_snake_dict(log_group))
-
- desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake)
- module.exit_json(**desc_log_group_result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group_metric_filter.py b/lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group_metric_filter.py
deleted file mode 100644
index 9589259df7..0000000000
--- a/lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group_metric_filter.py
+++ /dev/null
@@ -1,221 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: cloudwatchlogs_log_group_metric_filter
-version_added: "2.10"
-author:
- - "Markus Bergholz (@markuman)"
-short_description: Manage CloudWatch log group metric filter
-description:
- - Create, modify and delete CloudWatch log group metric filter.
- - CloudWatch log group metric filters can be used with M(ec2_metric_alarm).
-requirements:
- - boto3
- - botocore
-options:
- state:
- description:
- - Whether the metric filter is present or absent.
- choices: ["present", "absent"]
- required: true
- type: str
- log_group_name:
- description:
- - The name of the log group to which the metric filter is applied.
- required: true
- type: str
- filter_name:
- description:
- - A name for the metric filter you create.
- required: true
- type: str
- filter_pattern:
- description:
- - A filter pattern for extracting metric data out of ingested log events. Required when I(state=present).
- type: str
- metric_transformation:
- description:
- - A collection of information that defines how metric data gets emitted. Required when I(state=present).
- type: dict
- suboptions:
- metric_name:
- description:
- - The name of the CloudWatch metric.
- type: str
- metric_namespace:
- description:
- - The namespace of the CloudWatch metric.
- type: str
- metric_value:
- description:
- - The value to publish to the CloudWatch metric when a filter pattern matches a log event.
- type: str
- default_value:
- description:
- - The value to emit when a filter pattern does not match a log event.
- type: float
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: set metric filter on log group /fluentd/testcase
- cloudwatchlogs_log_group_metric_filter:
- log_group_name: /fluentd/testcase
- filter_name: BoxFreeStorage
- filter_pattern: '{($.value = *) && ($.hostname = "box")}'
- state: present
- metric_transformation:
- metric_name: box_free_space
- metric_namespace: fluentd_metrics
- metric_value: "$.value"
-
-- name: delete metric filter on log group /fluentd/testcase
- cloudwatchlogs_log_group_metric_filter:
- log_group_name: /fluentd/testcase
- filter_name: BoxFreeStorage
- state: absent
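-
-# A hypothetical sketch: default_value emits 0 whenever the filter pattern
-# does not match a log event, keeping the metric continuous.
-- name: set metric filter with a default value
-  cloudwatchlogs_log_group_metric_filter:
-    log_group_name: /fluentd/testcase
-    filter_name: BoxFreeStorage
-    filter_pattern: '{($.value = *) && ($.hostname = "box")}'
-    state: present
-    metric_transformation:
-      metric_name: box_free_space
-      metric_namespace: fluentd_metrics
-      metric_value: "$.value"
-      default_value: 0.0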
-'''
-
-RETURN = """
-metric_filters:
- description: The metric filter information from the original API response.
- returned: success
- type: list
- contains:
-     creation_time:
-         description: The creation time of the metric filter.
-         type: int
-     filter_name:
-         description: The name of the metric filter.
-         type: str
-     filter_pattern:
-         description: The filter pattern used to extract metric data from log events.
-         type: str
-     log_group_name:
-         description: The name of the log group the metric filter is applied to.
-         type: str
-     metric_filter_count:
-         description: The number of metric filters.
-         type: int
-"""
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError, WaiterError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def metricTransformationHandler(metricTransformations, originMetricTransformations=None):
-
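- # Returns a one-element list shaped for put_metric_filter, together with
- # a flag indicating whether the requested transformation differs from the
- # transformation currently stored in AWS.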
- if originMetricTransformations:
- change = False
- originMetricTransformations = camel_dict_to_snake_dict(
- originMetricTransformations)
- for item in ["default_value", "metric_name", "metric_namespace", "metric_value"]:
- if metricTransformations.get(item) != originMetricTransformations.get(item):
- change = True
- else:
- change = True
-
- defaultValue = metricTransformations.get("default_value")
- if isinstance(defaultValue, (int, float)):
- retval = [
- {
- 'metricName': metricTransformations.get("metric_name"),
- 'metricNamespace': metricTransformations.get("metric_namespace"),
- 'metricValue': metricTransformations.get("metric_value"),
- 'defaultValue': defaultValue
- }
- ]
- else:
- retval = [
- {
- 'metricName': metricTransformations.get("metric_name"),
- 'metricNamespace': metricTransformations.get("metric_namespace"),
- 'metricValue': metricTransformations.get("metric_value"),
- }
- ]
-
- return retval, change
-
-
-def main():
-
- arg_spec = dict(
- state=dict(type='str', required=True, choices=['present', 'absent']),
- log_group_name=dict(type='str', required=True),
- filter_name=dict(type='str', required=True),
- filter_pattern=dict(type='str'),
- metric_transformation=dict(type='dict', options=dict(
- metric_name=dict(type='str'),
- metric_namespace=dict(type='str'),
- metric_value=dict(type='str'),
- default_value=dict(type='float')
- )),
- )
-
- module = AnsibleAWSModule(
- argument_spec=arg_spec,
- supports_check_mode=True,
- required_if=[('state', 'present', ['metric_transformation', 'filter_pattern'])]
- )
-
- log_group_name = module.params.get("log_group_name")
- filter_name = module.params.get("filter_name")
- filter_pattern = module.params.get("filter_pattern")
- metric_transformation = module.params.get("metric_transformation")
- state = module.params.get("state")
-
- cwl = module.client('logs')
-
- # check if metric filter exists
- response = cwl.describe_metric_filters(
- logGroupName=log_group_name,
- filterNamePrefix=filter_name
- )
-
- if len(response.get("metricFilters")) == 1:
- metric_filter = response["metricFilters"][0]
- originMetricTransformations = metric_filter["metricTransformations"][0]
- originFilterPattern = metric_filter["filterPattern"]
- else:
- originMetricTransformations = None
- originFilterPattern = None
- change = False
- metricTransformation = None
-
- if state == "absent" and originMetricTransformations:
- if not module.check_mode:
- response = cwl.delete_metric_filter(
- logGroupName=log_group_name,
- filterName=filter_name
- )
- change = True
- metricTransformation = [camel_dict_to_snake_dict(item) for item in [originMetricTransformations]]
-
- elif state == "present":
- metricTransformation, change = metricTransformationHandler(
- metricTransformations=metric_transformation, originMetricTransformations=originMetricTransformations)
-
- change = change or filter_pattern != originFilterPattern
-
- if change:
- if not module.check_mode:
- response = cwl.put_metric_filter(
- logGroupName=log_group_name,
- filterName=filter_name,
- filterPattern=filter_pattern,
- metricTransformations=metricTransformation
- )
-
- metricTransformation = [camel_dict_to_snake_dict(item) for item in metricTransformation]
-
- module.exit_json(changed=change, metric_filters=metricTransformation)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/data_pipeline.py b/lib/ansible/modules/cloud/amazon/data_pipeline.py
deleted file mode 100644
index b687cfa600..0000000000
--- a/lib/ansible/modules/cloud/amazon/data_pipeline.py
+++ /dev/null
@@ -1,652 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: data_pipeline
-version_added: "2.4"
-author:
- - Raghu Udiyar (@raags) <raghusiddarth@gmail.com>
- - Sloane Hertel (@s-hertel) <shertel@redhat.com>
-requirements: [ "boto3" ]
-short_description: Create and manage AWS Data Pipelines
-extends_documentation_fragment:
- - aws
- - ec2
-description:
- - Create and manage AWS Data Pipelines. Creation is not idempotent in AWS, so the C(uniqueId) is created by hashing the options (minus objects)
- given to the data pipeline.
- - The pipeline definition must be in the format given here
- U(https://docs.aws.amazon.com/datapipeline/latest/APIReference/API_PutPipelineDefinition.html#API_PutPipelineDefinition_RequestSyntax).
- - Operations will wait for a configurable amount of time to ensure the pipeline is in the requested state.
-options:
- name:
- description:
- - The name of the Datapipeline to create/modify/delete.
- required: true
- type: str
- description:
- description:
- - An optional description for the pipeline being created.
- default: ''
- type: str
- objects:
- type: list
- elements: dict
- description:
- - A list of pipeline object definitions, each of which is a dict that takes the keys I(id), I(name) and I(fields).
- suboptions:
- id:
- description:
- - The ID of the object.
- type: str
- name:
- description:
- - The name of the object.
- type: str
- fields:
- description:
- - Key-value pairs that define the properties of the object.
- - The value is specified as a reference to another object I(refValue) or as a string value I(stringValue)
- but not as both.
- type: list
- elements: dict
- suboptions:
- key:
- type: str
- description:
- - The field identifier.
- stringValue:
- type: str
- description:
- - The field value.
- - Exactly one of I(stringValue) and I(refValue) may be specified.
- refValue:
- type: str
- description:
- - The field value, expressed as the identifier of another object.
- - Exactly one of I(stringValue) and I(refValue) may be specified.
- parameters:
- description:
- - A list of parameter objects (dicts) in the pipeline definition.
- type: list
- elements: dict
- suboptions:
- id:
- description:
- - The ID of the parameter object.
- attributes:
- description:
- - A list of attributes (dicts) of the parameter object.
- type: list
- elements: dict
- suboptions:
- key:
- description: The field identifier.
- type: str
- stringValue:
- description: The field value.
- type: str
-
- values:
- description:
- - A list of parameter values (dicts) in the pipeline definition.
- type: list
- elements: dict
- suboptions:
- id:
- description: The ID of the parameter value.
- type: str
- stringValue:
- description: The field value.
- type: str
- timeout:
- description:
- - Time in seconds to wait for the pipeline to transition to the requested state, fail otherwise.
- default: 300
- type: int
- state:
- description:
- - The requested state of the pipeline.
- choices: ['present', 'absent', 'active', 'inactive']
- default: present
- type: str
- tags:
- description:
- - A dict of key:value pair(s) to add to the pipeline.
- type: dict
- version:
- description:
- - The version option has never had any effect and will be removed in
- Ansible 2.14
- type: str
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create pipeline
-- data_pipeline:
- name: test-dp
- region: us-west-2
- objects: "{{pipelineObjects}}"
- parameters: "{{pipelineParameters}}"
- values: "{{pipelineValues}}"
- tags:
- key1: val1
- key2: val2
- state: present
-
-# Example populating and activating a pipeline that demonstrates two ways of providing pipeline objects
-- data_pipeline:
- name: test-dp
- objects:
- - "id": "DefaultSchedule"
- "name": "Every 1 day"
- "fields":
- - "key": "period"
- "stringValue": "1 days"
- - "key": "type"
- "stringValue": "Schedule"
- - "key": "startAt"
- "stringValue": "FIRST_ACTIVATION_DATE_TIME"
- - "id": "Default"
- "name": "Default"
- "fields": [ { "key": "resourceRole", "stringValue": "my_resource_role" },
- { "key": "role", "stringValue": "DataPipelineDefaultRole" },
- { "key": "pipelineLogUri", "stringValue": "s3://my_s3_log.txt" },
- { "key": "scheduleType", "stringValue": "cron" },
- { "key": "schedule", "refValue": "DefaultSchedule" },
- { "key": "failureAndRerunMode", "stringValue": "CASCADE" } ]
- state: active
-
-# Activate pipeline
-- data_pipeline:
- name: test-dp
- region: us-west-2
- state: active
-
-# Delete pipeline
-- data_pipeline:
- name: test-dp
- region: us-west-2
- state: absent
-
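-# A hypothetical sketch: deactivate a pipeline, waiting up to the configured
-# timeout for it to reach an inactive state.
-- data_pipeline:
-    name: test-dp
-    region: us-west-2
-    timeout: 600
-    state: inactive
-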
-'''
-
-RETURN = '''
-changed:
- description: whether the data pipeline has been modified
- type: bool
- returned: always
- sample:
- changed: true
-result:
- description:
- - Contains the data pipeline data (data_pipeline) and a return message (msg).
- If the data pipeline exists data_pipeline will contain the keys description, name,
- pipeline_id, state, tags, and unique_id. If the data pipeline does not exist then
- data_pipeline will be an empty dict. The msg describes the status of the operation.
- returned: always
- type: dict
-'''
-
-import hashlib
-import json
-import time
-import traceback
-
-try:
- import boto3
- from botocore.exceptions import ClientError
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
-from ansible.module_utils._text import to_text
-
-
-DP_ACTIVE_STATES = ['ACTIVE', 'SCHEDULED']
-DP_INACTIVE_STATES = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING']
-DP_ACTIVATING_STATE = 'ACTIVATING'
-DP_DEACTIVATING_STATE = 'DEACTIVATING'
-PIPELINE_DOESNT_EXIST = '^.*Pipeline with id: {0} does not exist$'
-
-
-class DataPipelineNotFound(Exception):
- pass
-
-
-class TimeOutException(Exception):
- pass
-
-
-def pipeline_id(client, name):
- """Return pipeline id for the given pipeline name
-
- :param object client: boto3 datapipeline client
- :param string name: pipeline name
- :returns: pipeline id
- :raises: DataPipelineNotFound
-
- """
- pipelines = client.list_pipelines()
- for dp in pipelines['pipelineIdList']:
- if dp['name'] == name:
- return dp['id']
- raise DataPipelineNotFound
-
-
-def pipeline_description(client, dp_id):
- """Return pipeline description list
-
- :param object client: boto3 datapipeline client
- :returns: pipeline description dictionary
- :raises: DataPipelineNotFound
-
- """
- try:
- return client.describe_pipelines(pipelineIds=[dp_id])
- except ClientError:
- raise DataPipelineNotFound
-
-
-def pipeline_field(client, dp_id, field):
- """Return a pipeline field from the pipeline description.
-
- The available fields are listed in describe_pipelines output.
-
- :param object client: boto3 datapipeline client
- :param string dp_id: pipeline id
- :param string field: pipeline description field
- :returns: pipeline field information
-
- """
- dp_description = pipeline_description(client, dp_id)
- for field_key in dp_description['pipelineDescriptionList'][0]['fields']:
- if field_key['key'] == field:
- return field_key['stringValue']
- raise KeyError("Field key {0} not found!".format(field))
-
-
-def run_with_timeout(timeout, func, *func_args, **func_kwargs):
- """Run func with the provided args and kwargs, and wait utill
- timeout for truthy return value
-
- :param int timeout: time to wait for status
- :param function func: function to run, should return True or False
- :param args func_args: function args to pass to func
- :param kwargs func_kwargs: function key word args
- :returns: True if func returns truthy within timeout
- :raises: TimeOutException
-
- """
-
- for count in range(timeout // 10):
- if func(*func_args, **func_kwargs):
- return True
- else:
- # check every 10s
- time.sleep(10)
-
- raise TimeOutException
-
-
-def check_dp_exists(client, dp_id):
- """Check if datapipeline exists
-
- :param object client: boto3 datapipeline client
- :param string dp_id: pipeline id
- :returns: True or False
-
- """
- try:
- # pipeline_description raises DataPipelineNotFound
- if pipeline_description(client, dp_id):
- return True
- else:
- return False
- except DataPipelineNotFound:
- return False
-
-
-def check_dp_status(client, dp_id, status):
- """Checks if datapipeline matches states in status list
-
- :param object client: boto3 datapipeline client
- :param string dp_id: pipeline id
- :param list status: list of states to check against
- :returns: True or False
-
- """
- if not isinstance(status, list):
- raise AssertionError()
- return pipeline_field(client, dp_id, field="@pipelineState") in status
-
-
-def pipeline_status_timeout(client, dp_id, status, timeout):
- args = (client, dp_id, status)
- return run_with_timeout(timeout, check_dp_status, *args)
-
-
-def pipeline_exists_timeout(client, dp_id, timeout):
- args = (client, dp_id)
- return run_with_timeout(timeout, check_dp_exists, *args)
-
-
-def activate_pipeline(client, module):
- """Activates pipeline
-
- """
- dp_name = module.params.get('name')
- timeout = module.params.get('timeout')
-
- try:
- dp_id = pipeline_id(client, dp_name)
- except DataPipelineNotFound:
- module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
-
- if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES:
- changed = False
- else:
- try:
- client.activate_pipeline(pipelineId=dp_id)
- except ClientError as e:
- if e.response["Error"]["Code"] == "InvalidRequestException":
- module.fail_json(msg="You need to populate your pipeline before activation.")
- try:
- pipeline_status_timeout(client, dp_id, status=DP_ACTIVE_STATES,
- timeout=timeout)
- except TimeOutException:
- if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
- # activated but completed more rapidly than it was checked
- pass
- else:
- module.fail_json(msg=('Data Pipeline {0} failed to activate '
- 'within timeout {1} seconds').format(dp_name, timeout))
- changed = True
-
- data_pipeline = get_result(client, dp_id)
- result = {'data_pipeline': data_pipeline,
- 'msg': 'Data Pipeline {0} activated.'.format(dp_name)}
-
- return (changed, result)
-
-
-def deactivate_pipeline(client, module):
- """Deactivates pipeline
-
- """
- dp_name = module.params.get('name')
- timeout = module.params.get('timeout')
-
- try:
- dp_id = pipeline_id(client, dp_name)
- except DataPipelineNotFound:
- module.fail_json(msg='Data Pipeline {0} not found'.format(dp_name))
-
- if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES:
- changed = False
- else:
- client.deactivate_pipeline(pipelineId=dp_id)
- try:
- pipeline_status_timeout(client, dp_id, status=DP_INACTIVE_STATES,
- timeout=timeout)
- except TimeOutException:
- module.fail_json(msg=('Data Pipeline {0} failed to deactivate '
- 'within timeout {1} seconds').format(dp_name, timeout))
- changed = True
-
- data_pipeline = get_result(client, dp_id)
- result = {'data_pipeline': data_pipeline,
- 'msg': 'Data Pipeline {0} deactivated.'.format(dp_name)}
-
- return (changed, result)
-
-
-def _delete_dp_with_check(dp_id, client, timeout):
- client.delete_pipeline(pipelineId=dp_id)
- try:
- pipeline_status_timeout(client=client, dp_id=dp_id, status=[PIPELINE_DOESNT_EXIST], timeout=timeout)
- except DataPipelineNotFound:
- return True
-
-
-def delete_pipeline(client, module):
- """Deletes pipeline
-
- """
- dp_name = module.params.get('name')
- timeout = module.params.get('timeout')
-
- try:
- dp_id = pipeline_id(client, dp_name)
- _delete_dp_with_check(dp_id, client, timeout)
- changed = True
- except DataPipelineNotFound:
- changed = False
- except TimeOutException:
- module.fail_json(msg=('Data Pipeline {0} failed to delete '
- 'within timeout {1} seconds').format(dp_name, timeout))
- result = {'data_pipeline': {},
- 'msg': 'Data Pipeline {0} deleted'.format(dp_name)}
-
- return (changed, result)
-
-
-def build_unique_id(module):
- data = dict(module.params)
- # Remove objects and timeout from the data used to build the unique id, so
- # objects can be updated and the pipeline populated after creation without
- # needing to make a new pipeline.
- for each in ('objects', 'timeout'):
-     data.pop(each, None)
- json_data = json.dumps(data, sort_keys=True).encode("utf-8")
- hashed_data = hashlib.md5(json_data).hexdigest()
- return hashed_data
-
-
-def format_tags(tags):
- """ Reformats tags
-
- :param dict tags: dict of data pipeline tags (e.g. {key1: val1, key2: val2, key3: val3})
- :returns: list of dicts (e.g. [{key: key1, value: val1}, {key: key2, value: val2}, {key: key3, value: val3}])
-
- """
- return [dict(key=k, value=v) for k, v in tags.items()]
-
-
-def get_result(client, dp_id):
- """ Get the current state of the data pipeline and reformat it to snake_case for exit_json
-
- :param object client: boto3 datapipeline client
- :param string dp_id: pipeline id
- :returns: reformatted dict of pipeline description
-
- """
- # pipeline_description returns a pipelineDescriptionList of length 1
- # dp is a dict with keys "description" (str), "fields" (list), "name" (str), "pipelineId" (str), "tags" (dict)
- dp = pipeline_description(client, dp_id)['pipelineDescriptionList'][0]
-
- # Get uniqueId and pipelineState in fields to add to the exit_json result
- dp["unique_id"] = pipeline_field(client, dp_id, field="uniqueId")
- dp["pipeline_state"] = pipeline_field(client, dp_id, field="@pipelineState")
-
- # Remove fields; can't make a list snake_case and most of the data is redundant
- del dp["fields"]
-
- # Note: tags is already formatted fine so we don't need to do anything with it
-
- # Reformat data pipeline and add reformatted fields back
- dp = camel_dict_to_snake_dict(dp)
- return dp
-
-
-def diff_pipeline(client, module, objects, unique_id, dp_name):
- """Check if there's another pipeline with the same unique_id and if so, checks if the object needs to be updated
- """
- result = {}
- changed = False
- create_dp = False
-
- # See if there is already a pipeline with the same unique_id
- unique_id = build_unique_id(module)
- try:
- dp_id = pipeline_id(client, dp_name)
- dp_unique_id = to_text(pipeline_field(client, dp_id, field="uniqueId"))
- if dp_unique_id != unique_id:
- # A change is expected but not determined. Updated to a bool in create_pipeline().
- changed = "NEW_VERSION"
- create_dp = True
- # Unique ids are the same - check if pipeline needs modification
- else:
- dp_objects = client.get_pipeline_definition(pipelineId=dp_id)['pipelineObjects']
- # Definition needs to be updated
- if dp_objects != objects:
- changed, msg = define_pipeline(client, module, objects, dp_id)
- # No changes
- else:
- msg = 'Data Pipeline {0} is present'.format(dp_name)
- data_pipeline = get_result(client, dp_id)
- result = {'data_pipeline': data_pipeline,
- 'msg': msg}
- except DataPipelineNotFound:
- create_dp = True
-
- return create_dp, changed, result
-
-
-def define_pipeline(client, module, objects, dp_id):
- """Puts pipeline definition
-
- """
- dp_name = module.params.get('name')
-
- if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED":
- msg = 'Data Pipeline {0} is unable to be updated while in state FINISHED.'.format(dp_name)
- changed = False
-
- elif objects:
- parameters = module.params.get('parameters')
- values = module.params.get('values')
-
- try:
- client.put_pipeline_definition(pipelineId=dp_id,
- pipelineObjects=objects,
- parameterObjects=parameters,
- parameterValues=values)
- msg = 'Data Pipeline {0} has been updated.'.format(dp_name)
- changed = True
- except ClientError as e:
- module.fail_json(msg="Failed to put the definition for pipeline {0}. Check that string/reference fields"
- "are not empty and that the number of objects in the pipeline does not exceed maximum allowed"
- "objects".format(dp_name), exception=traceback.format_exc())
- else:
- changed = False
- msg = ""
-
- return changed, msg
-
-
-def create_pipeline(client, module):
- """Creates datapipeline. Uses uniqueId to achieve idempotency.
-
- """
- dp_name = module.params.get('name')
- objects = module.params.get('objects', None)
- description = module.params.get('description', '')
- tags = module.params.get('tags')
- timeout = module.params.get('timeout')
-
- unique_id = build_unique_id(module)
- create_dp, changed, result = diff_pipeline(client, module, objects, unique_id, dp_name)
-
- if changed == "NEW_VERSION":
- # delete old version
- changed, creation_result = delete_pipeline(client, module)
-
- # There isn't a pipeline or it has different parameters than the pipeline in existence.
- if create_dp:
- # Make pipeline
- try:
- tags = format_tags(tags)
- dp = client.create_pipeline(name=dp_name,
- uniqueId=unique_id,
- description=description,
- tags=tags)
- dp_id = dp['pipelineId']
- pipeline_exists_timeout(client, dp_id, timeout)
- except ClientError as e:
- module.fail_json(msg="Failed to create the data pipeline {0}.".format(dp_name), exception=traceback.format_exc())
- except TimeOutException:
- module.fail_json(msg=('Data Pipeline {0} failed to create '
- 'within timeout {1} seconds').format(dp_name, timeout))
- # Put pipeline definition
- changed, msg = define_pipeline(client, module, objects, dp_id)
-
- changed = True
- data_pipeline = get_result(client, dp_id)
- result = {'data_pipeline': data_pipeline,
- 'msg': 'Data Pipeline {0} created.'.format(dp_name) + msg}
-
- return (changed, result)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- name=dict(required=True),
- version=dict(removed_in_version='2.14'),
- description=dict(required=False, default=''),
- objects=dict(required=False, type='list', default=[]),
- parameters=dict(required=False, type='list', default=[]),
- timeout=dict(required=False, type='int', default=300),
- state=dict(default='present', choices=['present', 'absent',
- 'active', 'inactive']),
- tags=dict(required=False, type='dict', default={}),
- values=dict(required=False, type='list', default=[])
- )
- )
- module = AnsibleModule(argument_spec, supports_check_mode=False)
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required for the datapipeline module!')
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- if not region:
- module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
- client = boto3_conn(module, conn_type='client',
- resource='datapipeline', region=region,
- endpoint=ec2_url, **aws_connect_kwargs)
- except ClientError as e:
- module.fail_json(msg="Can't authorize connection - " + str(e))
-
- state = module.params.get('state')
- if state == 'present':
- changed, result = create_pipeline(client, module)
- elif state == 'absent':
- changed, result = delete_pipeline(client, module)
- elif state == 'active':
- changed, result = activate_pipeline(client, module)
- elif state == 'inactive':
- changed, result = deactivate_pipeline(client, module)
-
- module.exit_json(result=result, changed=changed)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/dms_endpoint.py b/lib/ansible/modules/cloud/amazon/dms_endpoint.py
deleted file mode 100644
index 992bf4df06..0000000000
--- a/lib/ansible/modules/cloud/amazon/dms_endpoint.py
+++ /dev/null
@@ -1,472 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: dms_endpoint
-short_description: Creates or destroys a data migration services endpoint
-description:
- - Creates or destroys a data migration services endpoint
- that can be used to replicate data.
-version_added: "2.9"
-options:
- state:
- description:
- - State of the endpoint.
- default: present
- choices: ['present', 'absent']
- type: str
- endpointidentifier:
- description:
- - An identifier name for the endpoint.
- type: str
- required: true
- endpointtype:
- description:
- - Type of endpoint we want to manage.
- choices: ['source', 'target']
- type: str
- required: true
- enginename:
- description:
- - Database engine to use. Please refer to
- the AWS DMS documentation for more information on the supported
- engines and their limitations.
- choices: ['mysql', 'oracle', 'postgres', 'mariadb', 'aurora',
- 'redshift', 's3', 'db2', 'azuredb', 'sybase',
- 'dynamodb', 'mongodb', 'sqlserver']
- type: str
- required: true
- username:
- description:
- - Username our endpoint will use to connect to the database.
- type: str
- password:
- description:
- - Password used to connect to the database.
- This attribute can only be written;
- the AWS API does not return this parameter.
- type: str
- servername:
- description:
- - Servername that the endpoint will connect to.
- type: str
- port:
- description:
- - TCP port for access to the database.
- type: int
- databasename:
- description:
- - Name for the database on the origin or target side.
- type: str
- extraconnectionattributes:
- description:
- - Extra attributes for the database connection. For more information
- about extra connection attributes, see the AWS documentation
- section for your data store.
- type: str
- kmskeyid:
- description:
- - Encryption key to use to encrypt replication storage and
- connection information.
- type: str
- tags:
- description:
- - A dict of tags to add to the endpoint.
- type: dict
- certificatearn:
- description:
- - Amazon Resource Name (ARN) for the certificate.
- type: str
- sslmode:
- description:
- - Mode used for the SSL connection.
- default: none
- choices: ['none', 'require', 'verify-ca', 'verify-full']
- type: str
- serviceaccessrolearn:
- description:
- - Amazon Resource Name (ARN) for the service access role that you
- want to use to create the endpoint.
- type: str
- externaltabledefinition:
- description:
- - The external table definition.
- type: str
- dynamodbsettings:
- description:
- - Settings in JSON format for the target Amazon DynamoDB endpoint
- if source or target is dynamodb.
- type: dict
- s3settings:
- description:
- - S3 buckets settings for the target Amazon S3 endpoint.
- type: dict
- dmstransfersettings:
- description:
- - The settings in JSON format for the DMS transfer type of
- source endpoint.
- type: dict
- mongodbsettings:
- description:
- - Settings in JSON format for the source MongoDB endpoint.
- type: dict
- kinesissettings:
- description:
- - Settings in JSON format for the target Amazon Kinesis
- Data Streams endpoint.
- type: dict
- elasticsearchsettings:
- description:
- - Settings in JSON format for the target Elasticsearch endpoint.
- type: dict
- wait:
- description:
- - Whether Ansible should wait for the object to be deleted when I(state=absent).
- type: bool
- default: false
- timeout:
- description:
- - Time in seconds we should wait for when deleting a resource.
- - Required when I(wait=true).
- type: int
- retries:
- description:
- - Number of times we should retry when deleting a resource.
- - Required when I(wait=true).
- type: int
-author:
- - "Rui Moreira (@ruimoreira)"
-extends_documentation_fragment:
-- aws
-- ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details
-# Endpoint Creation
-- dms_endpoint:
- state: present
- endpointidentifier: 'testsource'
- endpointtype: source
- enginename: aurora
- username: testing1
- password: testint1234
- servername: testing.domain.com
- port: 3306
- databasename: 'testdb'
- sslmode: none
- wait: false
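-
-# A hypothetical sketch: endpoint deletion; when wait is true, timeout and
-# retries must also be supplied.
-- dms_endpoint:
-    state: absent
-    endpointidentifier: 'testsource'
-    endpointtype: source
-    enginename: aurora
-    wait: true
-    timeout: 60
-    retries: 5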
-'''
-
-RETURN = ''' # '''
-import traceback
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-backoff_params = dict(tries=5, delay=1, backoff=1.5)
-
-
-@AWSRetry.backoff(**backoff_params)
-def describe_endpoints(connection, endpoint_identifier):
- """ checks if the endpoint exists """
- try:
- endpoint_filter = dict(Name='endpoint-id',
- Values=[endpoint_identifier])
- return connection.describe_endpoints(Filters=[endpoint_filter])
- except botocore.exceptions.ClientError:
- return {'Endpoints': []}
-
-
-@AWSRetry.backoff(**backoff_params)
-def dms_delete_endpoint(client, **params):
- """deletes the DMS endpoint based on the EndpointArn"""
- if module.params.get('wait'):
- return delete_dms_endpoint(client)
- else:
- return client.delete_endpoint(**params)
-
-
-@AWSRetry.backoff(**backoff_params)
-def dms_create_endpoint(client, **params):
- """ creates the DMS endpoint"""
- return client.create_endpoint(**params)
-
-
-@AWSRetry.backoff(**backoff_params)
-def dms_modify_endpoint(client, **params):
- """ updates the endpoint"""
- return client.modify_endpoint(**params)
-
-
-@AWSRetry.backoff(**backoff_params)
-def get_endpoint_deleted_waiter(client):
- return client.get_waiter('endpoint_deleted')
-
-
-def endpoint_exists(endpoint):
- """ Returns boolean based on the existence of the endpoint
- :param endpoint: dict containing the described endpoint
- :return: bool
- """
- return bool(endpoint['Endpoints'])
-
-
-def delete_dms_endpoint(connection):
- try:
- endpoint = describe_endpoints(connection,
- module.params.get('endpointidentifier'))
- endpoint_arn = endpoint['Endpoints'][0].get('EndpointArn')
- delete_arn = dict(
- EndpointArn=endpoint_arn
- )
- if module.params.get('wait'):
-
- delete_output = connection.delete_endpoint(**delete_arn)
- delete_waiter = get_endpoint_deleted_waiter(connection)
- delete_waiter.wait(
- Filters=[{
- 'Name': 'endpoint-arn',
- 'Values': [endpoint_arn]
-
- }],
- WaiterConfig={
- 'Delay': module.params.get('timeout'),
- 'MaxAttempts': module.params.get('retries')
- }
- )
- return delete_output
- else:
- return connection.delete_endpoint(**delete_arn)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to delete the DMS endpoint.",
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to delete the DMS endpoint.",
- exception=traceback.format_exc())
-
-
-def create_module_params():
- """
- Reads the module parameters and returns a dict
- :return: dict
- """
- endpoint_parameters = dict(
- EndpointIdentifier=module.params.get('endpointidentifier'),
- EndpointType=module.params.get('endpointtype'),
- EngineName=module.params.get('enginename'),
- Username=module.params.get('username'),
- Password=module.params.get('password'),
- ServerName=module.params.get('servername'),
- Port=module.params.get('port'),
- DatabaseName=module.params.get('databasename'),
- SslMode=module.params.get('sslmode')
- )
- if module.params.get('EndpointArn'):
- endpoint_parameters['EndpointArn'] = module.params.get('EndpointArn')
- if module.params.get('certificatearn'):
- endpoint_parameters['CertificateArn'] = \
- module.params.get('certificatearn')
-
- if module.params.get('dmstransfersettings'):
- endpoint_parameters['DmsTransferSettings'] = \
- module.params.get('dmstransfersettings')
-
- if module.params.get('extraconnectionattributes'):
- endpoint_parameters['ExtraConnectionAttributes'] =\
- module.params.get('extraconnectionattributes')
-
- if module.params.get('kmskeyid'):
- endpoint_parameters['KmsKeyId'] = module.params.get('kmskeyid')
-
- if module.params.get('tags'):
- endpoint_parameters['Tags'] = module.params.get('tags')
-
- if module.params.get('serviceaccessrolearn'):
- endpoint_parameters['ServiceAccessRoleArn'] = \
- module.params.get('serviceaccessrolearn')
-
- if module.params.get('externaltabledefinition'):
- endpoint_parameters['ExternalTableDefinition'] = \
- module.params.get('externaltabledefinition')
-
- if module.params.get('dynamodbsettings'):
- endpoint_parameters['DynamoDbSettings'] = \
- module.params.get('dynamodbsettings')
-
- if module.params.get('s3settings'):
- endpoint_parameters['S3Settings'] = module.params.get('s3settings')
-
- if module.params.get('mongodbsettings'):
- endpoint_parameters['MongoDbSettings'] = \
- module.params.get('mongodbsettings')
-
- if module.params.get('kinesissettings'):
- endpoint_parameters['KinesisSettings'] = \
- module.params.get('kinesissettings')
-
- if module.params.get('elasticsearchsettings'):
- endpoint_parameters['ElasticsearchSettings'] = \
- module.params.get('elasticsearchsettings')
-
- # wait, timeout and retries only drive module behaviour; they are not
- # valid create_endpoint/modify_endpoint API parameters, so they are
- # deliberately not added to endpoint_parameters.
-
- return endpoint_parameters
-
-
-def compare_params(param_described):
-    """
-    Compares the dict obtained from describing the DMS endpoint with
-    the values built from the module parameters. The password can
-    never be compared, because boto3's describe method does not
-    return it, presumably for security reasons.
-    """
-    modparams = create_module_params()
-    changed = False
-    for paramname in modparams:
-        if paramname == 'Password':
-            continue
-        described_value = param_described.get(paramname)
-        # Some values come back from the API in a different case, so
-        # fall back to a case-insensitive string comparison.
-        if described_value == modparams[paramname] or \
-                str(described_value).lower() == str(modparams[paramname]).lower():
-            continue
-        changed = True
-    return changed
-
-
-def modify_dms_endpoint(connection):
-
- try:
- params = create_module_params()
- return dms_modify_endpoint(connection, **params)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to update DMS endpoint.",
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to update DMS endpoint.",
- exception=traceback.format_exc())
-
-
-def create_dms_endpoint(connection):
- """
- Function to create the dms endpoint
- :param connection: boto3 aws connection
- :return: information about the dms endpoint object
- """
-
- try:
- params = create_module_params()
- return dms_create_endpoint(connection, **params)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to create DMS endpoint.",
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to create DMS endpoint.",
- exception=traceback.format_exc())
-
-
-def main():
- argument_spec = dict(
- state=dict(choices=['present', 'absent'], default='present'),
- endpointidentifier=dict(required=True),
- endpointtype=dict(choices=['source', 'target'], required=True),
- enginename=dict(choices=['mysql', 'oracle', 'postgres', 'mariadb',
- 'aurora', 'redshift', 's3', 'db2', 'azuredb',
- 'sybase', 'dynamodb', 'mongodb', 'sqlserver'],
- required=True),
- username=dict(),
- password=dict(no_log=True),
- servername=dict(),
- port=dict(type='int'),
- databasename=dict(),
- extraconnectionattributes=dict(),
- kmskeyid=dict(),
- tags=dict(type='dict'),
- certificatearn=dict(),
- sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'],
- default='none'),
- serviceaccessrolearn=dict(),
- externaltabledefinition=dict(),
- dynamodbsettings=dict(type='dict'),
- s3settings=dict(type='dict'),
- dmstransfersettings=dict(type='dict'),
- mongodbsettings=dict(type='dict'),
- kinesissettings=dict(type='dict'),
- elasticsearchsettings=dict(type='dict'),
- wait=dict(type='bool', default=False),
- timeout=dict(type='int'),
- retries=dict(type='int')
- )
- global module
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- required_if=[
- ["state", "absent", ["wait"]],
- ["wait", "True", ["timeout"]],
- ["wait", "True", ["retries"]],
- ],
- supports_check_mode=False
- )
- exit_message = None
- changed = False
-
- state = module.params.get('state')
-
- dmsclient = module.client('dms')
- endpoint = describe_endpoints(dmsclient,
- module.params.get('endpointidentifier'))
- if state == 'present':
- if endpoint_exists(endpoint):
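- # An existing endpoint is only modified when the described
- # parameters differ from the requested ones.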
- module.params['EndpointArn'] = \
- endpoint['Endpoints'][0].get('EndpointArn')
- params_changed = compare_params(endpoint["Endpoints"][0])
- if params_changed:
- updated_dms = modify_dms_endpoint(dmsclient)
- exit_message = updated_dms
- changed = True
- else:
- module.exit_json(changed=False, msg="Endpoint Already Exists")
- else:
- dms_properties = create_dms_endpoint(dmsclient)
- exit_message = dms_properties
- changed = True
- elif state == 'absent':
- if endpoint_exists(endpoint):
- delete_results = delete_dms_endpoint(dmsclient)
- exit_message = delete_results
- changed = True
- else:
- changed = False
- exit_message = 'DMS Endpoint does not exist'
-
- module.exit_json(changed=changed, msg=exit_message)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/dms_replication_subnet_group.py b/lib/ansible/modules/cloud/amazon/dms_replication_subnet_group.py
deleted file mode 100644
index 97d0567d01..0000000000
--- a/lib/ansible/modules/cloud/amazon/dms_replication_subnet_group.py
+++ /dev/null
@@ -1,238 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: dms_replication_subnet_group
-short_description: Creates or destroys a data migration services subnet group
-description:
- - Creates or destroys a data migration services subnet group.
-version_added: "2.9"
-options:
- state:
- description:
- - State of the subnet group.
- default: present
- choices: ['present', 'absent']
- type: str
- identifier:
- description:
- - The name for the replication subnet group.
- This value is stored as a lowercase string.
- Must contain no more than 255 alphanumeric characters,
- periods, spaces, underscores, or hyphens. Must not be "default".
- type: str
- required: true
- description:
- description:
- - The description for the subnet group.
- type: str
- required: true
- subnet_ids:
- description:
- - A list containing the subnet IDs for the replication subnet group;
- it must contain at least two items.
- type: list
- elements: str
- required: true
-author:
- - "Rui Moreira (@ruimoreira)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- dms_replication_subnet_group:
- state: present
- identifier: "dev-sngroup"
- description: "Development Subnet Group asdasdas"
- subnet_ids: ['subnet-id1','subnet-id2']
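-
-# A hypothetical sketch: remove the same subnet group again. All three
-# options are required by the module even when state is absent.
-- dms_replication_subnet_group:
-    state: absent
-    identifier: "dev-sngroup"
-    description: "Development subnet group"
-    subnet_ids: ['subnet-id1','subnet-id2']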
-'''
-
-RETURN = ''' # '''
-
-import traceback
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-backoff_params = dict(tries=5, delay=1, backoff=1.5)
-
-
-@AWSRetry.backoff(**backoff_params)
-def describe_subnet_group(connection, subnet_group):
- """checks if instance exists"""
- try:
- subnet_group_filter = dict(Name='replication-subnet-group-id',
- Values=[subnet_group])
- return connection.describe_replication_subnet_groups(Filters=[subnet_group_filter])
- except botocore.exceptions.ClientError:
- return {'ReplicationSubnetGroups': []}
-
-
-@AWSRetry.backoff(**backoff_params)
-def replication_subnet_group_create(connection, **params):
- """ creates the replication subnet group """
- return connection.create_replication_subnet_group(**params)
-
-
-@AWSRetry.backoff(**backoff_params)
-def replication_subnet_group_modify(connection, **modify_params):
- return connection.modify_replication_subnet_group(**modify_params)
-
-
-@AWSRetry.backoff(**backoff_params)
-def replication_subnet_group_delete(module, connection):
- subnetid = module.params.get('identifier')
- delete_parameters = dict(ReplicationSubnetGroupIdentifier=subnetid)
- return connection.delete_replication_subnet_group(**delete_parameters)
-
-
-def replication_subnet_exists(subnet):
- """ Returns boolean based on the existence of the endpoint
- :param endpoint: dict containing the described endpoint
- :return: bool
- """
- return bool(len(subnet['ReplicationSubnetGroups']))
-
-
-def create_module_params(module):
- """
- Reads the module parameters and returns a dict
- :return: dict
- """
- instance_parameters = dict(
- # ReplicationSubnetGroupIdentifier gets translated to lower case anyway by the API
- ReplicationSubnetGroupIdentifier=module.params.get('identifier').lower(),
- ReplicationSubnetGroupDescription=module.params.get('description'),
- SubnetIds=module.params.get('subnet_ids'),
- )
-
- return instance_parameters
-
-
-def compare_params(module, param_described):
- """
- Compares the dict obtained from the describe call with the
- parameters built from the module arguments and returns True
- if the subnet group needs to be modified.
- """
- modparams = create_module_params(module)
- changed = False
- # need to sanitize values that get returned from the API
- if 'VpcId' in param_described.keys():
- param_described.pop('VpcId')
- if 'SubnetGroupStatus' in param_described.keys():
- param_described.pop('SubnetGroupStatus')
- for paramname in modparams.keys():
- if paramname in param_described.keys() and \
- param_described.get(paramname) == modparams[paramname]:
- pass
- elif paramname == 'SubnetIds':
- subnets = []
- for subnet in param_described.get('Subnets'):
- subnets.append(subnet.get('SubnetIdentifier'))
- for modulesubnet in modparams['SubnetIds']:
- if modulesubnet in subnets:
- pass
- else:
- changed = True
- return changed
-
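The SubnetIds branch of compare_params() above checks list membership with nested loops; the same test reads more directly with sets. An illustrative sketch (not the module's code):

def subnets_differ(desired_ids, described_subnets):
    """True if any desired subnet ID is missing from the described group.

    described_subnets mirrors the 'Subnets' shape returned by
    describe_replication_subnet_groups, e.g. [{'SubnetIdentifier': 'subnet-1'}].
    """
    current = {s['SubnetIdentifier'] for s in described_subnets}
    return not set(desired_ids) <= current

# subnets_differ(['subnet-1', 'subnet-2'], [{'SubnetIdentifier': 'subnet-1'}]) -> True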
-
-def create_replication_subnet_group(module, connection):
- try:
- params = create_module_params(module)
- return replication_subnet_group_create(connection, **params)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to create DMS replication subnet group.",
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to create DMS replication subnet group.",
- exception=traceback.format_exc())
-
-
-def modify_replication_subnet_group(module, connection):
- try:
- modify_params = create_module_params(module)
- return replication_subnet_group_modify(connection, **modify_params)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to Modify the DMS replication subnet group.",
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to Modify the DMS replication subnet group.",
- exception=traceback.format_exc())
-
-
-def main():
- argument_spec = dict(
- state=dict(type='str', choices=['present', 'absent'], default='present'),
- identifier=dict(type='str', required=True),
- description=dict(type='str', required=True),
- subnet_ids=dict(type='list', elements='str', required=True),
- )
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
- exit_message = None
- changed = False
-
- state = module.params.get('state')
- dmsclient = module.client('dms')
- subnet_group = describe_subnet_group(dmsclient,
- module.params.get('identifier'))
- if state == 'present':
- if replication_subnet_exists(subnet_group):
- if compare_params(module, subnet_group["ReplicationSubnetGroups"][0]):
- if not module.check_mode:
- exit_message = modify_replication_subnet_group(module, dmsclient)
- else:
- exit_message = "Check mode enabled"
- changed = True
- else:
- exit_message = "No changes to Subnet group"
- else:
- if not module.check_mode:
- exit_message = create_replication_subnet_group(module, dmsclient)
- changed = True
- else:
- exit_message = "Check mode enabled"
-
- elif state == 'absent':
- if replication_subnet_exists(subnet_group):
- if not module.check_mode:
- replication_subnet_group_delete(module, dmsclient)
- changed = True
- exit_message = "Replication subnet group Deleted"
- else:
- exit_message = "Check mode enabled"
- changed = True
-
- else:
- changed = False
- exit_message = "Replication subnet group does not exist"
-
- module.exit_json(changed=changed, msg=exit_message)
-
-
-if __name__ == '__main__':
- main()
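For reference, the boto3 calls this module wraps can also be driven directly. A minimal sketch assuming default credentials; the identifiers and subnet IDs are placeholders:

import boto3

dms = boto3.client('dms')

# Create a replication subnet group, then look it up by identifier --
# the same two API calls the module wraps (placeholder values throughout).
dms.create_replication_subnet_group(
    ReplicationSubnetGroupIdentifier='dev-sngroup',
    ReplicationSubnetGroupDescription='Development subnet group',
    SubnetIds=['subnet-aaaa1111', 'subnet-bbbb2222'],
)

groups = dms.describe_replication_subnet_groups(
    Filters=[{'Name': 'replication-subnet-group-id', 'Values': ['dev-sngroup']}]
)['ReplicationSubnetGroups']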
diff --git a/lib/ansible/modules/cloud/amazon/dynamodb_table.py b/lib/ansible/modules/cloud/amazon/dynamodb_table.py
deleted file mode 100644
index 4e4a863643..0000000000
--- a/lib/ansible/modules/cloud/amazon/dynamodb_table.py
+++ /dev/null
@@ -1,522 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: dynamodb_table
-short_description: Create, update or delete AWS Dynamo DB tables
-version_added: "2.0"
-description:
- - Create or delete AWS Dynamo DB tables.
- - Can update the provisioned throughput on existing tables.
- - Returns the status of the specified table.
-author: Alan Loi (@loia)
-requirements:
- - "boto >= 2.37.0"
- - "boto3 >= 1.4.4 (for tagging)"
-options:
- state:
- description:
- - Create or delete the table.
- choices: ['present', 'absent']
- default: 'present'
- type: str
- name:
- description:
- - Name of the table.
- required: true
- type: str
- hash_key_name:
- description:
- - Name of the hash key.
- - Required when C(state=present).
- type: str
- hash_key_type:
- description:
- - Type of the hash key.
- choices: ['STRING', 'NUMBER', 'BINARY']
- default: 'STRING'
- type: str
- range_key_name:
- description:
- - Name of the range key.
- type: str
- range_key_type:
- description:
- - Type of the range key.
- choices: ['STRING', 'NUMBER', 'BINARY']
- default: 'STRING'
- type: str
- read_capacity:
- description:
- - Read throughput capacity (units) to provision.
- default: 1
- type: int
- write_capacity:
- description:
- - Write throughput capacity (units) to provision.
- default: 1
- type: int
- indexes:
- description:
- List of dictionaries describing indexes to add to the table. Global indexes can be updated; local indexes cannot be updated and do not carry throughput settings.
- - "required options: ['name', 'type', 'hash_key_name']"
- - "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']"
- suboptions:
- name:
- description: The name of the index.
- type: str
- required: true
- type:
- description:
- - The type of index.
- - "Valid types: C(all), C(global_all), C(global_include), C(global_keys_only), C(include), C(keys_only)"
- type: str
- required: true
- hash_key_name:
- description: The name of the hash-based key.
- required: true
- type: str
- hash_key_type:
- description: The type of the hash-based key.
- type: str
- range_key_name:
- description: The name of the range-based key.
- type: str
- range_key_type:
- type: str
- description: The type of the range-based key.
- includes:
- type: list
- description: A list of fields to include when using C(global_include) or C(include) indexes.
- read_capacity:
- description:
- - Read throughput capacity (units) to provision for the index.
- type: int
- write_capacity:
- description:
- - Write throughput capacity (units) to provision for the index.
- type: int
- default: []
- version_added: "2.1"
- type: list
- elements: dict
- tags:
- version_added: "2.4"
- description:
- A hash/dictionary of tags to add to the table.
- 'For example: C({"key":"value"}) and C({"key":"value","key2":"value2"})'
- type: dict
- wait_for_active_timeout:
- version_added: "2.4"
- description:
- How long before wait gives up, in seconds. Only used when I(tags) is set.
- default: 60
- type: int
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-EXAMPLES = '''
-# Create dynamo table with hash and range primary key
-- dynamodb_table:
- name: my-table
- region: us-east-1
- hash_key_name: id
- hash_key_type: STRING
- range_key_name: create_time
- range_key_type: NUMBER
- read_capacity: 2
- write_capacity: 2
- tags:
- tag_name: tag_value
-
-# Update capacity on existing dynamo table
-- dynamodb_table:
- name: my-table
- region: us-east-1
- read_capacity: 10
- write_capacity: 10
-
-# set index on existing dynamo table
-- dynamodb_table:
- name: my-table
- region: us-east-1
- indexes:
- - name: NamedIndex
- type: global_include
- hash_key_name: id
- range_key_name: create_time
- includes:
- - other_field
- - other_field2
- read_capacity: 10
- write_capacity: 10
-
-# Delete dynamo table
-- dynamodb_table:
- name: my-table
- region: us-east-1
- state: absent
-'''
-
-RETURN = '''
-table_status:
- description: The current status of the table.
- returned: success
- type: str
- sample: ACTIVE
-'''
-
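The module below drives the legacy boto.dynamodb2 API. For comparison, the hash-plus-range example from EXAMPLES above is a single create_table call in boto3; a sketch with placeholder names:

import boto3

dynamodb = boto3.client('dynamodb', region_name='us-east-1')

# boto3 equivalent of the hash+range EXAMPLES entry above (placeholder names).
dynamodb.create_table(
    TableName='my-table',
    KeySchema=[
        {'AttributeName': 'id', 'KeyType': 'HASH'},
        {'AttributeName': 'create_time', 'KeyType': 'RANGE'},
    ],
    AttributeDefinitions=[
        {'AttributeName': 'id', 'AttributeType': 'S'},
        {'AttributeName': 'create_time', 'AttributeType': 'N'},
    ],
    ProvisionedThroughput={'ReadCapacityUnits': 2, 'WriteCapacityUnits': 2},
)
dynamodb.get_waiter('table_exists').wait(TableName='my-table')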
-import time
-import traceback
-
-try:
- import boto
- import boto.dynamodb2
- from boto.dynamodb2.table import Table
- from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex, IncludeIndex, KeysOnlyIndex
- from boto.dynamodb2.types import STRING, NUMBER, BINARY
- from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError
- from boto.dynamodb2.exceptions import ValidationException
- HAS_BOTO = True
-
- DYNAMO_TYPE_MAP = {
- 'STRING': STRING,
- 'NUMBER': NUMBER,
- 'BINARY': BINARY
- }
-
-except ImportError:
- HAS_BOTO = False
-
-try:
- import botocore
- from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_conn
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
-
-
-DYNAMO_TYPE_DEFAULT = 'STRING'
-INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name']
-INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']
-INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']
-
-
-def create_or_update_dynamo_table(connection, module, boto3_dynamodb=None, boto3_sts=None, region=None):
- table_name = module.params.get('name')
- hash_key_name = module.params.get('hash_key_name')
- hash_key_type = module.params.get('hash_key_type')
- range_key_name = module.params.get('range_key_name')
- range_key_type = module.params.get('range_key_type')
- read_capacity = module.params.get('read_capacity')
- write_capacity = module.params.get('write_capacity')
- all_indexes = module.params.get('indexes')
- tags = module.params.get('tags')
- wait_for_active_timeout = module.params.get('wait_for_active_timeout')
-
- for index in all_indexes:
- validate_index(index, module)
-
- schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type)
-
- throughput = {
- 'read': read_capacity,
- 'write': write_capacity
- }
-
- indexes, global_indexes = get_indexes(all_indexes)
-
- result = dict(
- region=region,
- table_name=table_name,
- hash_key_name=hash_key_name,
- hash_key_type=hash_key_type,
- range_key_name=range_key_name,
- range_key_type=range_key_type,
- read_capacity=read_capacity,
- write_capacity=write_capacity,
- indexes=all_indexes,
- )
-
- try:
- table = Table(table_name, connection=connection)
-
- if dynamo_table_exists(table):
- result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
- else:
- if not module.check_mode:
- Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes)
- result['changed'] = True
-
- if not module.check_mode:
- result['table_status'] = table.describe()['Table']['TableStatus']
-
- if tags:
- # only tables which are active can be tagged
- wait_until_table_active(module, table, wait_for_active_timeout)
- account_id = get_account_id(boto3_sts)
- boto3_dynamodb.tag_resource(
- ResourceArn='arn:aws:dynamodb:' +
- region +
- ':' +
- account_id +
- ':table/' +
- table_name,
- Tags=ansible_dict_to_boto3_tag_list(tags))
- result['tags'] = tags
-
- except BotoServerError:
- result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
-
-def get_account_id(boto3_sts):
- return boto3_sts.get_caller_identity()["Account"]
-
-
-def wait_until_table_active(module, table, wait_timeout):
- max_wait_time = time.time() + wait_timeout
- while (max_wait_time > time.time()) and (table.describe()['Table']['TableStatus'] != 'ACTIVE'):
- time.sleep(5)
- if max_wait_time <= time.time():
- # waiting took too long
- module.fail_json(msg="timed out waiting for table to exist")
-
-
-def delete_dynamo_table(connection, module):
- table_name = module.params.get('name')
-
- result = dict(
- region=module.params.get('region'),
- table_name=table_name,
- )
-
- try:
- table = Table(table_name, connection=connection)
-
- if dynamo_table_exists(table):
- if not module.check_mode:
- table.delete()
- result['changed'] = True
-
- else:
- result['changed'] = False
-
- except BotoServerError:
- result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc()
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
-
-def dynamo_table_exists(table):
- try:
- table.describe()
- return True
-
- except JSONResponseError as e:
- if e.message and e.message.startswith('Requested resource not found'):
- return False
- else:
- raise e
-
-
-def update_dynamo_table(table, throughput=None, check_mode=False, global_indexes=None):
- table.describe() # populate table details
- throughput_changed = False
- global_indexes_changed = False
- if has_throughput_changed(table, throughput):
- if not check_mode:
- throughput_changed = table.update(throughput=throughput)
- else:
- throughput_changed = True
-
- removed_indexes, added_indexes, index_throughput_changes = get_changed_global_indexes(table, global_indexes)
- if removed_indexes:
- if not check_mode:
- for name, index in removed_indexes.items():
- global_indexes_changed = table.delete_global_secondary_index(name) or global_indexes_changed
- else:
- global_indexes_changed = True
-
- if added_indexes:
- if not check_mode:
- for name, index in added_indexes.items():
- global_indexes_changed = table.create_global_secondary_index(global_index=index) or global_indexes_changed
- else:
- global_indexes_changed = True
-
- if index_throughput_changes:
- if not check_mode:
- # todo: remove try once boto has https://github.com/boto/boto/pull/3447 fixed
- try:
- global_indexes_changed = table.update_global_secondary_index(global_indexes=index_throughput_changes) or global_indexes_changed
- except ValidationException:
- pass
- else:
- global_indexes_changed = True
-
- return throughput_changed or global_indexes_changed
-
-
-def has_throughput_changed(table, new_throughput):
- if not new_throughput:
- return False
-
- return new_throughput['read'] != table.throughput['read'] or \
- new_throughput['write'] != table.throughput['write']
-
-
-def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type):
- if range_key_name:
- schema = [
- HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT])),
- RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT]))
- ]
- else:
- schema = [
- HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT]))
- ]
- return schema
-
-
-def get_changed_global_indexes(table, global_indexes):
- table.describe()
-
- table_index_info = dict((index.name, index.schema()) for index in table.global_indexes)
- table_index_objects = dict((index.name, index) for index in table.global_indexes)
- set_index_info = dict((index.name, index.schema()) for index in global_indexes)
- set_index_objects = dict((index.name, index) for index in global_indexes)
-
- removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info)
- added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info)
- # todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed
- # for name, index in set_index_objects.items():
- # if (name not in added_indexes and
- # (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or
- # index.throughput['write'] != str(table_index_objects[name].throughput['write']))):
- # index_throughput_changes[name] = index.throughput
- # todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed
- index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes)
-
- return removed_indexes, added_indexes, index_throughput_changes
-
-
-def validate_index(index, module):
- for key, val in index.items():
- if key not in INDEX_OPTIONS:
- module.fail_json(msg='%s is not a valid option for an index' % key)
- for required_option in INDEX_REQUIRED_OPTIONS:
- if required_option not in index:
- module.fail_json(msg='%s is a required option for an index' % required_option)
- if index['type'] not in INDEX_TYPE_OPTIONS:
- module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))
-
-
-def get_indexes(all_indexes):
- indexes = []
- global_indexes = []
- for index in all_indexes:
- name = index['name']
- schema = get_schema_param(index.get('hash_key_name'), index.get('hash_key_type'), index.get('range_key_name'), index.get('range_key_type'))
- throughput = {
- 'read': index.get('read_capacity', 1),
- 'write': index.get('write_capacity', 1)
- }
-
- if index['type'] == 'all':
- indexes.append(AllIndex(name, parts=schema))
-
- elif index['type'] == 'global_all':
- global_indexes.append(GlobalAllIndex(name, parts=schema, throughput=throughput))
-
- elif index['type'] == 'global_include':
- global_indexes.append(GlobalIncludeIndex(name, parts=schema, throughput=throughput, includes=index['includes']))
-
- elif index['type'] == 'global_keys_only':
- global_indexes.append(GlobalKeysOnlyIndex(name, parts=schema, throughput=throughput))
-
- elif index['type'] == 'include':
- indexes.append(IncludeIndex(name, parts=schema, includes=index['includes']))
-
- elif index['type'] == 'keys_only':
- indexes.append(KeysOnlyIndex(name, parts=schema))
-
- return indexes, global_indexes
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(default='present', choices=['present', 'absent']),
- name=dict(required=True, type='str'),
- hash_key_name=dict(type='str'),
- hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
- range_key_name=dict(type='str'),
- range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
- read_capacity=dict(default=1, type='int'),
- write_capacity=dict(default=1, type='int'),
- indexes=dict(default=[], type='list'),
- tags=dict(type='dict'),
- wait_for_active_timeout=dict(default=60, type='int'),
- ))
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True)
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- if not HAS_BOTO3 and module.params.get('tags'):
- module.fail_json(msg='boto3 required when using tags for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
- if not region:
- module.fail_json(msg='region must be specified')
-
- try:
- connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
- except (NoAuthHandlerFound, AnsibleAWSError) as e:
- module.fail_json(msg=str(e))
-
- if module.params.get('tags'):
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- boto3_dynamodb = boto3_conn(module, conn_type='client', resource='dynamodb', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- if not hasattr(boto3_dynamodb, 'tag_resource'):
- module.fail_json(msg='boto3 connection does not have tag_resource(), likely due to using an old version')
- boto3_sts = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg='cannot connect to AWS', exception=traceback.format_exc())
- else:
- boto3_dynamodb = None
- boto3_sts = None
-
- state = module.params.get('state')
- if state == 'present':
- create_or_update_dynamo_table(connection, module, boto3_dynamodb, boto3_sts, region)
- elif state == 'absent':
- delete_dynamo_table(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/dynamodb_ttl.py b/lib/ansible/modules/cloud/amazon/dynamodb_ttl.py
deleted file mode 100644
index 1096d9a4de..0000000000
--- a/lib/ansible/modules/cloud/amazon/dynamodb_ttl.py
+++ /dev/null
@@ -1,174 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: dynamodb_ttl
-short_description: Set TTL for a given DynamoDB table
-description:
-- Uses boto3 to set TTL.
-- Requires botocore version 1.5.24 or higher.
-version_added: "2.4"
-options:
- state:
- description:
- - State to set DynamoDB table to.
- choices: ['enable', 'disable']
- required: false
- type: str
- table_name:
- description:
- - Name of the DynamoDB table to work on.
- required: true
- type: str
- attribute_name:
- description:
- - The name of the Time To Live attribute used to store the expiration time for items in the table.
- - This appears to be required by the API even when disabling TTL.
- required: true
- type: str
-
-author: Ted Timmons (@tedder)
-extends_documentation_fragment:
-- aws
-- ec2
-requirements: [ botocore>=1.5.24, boto3 ]
-'''
-
-EXAMPLES = '''
-- name: enable TTL on my cowfacts table
- dynamodb_ttl:
- state: enable
- table_name: cowfacts
- attribute_name: cow_deleted_date
-
-- name: disable TTL on my cowfacts table
- dynamodb_ttl:
- state: disable
- table_name: cowfacts
- attribute_name: cow_deleted_date
-'''
-
-RETURN = '''
-current_status:
- description: current or new TTL specification.
- type: dict
- returned: always
- sample:
- - { "AttributeName": "deploy_timestamp", "TimeToLiveStatus": "ENABLED" }
- - { "AttributeName": "deploy_timestamp", "Enabled": true }
-'''
-
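Outside Ansible, the TTL round trip this module performs is two client calls. A minimal boto3 sketch; table and attribute names are placeholders:

import boto3

dynamodb = boto3.client('dynamodb')

# Read the current TTL specification for a table (placeholder names).
status = dynamodb.describe_time_to_live(TableName='cowfacts')
print(status['TimeToLiveDescription'])   # e.g. {'TimeToLiveStatus': 'DISABLED'}

# Enable TTL on an epoch-seconds attribute.
dynamodb.update_time_to_live(
    TableName='cowfacts',
    TimeToLiveSpecification={'Enabled': True, 'AttributeName': 'cow_deleted_date'},
)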
-import distutils.version
-import traceback
-
-try:
- import botocore
-except ImportError:
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info
-
-
-def get_current_ttl_state(c, table_name):
- '''Fetch the state dict for a table.'''
- current_state = c.describe_time_to_live(TableName=table_name)
- return current_state.get('TimeToLiveDescription')
-
-
-def does_state_need_changing(attribute_name, desired_state, current_spec):
- '''Run checks to see if the table needs to be modified. Basically a dirty check.'''
- if not current_spec:
- # we don't have an entry (or a table?)
- return True
-
- if desired_state.lower() == 'enable' and current_spec.get('TimeToLiveStatus') not in ['ENABLING', 'ENABLED']:
- return True
- if desired_state.lower() == 'disable' and current_spec.get('TimeToLiveStatus') not in ['DISABLING', 'DISABLED']:
- return True
- if attribute_name != current_spec.get('AttributeName'):
- return True
-
- return False
-
-
-def set_ttl_state(c, table_name, state, attribute_name):
- '''Set the TTL specification. Returns the update_time_to_live specification dict,
- which differs from the shape returned by the describe_* call.'''
- is_enabled = False
- if state.lower() == 'enable':
- is_enabled = True
-
- ret = c.update_time_to_live(
- TableName=table_name,
- TimeToLiveSpecification={
- 'Enabled': is_enabled,
- 'AttributeName': attribute_name
- }
- )
-
- return ret.get('TimeToLiveSpecification')
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(choices=['enable', 'disable']),
- table_name=dict(required=True),
- attribute_name=dict(required=True))
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- )
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
- elif distutils.version.StrictVersion(botocore.__version__) < distutils.version.StrictVersion('1.5.24'):
- # TTL was added in this version.
- module.fail_json(msg='Found botocore in version {0}, but >= {1} is required for TTL support'.format(botocore.__version__, '1.5.24'))
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- dbclient = boto3_conn(module, conn_type='client', resource='dynamodb', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg=str(e))
-
- result = {'changed': False}
- state = module.params['state']
-
- # wrap all our calls to catch the standard exceptions. We don't pass `module` in to the
- # methods so it's easier to do here.
- try:
- current_state = get_current_ttl_state(dbclient, module.params['table_name'])
-
- if does_state_need_changing(module.params['attribute_name'], module.params['state'], current_state):
- # changes needed
- new_state = set_ttl_state(dbclient, module.params['table_name'], module.params['state'], module.params['attribute_name'])
- result['current_status'] = new_state
- result['changed'] = True
- else:
- # no changes needed
- result['current_status'] = current_state
-
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.ParamValidationError as e:
- module.fail_json(msg=e.message, exception=traceback.format_exc())
- except ValueError as e:
- module.fail_json(msg=str(e))
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_ami_copy.py b/lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
deleted file mode 100644
index fdb59659f7..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_ami_copy
-short_description: copies AMI between AWS regions and returns the new image ID
-description:
- - Copies AMI from a source region to a destination region. B(Since version 2.3 this module depends on boto3.)
-version_added: "2.0"
-options:
- source_region:
- description:
- - The source region the AMI should be copied from.
- required: true
- type: str
- source_image_id:
- description:
- - The ID of the AMI in source region that should be copied.
- required: true
- type: str
- name:
- description:
- - The name of the new AMI to copy. (As of 2.3 the default is 'default', in prior versions it was 'null'.)
- default: "default"
- type: str
- description:
- description:
- - An optional human-readable string describing the contents and purpose of the new AMI.
- type: str
- encrypted:
- description:
- - Whether or not the destination snapshots of the copied AMI should be encrypted.
- version_added: "2.2"
- type: bool
- kms_key_id:
- description:
- - KMS key id used to encrypt the image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
- version_added: "2.2"
- type: str
- wait:
- description:
- - Wait for the copied AMI to be in state 'available' before returning.
- type: bool
- default: 'no'
- wait_timeout:
- description:
- - How long before wait gives up, in seconds. Prior to 2.3 the default was 1200.
- - From 2.3-2.5 this option was deprecated in favor of boto3 waiter defaults.
- This was re-enabled in 2.6 to allow timeouts greater than 10 minutes.
- default: 600
- type: int
- tags:
- description:
- 'A hash/dictionary of tags to add to the new copied AMI: C({"key":"value"}) and C({"key":"value","key2":"value2"})'
- type: dict
- tag_equality:
- description:
- - Whether to use tags if the source AMI already exists in the target region. If this is set, and all tags match
- in an existing AMI, the AMI will not be copied again.
- default: false
- type: bool
- version_added: 2.6
-author:
-- Amir Moulavi (@amir343) <amir.moulavi@gmail.com>
-- Tim C (@defunctio) <defunct@defunct.io>
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - boto3
-'''
-
-EXAMPLES = '''
-# Basic AMI Copy
-- ec2_ami_copy:
- source_region: us-east-1
- region: eu-west-1
- source_image_id: ami-xxxxxxx
-
-# AMI copy wait until available
-- ec2_ami_copy:
- source_region: us-east-1
- region: eu-west-1
- source_image_id: ami-xxxxxxx
- wait: yes
- wait_timeout: 1200 # Default timeout is 600
- register: image_id
-
-# Named AMI copy
-- ec2_ami_copy:
- source_region: us-east-1
- region: eu-west-1
- source_image_id: ami-xxxxxxx
- name: My-Awesome-AMI
- description: latest patch
-
-# Tagged AMI copy (will not copy the same AMI twice)
-- ec2_ami_copy:
- source_region: us-east-1
- region: eu-west-1
- source_image_id: ami-xxxxxxx
- tags:
- Name: My-Super-AMI
- Patch: 1.2.3
- tag_equality: yes
-
-# Encrypted AMI copy
-- ec2_ami_copy:
- source_region: us-east-1
- region: eu-west-1
- source_image_id: ami-xxxxxxx
- encrypted: yes
-
-# Encrypted AMI copy with specified key
-- ec2_ami_copy:
- source_region: us-east-1
- region: eu-west-1
- source_image_id: ami-xxxxxxx
- encrypted: yes
- kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
-'''
-
-RETURN = '''
-image_id:
- description: AMI ID of the copied AMI
- returned: always
- type: str
- sample: ami-e689729e
-'''
-
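copy_image() below wraps the EC2 CopyImage API plus an optional availability waiter. Driven directly from boto3 the same flow looks like this (regions, AMI ID, and names are placeholders):

import boto3

ec2 = boto3.client('ec2', region_name='eu-west-1')   # destination region

# Copy an AMI from another region, then wait until it is available.
copied = ec2.copy_image(
    SourceRegion='us-east-1',
    SourceImageId='ami-0123456789abcdef0',
    Name='My-Awesome-AMI',
    Description='latest patch',
)
ec2.get_waiter('image_available').wait(
    ImageIds=[copied['ImageId']],
    WaiterConfig={'Delay': 15, 'MaxAttempts': 40},   # 15s * 40 = 600s timeout
)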
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list
-from ansible.module_utils._text import to_native
-
-try:
- from botocore.exceptions import ClientError, NoCredentialsError, WaiterError, BotoCoreError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def copy_image(module, ec2):
- """
- Copies an AMI
-
- module : AnsibleModule object
- ec2: ec2 connection object
- """
-
- image = None
- changed = False
- tags = module.params.get('tags')
-
- params = {'SourceRegion': module.params.get('source_region'),
- 'SourceImageId': module.params.get('source_image_id'),
- 'Name': module.params.get('name'),
- 'Description': module.params.get('description'),
- 'Encrypted': module.params.get('encrypted'),
- }
- if module.params.get('kms_key_id'):
- params['KmsKeyId'] = module.params.get('kms_key_id')
-
- try:
- if module.params.get('tag_equality'):
- filters = [{'Name': 'tag:%s' % k, 'Values': [v]} for (k, v) in module.params.get('tags').items()]
- filters.append(dict(Name='state', Values=['available', 'pending']))
- images = ec2.describe_images(Filters=filters)
- if len(images['Images']) > 0:
- image = images['Images'][0]
- if not image:
- image = ec2.copy_image(**params)
- image_id = image['ImageId']
- if tags:
- ec2.create_tags(Resources=[image_id],
- Tags=ansible_dict_to_boto3_tag_list(tags))
- changed = True
-
- if module.params.get('wait'):
- delay = 15
- max_attempts = module.params.get('wait_timeout') // delay
- image_id = image.get('ImageId')
- ec2.get_waiter('image_available').wait(
- ImageIds=[image_id],
- WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
- )
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(image))
- except WaiterError as e:
- module.fail_json_aws(e, msg='An error occurred waiting for the image to become available')
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Could not copy AMI")
- except Exception as e:
- module.fail_json(msg='Unhandled exception. (%s)' % to_native(e))
-
-
-def main():
- argument_spec = dict(
- source_region=dict(required=True),
- source_image_id=dict(required=True),
- name=dict(default='default'),
- description=dict(default=''),
- encrypted=dict(type='bool', default=False, required=False),
- kms_key_id=dict(type='str', required=False),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=600),
- tags=dict(type='dict'),
- tag_equality=dict(type='bool', default=False))
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
- # TODO: Check botocore version
- ec2 = module.client('ec2')
- copy_image(module, ec2)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_asg.py b/lib/ansible/modules/cloud/amazon/ec2_asg.py
deleted file mode 100644
index 9c72e11d52..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_asg.py
+++ /dev/null
@@ -1,1831 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: ec2_asg
-short_description: Create or delete AWS AutoScaling Groups (ASGs)
-description:
- - Can create or delete AWS AutoScaling Groups.
- - Can be used with the M(ec2_lc) module to manage Launch Configurations.
-version_added: "1.6"
-author: "Gareth Rushgrove (@garethr)"
-requirements: [ "boto3", "botocore" ]
-options:
- state:
- description:
- - Register or deregister the instance.
- choices: ['present', 'absent']
- default: present
- type: str
- name:
- description:
- - Unique name for group to be created or deleted.
- required: true
- type: str
- load_balancers:
- description:
- - List of ELB names to use for the group. Use for classic load balancers.
- type: list
- elements: str
- target_group_arns:
- description:
- - List of target group ARNs to use for the group. Use for application load balancers.
- version_added: "2.4"
- type: list
- elements: str
- availability_zones:
- description:
- - List of availability zone names in which to create the group.
- - Defaults to all the availability zones in the region if I(vpc_zone_identifier) is not set.
- type: list
- elements: str
- launch_config_name:
- description:
- - Name of the Launch configuration to use for the group. See the M(ec2_lc) module for managing these.
- - If unspecified then the current group value will be used. One of I(launch_config_name) or I(launch_template) must be provided.
- type: str
- launch_template:
- description:
- - Dictionary describing the Launch Template to use
- suboptions:
- version:
- description:
- - The version number of the launch template to use.
- - Defaults to latest version if not provided.
- type: str
- launch_template_name:
- description:
- - The name of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required.
- type: str
- launch_template_id:
- description:
- - The id of the launch template. Only one of I(launch_template_name) or I(launch_template_id) is required.
- type: str
- type: dict
- version_added: "2.8"
- min_size:
- description:
- - Minimum number of instances in group, if unspecified then the current group value will be used.
- type: int
- max_size:
- description:
- - Maximum number of instances in group, if unspecified then the current group value will be used.
- type: int
- max_instance_lifetime:
- description:
- - The maximum amount of time, in seconds, that an instance can be in service.
- - Maximum instance lifetime must be equal to 0, between 604800 and 31536000 seconds (inclusive), or not specified.
- - Value of 0 removes lifetime restriction.
- version_added: "2.10"
- type: int
- mixed_instances_policy:
- description:
- - A mixed instance policy to use for the ASG.
- - Only used when the ASG is configured to use a Launch Template (I(launch_template)).
- - 'See also U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-mixedinstancespolicy.html)'
- required: false
- version_added: "2.10"
- suboptions:
- instance_types:
- description:
- - A list of instance_types.
- type: list
- elements: str
- type: dict
- placement_group:
- description:
- - Physical location of your cluster placement group created in Amazon EC2.
- version_added: "2.3"
- type: str
- desired_capacity:
- description:
- - Desired number of instances in group, if unspecified then the current group value will be used.
- type: int
- replace_all_instances:
- description:
- - In a rolling fashion, replace all instances that used the old launch configuration with one from the new launch configuration.
- It increases the ASG size by I(replace_batch_size), waits for the new instances to be up and running.
- After that, it terminates a batch of old instances, waits for the replacements, and repeats, until all old instances are replaced.
- Once that's done the ASG size is reduced back to the expected size.
- version_added: "1.8"
- default: false
- type: bool
- replace_batch_size:
- description:
- - Number of instances you'd like to replace at a time. Used with I(replace_all_instances).
- required: false
- version_added: "1.8"
- default: 1
- type: int
- replace_instances:
- description:
- - List of I(instance_ids) belonging to the named AutoScalingGroup that you would like to terminate and be replaced with instances
- matching the current launch configuration.
- version_added: "1.8"
- type: list
- elements: str
- lc_check:
- description:
- - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current I(launch_config).
- version_added: "1.8"
- default: true
- type: bool
- lt_check:
- description:
- - Check to make sure instances that are being replaced with I(replace_instances) do not already have the current
- I(launch_template) or I(launch_template) I(version).
- version_added: "2.8"
- default: true
- type: bool
- vpc_zone_identifier:
- description:
- - List of VPC subnets to use
- type: list
- elements: str
- tags:
- description:
- - A list of tags to add to the Auto Scale Group.
- - Optional key is I(propagate_at_launch), which defaults to true.
- - When I(propagate_at_launch) is true the tags will be propagated to the Instances created.
- version_added: "1.7"
- type: list
- elements: dict
- health_check_period:
- description:
- - Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
- required: false
- default: 300
- version_added: "1.7"
- type: int
- health_check_type:
- description:
- - The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
- required: false
- default: EC2
- version_added: "1.7"
- choices: ['EC2', 'ELB']
- type: str
- default_cooldown:
- description:
- - The number of seconds after a scaling activity completes before another can begin.
- default: 300
- version_added: "2.0"
- type: int
- wait_timeout:
- description:
- - How long to wait for instances to become viable when replaced. If you experience the error "Waited too long for ELB instances to be healthy",
- try increasing this value.
- default: 300
- type: int
- version_added: "1.8"
- wait_for_instances:
- description:
- - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all
- instances have a lifecycle_state of "InService" and a health_status of "Healthy".
- version_added: "1.9"
- default: true
- type: bool
- termination_policies:
- description:
- - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
- - Using I(termination_policies=Default) when modifying an existing AutoScalingGroup will result in the existing policy being retained
- instead of changed to C(Default).
- - 'Valid values include: C(Default), C(OldestInstance), C(NewestInstance), C(OldestLaunchConfiguration), C(ClosestToNextInstanceHour)'
- - 'Full documentation of valid values can be found in the AWS documentation:'
- - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#custom-termination-policy)'
- default: Default
- version_added: "2.0"
- type: list
- elements: str
- notification_topic:
- description:
- - A SNS topic ARN to send auto scaling notifications to.
- version_added: "2.2"
- type: str
- notification_types:
- description:
- - A list of auto scaling events to trigger notifications on.
- default:
- - 'autoscaling:EC2_INSTANCE_LAUNCH'
- - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR'
- - 'autoscaling:EC2_INSTANCE_TERMINATE'
- - 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
- required: false
- version_added: "2.2"
- type: list
- elements: str
- suspend_processes:
- description:
- - A list of scaling processes to suspend.
- - 'Valid values include:'
- - C(Launch), C(Terminate), C(HealthCheck), C(ReplaceUnhealthy), C(AZRebalance), C(AlarmNotification), C(ScheduledActions), C(AddToLoadBalancer)
- - 'Full documentation of valid values can be found in the AWS documentation:'
- - 'U(https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)'
- default: []
- version_added: "2.3"
- type: list
- elements: str
- metrics_collection:
- description:
- - Enable ASG metrics collection.
- type: bool
- default: false
- version_added: "2.6"
- metrics_granularity:
- description:
- - When I(metrics_collection=true) this will determine the granularity of metrics collected by CloudWatch.
- default: "1Minute"
- version_added: "2.6"
- type: str
- metrics_list:
- description:
- - List of autoscaling metrics to collect when I(metrics_collection=true).
- default:
- - 'GroupMinSize'
- - 'GroupMaxSize'
- - 'GroupDesiredCapacity'
- - 'GroupInServiceInstances'
- - 'GroupPendingInstances'
- - 'GroupStandbyInstances'
- - 'GroupTerminatingInstances'
- - 'GroupTotalInstances'
- version_added: "2.6"
- type: list
- elements: str
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-EXAMPLES = '''
-# Basic configuration with Launch Configuration
-
-- ec2_asg:
- name: special
- load_balancers: [ 'lb1', 'lb2' ]
- availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
- launch_config_name: 'lc-1'
- min_size: 1
- max_size: 10
- desired_capacity: 5
- vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
- tags:
- - environment: production
- propagate_at_launch: no
-
-# Rolling ASG Updates
-
-# Below is an example of how to assign a new launch config to an ASG and terminate old instances.
-#
-# All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
-# a rolling fashion with instances using the current launch configuration, "my_new_lc".
-#
-# This could also be considered a rolling deploy of a pre-baked AMI.
-#
-# If this is a newly created group, the instances will not be replaced since all instances
-# will have the current launch configuration.
-
-- name: create launch config
- ec2_lc:
- name: my_new_lc
- image_id: ami-lkajsf
- key_name: mykey
- region: us-east-1
- security_groups: sg-23423
- instance_type: m1.small
- assign_public_ip: yes
-
-- ec2_asg:
- name: myasg
- launch_config_name: my_new_lc
- health_check_period: 60
- health_check_type: ELB
- replace_all_instances: yes
- min_size: 5
- max_size: 5
- desired_capacity: 5
- region: us-east-1
-
-# To only replace a couple of instances instead of all of them, supply a list
-# to "replace_instances":
-
-- ec2_asg:
- name: myasg
- launch_config_name: my_new_lc
- health_check_period: 60
- health_check_type: ELB
- replace_instances:
- - i-b345231
- - i-24c2931
- min_size: 5
- max_size: 5
- desired_capacity: 5
- region: us-east-1
-
-# Basic Configuration with Launch Template
-
-- ec2_asg:
- name: special
- load_balancers: [ 'lb1', 'lb2' ]
- availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
- launch_template:
- version: '1'
- launch_template_name: 'lt-example'
- launch_template_id: 'lt-123456'
- min_size: 1
- max_size: 10
- desired_capacity: 5
- vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
- tags:
- - environment: production
- propagate_at_launch: no
-
-# Basic Configuration with Launch Template using mixed instance policy
-
-- ec2_asg:
- name: special
- load_balancers: [ 'lb1', 'lb2' ]
- availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
- launch_template:
- version: '1'
- launch_template_name: 'lt-example'
- launch_template_id: 'lt-123456'
- mixed_instances_policy:
- instance_types:
- - t3a.large
- - t3.large
- - t2.large
- min_size: 1
- max_size: 10
- desired_capacity: 5
- vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
- tags:
- - environment: production
- propagate_at_launch: no
-'''
-
-RETURN = '''
----
-auto_scaling_group_name:
- description: The unique name of the auto scaling group
- returned: success
- type: str
- sample: "myasg"
-auto_scaling_group_arn:
- description: The unique ARN of the autoscaling group
- returned: success
- type: str
- sample: "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:6a09ad6d-eeee-1234-b987-ee123ced01ad:autoScalingGroupName/myasg"
-availability_zones:
- description: The availability zones for the auto scaling group
- returned: success
- type: list
- sample: [
- "us-east-1d"
- ]
-created_time:
- description: Timestamp of create time of the auto scaling group
- returned: success
- type: str
- sample: "2017-11-08T14:41:48.272000+00:00"
-default_cooldown:
- description: The default cooldown time in seconds.
- returned: success
- type: int
- sample: 300
-desired_capacity:
- description: The number of EC2 instances that should be running in this group.
- returned: success
- type: int
- sample: 3
-healthcheck_period:
- description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
- returned: success
- type: int
- sample: 30
-healthcheck_type:
- description: The service you want the health status from, one of "EC2" or "ELB".
- returned: success
- type: str
- sample: "ELB"
-healthy_instances:
- description: Number of instances in a healthy state
- returned: success
- type: int
- sample: 5
-in_service_instances:
- description: Number of instances in service
- returned: success
- type: int
- sample: 3
-instance_facts:
- description: Dictionary of EC2 instances and their status as it relates to the ASG.
- returned: success
- type: dict
- sample: {
- "i-0123456789012": {
- "health_status": "Healthy",
- "launch_config_name": "public-webapp-production-1",
- "lifecycle_state": "InService"
- }
- }
-instances:
- description: list of instance IDs in the ASG
- returned: success
- type: list
- sample: [
- "i-0123456789012"
- ]
-launch_config_name:
- description: >
- Name of the launch configuration associated with the ASG.
- returned: success
- type: str
- sample: "public-webapp-production-1"
-load_balancers:
- description: List of load balancers names attached to the ASG.
- returned: success
- type: list
- sample: ["elb-webapp-prod"]
-max_instance_lifetime:
- description: The maximum amount of time, in seconds, that an instance can be in service.
- returned: success
- type: int
- sample: 604800
-max_size:
- description: Maximum size of group
- returned: success
- type: int
- sample: 3
-min_size:
- description: Minimum size of group
- returned: success
- type: int
- sample: 1
-mixed_instance_policy:
- description: Returns the list of instance types if a mixed instance policy is set.
- returned: success
- type: list
- sample: ["t3.micro", "t3a.micro"]
-pending_instances:
- description: Number of instances in pending state
- returned: success
- type: int
- sample: 1
-tags:
- description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
- returned: success
- type: list
- sample: [
- {
- "key": "Name",
- "value": "public-webapp-production-1",
- "resource_id": "public-webapp-production-1",
- "resource_type": "auto-scaling-group",
- "propagate_at_launch": "true"
- },
- {
- "key": "env",
- "value": "production",
- "resource_id": "public-webapp-production-1",
- "resource_type": "auto-scaling-group",
- "propagate_at_launch": "true"
- }
- ]
-target_group_arns:
- description: List of ARNs of the target groups that the ASG populates
- returned: success
- type: list
- sample: [
- "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
- "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
- ]
-target_group_names:
- description: List of names of the target groups that the ASG populates
- returned: success
- type: list
- sample: [
- "target-group-host-hello",
- "target-group-path-world"
- ]
-termination_policies:
- description: A list of termination policies for the group.
- returned: success
- type: list
- sample: ["Default"]
-unhealthy_instances:
- description: Number of instances in an unhealthy state
- returned: success
- type: int
- sample: 0
-viable_instances:
- description: Number of instances in a viable state
- returned: success
- type: int
- sample: 1
-vpc_zone_identifier:
- description: VPC zone ID / subnet ID for the auto scaling group
- returned: success
- type: str
- sample: "subnet-a31ef45f"
-metrics_collection:
- description: List of enabled AutoScalingGroup metrics
- returned: success
- type: list
- sample: [
- {
- "Granularity": "1Minute",
- "Metric": "GroupInServiceInstances"
- }
- ]
-'''
-
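The snake_case keys documented in RETURN above come from converting boto3's CamelCase responses via camel_dict_to_snake_dict. A simplified standalone sketch of that key conversion (not the module_utils implementation):

import re

def camel_to_snake(name):
    """'AutoScalingGroupARN' -> 'auto_scaling_group_arn' (simplified rule)."""
    s = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s).lower()

def camel_dict_to_snake(d):
    """Recursively convert the keys of a (possibly nested) dict."""
    if isinstance(d, dict):
        return {camel_to_snake(k): camel_dict_to_snake(v) for k, v in d.items()}
    if isinstance(d, list):
        return [camel_dict_to_snake(v) for v in d]
    return d

# camel_dict_to_snake({'MaxSize': 3, 'Tags': [{'PropagateAtLaunch': True}]})
# -> {'max_size': 3, 'tags': [{'propagate_at_launch': True}]}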
-import time
-import traceback
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (
- AWSRetry,
- camel_dict_to_snake_dict
-)
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity',
- 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName',
- 'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize',
- 'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies',
- 'VPCZoneIdentifier')
-
-INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
-
-backoff_params = dict(tries=10, delay=3, backoff=1.5)
-
-
-@AWSRetry.backoff(**backoff_params)
-def describe_autoscaling_groups(connection, group_name):
- pg = connection.get_paginator('describe_auto_scaling_groups')
- return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', [])
-
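describe_autoscaling_groups() above relies on botocore's paginator, which follows NextToken across pages, and build_full_result(), which merges every page into one response dict. The same pattern stands alone as follows (the group name is a placeholder):

import boto3

autoscaling = boto3.client('autoscaling')

# Paginate DescribeAutoScalingGroups and merge all pages into one dict,
# avoiding a manual NextToken loop.
paginator = autoscaling.get_paginator('describe_auto_scaling_groups')
result = paginator.paginate(AutoScalingGroupNames=['myasg']).build_full_result()
groups = result.get('AutoScalingGroups', [])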
-
-@AWSRetry.backoff(**backoff_params)
-def deregister_lb_instances(connection, lb_name, instance_id):
- connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)])
-
-
-@AWSRetry.backoff(**backoff_params)
-def describe_instance_health(connection, lb_name, instances):
- params = dict(LoadBalancerName=lb_name)
- if instances:
- params.update(Instances=instances)
- return connection.describe_instance_health(**params)
-
-
-@AWSRetry.backoff(**backoff_params)
-def describe_target_health(connection, target_group_arn, instances):
- return connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=instances)
-
-
-@AWSRetry.backoff(**backoff_params)
-def suspend_asg_processes(connection, asg_name, processes):
- connection.suspend_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
-
-
-@AWSRetry.backoff(**backoff_params)
-def resume_asg_processes(connection, asg_name, processes):
- connection.resume_processes(AutoScalingGroupName=asg_name, ScalingProcesses=processes)
-
-
-@AWSRetry.backoff(**backoff_params)
-def describe_launch_configurations(connection, launch_config_name):
- pg = connection.get_paginator('describe_launch_configurations')
- return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result()
-
-
-@AWSRetry.backoff(**backoff_params)
-def describe_launch_templates(connection, launch_template):
- if launch_template['launch_template_id'] is not None:
- try:
- lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']])
- return lt
- except (botocore.exceptions.ClientError) as e:
- module.fail_json(msg="No launch template found matching: %s" % launch_template)
- else:
- try:
- lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']])
- return lt
- except (botocore.exceptions.ClientError) as e:
- module.fail_json(msg="No launch template found matching: %s" % launch_template)
-
-
-@AWSRetry.backoff(**backoff_params)
-def create_asg(connection, **params):
- connection.create_auto_scaling_group(**params)
-
-
-@AWSRetry.backoff(**backoff_params)
-def put_notification_config(connection, asg_name, topic_arn, notification_types):
- connection.put_notification_configuration(
- AutoScalingGroupName=asg_name,
- TopicARN=topic_arn,
- NotificationTypes=notification_types
- )
-
-
-@AWSRetry.backoff(**backoff_params)
-def del_notification_config(connection, asg_name, topic_arn):
- connection.delete_notification_configuration(
- AutoScalingGroupName=asg_name,
- TopicARN=topic_arn
- )
-
-
-@AWSRetry.backoff(**backoff_params)
-def attach_load_balancers(connection, asg_name, load_balancers):
- connection.attach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)
-
-
-@AWSRetry.backoff(**backoff_params)
-def detach_load_balancers(connection, asg_name, load_balancers):
- connection.detach_load_balancers(AutoScalingGroupName=asg_name, LoadBalancerNames=load_balancers)
-
-
-@AWSRetry.backoff(**backoff_params)
-def attach_lb_target_groups(connection, asg_name, target_group_arns):
- connection.attach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)
-
-
-@AWSRetry.backoff(**backoff_params)
-def detach_lb_target_groups(connection, asg_name, target_group_arns):
- connection.detach_load_balancer_target_groups(AutoScalingGroupName=asg_name, TargetGroupARNs=target_group_arns)
-
-
-@AWSRetry.backoff(**backoff_params)
-def update_asg(connection, **params):
- connection.update_auto_scaling_group(**params)
-
-
-@AWSRetry.backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params)
-def delete_asg(connection, asg_name, force_delete):
- connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete)
-
-
-@AWSRetry.backoff(**backoff_params)
-def terminate_asg_instance(connection, instance_id, decrement_capacity):
- connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id,
- ShouldDecrementDesiredCapacity=decrement_capacity)
-
-
-def enforce_required_arguments_for_create():
- ''' Many arguments are not required when deleting an autoscaling group,
- so they cannot be mandatory module arguments; enforce the ones needed
- for create here. '''
- missing_args = []
- if module.params.get('launch_config_name') is None and module.params.get('launch_template') is None:
- module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group create")
- for arg in ('min_size', 'max_size'):
- if module.params[arg] is None:
- missing_args.append(arg)
- if missing_args:
- module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args))
-
-
-def get_properties(autoscaling_group):
- properties = dict(
- healthy_instances=0,
- in_service_instances=0,
- unhealthy_instances=0,
- pending_instances=0,
- viable_instances=0,
- terminating_instances=0
- )
- instance_facts = dict()
- autoscaling_group_instances = autoscaling_group.get('Instances')
-
- if autoscaling_group_instances:
- properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances]
- for i in autoscaling_group_instances:
- instance_facts[i['InstanceId']] = {
- 'health_status': i['HealthStatus'],
- 'lifecycle_state': i['LifecycleState']
- }
- if 'LaunchConfigurationName' in i:
- instance_facts[i['InstanceId']]['launch_config_name'] = i['LaunchConfigurationName']
- elif 'LaunchTemplate' in i:
- instance_facts[i['InstanceId']]['launch_template'] = i['LaunchTemplate']
-
- if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService':
- properties['viable_instances'] += 1
-
- if i['HealthStatus'] == 'Healthy':
- properties['healthy_instances'] += 1
- else:
- properties['unhealthy_instances'] += 1
-
- if i['LifecycleState'] == 'InService':
- properties['in_service_instances'] += 1
- if i['LifecycleState'] == 'Terminating':
- properties['terminating_instances'] += 1
- if i['LifecycleState'] == 'Pending':
- properties['pending_instances'] += 1
- else:
- properties['instances'] = []
-
- properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName')
- properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN')
- properties['availability_zones'] = autoscaling_group.get('AvailabilityZones')
- properties['created_time'] = autoscaling_group.get('CreatedTime')
- properties['instance_facts'] = instance_facts
- properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames')
- if 'LaunchConfigurationName' in autoscaling_group:
- properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName')
- else:
- properties['launch_template'] = autoscaling_group.get('LaunchTemplate')
- properties['tags'] = autoscaling_group.get('Tags')
- properties['max_instance_lifetime'] = autoscaling_group.get('MaxInstanceLifetime')
- properties['min_size'] = autoscaling_group.get('MinSize')
- properties['max_size'] = autoscaling_group.get('MaxSize')
- properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity')
- properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown')
- properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod')
- properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType')
- properties['termination_policies'] = autoscaling_group.get('TerminationPolicies')
- properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs')
- properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier')
- raw_mixed_instance_object = autoscaling_group.get('MixedInstancesPolicy')
- if raw_mixed_instance_object:
- properties['mixed_instances_policy'] = [x['InstanceType'] for x in raw_mixed_instance_object.get('LaunchTemplate').get('Overrides')]
-
- metrics = autoscaling_group.get('EnabledMetrics')
- if metrics:
- metrics.sort(key=lambda x: x["Metric"])
- properties['metrics_collection'] = metrics
-
- if properties['target_group_arns']:
- elbv2_connection = module.client('elbv2')
- tg_paginator = elbv2_connection.get_paginator('describe_target_groups')
- tg_result = tg_paginator.paginate(
- TargetGroupArns=properties['target_group_arns']
- ).build_full_result()
- target_groups = tg_result['TargetGroups']
- else:
- target_groups = []
-
- properties['target_group_names'] = [
- tg['TargetGroupName']
- for tg in target_groups
- ]
-
- return properties
-
-
-def get_launch_object(connection, ec2_connection):
- launch_object = dict()
- launch_config_name = module.params.get('launch_config_name')
- launch_template = module.params.get('launch_template')
- mixed_instances_policy = module.params.get('mixed_instances_policy')
- if launch_config_name is None and launch_template is None:
- return launch_object
- elif launch_config_name:
- try:
- launch_configs = describe_launch_configurations(connection, launch_config_name)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg="Failed to describe launch configurations",
- exception=traceback.format_exc())
- if len(launch_configs['LaunchConfigurations']) == 0:
- module.fail_json(msg="No launch config found with name %s" % launch_config_name)
- launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']}
- return launch_object
- elif launch_template:
- lt = describe_launch_templates(ec2_connection, launch_template)['LaunchTemplates'][0]
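-        # use the requested template version when given; otherwise pin to the template's latest version number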
- if launch_template['version'] is not None:
- launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": launch_template['version']}}
- else:
- launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": str(lt['LatestVersionNumber'])}}
-
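-        # when a mixed_instances_policy is requested, wrap the launch template in a
-        # MixedInstancesPolicy and add one Override entry per requested instance type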
- if mixed_instances_policy:
- instance_types = mixed_instances_policy.get('instance_types', [])
- policy = {
- 'LaunchTemplate': {
- 'LaunchTemplateSpecification': launch_object['LaunchTemplate']
- }
- }
- if instance_types:
- policy['LaunchTemplate']['Overrides'] = []
- for instance_type in instance_types:
- instance_type_dict = {'InstanceType': instance_type}
- policy['LaunchTemplate']['Overrides'].append(instance_type_dict)
- launch_object['MixedInstancesPolicy'] = policy
- return launch_object
-
-
-def elb_dreg(asg_connection, group_name, instance_id):
- as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
- wait_timeout = module.params.get('wait_timeout')
- count = 1
- if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB':
- elb_connection = module.client('elb')
- else:
- return
-
- for lb in as_group['LoadBalancerNames']:
- deregister_lb_instances(elb_connection, lb, instance_id)
- module.debug("De-registering %s from ELB %s" % (instance_id, lb))
-
- wait_timeout = time.time() + wait_timeout
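-    # poll every 10 seconds until the instance is no longer InService on any attached ELB, or the timeout expires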
- while wait_timeout > time.time() and count > 0:
- count = 0
- for lb in as_group['LoadBalancerNames']:
- lb_instances = describe_instance_health(elb_connection, lb, [])
- for i in lb_instances['InstanceStates']:
- if i['InstanceId'] == instance_id and i['State'] == "InService":
- count += 1
- module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description']))
- time.sleep(10)
-
- if wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime()))
-
-
-def elb_healthy(asg_connection, elb_connection, group_name):
- healthy_instances = set()
- as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
- props = get_properties(as_group)
- # get healthy, inservice instances from ASG
- instances = []
- for instance, settings in props['instance_facts'].items():
- if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
- instances.append(dict(InstanceId=instance))
- module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
- module.debug("ELB instance status:")
- lb_instances = list()
- for lb in as_group.get('LoadBalancerNames'):
-        # we catch a race condition that sometimes happens if the instance exists in the ASG
-        # but has not yet shown up in the ELB
- try:
- lb_instances = describe_instance_health(elb_connection, lb, instances)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'InvalidInstance':
- return None
-
- module.fail_json(msg="Failed to get load balancer.",
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to get load balancer.",
- exception=traceback.format_exc())
-
- for i in lb_instances.get('InstanceStates'):
- if i['State'] == "InService":
- healthy_instances.add(i['InstanceId'])
- module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State']))
- return len(healthy_instances)
-
-
-def tg_healthy(asg_connection, elbv2_connection, group_name):
- healthy_instances = set()
- as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
- props = get_properties(as_group)
- # get healthy, inservice instances from ASG
- instances = []
- for instance, settings in props['instance_facts'].items():
- if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
- instances.append(dict(Id=instance))
- module.debug("ASG considers the following instances InService and Healthy: %s" % instances)
- module.debug("Target Group instance status:")
- tg_instances = list()
- for tg in as_group.get('TargetGroupARNs'):
-        # we catch a race condition that sometimes happens if the instance exists in the ASG
-        # but has not yet shown up in the target group
- try:
- tg_instances = describe_target_health(elbv2_connection, tg, instances)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'InvalidInstance':
- return None
-
- module.fail_json(msg="Failed to get target group.",
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to get target group.",
- exception=traceback.format_exc())
-
- for i in tg_instances.get('TargetHealthDescriptions'):
- if i['TargetHealth']['State'] == "healthy":
- healthy_instances.add(i['Target']['Id'])
- module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State']))
- return len(healthy_instances)
-
-
-def wait_for_elb(asg_connection, group_name):
- wait_timeout = module.params.get('wait_timeout')
-
-    # if the health_check_type is ELB, we want to query the ELBs directly for instance
-    # status, so as to avoid the health_check_grace_period that is awarded to ASG instances
- as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
-
- if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB':
- module.debug("Waiting for ELB to consider instances healthy.")
- elb_connection = module.client('elb')
-
- wait_timeout = time.time() + wait_timeout
- healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
-
-        while (healthy_instances is None or healthy_instances < as_group.get('MinSize')) and wait_timeout > time.time():
- healthy_instances = elb_healthy(asg_connection, elb_connection, group_name)
- module.debug("ELB thinks %s instances are healthy." % healthy_instances)
- time.sleep(10)
- if wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
- module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances)
-
-
-def wait_for_target_group(asg_connection, group_name):
- wait_timeout = module.params.get('wait_timeout')
-
-    # if the health_check_type is ELB, we want to query the target groups directly for instance
-    # status, so as to avoid the health_check_grace_period that is awarded to ASG instances
- as_group = describe_autoscaling_groups(asg_connection, group_name)[0]
-
- if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB':
- module.debug("Waiting for Target Group to consider instances healthy.")
- elbv2_connection = module.client('elbv2')
-
- wait_timeout = time.time() + wait_timeout
- healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
-
-        while (healthy_instances is None or healthy_instances < as_group.get('MinSize')) and wait_timeout > time.time():
- healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name)
- module.debug("Target Group thinks %s instances are healthy." % healthy_instances)
- time.sleep(10)
- if wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
- module.debug("Waiting complete. Target Group thinks %s instances are healthy." % healthy_instances)
-
-
-def suspend_processes(ec2_connection, as_group):
- suspend_processes = set(module.params.get('suspend_processes'))
-
- try:
- suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']])
-    except (AttributeError, KeyError):
- # New ASG being created, no suspended_processes defined yet
- suspended_processes = set()
-
- if suspend_processes == suspended_processes:
- return False
-
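-    # resume anything currently suspended that is no longer requested, then apply the requested suspensions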
- resume_processes = list(suspended_processes - suspend_processes)
- if resume_processes:
- resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes)
-
- if suspend_processes:
- suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes))
-
- return True
-
-
-def create_autoscaling_group(connection):
- group_name = module.params.get('name')
- load_balancers = module.params['load_balancers']
- target_group_arns = module.params['target_group_arns']
- availability_zones = module.params['availability_zones']
- launch_config_name = module.params.get('launch_config_name')
- launch_template = module.params.get('launch_template')
- mixed_instances_policy = module.params.get('mixed_instances_policy')
- min_size = module.params['min_size']
- max_size = module.params['max_size']
- max_instance_lifetime = module.params.get('max_instance_lifetime')
- placement_group = module.params.get('placement_group')
- desired_capacity = module.params.get('desired_capacity')
- vpc_zone_identifier = module.params.get('vpc_zone_identifier')
- set_tags = module.params.get('tags')
- health_check_period = module.params.get('health_check_period')
- health_check_type = module.params.get('health_check_type')
- default_cooldown = module.params.get('default_cooldown')
- wait_for_instances = module.params.get('wait_for_instances')
- wait_timeout = module.params.get('wait_timeout')
- termination_policies = module.params.get('termination_policies')
- notification_topic = module.params.get('notification_topic')
- notification_types = module.params.get('notification_types')
- metrics_collection = module.params.get('metrics_collection')
- metrics_granularity = module.params.get('metrics_granularity')
- metrics_list = module.params.get('metrics_list')
-
- try:
- as_groups = describe_autoscaling_groups(connection, group_name)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg="Failed to describe auto scaling groups.",
- exception=traceback.format_exc())
-
- ec2_connection = module.client('ec2')
-
- if vpc_zone_identifier:
- vpc_zone_identifier = ','.join(vpc_zone_identifier)
-
- asg_tags = []
- for tag in set_tags:
- for k, v in tag.items():
- if k != 'propagate_at_launch':
- asg_tags.append(dict(Key=k,
- Value=to_native(v),
- PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)),
- ResourceType='auto-scaling-group',
- ResourceId=group_name))
- if not as_groups:
- if not vpc_zone_identifier and not availability_zones:
- availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for
- zone in ec2_connection.describe_availability_zones()['AvailabilityZones']]
-
- enforce_required_arguments_for_create()
-
- if desired_capacity is None:
- desired_capacity = min_size
- ag = dict(
- AutoScalingGroupName=group_name,
- MinSize=min_size,
- MaxSize=max_size,
- DesiredCapacity=desired_capacity,
- Tags=asg_tags,
- HealthCheckGracePeriod=health_check_period,
- HealthCheckType=health_check_type,
- DefaultCooldown=default_cooldown,
- TerminationPolicies=termination_policies)
- if vpc_zone_identifier:
- ag['VPCZoneIdentifier'] = vpc_zone_identifier
- if availability_zones:
- ag['AvailabilityZones'] = availability_zones
- if placement_group:
- ag['PlacementGroup'] = placement_group
- if load_balancers:
- ag['LoadBalancerNames'] = load_balancers
- if target_group_arns:
- ag['TargetGroupARNs'] = target_group_arns
- if max_instance_lifetime:
- ag['MaxInstanceLifetime'] = max_instance_lifetime
-
- launch_object = get_launch_object(connection, ec2_connection)
- if 'LaunchConfigurationName' in launch_object:
- ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
- elif 'LaunchTemplate' in launch_object:
- if 'MixedInstancesPolicy' in launch_object:
- ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy']
- else:
- ag['LaunchTemplate'] = launch_object['LaunchTemplate']
- else:
- module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate",
- exception=traceback.format_exc())
-
- try:
- create_asg(connection, **ag)
- if metrics_collection:
- connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
-
- all_ag = describe_autoscaling_groups(connection, group_name)
- if len(all_ag) == 0:
- module.fail_json(msg="No auto scaling group found with the name %s" % group_name)
- as_group = all_ag[0]
- suspend_processes(connection, as_group)
- if wait_for_instances:
- wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
- if load_balancers:
- wait_for_elb(connection, group_name)
-                # Wait for target group health if target group(s) defined
- if target_group_arns:
- wait_for_target_group(connection, group_name)
- if notification_topic:
- put_notification_config(connection, group_name, notification_topic, notification_types)
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- asg_properties = get_properties(as_group)
- changed = True
- return changed, asg_properties
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to create Autoscaling Group.",
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to create Autoscaling Group.",
- exception=traceback.format_exc())
- else:
- as_group = as_groups[0]
- initial_asg_properties = get_properties(as_group)
- changed = False
-
- if suspend_processes(connection, as_group):
- changed = True
-
- # process tag changes
- if len(set_tags) > 0:
- have_tags = as_group.get('Tags')
- want_tags = asg_tags
- if have_tags:
- have_tags.sort(key=lambda x: x["Key"])
- if want_tags:
- want_tags.sort(key=lambda x: x["Key"])
- dead_tags = []
- have_tag_keyvals = [x['Key'] for x in have_tags]
- want_tag_keyvals = [x['Key'] for x in want_tags]
-
- for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals):
- changed = True
- dead_tags.append(dict(ResourceId=as_group['AutoScalingGroupName'],
- ResourceType='auto-scaling-group', Key=dead_tag))
- have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag]
- if dead_tags:
- connection.delete_tags(Tags=dead_tags)
-
- zipped = zip(have_tags, want_tags)
- if len(have_tags) != len(want_tags) or not all(x == y for x, y in zipped):
- changed = True
- connection.create_or_update_tags(Tags=asg_tags)
-
- # Handle load balancer attachments/detachments
- # Attach load balancers if they are specified but none currently exist
- if load_balancers and not as_group['LoadBalancerNames']:
- changed = True
- try:
- attach_load_balancers(connection, group_name, load_balancers)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to update Autoscaling Group.",
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to update Autoscaling Group.",
- exception=traceback.format_exc())
-
- # Update load balancers if they are specified and one or more already exists
- elif as_group['LoadBalancerNames']:
- change_load_balancers = load_balancers is not None
- # Get differences
- if not load_balancers:
- load_balancers = list()
- wanted_elbs = set(load_balancers)
-
- has_elbs = set(as_group['LoadBalancerNames'])
- # check if all requested are already existing
- if has_elbs - wanted_elbs and change_load_balancers:
- # if wanted contains less than existing, then we need to delete some
- elbs_to_detach = has_elbs.difference(wanted_elbs)
- if elbs_to_detach:
- changed = True
- try:
- detach_load_balancers(connection, group_name, list(elbs_to_detach))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg="Failed to detach load balancers %s: %s." % (elbs_to_detach, to_native(e)),
- exception=traceback.format_exc())
- if wanted_elbs - has_elbs:
- # if has contains less than wanted, then we need to add some
- elbs_to_attach = wanted_elbs.difference(has_elbs)
- if elbs_to_attach:
- changed = True
- try:
- attach_load_balancers(connection, group_name, list(elbs_to_attach))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg="Failed to attach load balancers %s: %s." % (elbs_to_attach, to_native(e)),
- exception=traceback.format_exc())
-
- # Handle target group attachments/detachments
- # Attach target groups if they are specified but none currently exist
- if target_group_arns and not as_group['TargetGroupARNs']:
- changed = True
- try:
- attach_lb_target_groups(connection, group_name, target_group_arns)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to update Autoscaling Group.",
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to update Autoscaling Group.",
- exception=traceback.format_exc())
- # Update target groups if they are specified and one or more already exists
- elif target_group_arns is not None and as_group['TargetGroupARNs']:
- # Get differences
- wanted_tgs = set(target_group_arns)
- has_tgs = set(as_group['TargetGroupARNs'])
- # check if all requested are already existing
- if has_tgs.issuperset(wanted_tgs):
- # if wanted contains less than existing, then we need to delete some
- tgs_to_detach = has_tgs.difference(wanted_tgs)
- if tgs_to_detach:
- changed = True
- try:
- detach_lb_target_groups(connection, group_name, list(tgs_to_detach))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg="Failed to detach load balancer target groups %s: %s" % (tgs_to_detach, to_native(e)),
- exception=traceback.format_exc())
- if wanted_tgs.issuperset(has_tgs):
- # if has contains less than wanted, then we need to add some
- tgs_to_attach = wanted_tgs.difference(has_tgs)
- if tgs_to_attach:
- changed = True
- try:
- attach_lb_target_groups(connection, group_name, list(tgs_to_attach))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg="Failed to attach load balancer target groups %s: %s" % (tgs_to_attach, to_native(e)),
- exception=traceback.format_exc())
-
- # check for attributes that aren't required for updating an existing ASG
- # check if min_size/max_size/desired capacity have been specified and if not use ASG values
- if min_size is None:
- min_size = as_group['MinSize']
- if max_size is None:
- max_size = as_group['MaxSize']
- if desired_capacity is None:
- desired_capacity = as_group['DesiredCapacity']
- ag = dict(
- AutoScalingGroupName=group_name,
- MinSize=min_size,
- MaxSize=max_size,
- DesiredCapacity=desired_capacity,
- HealthCheckGracePeriod=health_check_period,
- HealthCheckType=health_check_type,
- DefaultCooldown=default_cooldown,
- TerminationPolicies=termination_policies)
-
- # Get the launch object (config or template) if one is provided in args or use the existing one attached to ASG if not.
- launch_object = get_launch_object(connection, ec2_connection)
- if 'LaunchConfigurationName' in launch_object:
- ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName']
- elif 'LaunchTemplate' in launch_object:
- if 'MixedInstancesPolicy' in launch_object:
- ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy']
- else:
- ag['LaunchTemplate'] = launch_object['LaunchTemplate']
- else:
- try:
- ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName']
-            except KeyError:
- launch_template = as_group['LaunchTemplate']
- # Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg.
- ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']}
-
- if availability_zones:
- ag['AvailabilityZones'] = availability_zones
- if vpc_zone_identifier:
- ag['VPCZoneIdentifier'] = vpc_zone_identifier
- if max_instance_lifetime is not None:
- ag['MaxInstanceLifetime'] = max_instance_lifetime
-
- try:
- update_asg(connection, **ag)
-
- if metrics_collection:
- connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list)
- else:
- connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list)
-
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg="Failed to update autoscaling group: %s" % to_native(e),
- exception=traceback.format_exc())
- if notification_topic:
- try:
- put_notification_config(connection, group_name, notification_topic, notification_types)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to update Autoscaling Group notifications.",
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to update Autoscaling Group notifications.",
- exception=traceback.format_exc())
- if wait_for_instances:
- wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
-            # Wait for ELB health if ELB(s) defined
- if load_balancers:
- module.debug('\tWAITING FOR ELB HEALTH')
- wait_for_elb(connection, group_name)
-            # Wait for target group health if target group(s) defined
-
- if target_group_arns:
- module.debug('\tWAITING FOR TG HEALTH')
- wait_for_target_group(connection, group_name)
-
- try:
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- asg_properties = get_properties(as_group)
- if asg_properties != initial_asg_properties:
- changed = True
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to read existing Autoscaling Groups.",
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json(msg="Failed to read existing Autoscaling Groups.",
- exception=traceback.format_exc())
- return changed, asg_properties
-
-
-def delete_autoscaling_group(connection):
- group_name = module.params.get('name')
- notification_topic = module.params.get('notification_topic')
- wait_for_instances = module.params.get('wait_for_instances')
- wait_timeout = module.params.get('wait_timeout')
-
- if notification_topic:
- del_notification_config(connection, group_name, notification_topic)
- groups = describe_autoscaling_groups(connection, group_name)
- if groups:
- wait_timeout = time.time() + wait_timeout
- if not wait_for_instances:
- delete_asg(connection, group_name, force_delete=True)
- else:
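-            # drain the group first: scale min/max/desired to zero and wait for the instances to terminate before deleting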
- updated_params = dict(AutoScalingGroupName=group_name, MinSize=0, MaxSize=0, DesiredCapacity=0)
- update_asg(connection, **updated_params)
- instances = True
- while instances and wait_for_instances and wait_timeout >= time.time():
- tmp_groups = describe_autoscaling_groups(connection, group_name)
- if tmp_groups:
- tmp_group = tmp_groups[0]
- if not tmp_group.get('Instances'):
- instances = False
- time.sleep(10)
-
- if wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
-
- delete_asg(connection, group_name, force_delete=False)
- while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time():
- time.sleep(5)
- if wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime())
- return True
-
- return False
-
-
-def get_chunks(l, n):
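-    # yield successive n-sized chunks from list l, e.g. get_chunks([1, 2, 3, 4, 5], 2) -> [1, 2], [3, 4], [5]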
- for i in range(0, len(l), n):
- yield l[i:i + n]
-
-
-def update_size(connection, group, max_size, min_size, dc):
- module.debug("setting ASG sizes")
- module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size))
- updated_group = dict()
- updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName']
- updated_group['MinSize'] = min_size
- updated_group['MaxSize'] = max_size
- updated_group['DesiredCapacity'] = dc
- update_asg(connection, **updated_group)
-
-
-def replace(connection):
- batch_size = module.params.get('replace_batch_size')
- wait_timeout = module.params.get('wait_timeout')
- wait_for_instances = module.params.get('wait_for_instances')
- group_name = module.params.get('name')
- max_size = module.params.get('max_size')
- min_size = module.params.get('min_size')
- desired_capacity = module.params.get('desired_capacity')
- launch_config_name = module.params.get('launch_config_name')
-    # lc_check defaults to 'true' in the argument spec, but it only applies when a launch configuration is in use
- if launch_config_name:
- lc_check = module.params.get('lc_check')
- else:
- lc_check = False
- # Mirror above behavior for Launch Templates
- launch_template = module.params.get('launch_template')
- if launch_template:
- lt_check = module.params.get('lt_check')
- else:
- lt_check = False
- replace_instances = module.params.get('replace_instances')
- replace_all_instances = module.params.get('replace_all_instances')
-
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- if desired_capacity is None:
- desired_capacity = as_group['DesiredCapacity']
-
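-    # make sure the ASG is at a stable, viable size before beginning the rolling replace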
- if wait_for_instances:
- wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances')
-
- props = get_properties(as_group)
- instances = props['instances']
- if replace_all_instances:
-        # If replacing all instances, then set replace_instances to current set
-        # This allows replace_instances and replace_all_instances to behave the same
- replace_instances = instances
- if replace_instances:
- instances = replace_instances
-
- # check to see if instances are replaceable if checking launch configs
- if launch_config_name:
- new_instances, old_instances = get_instances_by_launch_config(props, lc_check, instances)
- elif launch_template:
- new_instances, old_instances = get_instances_by_launch_template(props, lt_check, instances)
-
- num_new_inst_needed = desired_capacity - len(new_instances)
-
- if lc_check or lt_check:
- if num_new_inst_needed == 0 and old_instances:
- module.debug("No new instances needed, but old instances are present. Removing old instances")
- terminate_batch(connection, old_instances, instances, True)
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- props = get_properties(as_group)
- changed = True
- return changed, props
-
- # we don't want to spin up extra instances if not necessary
- if num_new_inst_needed < batch_size:
- module.debug("Overriding batch size to %s" % num_new_inst_needed)
- batch_size = num_new_inst_needed
-
- if not old_instances:
- changed = False
- return changed, props
-
- # check if min_size/max_size/desired capacity have been specified and if not use ASG values
- if min_size is None:
- min_size = as_group['MinSize']
- if max_size is None:
- max_size = as_group['MaxSize']
-
- # set temporary settings and wait for them to be reached
- # This should get overwritten if the number of instances left is less than the batch size.
-
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
-
- if wait_for_instances:
- wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances')
- wait_for_elb(connection, group_name)
- wait_for_target_group(connection, group_name)
-
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- props = get_properties(as_group)
- instances = props['instances']
- if replace_instances:
- instances = replace_instances
-
- module.debug("beginning main loop")
- for i in get_chunks(instances, batch_size):
- # break out of this loop if we have enough new instances
- break_early, desired_size, term_instances = terminate_batch(connection, i, instances, False)
-
- if wait_for_instances:
- wait_for_term_inst(connection, term_instances)
- wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances')
- wait_for_elb(connection, group_name)
- wait_for_target_group(connection, group_name)
-
- if break_early:
- module.debug("breaking loop")
- break
-
- update_size(connection, as_group, max_size, min_size, desired_capacity)
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- asg_properties = get_properties(as_group)
- module.debug("Rolling update complete.")
- changed = True
- return changed, asg_properties
-
-
-def get_instances_by_launch_config(props, lc_check, initial_instances):
- new_instances = []
- old_instances = []
- # old instances are those that have the old launch config
- if lc_check:
- for i in props['instances']:
- # Check if migrating from launch_template to launch_config first
- if 'launch_template' in props['instance_facts'][i]:
- old_instances.append(i)
- elif props['instance_facts'][i].get('launch_config_name') == props['launch_config_name']:
- new_instances.append(i)
- else:
- old_instances.append(i)
-
- else:
- module.debug("Comparing initial instances with current: %s" % initial_instances)
- for i in props['instances']:
- if i not in initial_instances:
- new_instances.append(i)
- else:
- old_instances.append(i)
-
- module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
- module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
-
- return new_instances, old_instances
-
-
-def get_instances_by_launch_template(props, lt_check, initial_instances):
- new_instances = []
- old_instances = []
-    # old instances are those that have the old launch template or an old version of the same launch template
- if lt_check:
- for i in props['instances']:
- # Check if migrating from launch_config_name to launch_template_name first
- if 'launch_config_name' in props['instance_facts'][i]:
- old_instances.append(i)
- elif props['instance_facts'][i].get('launch_template') == props['launch_template']:
- new_instances.append(i)
- else:
- old_instances.append(i)
- else:
- module.debug("Comparing initial instances with current: %s" % initial_instances)
- for i in props['instances']:
- if i not in initial_instances:
- new_instances.append(i)
- else:
- old_instances.append(i)
-
- module.debug("New instances: %s, %s" % (len(new_instances), new_instances))
- module.debug("Old instances: %s, %s" % (len(old_instances), old_instances))
-
- return new_instances, old_instances
-
-
-def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances):
- instances_to_terminate = []
- instances = (inst_id for inst_id in replace_instances if inst_id in props['instances'])
- # check to make sure instances given are actually in the given ASG
- # and they have a non-current launch config
- if 'launch_config_name' in module.params:
- if lc_check:
- for i in instances:
- if (
- 'launch_template' in props['instance_facts'][i]
- or props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']
- ):
- instances_to_terminate.append(i)
- else:
- for i in instances:
- if i in initial_instances:
- instances_to_terminate.append(i)
- elif 'launch_template' in module.params:
- if lt_check:
- for i in instances:
- if (
- 'launch_config_name' in props['instance_facts'][i]
- or props['instance_facts'][i]['launch_template'] != props['launch_template']
- ):
- instances_to_terminate.append(i)
- else:
- for i in instances:
- if i in initial_instances:
- instances_to_terminate.append(i)
-
- return instances_to_terminate
-
-
-def terminate_batch(connection, replace_instances, initial_instances, leftovers=False):
- batch_size = module.params.get('replace_batch_size')
- min_size = module.params.get('min_size')
- desired_capacity = module.params.get('desired_capacity')
- group_name = module.params.get('name')
- lc_check = module.params.get('lc_check')
- lt_check = module.params.get('lt_check')
- decrement_capacity = False
- break_loop = False
-
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- if desired_capacity is None:
- desired_capacity = as_group['DesiredCapacity']
-
- props = get_properties(as_group)
- desired_size = as_group['MinSize']
- if module.params.get('launch_config_name'):
- new_instances, old_instances = get_instances_by_launch_config(props, lc_check, initial_instances)
- else:
- new_instances, old_instances = get_instances_by_launch_template(props, lt_check, initial_instances)
- num_new_inst_needed = desired_capacity - len(new_instances)
-
- # check to make sure instances given are actually in the given ASG
- # and they have a non-current launch config
- instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances)
-
- module.debug("new instances needed: %s" % num_new_inst_needed)
- module.debug("new instances: %s" % new_instances)
- module.debug("old instances: %s" % old_instances)
- module.debug("batch instances: %s" % ",".join(instances_to_terminate))
-
- if num_new_inst_needed == 0:
- decrement_capacity = True
- if as_group['MinSize'] != min_size:
- if min_size is None:
- min_size = as_group['MinSize']
- updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
- update_asg(connection, **updated_params)
- module.debug("Updating minimum size back to original of %s" % min_size)
-        # if there are some leftover old instances, but we are already at capacity with new ones,
-        # we don't want to decrement capacity
- if leftovers:
- decrement_capacity = False
- break_loop = True
- instances_to_terminate = old_instances
- desired_size = min_size
- module.debug("No new instances needed")
-
- if num_new_inst_needed < batch_size and num_new_inst_needed != 0:
- instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
- decrement_capacity = False
- break_loop = False
- module.debug("%s new instances needed" % num_new_inst_needed)
-
- module.debug("decrementing capacity: %s" % decrement_capacity)
-
- for instance_id in instances_to_terminate:
- elb_dreg(connection, group_name, instance_id)
- module.debug("terminating instance: %s" % instance_id)
- terminate_asg_instance(connection, instance_id, decrement_capacity)
-
-    # the caller then waits (see wait_for_term_inst) to make sure the terminated
-    # machines are no longer in the list
-
- return break_loop, desired_size, instances_to_terminate
-
-
-def wait_for_term_inst(connection, term_instances):
- wait_timeout = module.params.get('wait_timeout')
- group_name = module.params.get('name')
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- count = 1
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time() and count > 0:
- module.debug("waiting for instances to terminate")
- count = 0
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- props = get_properties(as_group)
- instance_facts = props['instance_facts']
- instances = (i for i in instance_facts if i in term_instances)
- for i in instances:
- lifecycle = instance_facts[i]['lifecycle_state']
- health = instance_facts[i]['health_status']
- module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health))
- if lifecycle.startswith('Terminating') or health == 'Unhealthy':
- count += 1
- time.sleep(10)
-
- if wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime())
-
-
-def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop):
- # make sure we have the latest stats after that last loop.
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- props = get_properties(as_group)
- module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
- # now we make sure that we have enough instances in a viable state
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time() and desired_size > props[prop]:
- module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop]))
- time.sleep(10)
- as_group = describe_autoscaling_groups(connection, group_name)[0]
- props = get_properties(as_group)
- if wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
- module.debug("Reached %s: %s" % (prop, desired_size))
- return props
-
-
-def asg_exists(connection):
- group_name = module.params.get('name')
- as_group = describe_autoscaling_groups(connection, group_name)
- return bool(len(as_group))
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True, type='str'),
- load_balancers=dict(type='list'),
- target_group_arns=dict(type='list'),
- availability_zones=dict(type='list'),
- launch_config_name=dict(type='str'),
- launch_template=dict(
- type='dict',
- default=None,
- options=dict(
- version=dict(type='str'),
- launch_template_name=dict(type='str'),
- launch_template_id=dict(type='str'),
- )
- ),
- min_size=dict(type='int'),
- max_size=dict(type='int'),
- max_instance_lifetime=dict(type='int'),
- mixed_instances_policy=dict(
- type='dict',
- default=None,
- options=dict(
- instance_types=dict(
- type='list',
- elements='str'
- ),
- )
- ),
- placement_group=dict(type='str'),
- desired_capacity=dict(type='int'),
- vpc_zone_identifier=dict(type='list'),
- replace_batch_size=dict(type='int', default=1),
- replace_all_instances=dict(type='bool', default=False),
- replace_instances=dict(type='list', default=[]),
- lc_check=dict(type='bool', default=True),
- lt_check=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=300),
- state=dict(default='present', choices=['present', 'absent']),
- tags=dict(type='list', default=[]),
- health_check_period=dict(type='int', default=300),
- health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
- default_cooldown=dict(type='int', default=300),
- wait_for_instances=dict(type='bool', default=True),
- termination_policies=dict(type='list', default='Default'),
- notification_topic=dict(type='str', default=None),
- notification_types=dict(
- type='list',
- default=[
- 'autoscaling:EC2_INSTANCE_LAUNCH',
- 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
- 'autoscaling:EC2_INSTANCE_TERMINATE',
- 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
- ]
- ),
- suspend_processes=dict(type='list', default=[]),
- metrics_collection=dict(type='bool', default=False),
- metrics_granularity=dict(type='str', default='1Minute'),
- metrics_list=dict(
- type='list',
- default=[
- 'GroupMinSize',
- 'GroupMaxSize',
- 'GroupDesiredCapacity',
- 'GroupInServiceInstances',
- 'GroupPendingInstances',
- 'GroupStandbyInstances',
- 'GroupTerminatingInstances',
- 'GroupTotalInstances'
- ]
- )
- )
-
- global module
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- mutually_exclusive=[
- ['replace_all_instances', 'replace_instances'],
- ['launch_config_name', 'launch_template']
- ]
- )
-
- if (
- module.params.get('max_instance_lifetime') is not None
- and not module.botocore_at_least('1.13.21')
- ):
- module.fail_json(
- msg='Botocore needs to be version 1.13.21 or higher to use max_instance_lifetime.'
- )
-
- if (
- module.params.get('mixed_instances_policy') is not None
- and not module.botocore_at_least('1.12.45')
- ):
- module.fail_json(
- msg='Botocore needs to be version 1.12.45 or higher to use mixed_instances_policy.'
- )
-
- state = module.params.get('state')
- replace_instances = module.params.get('replace_instances')
- replace_all_instances = module.params.get('replace_all_instances')
-
- connection = module.client('autoscaling')
- changed = create_changed = replace_changed = False
- exists = asg_exists(connection)
-
- if state == 'present':
- create_changed, asg_properties = create_autoscaling_group(connection)
- elif state == 'absent':
- changed = delete_autoscaling_group(connection)
- module.exit_json(changed=changed)
-
- # Only replace instances if asg existed at start of call
- if (
- exists
- and (replace_all_instances or replace_instances)
- and (module.params.get('launch_config_name') or module.params.get('launch_template'))
- ):
- replace_changed, asg_properties = replace(connection)
-
- if create_changed or replace_changed:
- changed = True
-
- module.exit_json(changed=changed, **asg_properties)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_asg_info.py b/lib/ansible/modules/cloud/amazon/ec2_asg_info.py
deleted file mode 100644
index 283f68b0d3..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_asg_info.py
+++ /dev/null
@@ -1,414 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_asg_info
-short_description: Gather information about ec2 Auto Scaling Groups (ASGs) in AWS
-description:
- - Gather information about ec2 Auto Scaling Groups (ASGs) in AWS
- - This module was called C(ec2_asg_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.2"
-requirements: [ boto3 ]
-author: "Rob White (@wimnat)"
-options:
- name:
- description:
- - The prefix or name of the auto scaling group(s) you are searching for.
- - "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
- type: str
- required: false
- tags:
- description:
- - >
- A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling
- group(s) you are searching for.
- required: false
- type: dict
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Find all groups
-- ec2_asg_info:
- register: asgs
-
-# Find a group with matching name/prefix
-- ec2_asg_info:
- name: public-webserver-asg
- register: asgs
-
-# Find a group with matching tags
-- ec2_asg_info:
- tags:
- project: webapp
- env: production
- register: asgs
-
-# Find a group with matching name/prefix and tags
-- ec2_asg_info:
- name: myproject
- tags:
- env: production
- register: asgs
-
-# Fail if no groups are found
-- ec2_asg_info:
- name: public-webserver-asg
- register: asgs
- failed_when: "{{ asgs.results | length == 0 }}"
-
-# Fail if more than 1 group is found
-- ec2_asg_info:
- name: public-webserver-asg
- register: asgs
- failed_when: "{{ asgs.results | length > 1 }}"
-'''
-
-RETURN = '''
----
-auto_scaling_group_arn:
- description: The Amazon Resource Name of the ASG
- returned: success
- type: str
- sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
-auto_scaling_group_name:
- description: Name of autoscaling group
- returned: success
- type: str
- sample: "public-webapp-production-1"
-availability_zones:
- description: List of Availability Zones that are enabled for this ASG.
- returned: success
- type: list
- sample: ["us-west-2a", "us-west-2b", "us-west-2a"]
-created_time:
- description: The date and time this ASG was created, in ISO 8601 format.
- returned: success
- type: str
- sample: "2015-11-25T00:05:36.309Z"
-default_cooldown:
- description: The default cooldown time in seconds.
- returned: success
- type: int
- sample: 300
-desired_capacity:
- description: The number of EC2 instances that should be running in this group.
- returned: success
- type: int
- sample: 3
-health_check_period:
- description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
- returned: success
- type: int
- sample: 30
-health_check_type:
- description: The service you want the health status from, one of "EC2" or "ELB".
- returned: success
- type: str
- sample: "ELB"
-instances:
- description: List of EC2 instances and their status as it relates to the ASG.
- returned: success
- type: list
- sample: [
- {
- "availability_zone": "us-west-2a",
- "health_status": "Healthy",
- "instance_id": "i-es22ad25",
- "launch_configuration_name": "public-webapp-production-1",
- "lifecycle_state": "InService",
- "protected_from_scale_in": "false"
- }
- ]
-launch_config_name:
- description: >
- Name of launch configuration associated with the ASG. Same as launch_configuration_name,
- provided for compatibility with ec2_asg module.
- returned: success
- type: str
- sample: "public-webapp-production-1"
-launch_configuration_name:
- description: Name of launch configuration associated with the ASG.
- returned: success
- type: str
- sample: "public-webapp-production-1"
-load_balancer_names:
-  description: List of load balancer names attached to the ASG.
- returned: success
- type: list
- sample: ["elb-webapp-prod"]
-max_size:
- description: Maximum size of group
- returned: success
- type: int
- sample: 3
-min_size:
- description: Minimum size of group
- returned: success
- type: int
- sample: 1
-new_instances_protected_from_scale_in:
-  description: Whether or not new instances are protected from automatic scale-in.
- returned: success
- type: bool
- sample: "false"
-placement_group:
- description: Placement group into which instances are launched, if any.
- returned: success
- type: str
- sample: None
-status:
- description: The current state of the group when DeleteAutoScalingGroup is in progress.
- returned: success
- type: str
- sample: None
-tags:
- description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
- returned: success
- type: list
- sample: [
- {
- "key": "Name",
- "value": "public-webapp-production-1",
- "resource_id": "public-webapp-production-1",
- "resource_type": "auto-scaling-group",
- "propagate_at_launch": "true"
- },
- {
- "key": "env",
- "value": "production",
- "resource_id": "public-webapp-production-1",
- "resource_type": "auto-scaling-group",
- "propagate_at_launch": "true"
- }
- ]
-target_group_arns:
- description: List of ARNs of the target groups that the ASG populates
- returned: success
- type: list
- sample: [
- "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
- "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
- ]
-target_group_names:
- description: List of names of the target groups that the ASG populates
- returned: success
- type: list
- sample: [
- "target-group-host-hello",
- "target-group-path-world"
- ]
-termination_policies:
- description: A list of termination policies for the group.
- returned: success
-  type: list
- sample: ["Default"]
-'''
-
-import re
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-
-def match_asg_tags(tags_to_match, asg):
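-    # every requested key/value pair must be present on the ASG; the for/else
-    # returns False as soon as one requested tag has no matching ASG tag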
- for key, value in tags_to_match.items():
- for tag in asg['Tags']:
- if key == tag['Key'] and value == tag['Value']:
- break
- else:
- return False
- return True
-
-
-def find_asgs(conn, module, name=None, tags=None):
- """
- Args:
-        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
-        module (AnsibleAWSModule): Initialized Ansible module, used for error reporting.
-        name (str): Optional name of the ASG you are looking for.
-        tags (dict): Optional dictionary of tags and values to search for.
-
- Basic Usage:
- >>> name = 'public-webapp-production'
- >>> tags = { 'env': 'production' }
- >>> conn = boto3.client('autoscaling', region_name='us-west-2')
-        >>> results = find_asgs(conn, module, name=name, tags=tags)
-
- Returns:
- List
- [
- {
- "auto_scaling_group_arn": (
- "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
- "autoScalingGroupName/public-webapp-production"
- ),
- "auto_scaling_group_name": "public-webapp-production",
- "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
- "created_time": "2016-02-02T23:28:42.481000+00:00",
- "default_cooldown": 300,
- "desired_capacity": 2,
- "enabled_metrics": [],
- "health_check_grace_period": 300,
- "health_check_type": "ELB",
- "instances":
- [
- {
- "availability_zone": "us-west-2c",
- "health_status": "Healthy",
- "instance_id": "i-047a12cb",
- "launch_configuration_name": "public-webapp-production-1",
- "lifecycle_state": "InService",
- "protected_from_scale_in": false
- },
- {
- "availability_zone": "us-west-2a",
- "health_status": "Healthy",
- "instance_id": "i-7a29df2c",
- "launch_configuration_name": "public-webapp-production-1",
- "lifecycle_state": "InService",
- "protected_from_scale_in": false
- }
- ],
- "launch_config_name": "public-webapp-production-1",
- "launch_configuration_name": "public-webapp-production-1",
- "load_balancer_names": ["public-webapp-production-lb"],
- "max_size": 4,
- "min_size": 2,
- "new_instances_protected_from_scale_in": false,
- "placement_group": None,
- "status": None,
- "suspended_processes": [],
- "tags":
- [
- {
- "key": "Name",
- "propagate_at_launch": true,
- "resource_id": "public-webapp-production",
- "resource_type": "auto-scaling-group",
- "value": "public-webapp-production"
- },
- {
- "key": "env",
- "propagate_at_launch": true,
- "resource_id": "public-webapp-production",
- "resource_type": "auto-scaling-group",
- "value": "production"
- }
- ],
- "target_group_names": [],
- "target_group_arns": [],
- "termination_policies":
- [
- "Default"
- ],
- "vpc_zone_identifier":
- [
- "subnet-a1b1c1d1",
- "subnet-a2b2c2d2",
- "subnet-a3b3c3d3"
- ]
- }
- ]
- """
-
- try:
- asgs_paginator = conn.get_paginator('describe_auto_scaling_groups')
- asgs = asgs_paginator.paginate().build_full_result()
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to describe AutoScalingGroups')
-
- if not asgs:
- return asgs
-
- try:
- elbv2 = module.client('elbv2')
- except ClientError as e:
- # This is nice to have, not essential
- elbv2 = None
- matched_asgs = []
-
- if name is not None:
-        # a name (prefix) was specified; compile it as a regex anchored to the start of the string
- name_prog = re.compile(r'^' + name)
-
- for asg in asgs['AutoScalingGroups']:
- if name:
- matched_name = name_prog.search(asg['AutoScalingGroupName'])
- else:
- matched_name = True
-
- if tags:
- matched_tags = match_asg_tags(tags, asg)
- else:
- matched_tags = True
-
- if matched_name and matched_tags:
- asg = camel_dict_to_snake_dict(asg)
- # compatibility with ec2_asg module
- if 'launch_configuration_name' in asg:
- asg['launch_config_name'] = asg['launch_configuration_name']
- # workaround for https://github.com/ansible/ansible/pull/25015
- if 'target_group_ar_ns' in asg:
- asg['target_group_arns'] = asg['target_group_ar_ns']
-                del asg['target_group_ar_ns']
- if asg.get('target_group_arns'):
- if elbv2:
- try:
- tg_paginator = elbv2.get_paginator('describe_target_groups')
- tg_result = tg_paginator.paginate(TargetGroupArns=asg['target_group_arns']).build_full_result()
- asg['target_group_names'] = [tg['TargetGroupName'] for tg in tg_result['TargetGroups']]
- except ClientError as e:
- if e.response['Error']['Code'] == 'TargetGroupNotFound':
- asg['target_group_names'] = []
- else:
- module.fail_json_aws(e, msg="Failed to describe Target Groups")
- except BotoCoreError as e:
- module.fail_json_aws(e, msg="Failed to describe Target Groups")
- else:
- asg['target_group_names'] = []
- matched_asgs.append(asg)
-
- return matched_asgs
-
-
-def main():
-
- argument_spec = dict(
- name=dict(type='str'),
- tags=dict(type='dict'),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec)
- if module._name == 'ec2_asg_facts':
- module.deprecate("The 'ec2_asg_facts' module has been renamed to 'ec2_asg_info'", version='2.13')
-
- asg_name = module.params.get('name')
- asg_tags = module.params.get('tags')
-
- autoscaling = module.client('autoscaling')
-
- results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags)
- module.exit_json(results=results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_asg_lifecycle_hook.py b/lib/ansible/modules/cloud/amazon/ec2_asg_lifecycle_hook.py
deleted file mode 100644
index 2d99a56392..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_asg_lifecycle_hook.py
+++ /dev/null
@@ -1,253 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'status': ['preview'],
- 'supported_by': 'community',
- 'metadata_version': '1.1'}
-
-DOCUMENTATION = """
----
-module: ec2_asg_lifecycle_hook
-short_description: Create, delete or update AWS ASG Lifecycle Hooks.
-description:
- - Will create a new hook when I(state=present) and no given Hook is found.
- - Will update an existing hook when I(state=present) and a Hook is found, but current and provided parameters differ.
- - Will delete the hook when I(state=absent) and a Hook is found.
-version_added: "2.5"
-author: Igor 'Tsigankov' Eyrich (@tsiganenok) <tsiganenok@gmail.com>
-options:
- state:
- description:
- - Create or delete Lifecycle Hook.
- - When I(state=present) updates existing hook or creates a new hook if not found.
- choices: ['present', 'absent']
- default: present
- type: str
- lifecycle_hook_name:
- description:
- - The name of the lifecycle hook.
- required: true
- type: str
- autoscaling_group_name:
- description:
- - The name of the Auto Scaling group to which you want to assign the lifecycle hook.
- required: true
- type: str
- transition:
- description:
- - The instance state to which you want to attach the lifecycle hook.
- - Required when I(state=present).
- choices: ['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING']
- type: str
- role_arn:
- description:
- - The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.
- type: str
- notification_target_arn:
- description:
- - The ARN of the notification target that Auto Scaling will use to notify you when an
- instance is in the transition state for the lifecycle hook.
- - This target can be either an SQS queue or an SNS topic.
- - If you specify an empty string, this overrides the current ARN.
- type: str
- notification_meta_data:
- description:
- - Contains additional information that you want to include any time Auto Scaling sends a message to the notification target.
- type: str
- heartbeat_timeout:
- description:
- - The amount of time, in seconds, that can elapse before the lifecycle hook times out.
- When the lifecycle hook times out, Auto Scaling performs the default action.
- You can prevent the lifecycle hook from timing out by calling RecordLifecycleActionHeartbeat.
- - By default Amazon AWS will use 3600 seconds (1 hour).
- type: int
- default_result:
- description:
- - Defines the action the Auto Scaling group should take when the lifecycle hook timeout
- elapses or if an unexpected failure occurs.
- choices: ['ABANDON', 'CONTINUE']
- default: ABANDON
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-requirements: [ boto3>=1.4.4 ]
-
-"""
-
-EXAMPLES = '''
-# Create / Update lifecycle hook
-- ec2_asg_lifecycle_hook:
- region: eu-central-1
- state: present
- autoscaling_group_name: example
- lifecycle_hook_name: example
- transition: autoscaling:EC2_INSTANCE_LAUNCHING
- heartbeat_timeout: 7000
- default_result: ABANDON
-
-# Delete lifecycle hook
-- ec2_asg_lifecycle_hook:
- region: eu-central-1
- state: absent
- autoscaling_group_name: example
- lifecycle_hook_name: example
-
-'''
-
-RETURN = '''
-
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-
-try:
- import botocore
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-
-def create_lifecycle_hook(connection, module):
- changed = False
-
- lch_name = module.params.get('lifecycle_hook_name')
- asg_name = module.params.get('autoscaling_group_name')
- transition = module.params.get('transition')
- role_arn = module.params.get('role_arn')
- notification_target_arn = module.params.get('notification_target_arn')
- notification_meta_data = module.params.get('notification_meta_data')
- heartbeat_timeout = module.params.get('heartbeat_timeout')
- default_result = module.params.get('default_result')
-
- lch_params = {
- 'LifecycleHookName': lch_name,
- 'AutoScalingGroupName': asg_name,
- 'LifecycleTransition': transition
- }
-
- if role_arn:
- lch_params['RoleARN'] = role_arn
-
- if notification_target_arn:
- lch_params['NotificationTargetARN'] = notification_target_arn
-
- if notification_meta_data:
- lch_params['NotificationMetadata'] = notification_meta_data
-
- if heartbeat_timeout:
- lch_params['HeartbeatTimeout'] = heartbeat_timeout
-
- if default_result:
- lch_params['DefaultResult'] = default_result
-
- try:
- existing_hook = connection.describe_lifecycle_hooks(
- AutoScalingGroupName=asg_name,
- LifecycleHookNames=[lch_name]
- )['LifecycleHooks']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to get Lifecycle Hook")
-
- if not existing_hook:
- changed = True
- else:
- # GlobalTimeout is not configurable, but exists in response.
- # Removing it helps to compare both dicts in order to understand
- # what changes were done.
- del(existing_hook[0]['GlobalTimeout'])
- added, removed, modified, same = dict_compare(lch_params, existing_hook[0])
- if added or removed or modified:
- changed = True
-
- if changed:
- try:
- connection.put_lifecycle_hook(**lch_params)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to create LifecycleHook")
-
- return changed
-
-
-def dict_compare(d1, d2):
- d1_keys = set(d1.keys())
- d2_keys = set(d2.keys())
- intersect_keys = d1_keys.intersection(d2_keys)
- added = d1_keys - d2_keys
- removed = d2_keys - d1_keys
- modified = False
- # only compare keys present in both dicts; keys unique to one side
- # are already reported through 'added' and 'removed'
- for key in intersect_keys:
- if d1[key] != d2[key]:
- modified = True
- break
-
- same = set(o for o in intersect_keys if d1[o] == d2[o])
- return added, removed, modified, same
-
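-# Illustrative comparison (hypothetical values):
-#   added, removed, modified, same = dict_compare(
-#       {'LifecycleHookName': 'hook', 'HeartbeatTimeout': 7000},
-#       {'LifecycleHookName': 'hook', 'HeartbeatTimeout': 3600})
-# yields added == set(), removed == set(), modified is True and
-# same == {'LifecycleHookName'}, which create_lifecycle_hook() reads as
-# "the hook exists but must be updated".
-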
-
-def delete_lifecycle_hook(connection, module):
- changed = False
-
- lch_name = module.params.get('lifecycle_hook_name')
- asg_name = module.params.get('autoscaling_group_name')
-
- try:
- all_hooks = connection.describe_lifecycle_hooks(
- AutoScalingGroupName=asg_name
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to get Lifecycle Hooks")
-
- for hook in all_hooks['LifecycleHooks']:
- if hook['LifecycleHookName'] == lch_name:
- lch_params = {
- 'LifecycleHookName': lch_name,
- 'AutoScalingGroupName': asg_name
- }
-
- try:
- connection.delete_lifecycle_hook(**lch_params)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to delete LifecycleHook")
-
- return changed
-
-
-def main():
- argument_spec = dict(
- autoscaling_group_name=dict(required=True, type='str'),
- lifecycle_hook_name=dict(required=True, type='str'),
- transition=dict(type='str', choices=['autoscaling:EC2_INSTANCE_TERMINATING', 'autoscaling:EC2_INSTANCE_LAUNCHING']),
- role_arn=dict(type='str'),
- notification_target_arn=dict(type='str'),
- notification_meta_data=dict(type='str'),
- heartbeat_timeout=dict(type='int'),
- default_result=dict(default='ABANDON', choices=['ABANDON', 'CONTINUE']),
- state=dict(default='present', choices=['present', 'absent'])
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[['state', 'present', ['transition']]])
- state = module.params.get('state')
-
- connection = module.client('autoscaling')
-
- changed = False
-
- if state == 'present':
- changed = create_lifecycle_hook(connection, module)
- elif state == 'absent':
- changed = delete_lifecycle_hook(connection, module)
-
- module.exit_json(changed=changed)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py b/lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py
deleted file mode 100644
index db62b76c2a..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py
+++ /dev/null
@@ -1,276 +0,0 @@
-#!/usr/bin/python
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_customer_gateway
-short_description: Manage an AWS customer gateway
-description:
- - Manage an AWS customer gateway.
-version_added: "2.2"
-author: Michael Baydoun (@MichaelBaydoun)
-requirements: [ botocore, boto3 ]
-notes:
- - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than once, the
- first request creates the customer gateway and subsequent requests return information about the existing customer gateway
- without creating new resources.
- - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
- customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
-options:
- bgp_asn:
- description:
- - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present).
- type: int
- ip_address:
- description:
- - Internet-routable IP address for the customer gateway; must be a static address.
- required: true
- type: str
- name:
- description:
- - Name of the customer gateway.
- required: true
- type: str
- routing:
- description:
- - The type of routing.
- choices: ['static', 'dynamic']
- default: dynamic
- version_added: '2.4'
- type: str
- state:
- description:
- - Create or terminate the Customer Gateway.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-
-# Create Customer Gateway
-- ec2_customer_gateway:
- bgp_asn: 12345
- ip_address: 1.2.3.4
- name: IndianapolisOffice
- region: us-east-1
- register: cgw
-
-# Delete Customer Gateway
-- ec2_customer_gateway:
- ip_address: 1.2.3.4
- name: IndianapolisOffice
- state: absent
- region: us-east-1
- register: cgw
-'''
-
-RETURN = '''
-gateway.customer_gateways:
- description: details about the gateway that was created.
- returned: success
- type: complex
- contains:
- bgp_asn:
- description: The Border Gateway Autonomous System Number.
- returned: when exists and gateway is available.
- sample: 65123
- type: str
- customer_gateway_id:
- description: gateway id assigned by amazon.
- returned: when exists and gateway is available.
- sample: cgw-cb6386a2
- type: str
- ip_address:
- description: ip address of your gateway device.
- returned: when exists and gateway is available.
- sample: 1.2.3.4
- type: str
- state:
- description: state of gateway.
- returned: when gateway exists and is available.
- sample: available
- type: str
- tags:
- description: Any tags on the gateway.
- returned: when gateway exists and is available, and when tags exist.
- type: list
- type:
- description: encryption type.
- returned: when gateway exists and is available.
- sample: ipsec.1
- type: str
-'''
-
-try:
- from botocore.exceptions import ClientError
- HAS_BOTOCORE = True
-except ImportError:
- HAS_BOTOCORE = False
-
-try:
- import boto3
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (boto3_conn, AWSRetry, camel_dict_to_snake_dict,
- ec2_argument_spec, get_aws_connection_info)
-
-
-class Ec2CustomerGatewayManager:
-
- def __init__(self, module):
- self.module = module
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- if not region:
- module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
- self.ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except ClientError as e:
- module.fail_json(msg=e.message)
-
- @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])
- def ensure_cgw_absent(self, gw_id):
- response = self.ec2.delete_customer_gateway(
- DryRun=False,
- CustomerGatewayId=gw_id
- )
- return response
-
- def ensure_cgw_present(self, bgp_asn, ip_address):
- if not bgp_asn:
- bgp_asn = 65000
- response = self.ec2.create_customer_gateway(
- DryRun=False,
- Type='ipsec.1',
- PublicIp=ip_address,
- BgpAsn=bgp_asn,
- )
- return response
-
- def tag_cgw_name(self, gw_id, name):
- response = self.ec2.create_tags(
- DryRun=False,
- Resources=[
- gw_id,
- ],
- Tags=[
- {
- 'Key': 'Name',
- 'Value': name
- },
- ]
- )
- return response
-
- def describe_gateways(self, ip_address):
- response = self.ec2.describe_customer_gateways(
- DryRun=False,
- Filters=[
- {
- 'Name': 'state',
- 'Values': [
- 'available',
- ]
- },
- {
- 'Name': 'ip-address',
- 'Values': [
- ip_address,
- ]
- }
- ]
- )
- return response
-
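-# describe_gateways() above filters on state 'available' on purpose: AWS can
-# keep deleted customer gateways visible for a while in state 'deleted', and
-# matching those would make the module act on stale resources.
-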
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- bgp_asn=dict(required=False, type='int'),
- ip_address=dict(required=True),
- name=dict(required=True),
- routing=dict(default='dynamic', choices=['dynamic', 'static']),
- state=dict(default='present', choices=['present', 'absent']),
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_if=[
- ('routing', 'dynamic', ['bgp_asn'])
- ]
- )
-
- if not HAS_BOTOCORE:
- module.fail_json(msg='botocore is required.')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required.')
-
- gw_mgr = Ec2CustomerGatewayManager(module)
-
- name = module.params.get('name')
-
- existing = gw_mgr.describe_gateways(module.params['ip_address'])
-
- results = dict(changed=False)
- if module.params['state'] == 'present':
- if existing['CustomerGateways']:
- existing['CustomerGateway'] = existing['CustomerGateways'][0]
- results['gateway'] = existing
- if existing['CustomerGateway']['Tags']:
- tag_array = existing['CustomerGateway']['Tags']
- for tag in tag_array:
- if tag['Key'] == 'Name':
- current_name = tag['Value']
- if current_name != name:
- results['name'] = gw_mgr.tag_cgw_name(
- results['gateway']['CustomerGateway']['CustomerGatewayId'],
- module.params['name'],
- )
- results['changed'] = True
- else:
- if not module.check_mode:
- results['gateway'] = gw_mgr.ensure_cgw_present(
- module.params['bgp_asn'],
- module.params['ip_address'],
- )
- results['name'] = gw_mgr.tag_cgw_name(
- results['gateway']['CustomerGateway']['CustomerGatewayId'],
- module.params['name'],
- )
- results['changed'] = True
-
- elif module.params['state'] == 'absent':
- if existing['CustomerGateways']:
- existing['CustomerGateway'] = existing['CustomerGateways'][0]
- results['gateway'] = existing
- if not module.check_mode:
- results['gateway'] = gw_mgr.ensure_cgw_absent(
- existing['CustomerGateway']['CustomerGatewayId']
- )
- results['changed'] = True
-
- pretty_results = camel_dict_to_snake_dict(results)
- module.exit_json(**pretty_results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_customer_gateway_info.py b/lib/ansible/modules/cloud/amazon/ec2_customer_gateway_info.py
deleted file mode 100644
index 90d5130059..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_customer_gateway_info.py
+++ /dev/null
@@ -1,137 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'status': ['preview'],
- 'supported_by': 'community',
- 'metadata_version': '1.1'}
-
-DOCUMENTATION = '''
----
-module: ec2_customer_gateway_info
-short_description: Gather information about customer gateways in AWS
-description:
- - Gather information about customer gateways in AWS.
- - This module was called C(ec2_customer_gateway_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.5"
-requirements: [ boto3 ]
-author: Madhura Naniwadekar (@Madhura-CSI)
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCustomerGateways.html) for possible filters.
- type: dict
- customer_gateway_ids:
- description:
- - Get details of specific customer gateways using their IDs. This value should be provided as a list.
- type: list
- elements: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Gather information about all customer gateways
- ec2_customer_gateway_info:
-
-- name: Gather information about a filtered list of customer gateways, based on tags
- ec2_customer_gateway_info:
- region: ap-southeast-2
- filters:
- "tag:Name": test-customer-gateway
- "tag:AltName": test-customer-gateway-alt
- register: cust_gw_info
-
-- name: Gather information about a specific customer gateway by specifying customer gateway ID
- ec2_customer_gateway_info:
- region: ap-southeast-2
- customer_gateway_ids:
- - 'cgw-48841a09'
- - 'cgw-fec021ce'
- register: cust_gw_info
-'''
-
-RETURN = '''
-customer_gateways:
- description: List of one or more customer gateways.
- returned: always
- type: list
- sample: [
- {
- "bgp_asn": "65000",
- "customer_gateway_id": "cgw-fec844ce",
- "customer_gateway_name": "test-customer-gw",
- "ip_address": "110.112.113.120",
- "state": "available",
- "tags": [
- {
- "key": "Name",
- "value": "test-customer-gw"
- }
- ],
- "type": "ipsec.1"
- }
- ]
-'''
-
-import json
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
-
-
-def date_handler(obj):
- return obj.isoformat() if hasattr(obj, 'isoformat') else obj
-
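-# Illustrative behaviour: date_handler(datetime(2020, 3, 9)) returns
-# '2020-03-09T00:00:00', while values without an isoformat() method pass
-# through unchanged; this makes the raw describe_customer_gateways()
-# response JSON-serialisable in list_customer_gateways() below.
-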
-
-def list_customer_gateways(connection, module):
- params = dict()
-
- params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
- params['CustomerGatewayIds'] = module.params.get('customer_gateway_ids')
-
- try:
- result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler))
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Could not describe customer gateways")
- snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result['CustomerGateways']]
- if snaked_customer_gateways:
- for customer_gateway in snaked_customer_gateways:
- customer_gateway['tags'] = boto3_tag_list_to_ansible_dict(customer_gateway.get('tags', []))
- customer_gateway_name = customer_gateway['tags'].get('Name')
- if customer_gateway_name:
- customer_gateway['customer_gateway_name'] = customer_gateway_name
- module.exit_json(changed=False, customer_gateways=snaked_customer_gateways)
-
-
-def main():
-
- argument_spec = dict(
- customer_gateway_ids=dict(default=[], type='list'),
- filters=dict(default={}, type='dict')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- mutually_exclusive=[['customer_gateway_ids', 'filters']],
- supports_check_mode=True)
- if module._module._name == 'ec2_customer_gateway_facts':
- module._module.deprecate("The 'ec2_customer_gateway_facts' module has been renamed to 'ec2_customer_gateway_info'", version='2.13')
-
- connection = module.client('ec2')
-
- list_customer_gateways(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_eip.py b/lib/ansible/modules/cloud/amazon/ec2_eip.py
deleted file mode 100644
index 7f7cd18c5b..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_eip.py
+++ /dev/null
@@ -1,649 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_eip
-short_description: Manages EC2 Elastic IP (EIP) addresses
-description:
- - This module can allocate or release an EIP.
- - This module can associate/disassociate an EIP with instances or network interfaces.
-version_added: "1.4"
-options:
- device_id:
- description:
- - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id.
- required: false
- aliases: [ instance_id ]
- version_added: "2.0"
- type: str
- public_ip:
- description:
- - The IP address of a previously allocated EIP.
- - When I(state=present) and a device is specified, the EIP is associated with the device.
- - When I(state=absent) and a device is specified, the EIP is disassociated from the device.
- aliases: [ ip ]
- type: str
- state:
- description:
- - When C(state=present), allocate an EIP or associate an existing EIP with a device.
- - When C(state=absent), disassociate the EIP from the device and optionally release it.
- choices: ['present', 'absent']
- default: present
- type: str
- in_vpc:
- description:
- - Allocate an EIP inside a VPC or not.
- - Required if specifying an ENI with I(device_id).
- default: false
- type: bool
- version_added: "1.4"
- reuse_existing_ip_allowed:
- description:
- - Reuse an EIP that is not associated to a device (when available), instead of allocating a new one.
- default: false
- type: bool
- version_added: "1.6"
- release_on_disassociation:
- description:
- - Whether or not to automatically release the EIP when it is disassociated.
- default: false
- type: bool
- version_added: "2.0"
- private_ip_address:
- description:
- - The primary or secondary private IP address to associate with the Elastic IP address.
- version_added: "2.3"
- type: str
- allow_reassociation:
- description:
- - Specify this option to allow an Elastic IP address that is already associated with another
- network interface or instance to be re-associated with the specified instance or interface.
- default: false
- type: bool
- version_added: "2.5"
- tag_name:
- description:
- - When I(reuse_existing_ip_allowed=true), supplement with this option to only reuse
- an Elastic IP if it is tagged with I(tag_name).
- version_added: "2.9"
- type: str
- tag_value:
- description:
- - Supplements I(tag_name) but also checks that the value of the tag provided in I(tag_name) matches I(tag_value).
- version_added: "2.9"
- type: str
- public_ipv4_pool:
- description:
- - Allocates the new Elastic IP from the provided public IPv4 pool (BYOIP);
- this only applies to newly allocated Elastic IPs and isn't validated when I(reuse_existing_ip_allowed=true).
- version_added: "2.9"
- type: str
- wait_timeout:
- description:
- - The I(wait_timeout) option does nothing and will be removed in Ansible 2.14.
- type: int
-extends_documentation_fragment:
- - aws
- - ec2
-author: "Rick Mendes (@rickmendes) <rmendes@illumina.com>"
-notes:
- - There may be a delay between the time the EIP is assigned and when
- the cloud instance is reachable via the new address. Use wait_for and
- pause to delay further playbook execution until the instance is reachable,
- if necessary.
- - This module returns multiple changed statuses on disassociation or release.
- It returns an overall status based on any changes occurring. It also returns
- individual changed statuses for disassociation and release.
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: associate an elastic IP with an instance
- ec2_eip:
- device_id: i-1212f003
- ip: 93.184.216.119
-
-- name: associate an elastic IP with a device
- ec2_eip:
- device_id: eni-c8ad70f3
- ip: 93.184.216.119
-
-- name: associate an elastic IP with a device and allow reassociation
- ec2_eip:
- device_id: eni-c8ad70f3
- public_ip: 93.184.216.119
- allow_reassociation: true
-
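-# As the module notes explain, an instance may not be reachable on a freshly
-# associated address straight away; an illustrative follow-up task
-# (host value reuses the example address above):
-- name: wait for the associated elastic IP to respond
- wait_for:
- host: 93.184.216.119
- port: 22
- timeout: 320
-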
-- name: disassociate an elastic IP from an instance
- ec2_eip:
- device_id: i-1212f003
- ip: 93.184.216.119
- state: absent
-
-- name: disassociate an elastic IP with a device
- ec2_eip:
- device_id: eni-c8ad70f3
- ip: 93.184.216.119
- state: absent
-
-- name: allocate a new elastic IP and associate it with an instance
- ec2_eip:
- device_id: i-1212f003
-
-- name: allocate a new elastic IP without associating it to anything
- ec2_eip:
- state: present
- register: eip
-
-- name: output the IP
- debug:
- msg: "Allocated IP is {{ eip.public_ip }}"
-
-- name: provision new instances with ec2
- ec2:
- keypair: mykey
- instance_type: c1.medium
- image: ami-40603AD1
- wait: true
- group: webserver
- count: 3
- register: ec2
-
-- name: associate new elastic IPs with each of the instances
- ec2_eip:
- device_id: "{{ item }}"
- loop: "{{ ec2.instance_ids }}"
-
-- name: allocate a new elastic IP inside a VPC in us-west-2
- ec2_eip:
- region: us-west-2
- in_vpc: true
- register: eip
-
-- name: output the IP
- debug:
- msg: "Allocated IP inside a VPC is {{ eip.public_ip }}"
-
-- name: allocate eip - reuse unallocated ips (if found) with FREE tag
- ec2_eip:
- region: us-east-1
- in_vpc: true
- reuse_existing_ip_allowed: true
- tag_name: FREE
-
- - name: allocate eip - reuse unallocated ips if tag reserved is nope
- ec2_eip:
- region: us-east-1
- in_vpc: true
- reuse_existing_ip_allowed: true
- tag_name: reserved
- tag_value: nope
-
-- name: allocate new eip - from servers given ipv4 pool
- ec2_eip:
- region: us-east-1
- in_vpc: true
- public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02
-
-- name: allocate eip - from a given pool (if no free addresses where dev-servers tag is dynamic)
- ec2_eip:
- region: us-east-1
- in_vpc: true
- reuse_existing_ip_allowed: true
- tag_name: dev-servers
- public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02
-
-- name: allocate eip from pool - check if tag reserved_for exists and value is our hostname
- ec2_eip:
- region: us-east-1
- in_vpc: true
- reuse_existing_ip_allowed: true
- tag_name: reserved_for
- tag_value: "{{ inventory_hostname }}"
- public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02
-'''
-
-RETURN = '''
-allocation_id:
- description: allocation_id of the elastic ip
- returned: on success
- type: str
- sample: eipalloc-51aa3a6c
-public_ip:
- description: an elastic ip address
- returned: on success
- type: str
- sample: 52.88.159.209
-'''
-
-try:
- import botocore.exceptions
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list
-
-
-def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True):
- if address_is_associated_with_device(ec2, module, address, device_id, is_instance):
- return {'changed': False}
-
- # If we're in check mode, nothing else to do
- if not check_mode:
- if is_instance:
- try:
- params = dict(
- InstanceId=device_id,
- AllowReassociation=allow_reassociation,
- )
- if private_ip_address:
- params['PrivateIpAddress'] = private_ip_address
- if address['Domain'] == 'vpc':
- params['AllocationId'] = address['AllocationId']
- else:
- params['PublicIp'] = address['PublicIp']
- res = ec2.associate_address(**params)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- msg = "Couldn't associate Elastic IP address with instance '{0}'".format(device_id)
- module.fail_json_aws(e, msg=msg)
- else:
- params = dict(
- NetworkInterfaceId=device_id,
- AllocationId=address['AllocationId'],
- AllowReassociation=allow_reassociation,
- )
-
- if private_ip_address:
- params['PrivateIpAddress'] = private_ip_address
-
- try:
- res = ec2.associate_address(aws_retry=True, **params)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- msg = "Couldn't associate Elastic IP address with network interface '{0}'".format(device_id)
- module.fail_json_aws(e, msg=msg)
- if not res:
- module.fail_json(msg='Association failed.')
-
- return {'changed': True}
-
-
-def disassociate_ip_and_device(ec2, module, address, device_id, check_mode, is_instance=True):
- if not address_is_associated_with_device(ec2, module, address, device_id, is_instance):
- return {'changed': False}
-
- # If we're in check mode, nothing else to do
- if not check_mode:
- try:
- if address['Domain'] == 'vpc':
- res = ec2.disassociate_address(
- AssociationId=address['AssociationId'], aws_retry=True
- )
- else:
- res = ec2.disassociate_address(
- PublicIp=address['PublicIp'], aws_retry=True
- )
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Dissassociation of Elastic IP failed")
-
- return {'changed': True}
-
-
-@AWSRetry.jittered_backoff()
-def find_address(ec2, module, public_ip, device_id, is_instance=True):
- """ Find an existing Elastic IP address """
- filters = []
- kwargs = {}
-
- if public_ip:
- kwargs["PublicIps"] = [public_ip]
- elif device_id:
- if is_instance:
- filters.append({"Name": 'instance-id', "Values": [device_id]})
- else:
- filters.append({'Name': 'network-interface-id', "Values": [device_id]})
-
- if len(filters) > 0:
- kwargs["Filters"] = filters
- elif len(filters) == 0 and public_ip is None:
- return None
-
- try:
- addresses = ec2.describe_addresses(**kwargs)
- except is_boto3_error_code('InvalidAddress.NotFound') as e:
- # If we're releasing and we can't find it, it's already gone...
- if module.params.get('state') == 'absent':
- module.exit_json(changed=False)
- module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses")
-
- addresses = addresses["Addresses"]
- if len(addresses) == 1:
- return addresses[0]
- elif len(addresses) > 1:
- msg = "Found more than one address using args {0}".format(kwargs)
- msg += "Addresses found: {0}".format(addresses)
- module.fail_json_aws(botocore.exceptions.ClientError, msg=msg)
-
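-# When nothing matches, the function falls through and implicitly returns
-# None; ensure_present() treats that as "no EIP yet" and allocates one.
-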
-
-def address_is_associated_with_device(ec2, module, address, device_id, is_instance=True):
- """ Check if the elastic IP is currently associated with the device """
- address = find_address(ec2, module, address["PublicIp"], device_id, is_instance)
- if address:
- if is_instance:
- if "InstanceId" in address and address["InstanceId"] == device_id:
- return address
- else:
- if "NetworkInterfaceId" in address and address["NetworkInterfaceId"] == device_id:
- return address
- return False
-
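-# The truthy return above doubles as a flag: associate_ip_and_device() and
-# disassociate_ip_and_device() only call the EC2 API when the current
-# association state differs from the one requested.
-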
-
-def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, tag_dict=None, public_ipv4_pool=None):
- """ Allocate a new elastic IP address (when needed) and return it """
- if reuse_existing_ip_allowed:
- filters = []
- if not domain:
- domain = 'standard'
- filters.append({'Name': 'domain', "Values": [domain]})
-
- if tag_dict is not None:
- filters += ansible_dict_to_boto3_filter_list(tag_dict)
-
- try:
- all_addresses = ec2.describe_addresses(Filters=filters, aws_retry=True)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses")
-
- all_addresses = all_addresses["Addresses"]
-
- if domain == 'vpc':
- unassociated_addresses = [a for a in all_addresses
- if not a.get('AssociationId', None)]
- else:
- unassociated_addresses = [a for a in all_addresses
- if not a['InstanceId']]
- if unassociated_addresses:
- return unassociated_addresses[0], False
-
- if public_ipv4_pool:
- return allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool), True
-
- try:
- result = ec2.allocate_address(Domain=domain, aws_retry=True), True
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address")
- return result
-
-
-def release_address(ec2, module, address, check_mode):
- """ Release a previously allocated elastic IP address """
-
- # If we're in check mode, nothing else to do
- if not check_mode:
- try:
- result = ec2.release_address(AllocationId=address['AllocationId'], aws_retry=True)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't release Elastic IP address")
-
- return {'changed': True}
-
-
-@AWSRetry.jittered_backoff()
-def describe_eni_with_backoff(ec2, module, device_id):
- try:
- return ec2.describe_network_interfaces(NetworkInterfaceIds=[device_id])
- except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound') as e:
- module.fail_json_aws(e, msg="Couldn't get list of network interfaces.")
-
-
-def find_device(ec2, module, device_id, is_instance=True):
- """ Attempt to find the EC2 instance and return it """
-
- if is_instance:
- try:
- paginator = ec2.get_paginator('describe_instances')
- reservations = list(paginator.paginate(InstanceIds=[device_id]).search('Reservations[]'))
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't get list of instances")
-
- if len(reservations) == 1:
- instances = reservations[0]['Instances']
- if len(instances) == 1:
- return instances[0]
- else:
- try:
- interfaces = describe_eni_with_backoff(ec2, module, device_id)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't get list of network interfaces.")
- if len(interfaces) == 1:
- return interfaces[0]
-
-
-def ensure_present(ec2, module, domain, address, private_ip_address, device_id,
- reuse_existing_ip_allowed, allow_reassociation, check_mode, is_instance=True):
- changed = False
-
- # No existing address was found (or none was specified): allocate one
- if not address:
- if check_mode:
- return {'changed': True}
-
- address, changed = allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode)
-
- if device_id:
- # Associate the address (given or freshly allocated) with the device
- if is_instance:
- instance = find_device(ec2, module, device_id)
- if reuse_existing_ip_allowed:
- if instance.get('VpcId') and domain is None:
- msg = "You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc"
- module.fail_json(msg=msg)
-
- # Associate address object (provided or allocated) with instance
- assoc_result = associate_ip_and_device(
- ec2, module, address, private_ip_address, device_id, allow_reassociation,
- check_mode
- )
- else:
- instance = find_device(ec2, module, device_id, is_instance=False)
- # Associate address object (provided or allocated) with instance
- assoc_result = associate_ip_and_device(
- ec2, module, address, private_ip_address, device_id, allow_reassociation,
- check_mode, is_instance=False
- )
-
- changed = changed or assoc_result['changed']
-
- return {'changed': changed, 'public_ip': address['PublicIp'], 'allocation_id': address['AllocationId']}
-
-
-def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True):
- if not address:
- return {'changed': False}
-
- # disassociating address from instance
- if device_id:
- if is_instance:
- return disassociate_ip_and_device(
- ec2, module, address, device_id, check_mode
- )
- else:
- return disassociate_ip_and_device(
- ec2, module, address, device_id, check_mode, is_instance=False
- )
- # releasing address
- else:
- return release_address(ec2, module, address, check_mode)
-
-
-def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool):
- # type: (EC2Connection, AnsibleAWSModule, str, bool, str) -> Address
- """ Overrides boto's allocate_address function to support BYOIP """
- params = {}
-
- if domain is not None:
- params['Domain'] = domain
-
- if public_ipv4_pool is not None:
- params['PublicIpv4Pool'] = public_ipv4_pool
-
- if check_mode:
- params['DryRun'] = 'true'
-
- try:
- result = ec2.allocate_address(aws_retry=True, **params)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address")
- return result
-
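-# Illustrative call (pool id taken from the EXAMPLES section):
-#   allocate_address_from_pool(ec2, module, 'vpc', False,
-#                              'ipv4pool-ec2-0588c9b75a25d1a02')
-# returns the raw allocate_address() response, whose 'PublicIp' and
-# 'AllocationId' keys are surfaced to the user by ensure_present().
-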
-
-def generate_tag_dict(module, tag_name, tag_value):
- # type: (AnsibleModule, str, str) -> Optional[Dict]
- """ Generates a dictionary to be passed as a filter to Amazon """
- if tag_name and not tag_value:
- if tag_name.startswith('tag:'):
- # str.strip('tag:') would strip characters, not the prefix
- tag_name = tag_name[len('tag:'):]
- return {'tag-key': tag_name}
-
- elif tag_name and tag_value:
- if not tag_name.startswith('tag:'):
- tag_name = 'tag:' + tag_name
- return {tag_name: tag_value}
-
- elif tag_value and not tag_name:
- module.fail_json(msg="parameters are required together: ('tag_name', 'tag_value')")
-
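-# Illustrative outputs (hypothetical tag names):
-#   generate_tag_dict(module, 'reserved_for', None)    -> {'tag-key': 'reserved_for'}
-#   generate_tag_dict(module, 'reserved_for', 'web-1') -> {'tag:reserved_for': 'web-1'}
-# The returned dict is expanded into describe_addresses() filters inside
-# allocate_address().
-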
-
-def main():
- argument_spec = dict(
- device_id=dict(required=False, aliases=['instance_id']),
- public_ip=dict(required=False, aliases=['ip']),
- state=dict(required=False, default='present',
- choices=['present', 'absent']),
- in_vpc=dict(required=False, type='bool', default=False),
- reuse_existing_ip_allowed=dict(required=False, type='bool',
- default=False),
- release_on_disassociation=dict(required=False, type='bool', default=False),
- allow_reassociation=dict(type='bool', default=False),
- wait_timeout=dict(type='int', removed_in_version='2.14'),
- private_ip_address=dict(),
- tag_name=dict(),
- tag_value=dict(),
- public_ipv4_pool=dict()
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- required_by={
- 'private_ip_address': ['device_id'],
- },
- )
-
- ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
-
- device_id = module.params.get('device_id')
- instance_id = module.params.get('instance_id')
- public_ip = module.params.get('public_ip')
- private_ip_address = module.params.get('private_ip_address')
- state = module.params.get('state')
- in_vpc = module.params.get('in_vpc')
- domain = 'vpc' if in_vpc else None
- reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
- release_on_disassociation = module.params.get('release_on_disassociation')
- allow_reassociation = module.params.get('allow_reassociation')
- tag_name = module.params.get('tag_name')
- tag_value = module.params.get('tag_value')
- public_ipv4_pool = module.params.get('public_ipv4_pool')
-
- if instance_id:
- warnings = ["instance_id is no longer used, please use device_id going forward"]
- is_instance = True
- device_id = instance_id
- else:
- if device_id and device_id.startswith('i-'):
- is_instance = True
- elif device_id:
- if device_id.startswith('eni-') and not in_vpc:
- module.fail_json(msg="If you are specifying an ENI, in_vpc must be true")
- is_instance = False
-
- tag_dict = generate_tag_dict(module, tag_name, tag_value)
-
- try:
- if device_id:
- address = find_address(ec2, module, public_ip, device_id, is_instance=is_instance)
- else:
- address = find_address(ec2, module, public_ip, None)
-
- if state == 'present':
- if device_id:
- result = ensure_present(
- ec2, module, domain, address, private_ip_address, device_id,
- reuse_existing_ip_allowed, allow_reassociation,
- module.check_mode, is_instance=is_instance
- )
- else:
- if address:
- changed = False
- else:
- address, changed = allocate_address(
- ec2, module, domain, reuse_existing_ip_allowed,
- module.check_mode, tag_dict, public_ipv4_pool
- )
- result = {
- 'changed': changed,
- 'public_ip': address['PublicIp'],
- 'allocation_id': address['AllocationId']
- }
- else:
- if device_id:
- disassociated = ensure_absent(
- ec2, module, address, device_id, module.check_mode, is_instance=is_instance
- )
-
- if release_on_disassociation and disassociated['changed']:
- released = release_address(ec2, module, address, module.check_mode)
- result = {
- 'changed': True,
- 'disassociated': disassociated,
- 'released': released
- }
- else:
- result = {
- 'changed': disassociated['changed'],
- 'disassociated': disassociated,
- 'released': {'changed': False}
- }
- else:
- released = release_address(ec2, module, address, module.check_mode)
- result = {
- 'changed': released['changed'],
- 'disassociated': {'changed': False},
- 'released': released
- }
-
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e)
-
- if instance_id:
- result['warnings'] = warnings
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_eip_info.py b/lib/ansible/modules/cloud/amazon/ec2_eip_info.py
deleted file mode 100644
index 27fda74903..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_eip_info.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_eip_info
-short_description: List EC2 EIP details
-description:
- - List details of EC2 Elastic IP addresses.
- - This module was called C(ec2_eip_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.6"
-author: "Brad Macpherson (@iiibrad)"
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and filter
- value. See U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-addresses.html#options)
- for possible filters. Filter names and values are case sensitive.
- required: false
- default: {}
- type: dict
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details or the AWS region,
-# see the AWS Guide for details.
-
-# List all EIP addresses in the current region.
-- ec2_eip_info:
- register: regional_eip_addresses
-
-# List all EIP addresses for a VM.
-- ec2_eip_info:
- filters:
- instance-id: i-123456789
- register: my_vm_eips
-
-- debug: msg="{{ my_vm_eips.addresses | json_query(\"[?private_ip_address=='10.0.0.5']\") }}"
-
-# List all EIP addresses for several VMs.
-- ec2_eip_info:
- filters:
- instance-id:
- - i-123456789
- - i-987654321
- register: my_vms_eips
-
-# List all EIP addresses using the 'Name' tag as a filter.
-- ec2_eip_info:
- filters:
- tag:Name: www.example.com
- register: my_vms_eips
-
-# List all EIP addresses using the Allocation-id as a filter
-- ec2_eip_info:
- filters:
- allocation-id: eipalloc-64de1b01
- register: my_vms_eips
-
-# Set the variable eip_alloc to the value of the first allocation_id
-# and set the variable my_pub_ip to the value of the first public_ip
-- set_fact:
- eip_alloc: "{{ my_vms_eips.addresses[0].allocation_id }}"
- my_pub_ip: "{{ my_vms_eips.addresses[0].public_ip }}"
-
-'''
-
-
-RETURN = '''
-addresses:
- description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP.
- returned: on success
- type: list
- sample: [{
- "allocation_id": "eipalloc-64de1b01",
- "association_id": "eipassoc-0fe9ce90d6e983e97",
- "domain": "vpc",
- "instance_id": "i-01020cfeb25b0c84f",
- "network_interface_id": "eni-02fdeadfd4beef9323b",
- "network_interface_owner_id": "0123456789",
- "private_ip_address": "10.0.0.1",
- "public_ip": "54.81.104.1",
- "tags": {
- "Name": "test-vm-54.81.104.1"
- }
- }]
-
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
- boto3_tag_list_to_ansible_dict,
- camel_dict_to_snake_dict)
-try:
- from botocore.exceptions import (BotoCoreError, ClientError)
-except ImportError:
- pass # caught by imported AnsibleAWSModule
-
-
-def get_eips_details(module):
- connection = module.client('ec2')
- filters = module.params.get("filters")
- try:
- response = connection.describe_addresses(
- Filters=ansible_dict_to_boto3_filter_list(filters)
- )
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(
- e,
- msg="Error retrieving EIPs")
-
- addresses = camel_dict_to_snake_dict(response)['addresses']
- for address in addresses:
- if 'tags' in address:
- address['tags'] = boto3_tag_list_to_ansible_dict(address['tags'])
- return addresses
-
-
-def main():
- module = AnsibleAWSModule(
- argument_spec=dict(
- filters=dict(type='dict', default={})
- ),
- supports_check_mode=True
- )
- if module._module._name == 'ec2_eip_facts':
- module._module.deprecate("The 'ec2_eip_facts' module has been renamed to 'ec2_eip_info'", version='2.13')
-
- module.exit_json(changed=False, addresses=get_eips_details(module))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_elb.py b/lib/ansible/modules/cloud/amazon/ec2_elb.py
deleted file mode 100644
index 01cb34c038..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_elb.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: ec2_elb
-short_description: De-registers or registers instances from EC2 ELBs
-description:
- - This module de-registers or registers an AWS EC2 instance from the ELBs
- that it belongs to.
- - Returns fact "ec2_elbs" which is a list of elbs attached to the instance
- if state=absent is passed as an argument.
- - Will be marked changed when called only if there are ELBs found to operate on.
-version_added: "1.2"
-author: "John Jarvis (@jarv)"
-options:
- state:
- description:
- - register or deregister the instance
- required: true
- choices: ['present', 'absent']
- type: str
- instance_id:
- description:
- - EC2 Instance ID
- required: true
- type: str
- ec2_elbs:
- description:
- - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
- type: list
- enable_availability_zone:
- description:
- - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
- been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
- type: bool
- default: 'yes'
- wait:
- description:
- - Wait for instance registration or deregistration to complete successfully before returning.
- type: bool
- default: 'yes'
- wait_timeout:
- description:
- - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs.
- If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
- default: 0
- version_added: "1.6"
- type: int
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-EXAMPLES = """
-# basic pre_task and post_task example
-pre_tasks:
- - name: Gathering ec2 facts
- action: ec2_facts
- - name: Instance De-register
- local_action:
- module: ec2_elb
- instance_id: "{{ ansible_ec2_instance_id }}"
- state: absent
-roles:
- - myrole
-post_tasks:
- - name: Instance Register
- local_action:
- module: ec2_elb
- instance_id: "{{ ansible_ec2_instance_id }}"
- ec2_elbs: "{{ item }}"
- state: present
- loop: "{{ ec2_elbs }}"
-"""
-
-import time
-
-try:
- import boto
- import boto.ec2
- import boto.ec2.autoscale
- import boto.ec2.elb
- from boto.regioninfo import RegionInfo
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec,
- get_aws_connection_info)
-
-
-class ElbManager:
- """Handles EC2 instance ELB registration and de-registration"""
-
- def __init__(self, module, instance_id=None, ec2_elbs=None,
- region=None, **aws_connect_params):
- self.module = module
- self.instance_id = instance_id
- self.region = region
- self.aws_connect_params = aws_connect_params
- self.lbs = self._get_instance_lbs(ec2_elbs)
- self.changed = False
-
- def deregister(self, wait, timeout):
- """De-register the instance from all ELBs and wait for the ELB
- to report it out-of-service"""
-
- for lb in self.lbs:
- initial_state = self._get_instance_health(lb)
- if initial_state is None:
- # Instance isn't registered with this load
- # balancer. Ignore it and try the next one.
- continue
-
- # The instance is not associated with any load balancer so nothing to do
- if not self._get_instance_lbs():
- return
-
- lb.deregister_instances([self.instance_id])
-
- # The ELB is changing state in some way. Either an instance that's
- # InService is moving to OutOfService, or an instance that's
- # already OutOfService is being deregistered.
- self.changed = True
-
- if wait:
- self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
-
- def register(self, wait, enable_availability_zone, timeout):
- """Register the instance for all ELBs and wait for the ELB
- to report the instance in-service"""
- for lb in self.lbs:
- initial_state = self._get_instance_health(lb)
-
- if enable_availability_zone:
- self._enable_availability_zone(lb)
-
- lb.register_instances([self.instance_id])
-
- if wait:
- self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
- else:
- # We cannot assume no change was made if we don't wait
- # to find out
- self.changed = True
-
- def exists(self, lbtest):
- """ Verify that the named ELB actually exists """
-
- found = False
- for lb in self.lbs:
- if lb.name == lbtest:
- found = True
- break
- return found
-
- def _enable_availability_zone(self, lb):
- """Enable the current instance's availability zone in the provided lb.
- Returns True if the zone was enabled or False if no change was made.
- lb: load balancer"""
- instance = self._get_instance()
- if instance.placement in lb.availability_zones:
- return False
-
- lb.enable_zones(zones=instance.placement)
-
- # If successful, the new zone will have been added to
- # lb.availability_zones
- return instance.placement in lb.availability_zones
-
- def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
- """Wait for an ELB to change state
- lb: load balancer
- awaited_state : state to poll for (string)"""
-
- wait_timeout = time.time() + timeout
- while True:
- instance_state = self._get_instance_health(lb)
-
- if not instance_state:
- msg = ("The instance %s could not be put in service on %s."
- " Reason: Invalid Instance")
- self.module.fail_json(msg=msg % (self.instance_id, lb))
-
- if instance_state.state == awaited_state:
- # Check the current state against the initial state, and only set
- # changed if they are different.
- if (initial_state is None) or (instance_state.state != initial_state.state):
- self.changed = True
- break
- elif self._is_instance_state_pending(instance_state):
- # If it's pending, we'll skip further checks and continue waiting
- pass
- elif (awaited_state == 'InService'
- and instance_state.reason_code == "Instance"
- and time.time() >= wait_timeout):
- # If the reason_code for the instance being out of service is
- # "Instance" this indicates a failure state, e.g. the instance
- # has failed a health check or the ELB does not have the
- # instance's availability zone enabled. The exact reason why is
- # described in InstanceState.description.
- msg = ("The instance %s could not be put in service on %s."
- " Reason: %s")
- self.module.fail_json(msg=msg % (self.instance_id,
- lb,
- instance_state.description))
- time.sleep(1)
-
- def _is_instance_state_pending(self, instance_state):
- """
- Determines whether the instance_state is "pending", meaning there is
- an operation under way to bring it in service.
- """
- # This is messy, because AWS provides no way to distinguish between
- # an instance that is OutOfService because it's pending vs. OutOfService
- # because it's failing health checks. So we're forced to analyze the
- # description, which is likely to be brittle.
- return (instance_state and 'pending' in instance_state.description)
-
- def _get_instance_health(self, lb):
- """
- Check instance health, should return status object or None under
- certain error conditions.
- """
- try:
- status = lb.get_instance_health([self.instance_id])[0]
- except boto.exception.BotoServerError as e:
- if e.error_code == 'InvalidInstance':
- return None
- else:
- raise
- return status
-
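- # The None return for 'InvalidInstance' is what deregister() relies on:
- # it lets the loop skip load balancers that never had this instance
- # registered.
-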
- def _get_instance_lbs(self, ec2_elbs=None):
- """Returns a list of ELBs attached to self.instance_id
- ec2_elbs: an optional list of elb names that will be used
- for elb lookup instead of returning what elbs
- are attached to self.instance_id"""
-
- if not ec2_elbs:
- ec2_elbs = self._get_auto_scaling_group_lbs()
-
- try:
- elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- self.module.fail_json(msg=str(e))
-
- elbs = []
- marker = None
- while True:
- try:
- newelbs = elb.get_all_load_balancers(marker=marker)
- marker = newelbs.next_marker
- elbs.extend(newelbs)
- if not marker:
- break
- except TypeError:
- # Older versions of boto do not allow for params
- elbs = elb.get_all_load_balancers()
- break
-
- if ec2_elbs:
- lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
- else:
- lbs = []
- for lb in elbs:
- for info in lb.instances:
- if self.instance_id == info.id:
- lbs.append(lb)
- return lbs
-
- def _get_auto_scaling_group_lbs(self):
- """Returns a list of ELBs associated with self.instance_id
- indirectly through its auto scaling group membership"""
-
- try:
- asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- self.module.fail_json(msg=str(e))
-
- asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
- if len(asg_instances) > 1:
- self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")
-
- if not asg_instances:
- asg_elbs = []
- else:
- asg_name = asg_instances[0].group_name
-
- asgs = asg.get_all_groups([asg_name])
- if len(asgs) != 1:
- self.module.fail_json(msg="Illegal state, expected one auto scaling group.")
-
- asg_elbs = asgs[0].load_balancers
-
- return asg_elbs
-
- def _get_instance(self):
- """Returns a boto.ec2.InstanceObject for self.instance_id"""
- try:
- ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- self.module.fail_json(msg=str(e))
- return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state={'required': True, 'choices': ['present', 'absent']},
- instance_id={'required': True},
- ec2_elbs={'default': None, 'required': False, 'type': 'list'},
- enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
- wait={'required': False, 'default': True, 'type': 'bool'},
- wait_timeout={'required': False, 'default': 0, 'type': 'int'}
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-
- if not region:
- module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
-
- ec2_elbs = module.params['ec2_elbs']
- wait = module.params['wait']
- enable_availability_zone = module.params['enable_availability_zone']
- timeout = module.params['wait_timeout']
-
- if module.params['state'] == 'present' and module.params['ec2_elbs'] is None:
- module.fail_json(msg="ELBs are required for registration")
-
- instance_id = module.params['instance_id']
- elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params)
-
- if ec2_elbs is not None:
- for elb in ec2_elbs:
- if not elb_man.exists(elb):
- msg = "ELB %s does not exist" % elb
- module.fail_json(msg=msg)
-
- if not module.check_mode:
- if module.params['state'] == 'present':
- elb_man.register(wait, enable_availability_zone, timeout)
- elif module.params['state'] == 'absent':
- elb_man.deregister(wait, timeout)
-
- ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
- ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
-
- module.exit_json(**ec2_facts_result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_elb_info.py b/lib/ansible/modules/cloud/amazon/ec2_elb_info.py
deleted file mode 100644
index 9a48cdfaaa..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_elb_info.py
+++ /dev/null
@@ -1,271 +0,0 @@
-#!/usr/bin/python
-#
- # This is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This Ansible library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this library. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_elb_info
-short_description: Gather information about EC2 Elastic Load Balancers in AWS
-description:
- - Gather information about EC2 Elastic Load Balancers in AWS
- - This module was called C(ec2_elb_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.0"
-author:
- - "Michael Schultz (@mjschultz)"
- - "Fernando Jose Pando (@nand0p)"
-options:
- names:
- description:
- - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned.
- type: list
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-# Output format tries to match ec2_elb_lb module input parameters
-
-# Gather information about all ELBs
-- action:
- module: ec2_elb_info
- register: elb_info
-
-- action:
- module: debug
- msg: "{{ item.dns_name }}"
- loop: "{{ elb_info.elbs }}"
-
-# Gather information about a particular ELB
-- action:
- module: ec2_elb_info
- names: frontend-prod-elb
- register: elb_info
-
-- action:
- module: debug
- msg: "{{ elb_info.elbs.0.dns_name }}"
-
-# Gather information about a set of ELBs
-- action:
- module: ec2_elb_info
- names:
- - frontend-prod-elb
- - backend-prod-elb
- register: elb_info
-
-- action:
- module: debug
- msg: "{{ item.dns_name }}"
- loop: "{{ elb_info.elbs }}"
-
-'''
-
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (
- AWSRetry,
- connect_to_aws,
- ec2_argument_spec,
- get_aws_connection_info,
-)
-
-try:
- import boto.ec2.elb
- from boto.ec2.tag import Tag
- from boto.exception import BotoServerError
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-
-class ElbInformation(object):
- """Handles ELB information."""
-
- def __init__(self,
- module,
- names,
- region,
- **aws_connect_params):
-
- self.module = module
- self.names = names
- self.region = region
- self.aws_connect_params = aws_connect_params
- self.connection = self._get_elb_connection()
-
- def _get_tags(self, elbname):
- params = {'LoadBalancerNames.member.1': elbname}
- elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
- return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
-
- @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
- def _get_elb_connection(self):
- return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
-
- def _get_elb_listeners(self, listeners):
- listener_list = []
-
- for listener in listeners:
- listener_dict = {
- 'load_balancer_port': listener[0],
- 'instance_port': listener[1],
- 'protocol': listener[2],
- 'instance_protocol': listener[3]
- }
-
- try:
- ssl_certificate_id = listener[4]
- except IndexError:
- pass
- else:
- if ssl_certificate_id:
- listener_dict['ssl_certificate_id'] = ssl_certificate_id
-
- listener_list.append(listener_dict)
-
- return listener_list
-
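- # For illustration (the 5-tuple layout below is an assumption inferred from
- # the indexing above):
- #   _get_elb_listeners([(443, 8443, 'HTTPS', 'HTTP', 'arn:aws:iam::123456789012:server-certificate/my-cert')])
- #   # -> [{'load_balancer_port': 443, 'instance_port': 8443, 'protocol': 'HTTPS',
- #   #      'instance_protocol': 'HTTP',
- #   #      'ssl_certificate_id': 'arn:aws:iam::123456789012:server-certificate/my-cert'}]
-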
- def _get_health_check(self, health_check):
- protocol, port_path = health_check.target.split(':', 1)
- try:
- port, path = port_path.split('/', 1)
- path = '/{0}'.format(path)
- except ValueError:
- port = port_path
- path = None
-
- health_check_dict = {
- 'ping_protocol': protocol.lower(),
- 'ping_port': int(port),
- 'response_timeout': health_check.timeout,
- 'interval': health_check.interval,
- 'unhealthy_threshold': health_check.unhealthy_threshold,
- 'healthy_threshold': health_check.healthy_threshold,
- }
-
- if path:
- health_check_dict['ping_path'] = path
- return health_check_dict
-
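- # Hedged illustration of the "PROTOCOL:port[/path]" target parsing above:
- #   a target of 'HTTP:80/index.html' yields ping_protocol 'http', ping_port 80
- #   and ping_path '/index.html'; 'TCP:443' yields ping_protocol 'tcp' and
- #   ping_port 443 with no ping_path key.
-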
- @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
- def _get_elb_info(self, elb):
- elb_info = {
- 'name': elb.name,
- 'zones': elb.availability_zones,
- 'dns_name': elb.dns_name,
- 'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
- 'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
- 'hosted_zone_name': elb.canonical_hosted_zone_name,
- 'hosted_zone_id': elb.canonical_hosted_zone_name_id,
- 'instances': [instance.id for instance in elb.instances],
- 'listeners': self._get_elb_listeners(elb.listeners),
- 'scheme': elb.scheme,
- 'security_groups': elb.security_groups,
- 'health_check': self._get_health_check(elb.health_check),
- 'subnets': elb.subnets,
- 'instances_inservice': [],
- 'instances_inservice_count': 0,
- 'instances_outofservice': [],
- 'instances_outofservice_count': 0,
- 'instances_inservice_percent': 0.0,
- 'tags': self._get_tags(elb.name)
- }
-
- if elb.vpc_id:
- elb_info['vpc_id'] = elb.vpc_id
-
- if elb.instances:
- instance_health = self.connection.describe_instance_health(elb.name)
- elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
- elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
- elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
- elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
- try:
- elb_info['instances_inservice_percent'] = (
- float(elb_info['instances_inservice_count']) /
- float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count'])
- ) * 100.
- except ZeroDivisionError:
- elb_info['instances_inservice_percent'] = 0.
- return elb_info
-
- def list_elbs(self):
- elb_array, token = [], None
- get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
- while True:
- all_elbs = get_elb_with_backoff(marker=token)
- token = all_elbs.next_marker
-
- if all_elbs:
- if self.names:
- for existing_lb in all_elbs:
- if existing_lb.name in self.names:
- elb_array.append(existing_lb)
- else:
- elb_array.extend(all_elbs)
- else:
- break
-
- if token is None:
- break
-
- return list(map(self._get_elb_info, elb_array))
-
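- # Pagination note: get_all_load_balancers links result pages through
- # next_marker; the loop above follows markers until the API stops returning
- # one, then maps every collected ELB through _get_elb_info.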
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- names={'default': [], 'type': 'list'}
- )
- )
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
- if module._name == 'ec2_elb_facts':
- module.deprecate("The 'ec2_elb_facts' module has been renamed to 'ec2_elb_info'", version='2.13')
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- try:
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
- if not region:
- module.fail_json(msg="region must be specified")
-
- names = module.params['names']
- elb_information = ElbInformation(
- module, names, region, **aws_connect_params)
-
- ec2_info_result = dict(changed=False,
- elbs=elb_information.list_elbs())
-
- except BotoServerError as err:
- module.fail_json(msg="{0}: {1}".format(err.error_code, err.error_message),
- exception=traceback.format_exc())
-
- module.exit_json(**ec2_info_result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_instance.py b/lib/ansible/modules/cloud/amazon/ec2_instance.py
deleted file mode 100644
index 7a587fb941..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_instance.py
+++ /dev/null
@@ -1,1805 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: ec2_instance
-short_description: Create & manage EC2 instances
-description:
- - Create and manage AWS EC2 instances.
- - >
- Note: This module does not support creating
- L(EC2 Spot instances,https://aws.amazon.com/ec2/spot/). The M(ec2) module
- can create and manage spot instances.
-version_added: "2.5"
-author:
- - Ryan Scott Brown (@ryansb)
-requirements: [ "boto3", "botocore" ]
-options:
- instance_ids:
- description:
- - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
- type: list
- state:
- description:
- - Goal state for the instances.
- choices: [present, terminated, running, started, stopped, restarted, rebooted, absent]
- default: present
- type: str
- wait:
- description:
- - Whether or not to wait for the desired state (use wait_timeout to customize this).
- default: true
- type: bool
- wait_timeout:
- description:
- - How long to wait (in seconds) for the instance to finish booting/terminating.
- default: 600
- type: int
- instance_type:
- description:
- - Instance type to use for the instance, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
- Only required when instance is not already present.
- default: t2.micro
- type: str
- user_data:
- description:
- - Opaque blob of data which is made available to the ec2 instance
- type: str
- tower_callback:
- description:
- - Preconfigured user-data to enable an instance to perform a Tower callback (Linux only).
- - Mutually exclusive with I(user_data).
- - For Windows instances, to enable remote access via Ansible set I(tower_callback.windows) to true, and optionally set an admin password.
- - If using 'windows' and 'set_password', callback to Tower will not be performed but the instance will be ready to receive winrm connections from Ansible.
- type: dict
- suboptions:
- tower_address:
- description:
- - IP address or DNS name of Tower server. Must be accessible via this address from the VPC that this instance will be launched in.
- type: str
- job_template_id:
- description:
- - Either the integer ID of the Tower Job Template, or the name (name supported only for Tower 3.2+).
- type: str
- host_config_key:
- description:
- - Host configuration secret key generated by the Tower job template.
- type: str
- tags:
- description:
- - A hash/dictionary of tags to add to the new instance or to add/remove from an existing one.
- type: dict
- purge_tags:
- description:
- - Delete any tags not specified in the task that are on the instance.
- This means you have to specify all the desired tags on each task affecting an instance.
- default: false
- type: bool
- image:
- description:
- - An image to use for the instance. The M(ec2_ami_info) module may be used to retrieve images.
- One of I(image) or I(image_id) is required when instance is not already present.
- type: dict
- suboptions:
- id:
- description:
- - The AMI ID.
- type: str
- ramdisk:
- description:
- - Overrides the AMI's default ramdisk ID.
- type: str
- kernel:
- description:
- - A string AKI to override the AMI kernel.
- type: str
- image_id:
- description:
- - I(ami) ID to use for the instance. One of I(image) or I(image_id) is required when instance is not already present.
- - This is an alias for I(image.id).
- type: str
- security_groups:
- description:
- - A list of security group IDs or names (strings). Mutually exclusive with I(security_group).
- type: list
- security_group:
- description:
- - A security group ID or name. Mutually exclusive with I(security_groups).
- type: str
- name:
- description:
- - The Name tag for the instance.
- type: str
- vpc_subnet_id:
- description:
- - The subnet ID in which to launch the instance (VPC).
- If none is provided, ec2_instance will choose the default subnet of the default VPC.
- aliases: ['subnet_id']
- type: str
- network:
- description:
- - Either a dictionary containing the key 'interfaces' corresponding to a list of network interface IDs or
- containing specifications for a single network interface.
- - Use the ec2_eni module to create ENIs with special settings.
- type: dict
- suboptions:
- interfaces:
- description:
- - a list of ENI IDs (strings) or a list of objects containing the key I(id).
- type: list
- assign_public_ip:
- description:
- - when true assigns a public IP address to the interface
- type: bool
- private_ip_address:
- description:
- - an IPv4 address to assign to the interface
- type: str
- ipv6_addresses:
- description:
- - a list of IPv6 addresses to assign to the network interface
- type: list
- source_dest_check:
- description:
- - controls whether source/destination checking is enabled on the interface
- type: bool
- description:
- description:
- - a description for the network interface
- type: str
- private_ip_addresses:
- description:
- - a list of IPv4 addresses to assign to the network interface
- type: list
- subnet_id:
- description:
- - the subnet to connect the network interface to
- type: str
- delete_on_termination:
- description:
- - Delete the interface when the instance it is attached to is
- terminated.
- type: bool
- device_index:
- description:
- - The index of the interface to modify
- type: int
- groups:
- description:
- - a list of security group IDs to attach to the interface
- type: list
- volumes:
- description:
- - A list of block device mappings, by default this will always use the AMI root device so the volumes option is primarily for adding more storage.
- - A mapping contains the (optional) keys device_name, virtual_name, ebs.volume_type, ebs.volume_size, ebs.kms_key_id,
- ebs.iops, and ebs.delete_on_termination.
- - For more information about each parameter, see U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html).
- type: list
- launch_template:
- description:
- - The EC2 launch template to base instance configuration on.
- type: dict
- suboptions:
- id:
- description:
- - the ID of the launch template (optional if name is specified).
- type: str
- name:
- description:
- - the pretty name of the launch template (optional if id is specified).
- type: str
- version:
- description:
- - the specific version of the launch template to use. If unspecified, the template default is chosen.
- key_name:
- description:
- - Name of the SSH access key to assign to the instance - must exist in the region the instance is created.
- type: str
- availability_zone:
- description:
- - Specify an availability zone to use the default subnet in it. Useful if not specifying the I(vpc_subnet_id) parameter.
- - If no subnet, ENI, or availability zone is provided, the default subnet in the default VPC will be used in the first AZ (alphabetically sorted).
- type: str
- instance_initiated_shutdown_behavior:
- description:
- - Whether to stop or terminate an instance upon shutdown.
- choices: ['stop', 'terminate']
- type: str
- tenancy:
- description:
- - What type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
- choices: ['dedicated', 'default']
- type: str
- termination_protection:
- description:
- - Whether to enable termination protection.
- This module will not terminate an instance with termination protection active; it must be turned off first.
- type: bool
- cpu_credit_specification:
- description:
- - For T series instances, choose whether to allow increased charges to buy CPU credits if the default pool is depleted.
- - Choose I(unlimited) to enable buying additional CPU credits.
- choices: ['unlimited', 'standard']
- type: str
- cpu_options:
- description:
- - Reduce the number of vCPUs exposed to the instance.
- - These parameters can only be set at instance launch. The two suboptions threads_per_core and core_count are mandatory.
- - See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for combinations available.
- - Requires botocore >= 1.10.16
- version_added: 2.7
- type: dict
- suboptions:
- threads_per_core:
- description:
- - Select the number of threads per core to enable. Disable or enable Intel Hyper-Threading.
- choices: [1, 2]
- required: true
- type: int
- core_count:
- description:
- - Set the number of cores to enable.
- required: true
- type: int
- detailed_monitoring:
- description:
- - Whether to allow detailed CloudWatch metrics to be collected, enabling more detailed alerting.
- type: bool
- ebs_optimized:
- description:
- - Whether the instance should use optimized EBS volumes, see U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html).
- type: bool
- filters:
- description:
- - A dict of filters to apply when deciding whether existing instances match and should be altered. Each dict item
- consists of a filter key and a filter value. See
- U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
- for possible filters. Filter names and values are case sensitive.
- - By default, instances are filtered for counting by their "Name" tag, base AMI, state (running, by default), and
- subnet ID. Any queryable filter can be used. Good candidates are specific tags, SSH keys, or security groups.
- type: dict
- instance_role:
- description:
- - The ARN or name of an EC2-enabled IAM instance role to be used.
- If a name is provided instead of a full ARN, the role with a matching name is looked up
- in the active AWS account, which additionally requires the ListInstanceProfiles permission.
- See U(https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListInstanceProfiles.html).
- type: str
- placement_group:
- description:
- - The placement group that needs to be assigned to the instance.
- version_added: 2.8
- type: str
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Terminate every running instance in a region. Use with EXTREME caution.
-- ec2_instance:
- state: absent
- filters:
- instance-state-name: running
-
-# restart a particular instance by its ID
-- ec2_instance:
- state: restarted
- instance_ids:
- - i-12345678
-
-# start an instance with a public IP address
-- ec2_instance:
- name: "public-compute-instance"
- key_name: "prod-ssh-key"
- vpc_subnet_id: subnet-5ca1ab1e
- instance_type: c5.large
- security_group: default
- network:
- assign_public_ip: true
- image_id: ami-123456
- tags:
- Environment: Testing
-
- # start an instance and add an EBS volume
-- ec2_instance:
- name: "public-withebs-instance"
- vpc_subnet_id: subnet-5ca1ab1e
- instance_type: t2.micro
- key_name: "prod-ssh-key"
- security_group: default
- volumes:
- - device_name: /dev/sda1
- ebs:
- volume_size: 16
- delete_on_termination: true
-
- # start an instance with cpu_options
-- ec2_instance:
- name: "public-cpuoption-instance"
- vpc_subnet_id: subnet-5ca1ab1e
- tags:
- Environment: Testing
- instance_type: c4.large
- volumes:
- - device_name: /dev/sda1
- ebs:
- delete_on_termination: true
- cpu_options:
- core_count: 1
- threads_per_core: 1
-
-# start an instance and have it begin a Tower callback on boot
-- ec2_instance:
- name: "tower-callback-test"
- key_name: "prod-ssh-key"
- vpc_subnet_id: subnet-5ca1ab1e
- security_group: default
- tower_callback:
- # IP or hostname of tower server
- tower_address: 1.2.3.4
- job_template_id: 876
- host_config_key: '[secret config key goes here]'
- network:
- assign_public_ip: true
- image_id: ami-123456
- cpu_credit_specification: unlimited
- tags:
- SomeThing: "A value"
-
-# start an instance with ENI (An existing ENI ID is required)
-- ec2_instance:
- name: "public-eni-instance"
- key_name: "prod-ssh-key"
- vpc_subnet_id: subnet-5ca1ab1e
- network:
- interfaces:
- - id: "eni-12345"
- tags:
- Env: "eni_on"
- volumes:
- - device_name: /dev/sda1
- ebs:
- delete_on_termination: true
- instance_type: t2.micro
- image_id: ami-123456
-
-# add second ENI interface
-- ec2_instance:
- name: "public-eni-instance"
- network:
- interfaces:
- - id: "eni-12345"
- - id: "eni-67890"
- image_id: ami-123456
- tags:
- Env: "eni_on"
- instance_type: t2.micro
-'''
-
-RETURN = '''
-instances:
- description: a list of ec2 instances
- returned: when wait == true
- type: complex
- contains:
- ami_launch_index:
- description: The AMI launch index, which can be used to find this instance in the launch group.
- returned: always
- type: int
- sample: 0
- architecture:
- description: The architecture of the image
- returned: always
- type: str
- sample: x86_64
- block_device_mappings:
- description: Any block device mapping entries for the instance.
- returned: always
- type: complex
- contains:
- device_name:
- description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
- returned: always
- type: str
- sample: /dev/sdh
- ebs:
- description: Parameters used to automatically set up EBS volumes when the instance is launched.
- returned: always
- type: complex
- contains:
- attach_time:
- description: The time stamp when the attachment initiated.
- returned: always
- type: str
- sample: "2017-03-23T22:51:24+00:00"
- delete_on_termination:
- description: Indicates whether the volume is deleted on instance termination.
- returned: always
- type: bool
- sample: true
- status:
- description: The attachment state.
- returned: always
- type: str
- sample: attached
- volume_id:
- description: The ID of the EBS volume
- returned: always
- type: str
- sample: vol-12345678
- client_token:
- description: The idempotency token you provided when you launched the instance, if applicable.
- returned: always
- type: str
- sample: mytoken
- ebs_optimized:
- description: Indicates whether the instance is optimized for EBS I/O.
- returned: always
- type: bool
- sample: false
- hypervisor:
- description: The hypervisor type of the instance.
- returned: always
- type: str
- sample: xen
- iam_instance_profile:
- description: The IAM instance profile associated with the instance, if applicable.
- returned: always
- type: complex
- contains:
- arn:
- description: The Amazon Resource Name (ARN) of the instance profile.
- returned: always
- type: str
- sample: "arn:aws:iam::000012345678:instance-profile/myprofile"
- id:
- description: The ID of the instance profile
- returned: always
- type: str
- sample: JFJ397FDG400FG9FD1N
- image_id:
- description: The ID of the AMI used to launch the instance.
- returned: always
- type: str
- sample: ami-0011223344
- instance_id:
- description: The ID of the instance.
- returned: always
- type: str
- sample: i-012345678
- instance_type:
- description: The instance type size of the running instance.
- returned: always
- type: str
- sample: t2.micro
- key_name:
- description: The name of the key pair, if this instance was launched with an associated key pair.
- returned: always
- type: str
- sample: my-key
- launch_time:
- description: The time the instance was launched.
- returned: always
- type: str
- sample: "2017-03-23T22:51:24+00:00"
- monitoring:
- description: The monitoring for the instance.
- returned: always
- type: complex
- contains:
- state:
- description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
- returned: always
- type: str
- sample: disabled
- network_interfaces:
- description: One or more network interfaces for the instance.
- returned: always
- type: complex
- contains:
- association:
- description: The association information for an Elastic IPv4 associated with the network interface.
- returned: always
- type: complex
- contains:
- ip_owner_id:
- description: The ID of the owner of the Elastic IP address.
- returned: always
- type: str
- sample: amazon
- public_dns_name:
- description: The public DNS name.
- returned: always
- type: str
- sample: ""
- public_ip:
- description: The public IP address or Elastic IP address bound to the network interface.
- returned: always
- type: str
- sample: 1.2.3.4
- attachment:
- description: The network interface attachment.
- returned: always
- type: complex
- contains:
- attach_time:
- description: The time stamp when the attachment initiated.
- returned: always
- type: str
- sample: "2017-03-23T22:51:24+00:00"
- attachment_id:
- description: The ID of the network interface attachment.
- returned: always
- type: str
- sample: eni-attach-3aff3f
- delete_on_termination:
- description: Indicates whether the network interface is deleted when the instance is terminated.
- returned: always
- type: bool
- sample: true
- device_index:
- description: The index of the device on the instance for the network interface attachment.
- returned: always
- type: int
- sample: 0
- status:
- description: The attachment state.
- returned: always
- type: str
- sample: attached
- description:
- description: The description.
- returned: always
- type: str
- sample: My interface
- groups:
- description: One or more security groups.
- returned: always
- type: list
- elements: dict
- contains:
- group_id:
- description: The ID of the security group.
- returned: always
- type: str
- sample: sg-abcdef12
- group_name:
- description: The name of the security group.
- returned: always
- type: str
- sample: mygroup
- ipv6_addresses:
- description: One or more IPv6 addresses associated with the network interface.
- returned: always
- type: list
- elements: dict
- contains:
- ipv6_address:
- description: The IPv6 address.
- returned: always
- type: str
- sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
- mac_address:
- description: The MAC address.
- returned: always
- type: str
- sample: "00:11:22:33:44:55"
- network_interface_id:
- description: The ID of the network interface.
- returned: always
- type: str
- sample: eni-01234567
- owner_id:
- description: The AWS account ID of the owner of the network interface.
- returned: always
- type: str
- sample: 01234567890
- private_ip_address:
- description: The IPv4 address of the network interface within the subnet.
- returned: always
- type: str
- sample: 10.0.0.1
- private_ip_addresses:
- description: The private IPv4 addresses associated with the network interface.
- returned: always
- type: list
- elements: dict
- contains:
- association:
- description: The association information for an Elastic IP address (IPv4) associated with the network interface.
- returned: always
- type: complex
- contains:
- ip_owner_id:
- description: The ID of the owner of the Elastic IP address.
- returned: always
- type: str
- sample: amazon
- public_dns_name:
- description: The public DNS name.
- returned: always
- type: str
- sample: ""
- public_ip:
- description: The public IP address or Elastic IP address bound to the network interface.
- returned: always
- type: str
- sample: 1.2.3.4
- primary:
- description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
- returned: always
- type: bool
- sample: true
- private_ip_address:
- description: The private IPv4 address of the network interface.
- returned: always
- type: str
- sample: 10.0.0.1
- source_dest_check:
- description: Indicates whether source/destination checking is enabled.
- returned: always
- type: bool
- sample: true
- status:
- description: The status of the network interface.
- returned: always
- type: str
- sample: in-use
- subnet_id:
- description: The ID of the subnet for the network interface.
- returned: always
- type: str
- sample: subnet-0123456
- vpc_id:
- description: The ID of the VPC for the network interface.
- returned: always
- type: str
- sample: vpc-0123456
- placement:
- description: The location where the instance launched, if applicable.
- returned: always
- type: complex
- contains:
- availability_zone:
- description: The Availability Zone of the instance.
- returned: always
- type: str
- sample: ap-southeast-2a
- group_name:
- description: The name of the placement group the instance is in (for cluster compute instances).
- returned: always
- type: str
- sample: ""
- tenancy:
- description: The tenancy of the instance (if the instance is running in a VPC).
- returned: always
- type: str
- sample: default
- private_dns_name:
- description: The private DNS name.
- returned: always
- type: str
- sample: ip-10-0-0-1.ap-southeast-2.compute.internal
- private_ip_address:
- description: The IPv4 address of the network interface within the subnet.
- returned: always
- type: str
- sample: 10.0.0.1
- product_codes:
- description: One or more product codes.
- returned: always
- type: list
- elements: dict
- contains:
- product_code_id:
- description: The product code.
- returned: always
- type: str
- sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
- product_code_type:
- description: The type of product code.
- returned: always
- type: str
- sample: marketplace
- public_dns_name:
- description: The public DNS name assigned to the instance.
- returned: always
- type: str
- sample:
- public_ip_address:
- description: The public IPv4 address assigned to the instance
- returned: always
- type: str
- sample: 52.0.0.1
- root_device_name:
- description: The device name of the root device
- returned: always
- type: str
- sample: /dev/sda1
- root_device_type:
- description: The type of root device used by the AMI.
- returned: always
- type: str
- sample: ebs
- security_groups:
- description: One or more security groups for the instance.
- returned: always
- type: list
- elements: dict
- contains:
- group_id:
- description: The ID of the security group.
- returned: always
- type: str
- sample: sg-0123456
- group_name:
- description: The name of the security group.
- returned: always
- type: str
- sample: my-security-group
- network.source_dest_check:
- description: Indicates whether source/destination checking is enabled.
- returned: always
- type: bool
- sample: true
- state:
- description: The current state of the instance.
- returned: always
- type: complex
- contains:
- code:
- description: The low byte represents the state.
- returned: always
- type: int
- sample: 16
- name:
- description: The name of the state.
- returned: always
- type: str
- sample: running
- state_transition_reason:
- description: The reason for the most recent state transition.
- returned: always
- type: str
- sample:
- subnet_id:
- description: The ID of the subnet in which the instance is running.
- returned: always
- type: str
- sample: subnet-00abcdef
- tags:
- description: Any tags assigned to the instance.
- returned: always
- type: dict
- sample:
- virtualization_type:
- description: The type of virtualization of the AMI.
- returned: always
- type: str
- sample: hvm
- vpc_id:
- description: The ID of the VPC the instance is in.
- returned: always
- type: str
- sample: vpc-0011223344
-'''
-
-import re
-import uuid
-import string
-import textwrap
-import time
-from collections import namedtuple
-
-try:
- import boto3
- import botocore.exceptions
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.six import text_type, string_types
-from ansible.module_utils.six.moves.urllib import parse as urlparse
-from ansible.module_utils._text import to_bytes, to_native
-import ansible.module_utils.ec2 as ec2_utils
-from ansible.module_utils.ec2 import (AWSRetry,
- ansible_dict_to_boto3_filter_list,
- compare_aws_tags,
- boto3_tag_list_to_ansible_dict,
- ansible_dict_to_boto3_tag_list,
- camel_dict_to_snake_dict)
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-
-module = None
-
-
-def tower_callback_script(tower_conf, windows=False, passwd=None):
- script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'
- if windows and passwd is not None:
- script_tpl = """<powershell>
- $admin = [adsi]("WinNT://./administrator, user")
- $admin.PSBase.Invoke("SetPassword", "{PASS}")
- Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
- </powershell>
- """
- return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url))
- elif windows and passwd is None:
- script_tpl = """<powershell>
- $admin = [adsi]("WinNT://./administrator, user")
- Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
- </powershell>
- """
- return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url))
- elif not windows:
- for p in ['tower_address', 'job_template_id', 'host_config_key']:
- if p not in tower_conf:
- module.fail_json(msg="Incomplete tower_callback configuration. tower_callback.{0} not set.".format(p))
-
- if isinstance(tower_conf['job_template_id'], string_types):
- tower_conf['job_template_id'] = urlparse.quote(tower_conf['job_template_id'])
- tpl = string.Template(textwrap.dedent("""#!/bin/bash
- set -x
-
- retry_attempts=10
- attempt=0
- while [[ $attempt -lt $retry_attempts ]]
- do
- status_code=`curl --max-time 10 -v -k -s -i \
- --data "host_config_key=${host_config_key}" \
- 'https://${tower_address}/api/v2/job_templates/${template_id}/callback/' \
- | head -n 1 \
- | awk '{print $2}'`
- if [[ $status_code == 404 ]]
- then
- status_code=`curl --max-time 10 -v -k -s -i \
- --data "host_config_key=${host_config_key}" \
- 'https://${tower_address}/api/v1/job_templates/${template_id}/callback/' \
- | head -n 1 \
- | awk '{print $2}'`
- # fall back to using V1 API for Tower 3.1 and below, since v2 API will always 404
- fi
- if [[ $status_code == 201 ]]
- then
- exit 0
- fi
- attempt=$(( attempt + 1 ))
- echo "$${status_code} received... retrying in 1 minute. (Attempt $${attempt})"
- sleep 60
- done
- exit 1
- """))
- return tpl.safe_substitute(tower_address=tower_conf['tower_address'],
- template_id=tower_conf['job_template_id'],
- host_config_key=tower_conf['host_config_key'])
- raise NotImplementedError("Only windows with remote-prep or non-windows with tower job callback supported so far.")
-
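- # Note on the template above: string.Template.safe_substitute fills
- # ${tower_address}, ${template_id} and ${host_config_key}, leaves shell
- # variables it does not recognize (e.g. $attempt, $status_code) untouched,
- # and "$$" escapes to a literal "$", so $${status_code} renders as
- # ${status_code} in the generated script.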
-
-@AWSRetry.jittered_backoff()
-def manage_tags(match, new_tags, purge_tags, ec2):
- changed = False
- old_tags = boto3_tag_list_to_ansible_dict(match['Tags'])
- tags_to_set, tags_to_delete = compare_aws_tags(
- old_tags, new_tags,
- purge_tags=purge_tags,
- )
- if tags_to_set:
- ec2.create_tags(
- Resources=[match['InstanceId']],
- Tags=ansible_dict_to_boto3_tag_list(tags_to_set))
- changed |= True
- if tags_to_delete:
- delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete)
- ec2.delete_tags(
- Resources=[match['InstanceId']],
- Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values))
- changed |= True
- return changed
-
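- # Hedged example of the compare_aws_tags contract relied on above: with
- # old_tags={'Name': 'web', 'Env': 'dev'}, new_tags={'Name': 'web', 'Team': 'ops'}
- # and purge_tags=True, tags_to_set is {'Team': 'ops'} and tags_to_delete is ['Env'].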
-
-def build_volume_spec(params):
- volumes = params.get('volumes') or []
- for volume in volumes:
- if 'ebs' in volume:
- for int_value in ['volume_size', 'iops']:
- if int_value in volume['ebs']:
- volume['ebs'][int_value] = int(volume['ebs'][int_value])
- return [ec2_utils.snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes]
-
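- # A minimal sketch of the snake_case -> CamelCase mapping performed above
- # (sizes coerced to int first):
- #   build_volume_spec({'volumes': [{'device_name': '/dev/sda1',
- #                                   'ebs': {'volume_size': '16', 'delete_on_termination': True}}]})
- #   # -> [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeSize': 16, 'DeleteOnTermination': True}}]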
-
-def add_or_update_instance_profile(instance, desired_profile_name):
- instance_profile_setting = instance.get('IamInstanceProfile')
- if instance_profile_setting and desired_profile_name:
- if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')):
- # great, the profile we asked for is what's there
- return False
- else:
- desired_arn = determine_iam_role(desired_profile_name)
- if instance_profile_setting.get('Arn') == desired_arn:
- return False
- # update association
- ec2 = module.client('ec2')
- try:
- association = ec2.describe_iam_instance_profile_associations(Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}])
- except botocore.exceptions.ClientError as e:
- # check for InvalidAssociationID.NotFound
- module.fail_json_aws(e, "Could not find instance profile association")
- try:
- resp = ec2.replace_iam_instance_profile_association(
- AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'],
- IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)}
- )
- return True
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e, "Could not associate instance profile")
-
- if not instance_profile_setting and desired_profile_name:
- # create association
- ec2 = module.client('ec2')
- try:
- resp = ec2.associate_iam_instance_profile(
- IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)},
- InstanceId=instance['InstanceId']
- )
- return True
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e, "Could not associate new instance profile")
-
- return False
-
-
-def build_network_spec(params, ec2=None):
- """
- Returns list of interfaces [complex]
- Interface type: {
- 'AssociatePublicIpAddress': True|False,
- 'DeleteOnTermination': True|False,
- 'Description': 'string',
- 'DeviceIndex': 123,
- 'Groups': [
- 'string',
- ],
- 'Ipv6AddressCount': 123,
- 'Ipv6Addresses': [
- {
- 'Ipv6Address': 'string'
- },
- ],
- 'NetworkInterfaceId': 'string',
- 'PrivateIpAddress': 'string',
- 'PrivateIpAddresses': [
- {
- 'Primary': True|False,
- 'PrivateIpAddress': 'string'
- },
- ],
- 'SecondaryPrivateIpAddressCount': 123,
- 'SubnetId': 'string'
- },
- """
- if ec2 is None:
- ec2 = module.client('ec2')
-
- interfaces = []
- network = params.get('network') or {}
- if not network.get('interfaces'):
- # they only specified one interface
- spec = {
- 'DeviceIndex': 0,
- }
- if network.get('assign_public_ip') is not None:
- spec['AssociatePublicIpAddress'] = network['assign_public_ip']
-
- if params.get('vpc_subnet_id'):
- spec['SubnetId'] = params['vpc_subnet_id']
- else:
- default_vpc = get_default_vpc(ec2)
- if default_vpc is None:
- module.fail_json(
- msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance")
- else:
- sub = get_default_subnet(ec2, default_vpc)
- spec['SubnetId'] = sub['SubnetId']
-
- if network.get('private_ip_address'):
- spec['PrivateIpAddress'] = network['private_ip_address']
-
- if params.get('security_group') or params.get('security_groups'):
- groups = discover_security_groups(
- group=params.get('security_group'),
- groups=params.get('security_groups'),
- subnet_id=spec['SubnetId'],
- ec2=ec2
- )
- spec['Groups'] = [g['GroupId'] for g in groups]
- if network.get('description') is not None:
- spec['Description'] = network['description']
- # TODO more special snowflake network things
-
- return [spec]
-
- # handle list of `network.interfaces` options
- for idx, interface_params in enumerate(network.get('interfaces', [])):
- spec = {
- 'DeviceIndex': idx,
- }
-
- if isinstance(interface_params, string_types):
- # naive case where user gave
- # network_interfaces: [eni-1234, eni-4567, ....]
- # put into normal data structure so we don't dupe code
- interface_params = {'id': interface_params}
-
- if interface_params.get('id') is not None:
- # if an ID is provided, we don't want to set any other parameters.
- spec['NetworkInterfaceId'] = interface_params['id']
- interfaces.append(spec)
- continue
-
- spec['DeleteOnTermination'] = interface_params.get('delete_on_termination', True)
-
- if interface_params.get('ipv6_addresses'):
- spec['Ipv6Addresses'] = [{'Ipv6Address': a} for a in interface_params.get('ipv6_addresses', [])]
-
- if interface_params.get('private_ip_address'):
- spec['PrivateIpAddress'] = interface_params.get('private_ip_address')
-
- if interface_params.get('description'):
- spec['Description'] = interface_params.get('description')
-
- if interface_params.get('subnet_id', params.get('vpc_subnet_id')):
- spec['SubnetId'] = interface_params.get('subnet_id', params.get('vpc_subnet_id'))
- elif not spec.get('SubnetId') and not interface_params.get('id'):
- # TODO grab a subnet from default VPC
- raise ValueError('Failed to assign subnet to interface {0}'.format(interface_params))
-
- interfaces.append(spec)
- return interfaces
-
-
-def warn_if_public_ip_assignment_changed(instance):
- # This is a non-modifiable attribute.
- assign_public_ip = (module.params.get('network') or {}).get('assign_public_ip')
- if assign_public_ip is None:
- return
-
- # Check that public ip assignment is the same and warn if not
- public_dns_name = instance.get('PublicDnsName')
- if (public_dns_name and not assign_public_ip) or (assign_public_ip and not public_dns_name):
- module.warn(
- "Unable to modify public ip assignment to {0} for instance {1}. "
- "Whether or not to assign a public IP is determined during instance creation.".format(
- assign_public_ip, instance['InstanceId']))
-
-
-def warn_if_cpu_options_changed(instance):
- # This is a non-modifiable attribute.
- cpu_options = module.params.get('cpu_options')
- if cpu_options is None:
- return
-
- # Check that the CpuOptions set are the same and warn if not
- core_count_curr = instance['CpuOptions'].get('CoreCount')
- core_count = cpu_options.get('core_count')
- threads_per_core_curr = instance['CpuOptions'].get('ThreadsPerCore')
- threads_per_core = cpu_options.get('threads_per_core')
- if core_count_curr != core_count:
- module.warn(
- "Unable to modify core_count from {0} to {1}. "
- "Assigning a number of core is determinted during instance creation".format(
- core_count_curr, core_count))
-
- if threads_per_core_curr != threads_per_core:
- module.warn(
- "Unable to modify threads_per_core from {0} to {1}. "
- "Assigning a number of threads per core is determined during instance creation.".format(
- threads_per_core_curr, threads_per_core))
-
-
-def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, ec2=None):
- if ec2 is None:
- ec2 = module.client('ec2')
-
- if subnet_id is not None:
- try:
- sub = ec2.describe_subnets(SubnetIds=[subnet_id])
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
- module.fail_json(
- msg="Could not find subnet {0} to associate security groups. Please check the vpc_subnet_id and security_groups parameters.".format(
- subnet_id
- )
- )
- module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
- parent_vpc_id = sub['Subnets'][0]['VpcId']
-
- vpc = {
- 'Name': 'vpc-id',
- 'Values': [parent_vpc_id]
- }
-
- # because filter lists are AND in the security groups API,
- # make two separate requests for groups by ID and by name
- id_filters = [vpc]
- name_filters = [vpc]
-
- if group:
- name_filters.append(
- dict(
- Name='group-name',
- Values=[group]
- )
- )
- if group.startswith('sg-'):
- id_filters.append(
- dict(
- Name='group-id',
- Values=[group]
- )
- )
- if groups:
- name_filters.append(
- dict(
- Name='group-name',
- Values=groups
- )
- )
- if [g for g in groups if g.startswith('sg-')]:
- id_filters.append(
- dict(
- Name='group-id',
- Values=[g for g in groups if g.startswith('sg-')]
- )
- )
-
- found_groups = []
- for f_set in (id_filters, name_filters):
- if len(f_set) > 1:
- found_groups.extend(ec2.get_paginator(
- 'describe_security_groups'
- ).paginate(
- Filters=f_set
- ).search('SecurityGroups[]'))
- return list(dict((g['GroupId'], g) for g in found_groups).values())
-
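- # The dict keyed by GroupId above de-duplicates groups matched by both the
- # name and the ID queries, so a group such as 'sg-0123456' that satisfies
- # both filter sets is returned exactly once.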
-
-def build_top_level_options(params):
- spec = {}
- if params.get('image_id'):
- spec['ImageId'] = params['image_id']
- elif isinstance(params.get('image'), dict):
- image = params.get('image', {})
- spec['ImageId'] = image.get('id')
- if 'ramdisk' in image:
- spec['RamdiskId'] = image['ramdisk']
- if 'kernel' in image:
- spec['KernelId'] = image['kernel']
- if not spec.get('ImageId') and not params.get('launch_template'):
- module.fail_json(msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template.")
-
- if params.get('key_name') is not None:
- spec['KeyName'] = params.get('key_name')
- if params.get('user_data') is not None:
- spec['UserData'] = to_native(params.get('user_data'))
- elif params.get('tower_callback') is not None:
- spec['UserData'] = tower_callback_script(
- tower_conf=params.get('tower_callback'),
- windows=params.get('tower_callback').get('windows', False),
- passwd=params.get('tower_callback').get('set_password'),
- )
-
- if params.get('launch_template') is not None:
- spec['LaunchTemplate'] = {}
- if not (params.get('launch_template').get('id') or params.get('launch_template').get('name')):
- module.fail_json(msg="Could not create instance with launch template. Either launch_template.name or launch_template.id parameters are required")
-
- if params.get('launch_template').get('id') is not None:
- spec['LaunchTemplate']['LaunchTemplateId'] = params.get('launch_template').get('id')
- if params.get('launch_template').get('name') is not None:
- spec['LaunchTemplate']['LaunchTemplateName'] = params.get('launch_template').get('name')
- if params.get('launch_template').get('version') is not None:
- spec['LaunchTemplate']['Version'] = to_native(params.get('launch_template').get('version'))
-
- if params.get('detailed_monitoring', False):
- spec['Monitoring'] = {'Enabled': True}
- if params.get('cpu_credit_specification') is not None:
- spec['CreditSpecification'] = {'CpuCredits': params.get('cpu_credit_specification')}
- if params.get('tenancy') is not None:
- spec['Placement'] = {'Tenancy': params.get('tenancy')}
- if params.get('placement_group'):
- if 'Placement' in spec:
- spec['Placement']['GroupName'] = str(params.get('placement_group'))
- else:
- spec.setdefault('Placement', {'GroupName': str(params.get('placement_group'))})
- if params.get('ebs_optimized') is not None:
- spec['EbsOptimized'] = params.get('ebs_optimized')
- if params.get('instance_initiated_shutdown_behavior'):
- spec['InstanceInitiatedShutdownBehavior'] = params.get('instance_initiated_shutdown_behavior')
- if params.get('termination_protection') is not None:
- spec['DisableApiTermination'] = params.get('termination_protection')
- if params.get('cpu_options') is not None:
- spec['CpuOptions'] = {}
- spec['CpuOptions']['ThreadsPerCore'] = params.get('cpu_options').get('threads_per_core')
- spec['CpuOptions']['CoreCount'] = params.get('cpu_options').get('core_count')
- return spec
-
-
-def build_instance_tags(params, propagate_tags_to_volumes=True):
- tags = params.get('tags', {})
- if params.get('name') is not None:
- if tags is None:
- tags = {}
- tags['Name'] = params.get('name')
- return [
- {
- 'ResourceType': 'volume',
- 'Tags': ansible_dict_to_boto3_tag_list(tags),
- },
- {
- 'ResourceType': 'instance',
- 'Tags': ansible_dict_to_boto3_tag_list(tags),
- },
- ]
-
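- # Hedged illustration (assumed inputs): with params {'name': 'web', 'tags': {'Env': 'dev'}}
- # both the instance and its volumes are tagged with
- # [{'Key': 'Env', 'Value': 'dev'}, {'Key': 'Name', 'Value': 'web'}] (list order may vary).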
-
-def build_run_instance_spec(params, ec2=None):
- if ec2 is None:
- ec2 = module.client('ec2')
-
- spec = dict(
- ClientToken=uuid.uuid4().hex,
- MaxCount=1,
- MinCount=1,
- )
- # network parameters
- spec['NetworkInterfaces'] = build_network_spec(params, ec2)
- spec['BlockDeviceMappings'] = build_volume_spec(params)
- spec.update(**build_top_level_options(params))
- spec['TagSpecifications'] = build_instance_tags(params)
-
- # IAM profile
- if params.get('instance_role'):
- spec['IamInstanceProfile'] = dict(Arn=determine_iam_role(params.get('instance_role')))
-
- spec['InstanceType'] = params['instance_type']
- return spec
-
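- # The ClientToken above makes the eventual RunInstances call idempotent: AWS
- # treats retries carrying the same token as the same launch request, so a
- # retried call cannot start a duplicate instance.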
-
-def await_instances(ids, state='OK'):
- if not module.params.get('wait', True):
- # the user asked not to wait for anything
- return
-
- if module.check_mode:
- # In check mode, there is no change even if you wait.
- return
-
- state_opts = {
- 'OK': 'instance_status_ok',
- 'STOPPED': 'instance_stopped',
- 'TERMINATED': 'instance_terminated',
- 'EXISTS': 'instance_exists',
- 'RUNNING': 'instance_running',
- }
- if state not in state_opts:
- module.fail_json(msg="Cannot wait for state {0}, invalid state".format(state))
- waiter = module.client('ec2').get_waiter(state_opts[state])
- try:
- waiter.wait(
- InstanceIds=ids,
- WaiterConfig={
- 'Delay': 15,
- 'MaxAttempts': module.params.get('wait_timeout', 600) // 15,
- }
- )
- except botocore.exceptions.WaiterConfigError as e:
- module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format(
- to_native(e), ', '.join(ids), state))
- except botocore.exceptions.WaiterError as e:
- module.warn("Instances {0} took too long to reach state {1}. {2}".format(
- ', '.join(ids), state, to_native(e)))
-
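- # Waiter math note: with Delay=15 and MaxAttempts=wait_timeout//15 the waiter
- # polls every 15 seconds for roughly wait_timeout seconds in total (the
- # default of 600 yields 40 attempts).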
-
-def diff_instance_and_params(instance, params, ec2=None, skip=None):
- """boto3 instance obj, module params"""
- if ec2 is None:
- ec2 = module.client('ec2')
-
- if skip is None:
- skip = []
-
- changes_to_apply = []
- id_ = instance['InstanceId']
-
- ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value'])
-
- def value_wrapper(v):
- return {'Value': v}
-
- param_mappings = [
- ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper),
- ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper),
- # user data is an immutable property
- # ParamMapper('user_data', 'UserData', 'userData', value_wrapper),
- ]
-
- for mapping in param_mappings:
- if params.get(mapping.param_key) is not None and mapping.instance_key not in skip:
- value = AWSRetry.jittered_backoff()(ec2.describe_instance_attribute)(Attribute=mapping.attribute_name, InstanceId=id_)
- if params.get(mapping.param_key) is not None and value[mapping.instance_key]['Value'] != params.get(mapping.param_key):
- arguments = dict(
- InstanceId=instance['InstanceId'],
- # Attribute=mapping.attribute_name,
- )
- arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key))
- changes_to_apply.append(arguments)
-
- if (params.get('network') or {}).get('source_dest_check') is not None:
- # network.source_dest_check is nested, so needs to be treated separately
- check = bool(params.get('network').get('source_dest_check'))
- if instance['SourceDestCheck'] != check:
- changes_to_apply.append(dict(
- InstanceId=instance['InstanceId'],
- SourceDestCheck={'Value': check},
- ))
-
- return changes_to_apply
-
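- # For illustration, each returned change is shaped as keyword arguments for
- # modify_instance_attribute, e.g. (assumed values):
- #   {'InstanceId': 'i-012345678', 'DisableApiTermination': {'Value': True}}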
-
-def change_network_attachments(instance, params, ec2):
- if (params.get('network') or {}).get('interfaces') is not None:
- new_ids = []
- for inty in params.get('network').get('interfaces'):
- if isinstance(inty, dict) and 'id' in inty:
- new_ids.append(inty['id'])
- elif isinstance(inty, string_types):
- new_ids.append(inty)
- # network.interfaces can create the need to attach new interfaces
- old_ids = [inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']]
- to_attach = set(new_ids) - set(old_ids)
- for eni_id in to_attach:
- ec2.attach_network_interface(
- DeviceIndex=new_ids.index(eni_id),
- InstanceId=instance['InstanceId'],
- NetworkInterfaceId=eni_id,
- )
- return bool(len(to_attach))
- return False
-
-
-def find_instances(ec2, ids=None, filters=None):
- paginator = ec2.get_paginator('describe_instances')
- if ids:
- return list(paginator.paginate(
- InstanceIds=ids,
- ).search('Reservations[].Instances[]'))
- elif filters is None:
- module.fail_json(msg="No filters provided when they were required")
- elif filters is not None:
- for key in list(filters.keys()):
- if not key.startswith("tag:"):
- filters[key.replace("_", "-")] = filters.pop(key)
- return list(paginator.paginate(
- Filters=ansible_dict_to_boto3_filter_list(filters)
- ).search('Reservations[].Instances[]'))
- return []
-
-
-@AWSRetry.jittered_backoff()
-def get_default_vpc(ec2):
- vpcs = ec2.describe_vpcs(Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'}))
- if len(vpcs.get('Vpcs', [])):
- return vpcs.get('Vpcs')[0]
- return None
-
-
-@AWSRetry.jittered_backoff()
-def get_default_subnet(ec2, vpc, availability_zone=None):
- subnets = ec2.describe_subnets(
- Filters=ansible_dict_to_boto3_filter_list({
- 'vpc-id': vpc['VpcId'],
- 'state': 'available',
- 'default-for-az': 'true',
- })
- )
- if len(subnets.get('Subnets', [])):
- if availability_zone is not None:
- subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets'))
- if availability_zone in subs_by_az:
- return subs_by_az[availability_zone]
-
- # to have a deterministic sorting order, we sort by AZ so we'll always pick the `a` subnet first
- # there can only be one default-for-az subnet per AZ, so the AZ key is always unique in this list
- by_az = sorted(subnets.get('Subnets'), key=lambda s: s['AvailabilityZone'])
- return by_az[0]
- return None
-
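- # Hedged illustration of the deterministic pick above: with default-for-az
- # subnets in us-east-1b and us-east-1a and no availability_zone requested,
- # the us-east-1a subnet is returned (lexicographic sort on AvailabilityZone).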
-
-def ensure_instance_state(state, ec2=None):
- if ec2 is None:
- ec2 = module.client('ec2')
- if state in ('running', 'started'):
- changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING')
-
- if failed:
- module.fail_json(
- msg="Unable to start instances: {0}".format(failure_reason),
- reboot_success=list(changed),
- reboot_failed=failed)
-
- module.exit_json(
- msg='Instances started',
- reboot_success=list(changed),
- changed=bool(len(changed)),
- reboot_failed=[],
- instances=[pretty_instance(i) for i in instances],
- )
- elif state in ('restarted', 'rebooted'):
- changed, failed, instances, failure_reason = change_instance_state(
- filters=module.params.get('filters'),
- desired_state='STOPPED')
- changed, failed, instances, failure_reason = change_instance_state(
- filters=module.params.get('filters'),
- desired_state='RUNNING')
-
- if failed:
- module.fail_json(
- msg="Unable to restart instances: {0}".format(failure_reason),
- reboot_success=list(changed),
- reboot_failed=failed)
-
- module.exit_json(
- msg='Instances restarted',
- reboot_success=list(changed),
- changed=bool(len(changed)),
- reboot_failed=[],
- instances=[pretty_instance(i) for i in instances],
- )
- elif state in ('stopped',):
- changed, failed, instances, failure_reason = change_instance_state(
- filters=module.params.get('filters'),
- desired_state='STOPPED')
-
- if failed:
- module.fail_json(
- msg="Unable to stop instances: {0}".format(failure_reason),
- stop_success=list(changed),
- stop_failed=failed)
-
- module.exit_json(
- msg='Instances stopped',
- stop_success=list(changed),
- changed=bool(len(changed)),
- stop_failed=[],
- instances=[pretty_instance(i) for i in instances],
- )
- elif state in ('absent', 'terminated'):
- terminated, terminate_failed, instances, failure_reason = change_instance_state(
- filters=module.params.get('filters'),
- desired_state='TERMINATED')
-
- if terminate_failed:
- module.fail_json(
- msg="Unable to terminate instances: {0}".format(failure_reason),
- terminate_success=list(terminated),
- terminate_failed=terminate_failed)
- module.exit_json(
- msg='Instances terminated',
- terminate_success=list(terminated),
- changed=bool(len(terminated)),
- terminate_failed=[],
- instances=[pretty_instance(i) for i in instances],
- )
-
-
-@AWSRetry.jittered_backoff()
-def change_instance_state(filters, desired_state, ec2=None):
- """Takes STOPPED/RUNNING/TERMINATED"""
- if ec2 is None:
- ec2 = module.client('ec2')
-
- changed = set()
- instances = find_instances(ec2, filters=filters)
- to_change = set(i['InstanceId'] for i in instances if i['State']['Name'].upper() != desired_state)
- unchanged = set()
- failure_reason = ""
-
- for inst in instances:
- try:
- if desired_state == 'TERMINATED':
- if module.check_mode:
- changed.add(inst['InstanceId'])
- continue
-
- # TODO use a client-token to prevent double-sends of these start/stop/terminate commands
- # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html
- resp = ec2.terminate_instances(InstanceIds=[inst['InstanceId']])
- for i in resp['TerminatingInstances']:
- changed.add(i['InstanceId'])
- if desired_state == 'STOPPED':
- if inst['State']['Name'] in ('stopping', 'stopped'):
- unchanged.add(inst['InstanceId'])
- continue
-
- if module.check_mode:
- changed.add(inst['InstanceId'])
- continue
-
- resp = ec2.stop_instances(InstanceIds=[inst['InstanceId']])
- for i in resp['StoppingInstances']:
- changed.add(i['InstanceId'])
- if desired_state == 'RUNNING':
- if module.check_mode:
- changed.add(inst['InstanceId'])
- continue
-
- resp = ec2.start_instances(InstanceIds=[inst['InstanceId']])
- for i in resp['StartingInstances']:
- changed.add(i['InstanceId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- try:
- failure_reason = to_native(e.message)
- except AttributeError:
- failure_reason = to_native(e)
-
- if changed:
- await_instances(ids=list(changed) + list(unchanged), state=desired_state)
-
- change_failed = list(to_change - changed)
- instances = find_instances(ec2, ids=list(i['InstanceId'] for i in instances))
- return changed, change_failed, instances, failure_reason
-
-
-def pretty_instance(i):
- instance = camel_dict_to_snake_dict(i, ignore_list=['Tags'])
- instance['tags'] = boto3_tag_list_to_ansible_dict(i['Tags'])
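-    # Illustrative only: a raw boto3 instance dict such as
-    #   {'InstanceId': 'i-0abc', 'State': {'Name': 'running'}, 'Tags': [{'Key': 'Name', 'Value': 'web'}]}
-    # is returned to playbooks as
-    #   {'instance_id': 'i-0abc', 'state': {'name': 'running'}, 'tags': {'Name': 'web'}}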
- return instance
-
-
-def determine_iam_role(name_or_arn):
- if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
- return name_or_arn
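-    # e.g. 'arn:aws:iam::123456789012:instance-profile/myprofile' matches the pattern
-    # above and is returned as-is; a bare name like 'myprofile' is resolved via IAM below.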
- iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
- try:
- role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
- return role['InstanceProfile']['Arn']
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchEntity':
- module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
- module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
-
-
-def handle_existing(existing_matches, changed, ec2, state):
- if state in ('running', 'started') and [i for i in existing_matches if i['State']['Name'] != 'running']:
- ins_changed, failed, instances, failure_reason = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING')
- if failed:
- module.fail_json(msg="Couldn't start instances: {0}. Failure reason: {1}".format(instances, failure_reason))
- module.exit_json(
- changed=bool(len(ins_changed)) or changed,
- instances=[pretty_instance(i) for i in instances],
- instance_ids=[i['InstanceId'] for i in instances],
- )
- changes = diff_instance_and_params(existing_matches[0], module.params)
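-    # Each entry in `changes` is a kwargs dict for modify_instance_attribute, for
-    # example (illustrative) {'InstanceId': 'i-0abc', 'DisableApiTermination': {'Value': True}}.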
- for c in changes:
- AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c)
- changed |= bool(changes)
- changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role'))
- changed |= change_network_attachments(existing_matches[0], module.params, ec2)
- altered = find_instances(ec2, ids=[i['InstanceId'] for i in existing_matches])
- module.exit_json(
- changed=bool(len(changes)) or changed,
- instances=[pretty_instance(i) for i in altered],
- instance_ids=[i['InstanceId'] for i in altered],
- changes=changes,
- )
-
-
-def ensure_present(existing_matches, changed, ec2, state):
-    if existing_matches:
- try:
- handle_existing(existing_matches, changed, ec2, state)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(
- e, msg="Failed to handle existing instances {0}".format(', '.join([i['InstanceId'] for i in existing_matches])),
- # instances=[pretty_instance(i) for i in existing_matches],
- # instance_ids=[i['InstanceId'] for i in existing_matches],
- )
- try:
- instance_spec = build_run_instance_spec(module.params)
-        # If check mode is enabled, skip the actual launch and just report the spec.
- if module.check_mode:
- module.exit_json(
- changed=True,
- spec=instance_spec,
- )
- instance_response = run_instances(ec2, **instance_spec)
- instances = instance_response['Instances']
- instance_ids = [i['InstanceId'] for i in instances]
-
- for ins in instances:
- changes = diff_instance_and_params(ins, module.params, skip=['UserData', 'EbsOptimized'])
- for c in changes:
- try:
- AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c)))
-
- if not module.params.get('wait'):
- module.exit_json(
- changed=True,
- instance_ids=instance_ids,
- spec=instance_spec,
- )
- await_instances(instance_ids)
- instances = ec2.get_paginator('describe_instances').paginate(
- InstanceIds=instance_ids
- ).search('Reservations[].Instances[]')
-
- module.exit_json(
- changed=True,
- instances=[pretty_instance(i) for i in instances],
- instance_ids=instance_ids,
- spec=instance_spec,
- )
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed to create new EC2 instance")
-
-
-@AWSRetry.jittered_backoff()
-def run_instances(ec2, **instance_spec):
- try:
- return ec2.run_instances(**instance_spec)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'InvalidParameterValue' and "Invalid IAM Instance Profile ARN" in e.response['Error']['Message']:
-            # If the instance profile has just been created, it takes some time to become visible to EC2,
-            # so wait 10 seconds and retry run_instances once.
- time.sleep(10)
- return ec2.run_instances(**instance_spec)
- else:
- raise e
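-# Note: the AWSRetry.jittered_backoff decorator above retries throttling errors with
-# jittered exponential backoff; the explicit sleep only covers the one-off IAM
-# instance profile propagation delay.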
-
-
-def main():
- global module
- argument_spec = dict(
- state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']),
- wait=dict(default=True, type='bool'),
- wait_timeout=dict(default=600, type='int'),
- # count=dict(default=1, type='int'),
- image=dict(type='dict'),
- image_id=dict(type='str'),
- instance_type=dict(default='t2.micro', type='str'),
- user_data=dict(type='str'),
- tower_callback=dict(type='dict'),
- ebs_optimized=dict(type='bool'),
- vpc_subnet_id=dict(type='str', aliases=['subnet_id']),
- availability_zone=dict(type='str'),
- security_groups=dict(default=[], type='list'),
- security_group=dict(type='str'),
- instance_role=dict(type='str'),
- name=dict(type='str'),
- tags=dict(type='dict'),
- purge_tags=dict(type='bool', default=False),
- filters=dict(type='dict', default=None),
- launch_template=dict(type='dict'),
- key_name=dict(type='str'),
- cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']),
- cpu_options=dict(type='dict', options=dict(
- core_count=dict(type='int', required=True),
- threads_per_core=dict(type='int', choices=[1, 2], required=True)
- )),
- tenancy=dict(type='str', choices=['dedicated', 'default']),
- placement_group=dict(type='str'),
- instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']),
- termination_protection=dict(type='bool'),
- detailed_monitoring=dict(type='bool'),
- instance_ids=dict(default=[], type='list'),
- network=dict(default=None, type='dict'),
- volumes=dict(default=None, type='list'),
- )
- # running/present are synonyms
- # as are terminated/absent
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- mutually_exclusive=[
- ['security_groups', 'security_group'],
- ['availability_zone', 'vpc_subnet_id'],
- ['tower_callback', 'user_data'],
- ['image_id', 'image'],
- ],
- supports_check_mode=True
- )
-
- if module.params.get('network'):
- if module.params.get('network').get('interfaces'):
- if module.params.get('security_group'):
- module.fail_json(msg="Parameter network.interfaces can't be used with security_group")
- if module.params.get('security_groups'):
- module.fail_json(msg="Parameter network.interfaces can't be used with security_groups")
-
- state = module.params.get('state')
- ec2 = module.client('ec2')
- if module.params.get('filters') is None:
- filters = {
- # all states except shutting-down and terminated
- 'instance-state-name': ['pending', 'running', 'stopping', 'stopped']
- }
- if state == 'stopped':
- # only need to change instances that aren't already stopped
- filters['instance-state-name'] = ['stopping', 'pending', 'running']
-
- if isinstance(module.params.get('instance_ids'), string_types):
- filters['instance-id'] = [module.params.get('instance_ids')]
- elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')):
- filters['instance-id'] = module.params.get('instance_ids')
- else:
- if not module.params.get('vpc_subnet_id'):
- if module.params.get('network'):
- # grab AZ from one of the ENIs
- ints = module.params.get('network').get('interfaces')
- if ints:
- filters['network-interface.network-interface-id'] = []
- for i in ints:
- if isinstance(i, dict):
- i = i['id']
- filters['network-interface.network-interface-id'].append(i)
- else:
- sub = get_default_subnet(ec2, get_default_vpc(ec2), availability_zone=module.params.get('availability_zone'))
- filters['subnet-id'] = sub['SubnetId']
- else:
- filters['subnet-id'] = [module.params.get('vpc_subnet_id')]
-
- if module.params.get('name'):
- filters['tag:Name'] = [module.params.get('name')]
-
- if module.params.get('image_id'):
- filters['image-id'] = [module.params.get('image_id')]
- elif (module.params.get('image') or {}).get('id'):
- filters['image-id'] = [module.params.get('image', {}).get('id')]
-
- module.params['filters'] = filters
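-        # e.g. with name=web and image_id=ami-1234..., the derived filters resemble
-        # {'instance-state-name': [...], 'tag:Name': ['web'], 'image-id': ['ami-1234...']}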
-
- if module.params.get('cpu_options') and not module.botocore_at_least('1.10.16'):
- module.fail_json(msg="cpu_options is only supported with botocore >= 1.10.16")
-
- existing_matches = find_instances(ec2, filters=module.params.get('filters'))
- changed = False
-
- if state not in ('terminated', 'absent') and existing_matches:
- for match in existing_matches:
- warn_if_public_ip_assignment_changed(match)
- warn_if_cpu_options_changed(match)
- tags = module.params.get('tags') or {}
- name = module.params.get('name')
- if name:
- tags['Name'] = name
- changed |= manage_tags(match, tags, module.params.get('purge_tags', False), ec2)
-
- if state in ('present', 'running', 'started'):
- ensure_present(existing_matches=existing_matches, changed=changed, ec2=ec2, state=state)
- elif state in ('restarted', 'rebooted', 'stopped', 'absent', 'terminated'):
- if existing_matches:
- ensure_instance_state(state, ec2)
- else:
- module.exit_json(
- msg='No matching instances found',
- changed=False,
- instances=[],
- )
- else:
- module.fail_json(msg="We don't handle the state {0}".format(state))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_instance_info.py b/lib/ansible/modules/cloud/amazon/ec2_instance_info.py
deleted file mode 100644
index 7615b958d3..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_instance_info.py
+++ /dev/null
@@ -1,571 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: ec2_instance_info
-short_description: Gather information about ec2 instances in AWS
-description:
- - Gather information about ec2 instances in AWS
- - This module was called C(ec2_instance_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.4"
-author:
- - Michael Schuett (@michaeljs1990)
- - Rob White (@wimnat)
-requirements: [ "boto3", "botocore" ]
-options:
- instance_ids:
- description:
- - If you specify one or more instance IDs, only instances that have the specified IDs are returned.
- required: false
- version_added: 2.4
- type: list
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
- U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters. Filter
- names and values are case sensitive.
- required: false
- default: {}
- type: dict
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all instances
-- ec2_instance_info:
-
-# Gather information about all instances in AZ ap-southeast-2a
-- ec2_instance_info:
- filters:
- availability-zone: ap-southeast-2a
-
-# Gather information about a particular instance using ID
-- ec2_instance_info:
- instance_ids:
- - i-12345678
-
-# Gather information about any instance with a tag key Name and value Example
-- ec2_instance_info:
- filters:
- "tag:Name": Example
-
-# Gather information about any instance in states "shutting-down", "stopping", "stopped"
-- ec2_instance_info:
- filters:
- instance-state-name: [ "shutting-down", "stopping", "stopped" ]
-
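-# Register the result for use in later tasks (illustrative)
-- ec2_instance_info:
-    filters:
-      "tag:Name": Example
-  register: ec2_info
-
-- debug:
-    msg: "{{ item.instance_id }}"
-  loop: "{{ ec2_info.instances }}"
-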
-'''
-
-RETURN = '''
-instances:
- description: a list of ec2 instances
- returned: always
- type: complex
- contains:
- ami_launch_index:
- description: The AMI launch index, which can be used to find this instance in the launch group.
- returned: always
- type: int
- sample: 0
- architecture:
- description: The architecture of the image
- returned: always
- type: str
- sample: x86_64
- block_device_mappings:
- description: Any block device mapping entries for the instance.
- returned: always
- type: complex
- contains:
- device_name:
- description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
- returned: always
- type: str
- sample: /dev/sdh
- ebs:
- description: Parameters used to automatically set up EBS volumes when the instance is launched.
- returned: always
- type: complex
- contains:
- attach_time:
- description: The time stamp when the attachment initiated.
- returned: always
- type: str
- sample: "2017-03-23T22:51:24+00:00"
- delete_on_termination:
- description: Indicates whether the volume is deleted on instance termination.
- returned: always
- type: bool
- sample: true
- status:
- description: The attachment state.
- returned: always
- type: str
- sample: attached
- volume_id:
- description: The ID of the EBS volume
- returned: always
- type: str
- sample: vol-12345678
- cpu_options:
- description: The CPU options set for the instance.
- returned: always if botocore version >= 1.10.16
- type: complex
- contains:
- core_count:
- description: The number of CPU cores for the instance.
- returned: always
- type: int
- sample: 1
- threads_per_core:
-                    description: The number of threads per CPU core. On supported instances, a value of 1 means Intel Hyper-Threading Technology is disabled.
- returned: always
- type: int
- sample: 1
- client_token:
- description: The idempotency token you provided when you launched the instance, if applicable.
- returned: always
- type: str
- sample: mytoken
- ebs_optimized:
- description: Indicates whether the instance is optimized for EBS I/O.
- returned: always
- type: bool
- sample: false
- hypervisor:
- description: The hypervisor type of the instance.
- returned: always
- type: str
- sample: xen
- iam_instance_profile:
- description: The IAM instance profile associated with the instance, if applicable.
- returned: always
- type: complex
- contains:
- arn:
- description: The Amazon Resource Name (ARN) of the instance profile.
- returned: always
- type: str
- sample: "arn:aws:iam::000012345678:instance-profile/myprofile"
- id:
- description: The ID of the instance profile
- returned: always
- type: str
- sample: JFJ397FDG400FG9FD1N
- image_id:
- description: The ID of the AMI used to launch the instance.
- returned: always
- type: str
- sample: ami-0011223344
- instance_id:
- description: The ID of the instance.
- returned: always
- type: str
- sample: i-012345678
- instance_type:
- description: The instance type size of the running instance.
- returned: always
- type: str
- sample: t2.micro
- key_name:
- description: The name of the key pair, if this instance was launched with an associated key pair.
- returned: always
- type: str
- sample: my-key
- launch_time:
- description: The time the instance was launched.
- returned: always
- type: str
- sample: "2017-03-23T22:51:24+00:00"
- monitoring:
- description: The monitoring for the instance.
- returned: always
- type: complex
- contains:
- state:
- description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
- returned: always
- type: str
- sample: disabled
- network_interfaces:
- description: One or more network interfaces for the instance.
- returned: always
- type: complex
- contains:
- association:
- description: The association information for an Elastic IPv4 associated with the network interface.
- returned: always
- type: complex
- contains:
- ip_owner_id:
- description: The ID of the owner of the Elastic IP address.
- returned: always
- type: str
- sample: amazon
- public_dns_name:
- description: The public DNS name.
- returned: always
- type: str
- sample: ""
- public_ip:
- description: The public IP address or Elastic IP address bound to the network interface.
- returned: always
- type: str
- sample: 1.2.3.4
- attachment:
- description: The network interface attachment.
- returned: always
- type: complex
- contains:
- attach_time:
- description: The time stamp when the attachment initiated.
- returned: always
- type: str
- sample: "2017-03-23T22:51:24+00:00"
- attachment_id:
- description: The ID of the network interface attachment.
- returned: always
- type: str
- sample: eni-attach-3aff3f
- delete_on_termination:
- description: Indicates whether the network interface is deleted when the instance is terminated.
- returned: always
- type: bool
- sample: true
- device_index:
- description: The index of the device on the instance for the network interface attachment.
- returned: always
- type: int
- sample: 0
- status:
- description: The attachment state.
- returned: always
- type: str
- sample: attached
- description:
- description: The description.
- returned: always
- type: str
- sample: My interface
- groups:
- description: One or more security groups.
- returned: always
- type: list
- elements: dict
- contains:
- group_id:
- description: The ID of the security group.
- returned: always
- type: str
- sample: sg-abcdef12
- group_name:
- description: The name of the security group.
- returned: always
- type: str
- sample: mygroup
- ipv6_addresses:
- description: One or more IPv6 addresses associated with the network interface.
- returned: always
- type: list
- elements: dict
- contains:
- ipv6_address:
- description: The IPv6 address.
- returned: always
- type: str
- sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
- mac_address:
- description: The MAC address.
- returned: always
- type: str
- sample: "00:11:22:33:44:55"
- network_interface_id:
- description: The ID of the network interface.
- returned: always
- type: str
- sample: eni-01234567
- owner_id:
- description: The AWS account ID of the owner of the network interface.
- returned: always
- type: str
- sample: 01234567890
- private_ip_address:
- description: The IPv4 address of the network interface within the subnet.
- returned: always
- type: str
- sample: 10.0.0.1
- private_ip_addresses:
- description: The private IPv4 addresses associated with the network interface.
- returned: always
- type: list
- elements: dict
- contains:
- association:
- description: The association information for an Elastic IP address (IPv4) associated with the network interface.
- returned: always
- type: complex
- contains:
- ip_owner_id:
- description: The ID of the owner of the Elastic IP address.
- returned: always
- type: str
- sample: amazon
- public_dns_name:
- description: The public DNS name.
- returned: always
- type: str
- sample: ""
- public_ip:
- description: The public IP address or Elastic IP address bound to the network interface.
- returned: always
- type: str
- sample: 1.2.3.4
- primary:
- description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
- returned: always
- type: bool
- sample: true
- private_ip_address:
- description: The private IPv4 address of the network interface.
- returned: always
- type: str
- sample: 10.0.0.1
- source_dest_check:
- description: Indicates whether source/destination checking is enabled.
- returned: always
- type: bool
- sample: true
- status:
- description: The status of the network interface.
- returned: always
- type: str
- sample: in-use
- subnet_id:
- description: The ID of the subnet for the network interface.
- returned: always
- type: str
- sample: subnet-0123456
- vpc_id:
- description: The ID of the VPC for the network interface.
- returned: always
- type: str
- sample: vpc-0123456
- placement:
- description: The location where the instance launched, if applicable.
- returned: always
- type: complex
- contains:
- availability_zone:
- description: The Availability Zone of the instance.
- returned: always
- type: str
- sample: ap-southeast-2a
- group_name:
- description: The name of the placement group the instance is in (for cluster compute instances).
- returned: always
- type: str
- sample: ""
- tenancy:
- description: The tenancy of the instance (if the instance is running in a VPC).
- returned: always
- type: str
- sample: default
- private_dns_name:
- description: The private DNS name.
- returned: always
- type: str
- sample: ip-10-0-0-1.ap-southeast-2.compute.internal
- private_ip_address:
- description: The IPv4 address of the network interface within the subnet.
- returned: always
- type: str
- sample: 10.0.0.1
- product_codes:
- description: One or more product codes.
- returned: always
- type: list
- elements: dict
- contains:
- product_code_id:
- description: The product code.
- returned: always
- type: str
- sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
- product_code_type:
- description: The type of product code.
- returned: always
- type: str
- sample: marketplace
- public_dns_name:
- description: The public DNS name assigned to the instance.
- returned: always
- type: str
- sample:
- public_ip_address:
- description: The public IPv4 address assigned to the instance
- returned: always
- type: str
- sample: 52.0.0.1
- root_device_name:
- description: The device name of the root device
- returned: always
- type: str
- sample: /dev/sda1
- root_device_type:
- description: The type of root device used by the AMI.
- returned: always
- type: str
- sample: ebs
- security_groups:
- description: One or more security groups for the instance.
- returned: always
- type: list
- elements: dict
- contains:
- group_id:
- description: The ID of the security group.
- returned: always
- type: str
- sample: sg-0123456
- group_name:
- description: The name of the security group.
- returned: always
- type: str
- sample: my-security-group
- source_dest_check:
- description: Indicates whether source/destination checking is enabled.
- returned: always
- type: bool
- sample: true
- state:
- description: The current state of the instance.
- returned: always
- type: complex
- contains:
- code:
- description: The low byte represents the state.
- returned: always
- type: int
- sample: 16
- name:
- description: The name of the state.
- returned: always
- type: str
- sample: running
- state_transition_reason:
- description: The reason for the most recent state transition.
- returned: always
- type: str
- sample:
- subnet_id:
- description: The ID of the subnet in which the instance is running.
- returned: always
- type: str
- sample: subnet-00abcdef
- tags:
- description: Any tags assigned to the instance.
- returned: always
- type: dict
- sample:
- virtualization_type:
- description: The type of virtualization of the AMI.
- returned: always
- type: str
- sample: hvm
- vpc_id:
- description: The ID of the VPC the instance is in.
- returned: always
-      type: str
- sample: vpc-0011223344
-'''
-
-import traceback
-
-try:
- import boto3
- from botocore.exceptions import ClientError
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
- boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
- ec2_argument_spec, get_aws_connection_info)
-
-
-def list_ec2_instances(connection, module):
-
- instance_ids = module.params.get("instance_ids")
- filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
-
- try:
- reservations_paginator = connection.get_paginator('describe_instances')
- reservations = reservations_paginator.paginate(InstanceIds=instance_ids, Filters=filters).build_full_result()
- except ClientError as e:
- module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
- # Get instances from reservations
- instances = []
- for reservation in reservations['Reservations']:
-        instances.extend(reservation['Instances'])
-
-    # Turn the boto3 result into ansible_friendly_snaked_names
- snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances]
-
-    # Turn the boto3 result into an ansible-friendly tag dictionary
- for instance in snaked_instances:
- instance['tags'] = boto3_tag_list_to_ansible_dict(instance.get('tags', []), 'key', 'value')
-
- module.exit_json(instances=snaked_instances)
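-# Illustrative note: ansible_dict_to_boto3_filter_list({'instance-state-name': ['running']})
-# yields [{'Name': 'instance-state-name', 'Values': ['running']}], the filter shape
-# that describe_instances expects.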
-
-
-def main():
-
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- instance_ids=dict(default=[], type='list'),
- filters=dict(default={}, type='dict')
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive=[
- ['instance_ids', 'filters']
- ],
- supports_check_mode=True
- )
- if module._name == 'ec2_instance_facts':
- module.deprecate("The 'ec2_instance_facts' module has been renamed to 'ec2_instance_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-
- if region:
- connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
- else:
- module.fail_json(msg="region must be specified")
-
- list_ec2_instances(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_launch_template.py b/lib/ansible/modules/cloud/amazon/ec2_launch_template.py
deleted file mode 100644
index d81f6fec25..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_launch_template.py
+++ /dev/null
@@ -1,702 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = '''
----
-module: ec2_launch_template
-version_added: "2.8"
-short_description: Manage EC2 launch templates
-description:
- - Create, modify, and delete EC2 Launch Templates, which can be used to
- create individual instances or with Autoscaling Groups.
- - The I(ec2_instance) and I(ec2_asg) modules can, instead of specifying all
- parameters on those tasks, be passed a Launch Template which contains
- settings like instance size, disk type, subnet, and more.
-requirements:
- - botocore
- - boto3 >= 1.6.0
-extends_documentation_fragment:
- - aws
- - ec2
-author:
- - Ryan Scott Brown (@ryansb)
-options:
- template_id:
- description:
-      - The ID of the launch template. This can be used for all cases except creating a new Launch Template.
- aliases: [id]
- type: str
- template_name:
- description:
- - The template name. This must be unique in the region-account combination you are using.
- aliases: [name]
- type: str
- default_version:
- description:
-      - The version to make the default when users launch new instances from this template. By default, the latest version becomes the default.
- type: str
- default: latest
- state:
- description:
- - Whether the launch template should exist or not.
- - Deleting specific versions of a launch template is not supported at this time.
- choices: [present, absent]
- default: present
- type: str
- block_device_mappings:
- description:
- - The block device mapping. Supplying both a snapshot ID and an encryption
- value as arguments for block-device mapping results in an error. This is
- because only blank volumes can be encrypted on start, and these are not
- created from a snapshot. If a snapshot is the basis for the volume, it
- contains data by definition and its encryption status cannot be changed
- using this action.
- type: list
- elements: dict
- suboptions:
- device_name:
- description: The device name (for example, /dev/sdh or xvdh).
- type: str
- no_device:
- description: Suppresses the specified device included in the block device mapping of the AMI.
- type: str
- virtual_name:
- description: >
- The virtual device name (ephemeralN). Instance store volumes are
- numbered starting from 0. An instance type with 2 available instance
- store volumes can specify mappings for ephemeral0 and ephemeral1. The
- number of available instance store volumes depends on the instance
- type. After you connect to the instance, you must mount the volume.
- type: str
- ebs:
- description: Parameters used to automatically set up EBS volumes when the instance is launched.
- type: dict
- suboptions:
- delete_on_termination:
- description: Indicates whether the EBS volume is deleted on instance termination.
- type: bool
- encrypted:
- description: >
- Indicates whether the EBS volume is encrypted. Encrypted volumes
- can only be attached to instances that support Amazon EBS
- encryption. If you are creating a volume from a snapshot, you
- can't specify an encryption value.
- type: bool
- iops:
- description:
- - The number of I/O operations per second (IOPS) that the volume
- supports. For io1, this represents the number of IOPS that are
- provisioned for the volume. For gp2, this represents the baseline
- performance of the volume and the rate at which the volume
- accumulates I/O credits for bursting. For more information about
- General Purpose SSD baseline performance, I/O credits, and
- bursting, see Amazon EBS Volume Types in the Amazon Elastic
- Compute Cloud User Guide.
- - >
- Condition: This parameter is required for requests to create io1
- volumes; it is not used in requests to create gp2, st1, sc1, or
- standard volumes.
- type: int
- kms_key_id:
- description: The ARN of the AWS Key Management Service (AWS KMS) CMK used for encryption.
- type: str
- snapshot_id:
- description: The ID of the snapshot to create the volume from.
- type: str
- volume_size:
- description:
- - The size of the volume, in GiB.
- - "Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size."
- type: int
- volume_type:
- description: The volume type
- type: str
- cpu_options:
- description:
- - Choose CPU settings for the EC2 instances that will be created with this template.
- - For more information, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html)
- type: dict
- suboptions:
- core_count:
- description: The number of CPU cores for the instance.
- type: int
- threads_per_core:
- description: >
- The number of threads per CPU core. To disable Intel Hyper-Threading
- Technology for the instance, specify a value of 1. Otherwise, specify
- the default value of 2.
- type: int
- credit_specification:
- description: The credit option for CPU usage of the instance. Valid for T2 or T3 instances only.
- type: dict
- suboptions:
- cpu_credits:
- description: >
- The credit option for CPU usage of a T2 or T3 instance. Valid values
- are C(standard) and C(unlimited).
- type: str
- disable_api_termination:
- description: >
- This helps protect instances from accidental termination. If set to true,
- you can't terminate the instance using the Amazon EC2 console, CLI, or
- API. To change this attribute to false after launch, use
- I(ModifyInstanceAttribute).
- type: bool
- ebs_optimized:
- description: >
- Indicates whether the instance is optimized for Amazon EBS I/O. This
- optimization provides dedicated throughput to Amazon EBS and an optimized
- configuration stack to provide optimal Amazon EBS I/O performance. This
- optimization isn't available with all instance types. Additional usage
- charges apply when using an EBS-optimized instance.
- type: bool
- elastic_gpu_specifications:
- type: list
- elements: dict
- description: Settings for Elastic GPU attachments. See U(https://aws.amazon.com/ec2/elastic-gpus/) for details.
- suboptions:
- type:
- description: The type of Elastic GPU to attach
- type: str
- iam_instance_profile:
- description: >
- The name or ARN of an IAM instance profile. Requires permissions to
- describe existing instance roles to confirm ARN is properly formed.
- type: str
- image_id:
- description: >
- The AMI ID to use for new instances launched with this template. This
- value is region-dependent since AMIs are not global resources.
- type: str
- instance_initiated_shutdown_behavior:
- description: >
- Indicates whether an instance stops or terminates when you initiate
- shutdown from the instance using the operating system shutdown command.
- choices: [stop, terminate]
- type: str
- instance_market_options:
- description: Options for alternative instance markets, currently only the spot market is supported.
- type: dict
- suboptions:
- market_type:
- description: The market type. This should always be 'spot'.
- type: str
- spot_options:
- description: Spot-market specific settings.
- type: dict
- suboptions:
- block_duration_minutes:
- description: >
- The required duration for the Spot Instances (also known as Spot
- blocks), in minutes. This value must be a multiple of 60 (60,
- 120, 180, 240, 300, or 360).
- type: int
- instance_interruption_behavior:
- description: The behavior when a Spot Instance is interrupted. The default is C(terminate).
- choices: [hibernate, stop, terminate]
- type: str
- max_price:
- description: The highest hourly price you're willing to pay for this Spot Instance.
- type: str
- spot_instance_type:
- description: The request type to send.
- choices: [one-time, persistent]
- type: str
- instance_type:
- description: >
- The instance type, such as C(c5.2xlarge). For a full list of instance types, see
- U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html).
- type: str
- kernel_id:
- description: >
- The ID of the kernel. We recommend that you use PV-GRUB instead of
- kernels and RAM disks. For more information, see
- U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)
- type: str
- key_name:
- description:
- - The name of the key pair. You can create a key pair using M(ec2_key).
- - If you do not specify a key pair, you can't connect to the instance
- unless you choose an AMI that is configured to allow users another way to
- log in.
- type: str
- monitoring:
- description: Settings for instance monitoring.
- type: dict
- suboptions:
- enabled:
- type: bool
- description: Whether to turn on detailed monitoring for new instances. This will incur extra charges.
- network_interfaces:
- description: One or more network interfaces.
- type: list
- elements: dict
- suboptions:
- associate_public_ip_address:
- description: Associates a public IPv4 address with eth0 for a new network interface.
- type: bool
- delete_on_termination:
- description: Indicates whether the network interface is deleted when the instance is terminated.
- type: bool
- description:
- description: A description for the network interface.
- type: str
- device_index:
- description: The device index for the network interface attachment.
- type: int
- groups:
- description: List of security group IDs to include on this instance.
- type: list
- elements: str
- ipv6_address_count:
- description: >
- The number of IPv6 addresses to assign to a network interface. Amazon
- EC2 automatically selects the IPv6 addresses from the subnet range.
- You can't use this option if specifying the I(ipv6_addresses) option.
- type: int
- ipv6_addresses:
- description: >
- A list of one or more specific IPv6 addresses from the IPv6 CIDR
- block range of your subnet. You can't use this option if you're
- specifying the I(ipv6_address_count) option.
- type: list
- elements: str
- network_interface_id:
- description: The eni ID of a network interface to attach.
- type: str
- private_ip_address:
- description: The primary private IPv4 address of the network interface.
- type: str
- subnet_id:
- description: The ID of the subnet for the network interface.
- type: str
- placement:
- description: The placement group settings for the instance.
- type: dict
- suboptions:
- affinity:
- description: The affinity setting for an instance on a Dedicated Host.
- type: str
- availability_zone:
- description: The Availability Zone for the instance.
- type: str
- group_name:
- description: The name of the placement group for the instance.
- type: str
- host_id:
- description: The ID of the Dedicated Host for the instance.
- type: str
- tenancy:
- description: >
- The tenancy of the instance (if the instance is running in a VPC). An
- instance with a tenancy of dedicated runs on single-tenant hardware.
- type: str
- ram_disk_id:
- description: >
- The ID of the RAM disk to launch the instance with. We recommend that you
- use PV-GRUB instead of kernels and RAM disks. For more information, see
- U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html)
- type: str
- security_group_ids:
- description: A list of security group IDs (VPC or EC2-Classic) that the new instances will be added to.
- type: list
- elements: str
- security_groups:
- description: A list of security group names (VPC or EC2-Classic) that the new instances will be added to.
- type: list
- elements: str
- tags:
- type: dict
- description:
- - A set of key-value pairs to be applied to resources when this Launch Template is used.
- - "Tag key constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with I(aws:)"
- - "Tag value constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters."
- user_data:
- description: >
- The Base64-encoded user data to make available to the instance. For more information, see the Linux
- U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and Windows
- U(http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data)
- documentation on user-data.
- type: str
-'''
-
-EXAMPLES = '''
-- name: Create an ec2 launch template
- ec2_launch_template:
- name: "my_template"
- image_id: "ami-04b762b4289fba92b"
- key_name: my_ssh_key
- instance_type: t2.micro
- iam_instance_profile: myTestProfile
- disable_api_termination: true
-
-- name: >
- Create a new version of an existing ec2 launch template with a different instance type,
- while leaving an older version as the default version
- ec2_launch_template:
- name: "my_template"
- default_version: 1
- instance_type: c5.4xlarge
-
-- name: Delete an ec2 launch template
- ec2_launch_template:
- name: "my_template"
- state: absent
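-
-# A further illustrative example (all values are placeholders) using the
-# block_device_mappings option documented above
-- name: Create a launch template with an encrypted gp2 root volume
-  ec2_launch_template:
-    name: "my_template"
-    image_id: "ami-04b762b4289fba92b"
-    instance_type: t2.micro
-    block_device_mappings:
-      - device_name: /dev/sda1
-        ebs:
-          volume_size: 20
-          volume_type: gp2
-          encrypted: true
-          delete_on_termination: true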
-
-# This module does not yet allow deletion of specific versions of launch templates
-'''
-
-RETURN = '''
-latest_version:
- description: Latest available version of the launch template
- returned: when state=present
- type: int
-default_version:
- description: The version that will be used if only the template name is specified. Often this is the same as the latest version, but not always.
- returned: when state=present
- type: int
-'''
-import re
-from uuid import uuid4
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict, snake_dict_to_camel_dict
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, boto3_tag_list_to_ansible_dict
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError, WaiterError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def determine_iam_role(module, name_or_arn):
- if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
- return name_or_arn
- iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
- try:
- role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
- return {'arn': role['InstanceProfile']['Arn']}
- except is_boto3_error_code('NoSuchEntity') as e:
- module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
- except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
-
-
-def existing_templates(module):
- ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
- matches = None
- try:
- if module.params.get('template_id'):
- matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')])
- elif module.params.get('template_name'):
- matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')])
- except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e:
- # no named template was found, return nothing/empty versions
- return None, []
- except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-....`'.format(
-            module.params.get('template_id')))
- except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e: # pylint: disable=duplicate-except
- module.fail_json_aws(
- e, msg='Launch template with ID {0} could not be found, please supply a name '
-            'instead so that a new template can be created'.format(module.params.get('template_id')))
- except (ClientError, BotoCoreError, WaiterError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.')
- else:
- template = matches['LaunchTemplates'][0]
- template_id, template_version, template_default = template['LaunchTemplateId'], template['LatestVersionNumber'], template['DefaultVersionNumber']
- try:
- return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id)['LaunchTemplateVersions']
- except (ClientError, BotoCoreError, WaiterError) as e:
- module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(template['LaunchTemplateName'], template_id))
-
-
-def params_to_launch_data(module, template_params):
- if template_params.get('tags'):
- template_params['tag_specifications'] = [
- {
- 'resource_type': r_type,
- 'tags': [
- {'Key': k, 'Value': v} for k, v
- in template_params['tags'].items()
- ]
- }
- for r_type in ('instance', 'volume')
- ]
- del template_params['tags']
- if module.params.get('iam_instance_profile'):
- template_params['iam_instance_profile'] = determine_iam_role(module, module.params['iam_instance_profile'])
- params = snake_dict_to_camel_dict(
- dict((k, v) for k, v in template_params.items() if v is not None),
- capitalize_first=True,
- )
- return params
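-# For instance, {'instance_type': 't2.micro', 'ebs_optimized': True} becomes
-# {'InstanceType': 't2.micro', 'EbsOptimized': True} after snake_dict_to_camel_dict
-# with capitalize_first=True.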
-
-
-def delete_template(module):
- ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
- template, template_versions = existing_templates(module)
- deleted_versions = []
- if template or template_versions:
- non_default_versions = [to_text(t['VersionNumber']) for t in template_versions if not t['DefaultVersion']]
- if non_default_versions:
- try:
- v_resp = ec2.delete_launch_template_versions(
- LaunchTemplateId=template['LaunchTemplateId'],
- Versions=non_default_versions,
- )
- if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']:
- module.warn('Failed to delete template versions {0} on launch template {1}'.format(
- v_resp['UnsuccessfullyDeletedLaunchTemplateVersions'],
- template['LaunchTemplateId'],
- ))
- deleted_versions = [camel_dict_to_snake_dict(v) for v in v_resp['SuccessfullyDeletedLaunchTemplateVersions']]
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Could not delete existing versions of the launch template {0}".format(template['LaunchTemplateId']))
- try:
- resp = ec2.delete_launch_template(
- LaunchTemplateId=template['LaunchTemplateId'],
- )
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template['LaunchTemplateId']))
- return {
- 'deleted_versions': deleted_versions,
- 'deleted_template': camel_dict_to_snake_dict(resp['LaunchTemplate']),
- 'changed': True,
- }
- else:
- return {'changed': False}
-
-
-def create_or_update(module, template_options):
- ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidLaunchTemplateId.NotFound']))
- template, template_versions = existing_templates(module)
- out = {}
- lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options))
- if not (template or template_versions):
- # create a full new one
- try:
- resp = ec2.create_launch_template(
- LaunchTemplateName=module.params['template_name'],
- LaunchTemplateData=lt_data,
- ClientToken=uuid4().hex,
- aws_retry=True,
- )
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create launch template")
- template, template_versions = existing_templates(module)
- out['changed'] = True
- elif template and template_versions:
- most_recent = sorted(template_versions, key=lambda x: x['VersionNumber'])[-1]
- if lt_data == most_recent['LaunchTemplateData']:
- out['changed'] = False
- return out
- try:
- resp = ec2.create_launch_template_version(
- LaunchTemplateId=template['LaunchTemplateId'],
- LaunchTemplateData=lt_data,
- ClientToken=uuid4().hex,
- aws_retry=True,
- )
- if module.params.get('default_version') in (None, ''):
- # no need to do anything, leave the existing version as default
- pass
- elif module.params.get('default_version') == 'latest':
- set_default = ec2.modify_launch_template(
- LaunchTemplateId=template['LaunchTemplateId'],
- DefaultVersion=to_text(resp['LaunchTemplateVersion']['VersionNumber']),
- ClientToken=uuid4().hex,
- aws_retry=True,
- )
- else:
- try:
- int(module.params.get('default_version'))
- except ValueError:
- module.fail_json(msg='default_version param was not a valid integer, got "{0}"'.format(module.params.get('default_version')))
- set_default = ec2.modify_launch_template(
- LaunchTemplateId=template['LaunchTemplateId'],
- DefaultVersion=to_text(int(module.params.get('default_version'))),
- ClientToken=uuid4().hex,
- aws_retry=True,
- )
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create subsequent launch template version")
- template, template_versions = existing_templates(module)
- out['changed'] = True
- return out
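-# Behavioral note: default_version='latest' promotes each newly created version to
-# the default, while an explicit integer pins that version even as newer ones are added.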
-
-
-def format_module_output(module):
- output = {}
- template, template_versions = existing_templates(module)
- template = camel_dict_to_snake_dict(template)
- template_versions = [camel_dict_to_snake_dict(v) for v in template_versions]
- for v in template_versions:
- for ts in (v['launch_template_data'].get('tag_specifications') or []):
- ts['tags'] = boto3_tag_list_to_ansible_dict(ts.pop('tags'))
- output.update(dict(template=template, versions=template_versions))
- output['default_template'] = [
- v for v in template_versions
- if v.get('default_version')
- ][0]
- output['latest_template'] = [
- v for v in template_versions
- if (
- v.get('version_number') and
- int(v['version_number']) == int(template['latest_version_number'])
- )
- ][0]
- if "version_number" in output['default_template']:
- output['default_version'] = output['default_template']['version_number']
- if "version_number" in output['latest_template']:
- output['latest_version'] = output['latest_template']['version_number']
- return output
-
-
-def main():
- template_options = dict(
- block_device_mappings=dict(
- type='list',
- options=dict(
- device_name=dict(),
- ebs=dict(
- type='dict',
- options=dict(
- delete_on_termination=dict(type='bool'),
- encrypted=dict(type='bool'),
- iops=dict(type='int'),
- kms_key_id=dict(),
- snapshot_id=dict(),
- volume_size=dict(type='int'),
- volume_type=dict(),
- ),
- ),
- no_device=dict(),
- virtual_name=dict(),
- ),
- ),
- cpu_options=dict(
- type='dict',
- options=dict(
- core_count=dict(type='int'),
- threads_per_core=dict(type='int'),
- ),
- ),
-        credit_specification=dict(
-            type='dict',
-            options=dict(
-                cpu_credits=dict(),
-            ),
-        ),
- disable_api_termination=dict(type='bool'),
- ebs_optimized=dict(type='bool'),
- elastic_gpu_specifications=dict(
- options=dict(type=dict()),
- type='list',
- ),
- iam_instance_profile=dict(),
- image_id=dict(),
- instance_initiated_shutdown_behavior=dict(choices=['stop', 'terminate']),
- instance_market_options=dict(
- type='dict',
- options=dict(
- market_type=dict(),
- spot_options=dict(
- type='dict',
- options=dict(
- block_duration_minutes=dict(type='int'),
- instance_interruption_behavior=dict(choices=['hibernate', 'stop', 'terminate']),
- max_price=dict(),
- spot_instance_type=dict(choices=['one-time', 'persistent']),
- ),
- ),
- ),
- ),
- instance_type=dict(),
- kernel_id=dict(),
- key_name=dict(),
- monitoring=dict(
- type='dict',
- options=dict(
- enabled=dict(type='bool')
- ),
- ),
- network_interfaces=dict(
- type='list',
- options=dict(
- associate_public_ip_address=dict(type='bool'),
- delete_on_termination=dict(type='bool'),
- description=dict(),
- device_index=dict(type='int'),
- groups=dict(type='list'),
- ipv6_address_count=dict(type='int'),
- ipv6_addresses=dict(type='list'),
- network_interface_id=dict(),
- private_ip_address=dict(),
- subnet_id=dict(),
- ),
- ),
- placement=dict(
- options=dict(
- affinity=dict(),
- availability_zone=dict(),
- group_name=dict(),
- host_id=dict(),
- tenancy=dict(),
- ),
- type='dict',
- ),
- ram_disk_id=dict(),
- security_group_ids=dict(type='list'),
- security_groups=dict(type='list'),
- tags=dict(type='dict'),
- user_data=dict(),
- )
-
- arg_spec = dict(
- state=dict(choices=['present', 'absent'], default='present'),
- template_name=dict(aliases=['name']),
- template_id=dict(aliases=['id']),
- default_version=dict(default='latest'),
- )
-
- arg_spec.update(template_options)
-
- module = AnsibleAWSModule(
- argument_spec=arg_spec,
- required_one_of=[
- ('template_name', 'template_id')
- ],
- supports_check_mode=True
- )
-
- if not module.boto3_at_least('1.6.0'):
- module.fail_json(msg="ec2_launch_template requires boto3 >= 1.6.0")
-
- for interface in (module.params.get('network_interfaces') or []):
- if interface.get('ipv6_addresses'):
- interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']]
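-            # e.g. ['2001:db8::1'] becomes [{'ipv6_address': '2001:db8::1'}], matching
-            # the Ipv6Addresses request shape after camelization.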
-
- if module.params.get('state') == 'present':
- out = create_or_update(module, template_options)
- out.update(format_module_output(module))
- elif module.params.get('state') == 'absent':
- out = delete_template(module)
- else:
- module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get('state')))
-
- module.exit_json(**out)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_lc.py b/lib/ansible/modules/cloud/amazon/ec2_lc.py
deleted file mode 100644
index 82d4364aee..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_lc.py
+++ /dev/null
@@ -1,714 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_lc
-
-short_description: Create or delete AWS Autoscaling Launch Configurations
-
-description:
- - Can create or delete AWS Autoscaling Configurations.
- - Works with the ec2_asg module to manage Autoscaling Groups.
-
-notes:
-  - Amazon ASG Autoscaling Launch Configurations are immutable once created. Modifying any of these options after creation will not update the
-    launch configuration on AWS. You must create a new config and assign it to the ASG instead.
-  - Encrypted volumes are supported on Ansible versions >= 2.4.
-
-version_added: "1.6"
-
-author:
- - "Gareth Rushgrove (@garethr)"
- - "Willem van Ketwich (@wilvk)"
-
-options:
- state:
- description:
- - Register or deregister the instance.
- default: present
- choices: ['present', 'absent']
- type: str
- name:
- description:
- - Unique name for configuration.
- required: true
- type: str
- instance_type:
- description:
- - Instance type to use for the instance.
- - Required when creating a new Launch Configuration.
- type: str
- image_id:
- description:
- - The AMI unique identifier to be used for the group.
- type: str
- key_name:
- description:
- - The SSH key name to be used for access to managed instances.
- type: str
- security_groups:
- description:
-      - A list of security groups to apply to the instances. Since version 2.4 you can specify either security group names or IDs or a mix. Prior
-        to 2.4, for VPC instances, specify security group IDs; for EC2-Classic, specify either security group names or IDs.
- type: list
- elements: str
- volumes:
- description:
-      - A list of dictionaries defining the volumes to create.
- - For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
- type: list
- elements: dict
- suboptions:
- device_name:
- type: str
- description:
- - The name for the volume (For example C(/dev/sda)).
- required: true
- no_device:
- type: bool
- description:
- - When I(no_device=true) the device will not be created.
- snapshot:
- type: str
- description:
- - The ID of an EBS snapshot to copy when creating the volume.
- - Mutually exclusive with the I(ephemeral) parameter.
- ephemeral:
- type: str
- description:
-            - The name of the instance store (ephemeral) volume to map (for example C(ephemeral0)).
- - Data on ephemeral volumes is lost when the instance is stopped.
- - Mutually exclusive with the I(snapshot) parameter.
- volume_size:
- type: int
- description:
- - The size of the volume (in GiB).
- - Required unless one of I(ephemeral), I(snapshot) or I(no_device) is set.
- volume_type:
- type: str
- description:
- - The type of volume to create.
- - See
- U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) for more information on the available volume types.
- delete_on_termination:
- type: bool
- default: false
- description:
- - Whether the volume should be automatically deleted when the instance
- is terminated.
- iops:
- type: int
- description:
- - The number of IOPS per second to provision for the volume.
- - Required when I(volume_type=io1).
- encrypted:
- type: bool
- default: false
- description:
- - Whether the volume should be encrypted using the 'aws/ebs' KMS CMK.
- user_data:
- description:
- - Opaque blob of data which is made available to the ec2 instance. Mutually exclusive with I(user_data_path).
- type: str
- user_data_path:
- description:
- - Path to the file that contains userdata for the ec2 instances. Mutually exclusive with I(user_data).
- version_added: "2.3"
- type: path
- kernel_id:
- description:
- - Kernel id for the EC2 instance.
- type: str
- spot_price:
- description:
- - The spot price you are bidding. Only applies for an autoscaling group with spot instances.
- type: float
- instance_monitoring:
- description:
- - Specifies whether instances are launched with detailed monitoring.
- type: bool
- default: false
- assign_public_ip:
- description:
- - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address
-        to each instance launched in an Amazon VPC.
- version_added: "1.8"
- type: bool
- ramdisk_id:
- description:
- - A RAM disk id for the instances.
- version_added: "1.8"
- type: str
- instance_profile_name:
- description:
- - The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instances.
- version_added: "1.8"
- type: str
- ebs_optimized:
- description:
- - Specifies whether the instance is optimized for EBS I/O (true) or not (false).
- default: false
- version_added: "1.8"
- type: bool
- classic_link_vpc_id:
- description:
-      - ID of the ClassicLink-enabled VPC.
- version_added: "2.0"
- type: str
- classic_link_vpc_security_groups:
- description:
- - A list of security group IDs with which to associate the ClassicLink VPC instances.
- version_added: "2.0"
- type: list
- elements: str
- vpc_id:
- description:
- - VPC ID, used when resolving security group names to IDs.
- version_added: "2.4"
- type: str
- instance_id:
- description:
-      - The ID of a running instance to use as a basis for a launch configuration. Can be used in place of I(image_id) and I(instance_type).
- version_added: "2.4"
- type: str
- placement_tenancy:
- description:
- - Determines whether the instance runs on single-tenant hardware or not.
- - When not set AWS will default to C(default).
- version_added: "2.4"
- type: str
- choices: ['default', 'dedicated']
- associate_public_ip_address:
- description:
- - The I(associate_public_ip_address) option does nothing and will be removed in Ansible 2.14.
- type: bool
-
-extends_documentation_fragment:
- - aws
- - ec2
-
-requirements:
- - boto3 >= 1.4.4
-
-'''
-
-EXAMPLES = '''
-
-# create a launch configuration using an AMI image and instance type as a basis
-
-- name: note that encrypted volumes are only supported in Ansible 2.4 and later
- ec2_lc:
- name: special
- image_id: ami-XXX
- key_name: default
-    security_groups: ['group', 'group2']
- instance_type: t1.micro
- volumes:
- - device_name: /dev/sda1
- volume_size: 100
- volume_type: io1
- iops: 3000
- delete_on_termination: true
- encrypted: true
- - device_name: /dev/sdb
- ephemeral: ephemeral0
-
-# create a launch configuration using a running instance id as a basis
-
-- ec2_lc:
- name: special
- instance_id: i-00a48b207ec59e948
- key_name: default
-    security_groups: ['launch-wizard-2']
- volumes:
- - device_name: /dev/sda1
- volume_size: 120
- volume_type: io1
- iops: 3000
- delete_on_termination: true
-
-# create a launch configuration to omit the /dev/sdf EBS device that is included in the AMI image
-
-- ec2_lc:
- name: special
- image_id: ami-XXX
- key_name: default
-    security_groups: ['group', 'group2']
- instance_type: t1.micro
- volumes:
- - device_name: /dev/sdf
- no_device: true
-
-- name: Use EBS snapshot ID for volume
- block:
- - name: Set Volume Facts
- set_fact:
- volumes:
- - device_name: /dev/sda1
- volume_size: 20
-          snapshot: snap-XXXX
-          volume_type: gp2
- delete_on_termination: true
- encrypted: no
-
- - name: Create launch configuration
- ec2_lc:
- name: lc1
- image_id: ami-xxxx
- assign_public_ip: yes
- instance_type: t2.medium
- key_name: my-key
-      security_groups: ['sg-xxxx']
- volumes: "{{ volumes }}"
- register: lc_info
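-
-# A minimal sketch (illustrative values) of a launch configuration for spot
-# instances using the spot_price option documented above
-- ec2_lc:
-    name: special-spot
-    image_id: ami-XXX
-    key_name: default
-    instance_type: t1.micro
-    spot_price: 0.05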
-'''
-
-RETURN = '''
-arn:
- description: The Amazon Resource Name of the launch configuration.
- returned: when I(state=present)
- type: str
- sample: arn:aws:autoscaling:us-east-1:148830907657:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name
-changed:
- description: Whether the state of the launch configuration has changed.
- returned: always
- type: bool
- sample: false
-created_time:
- description: The creation date and time for the launch configuration.
- returned: when I(state=present)
- type: str
- sample: '2017-11-03 23:46:44.841000'
-image_id:
- description: The ID of the Amazon Machine Image used by the launch configuration.
- returned: when I(state=present)
- type: str
- sample: ami-9be6f38c
-instance_type:
- description: The instance type for the instances.
- returned: when I(state=present)
- type: str
- sample: t1.micro
-name:
- description: The name of the launch configuration.
- returned: when I(state=present)
- type: str
- sample: launch_config_name
-result:
- description: The specification details for the launch configuration.
- returned: when I(state=present)
- type: complex
- contains:
- PlacementTenancy:
- description: The tenancy of the instances, either default or dedicated.
- returned: when I(state=present)
- type: str
- sample: default
- associate_public_ip_address:
- description: (EC2-VPC) Indicates whether to assign a public IP address to each instance.
- returned: when I(state=present)
- type: bool
- sample: false
- block_device_mappings:
- description: A block device mapping, which specifies the block devices.
- returned: when I(state=present)
- type: complex
- contains:
- device_name:
- description: The device name exposed to the EC2 instance (for example, /dev/sdh or xvdh).
- returned: when I(state=present)
- type: str
- sample: /dev/sda1
- ebs:
- description: The information about the Amazon EBS volume.
- returned: when I(state=present)
- type: complex
- contains:
- snapshot_id:
- description: The ID of the snapshot.
- returned: when I(state=present)
- type: str
- volume_size:
- description: The volume size, in GiB.
- returned: when I(state=present)
- type: str
- sample: '100'
- virtual_name:
- description: The name of the virtual device (for example, ephemeral0).
- returned: when I(state=present)
- type: str
- sample: ephemeral0
- classic_link_vpc_id:
- description: The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.
- returned: when I(state=present)
- type: str
- classic_link_vpc_security_groups:
- description: The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId.
- returned: when I(state=present)
- type: list
- sample: []
- created_time:
- description: The creation date and time for the launch configuration.
- returned: when I(state=present)
- type: str
- sample: '2017-11-03 23:46:44.841000'
- delete_on_termination:
- description: Indicates whether the volume is deleted on instance termination.
- returned: when I(state=present)
- type: bool
- sample: true
- ebs_optimized:
- description: Indicates whether the instance is optimized for EBS I/O (true) or not (false).
- returned: when I(state=present)
- type: bool
- sample: false
- image_id:
- description: The ID of the Amazon Machine Image used by the launch configuration.
- returned: when I(state=present)
- type: str
- sample: ami-9be6f38c
- instance_monitoring:
- description: Indicates whether instances in this group are launched with detailed (true) or basic (false) monitoring.
- returned: when I(state=present)
- type: bool
- sample: true
- instance_profile_name:
- description: The name or Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance.
- returned: when I(state=present)
- type: str
- sample: null
- instance_type:
- description: The instance type for the instances.
- returned: when I(state=present)
- type: str
- sample: t1.micro
- iops:
- description: The number of I/O operations per second (IOPS) to provision for the volume.
- returned: when I(state=present)
- type: int
- kernel_id:
- description: The ID of the kernel associated with the AMI.
- returned: when I(state=present)
- type: str
- sample: ''
- key_name:
- description: The name of the key pair.
- returned: when I(state=present)
- type: str
- sample: testkey
- launch_configuration_arn:
- description: The Amazon Resource Name (ARN) of the launch configuration.
- returned: when I(state=present)
- type: str
- sample: arn:aws:autoscaling:us-east-1:148830907657:launchConfiguration:888d9b58-d93a-40c4-90cf-759197a2621a:launchConfigurationName/launch_config_name
- member:
- description: ""
- returned: when I(state=present)
- type: str
- sample: "\n "
- name:
- description: The name of the launch configuration.
- returned: when I(state=present)
- type: str
- sample: launch_config_name
- ramdisk_id:
- description: The ID of the RAM disk associated with the AMI.
- returned: when I(state=present)
- type: str
- sample: ''
- security_groups:
- description: The security groups to associate with the instances.
- returned: when I(state=present)
- type: list
- sample:
- - sg-5e27db2f
- spot_price:
- description: The price to bid when launching Spot Instances.
- returned: when I(state=present)
- type: float
- use_block_device_types:
- description: Indicates whether to suppress a device mapping.
- returned: when I(state=present)
- type: bool
- sample: false
- user_data:
- description: The user data available to the instances.
- returned: when I(state=present)
- type: str
- sample: ''
- volume_type:
- description: The volume type (one of standard, io1, gp2).
- returned: when I(state=present)
- type: str
- sample: io1
-security_groups:
- description: The security groups to associate with the instances.
- returned: when I(state=present)
- type: list
- sample:
- - sg-5e27db2f
-
-'''
-
-
-import traceback
-from ansible.module_utils.ec2 import (get_aws_connection_info, ec2_argument_spec, ec2_connect, camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names,
- boto3_conn, snake_dict_to_camel_dict, HAS_BOTO3)
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import AnsibleModule
-
-try:
- import botocore
-except ImportError:
- pass
-
-
-def create_block_device_meta(module, volume):
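-    # Maps the module's snake_case volume spec onto the CamelCase structure
-    # expected by boto3's create_launch_configuration. For illustration
-    # (hypothetical values), {'device_name': '/dev/sdb', 'volume_size': 10}
-    # becomes {'DeviceName': '/dev/sdb', 'Ebs': {'VolumeSize': 10}}.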
- if 'snapshot' not in volume and 'ephemeral' not in volume and 'no_device' not in volume:
- if 'volume_size' not in volume:
- module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
- if 'snapshot' in volume:
- if volume.get('volume_type') == 'io1' and 'iops' not in volume:
- module.fail_json(msg='io1 volumes must have an iops value set')
- if 'ephemeral' in volume:
- if 'snapshot' in volume:
- module.fail_json(msg='Cannot set both ephemeral and snapshot')
-
- return_object = {}
-
- if 'ephemeral' in volume:
- return_object['VirtualName'] = volume.get('ephemeral')
-
- if 'device_name' in volume:
- return_object['DeviceName'] = volume.get('device_name')
-
- if 'no_device' in volume:
- return_object['NoDevice'] = volume.get('no_device')
-
-    if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'encrypted']):
- return_object['Ebs'] = {}
-
- if 'snapshot' in volume:
- return_object['Ebs']['SnapshotId'] = volume.get('snapshot')
-
- if 'volume_size' in volume:
- return_object['Ebs']['VolumeSize'] = int(volume.get('volume_size', 0))
-
- if 'volume_type' in volume:
- return_object['Ebs']['VolumeType'] = volume.get('volume_type')
-
- if 'delete_on_termination' in volume:
- return_object['Ebs']['DeleteOnTermination'] = volume.get('delete_on_termination', False)
-
- if 'iops' in volume:
- return_object['Ebs']['Iops'] = volume.get('iops')
-
- if 'encrypted' in volume:
- return_object['Ebs']['Encrypted'] = volume.get('encrypted')
-
- return return_object
-
-
-def create_launch_config(connection, module):
- name = module.params.get('name')
- vpc_id = module.params.get('vpc_id')
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- ec2_connection = boto3_conn(module, 'client', 'ec2', region, ec2_url, **aws_connect_kwargs)
- security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), ec2_connection, vpc_id=vpc_id, boto3=True)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-    except ValueError:
- module.fail_json(msg="Failed to get Security Group IDs", exception=traceback.format_exc())
- user_data = module.params.get('user_data')
- user_data_path = module.params.get('user_data_path')
- volumes = module.params['volumes']
- instance_monitoring = module.params.get('instance_monitoring')
- assign_public_ip = module.params.get('assign_public_ip')
- instance_profile_name = module.params.get('instance_profile_name')
- ebs_optimized = module.params.get('ebs_optimized')
- classic_link_vpc_id = module.params.get('classic_link_vpc_id')
- classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')
-
- block_device_mapping = []
-
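-    # Only simple scalar parameters are converted here; list- and dict-valued
-    # parameters (security groups, block device mappings, ...) are attached to
-    # launch_config further below.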
-    convert_list = ['image_id', 'instance_type', 'instance_id', 'placement_tenancy', 'key_name', 'kernel_id', 'ramdisk_id', 'spot_price']
-
- launch_config = (snake_dict_to_camel_dict(dict((k.capitalize(), str(v)) for k, v in module.params.items() if v is not None and k in convert_list)))
-
- if user_data_path:
- try:
- with open(user_data_path, 'r') as user_data_file:
- user_data = user_data_file.read()
- except IOError as e:
- module.fail_json(msg="Failed to open file for reading", exception=traceback.format_exc())
-
- if volumes:
- for volume in volumes:
- if 'device_name' not in volume:
- module.fail_json(msg='Device name must be set for volume')
-            # The minimum volume size is 1 GiB; a volume_size explicitly set to 0 is a signal not to create this volume
- if 'volume_size' not in volume or int(volume['volume_size']) > 0:
- block_device_mapping.append(create_block_device_meta(module, volume))
-
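-    # Launch configurations are immutable in AWS: if one with this name already
-    # exists it is reported back unchanged, otherwise a new one is created below.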
- try:
- launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to describe launch configuration by name", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
- changed = False
- result = {}
-
- launch_config['LaunchConfigurationName'] = name
-
- if security_groups is not None:
- launch_config['SecurityGroups'] = security_groups
-
- if classic_link_vpc_id is not None:
- launch_config['ClassicLinkVPCId'] = classic_link_vpc_id
-
- if instance_monitoring is not None:
- launch_config['InstanceMonitoring'] = {'Enabled': instance_monitoring}
-
- if classic_link_vpc_security_groups is not None:
- launch_config['ClassicLinkVPCSecurityGroups'] = classic_link_vpc_security_groups
-
- if block_device_mapping:
- launch_config['BlockDeviceMappings'] = block_device_mapping
-
- if instance_profile_name is not None:
- launch_config['IamInstanceProfile'] = instance_profile_name
-
- if assign_public_ip is not None:
- launch_config['AssociatePublicIpAddress'] = assign_public_ip
-
- if user_data is not None:
- launch_config['UserData'] = user_data
-
- if ebs_optimized is not None:
- launch_config['EbsOptimized'] = ebs_optimized
-
- if len(launch_configs) == 0:
- try:
- connection.create_launch_configuration(**launch_config)
- launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
- changed = True
- if launch_configs:
- launch_config = launch_configs[0]
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to create launch configuration", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
- result = (dict((k, v) for k, v in launch_config.items()
- if k not in ['Connection', 'CreatedTime', 'InstanceMonitoring', 'BlockDeviceMappings']))
-
- result['CreatedTime'] = to_text(launch_config.get('CreatedTime'))
-
- try:
- result['InstanceMonitoring'] = module.boolean(launch_config.get('InstanceMonitoring').get('Enabled'))
- except AttributeError:
- result['InstanceMonitoring'] = False
-
- result['BlockDeviceMappings'] = []
-
- for block_device_mapping in launch_config.get('BlockDeviceMappings', []):
- result['BlockDeviceMappings'].append(dict(device_name=block_device_mapping.get('DeviceName'), virtual_name=block_device_mapping.get('VirtualName')))
- if block_device_mapping.get('Ebs') is not None:
- result['BlockDeviceMappings'][-1]['ebs'] = dict(
- snapshot_id=block_device_mapping.get('Ebs').get('SnapshotId'), volume_size=block_device_mapping.get('Ebs').get('VolumeSize'))
-
- if user_data_path:
- result['UserData'] = "hidden" # Otherwise, we dump binary to the user's terminal
-
- return_object = {
- 'Name': result.get('LaunchConfigurationName'),
- 'CreatedTime': result.get('CreatedTime'),
- 'ImageId': result.get('ImageId'),
- 'Arn': result.get('LaunchConfigurationARN'),
- 'SecurityGroups': result.get('SecurityGroups'),
- 'InstanceType': result.get('InstanceType'),
- 'Result': result
- }
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(return_object))
-
-
-def delete_launch_config(connection, module):
- try:
- name = module.params.get('name')
- launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=[name]).get('LaunchConfigurations')
- if launch_configs:
- connection.delete_launch_configuration(LaunchConfigurationName=launch_configs[0].get('LaunchConfigurationName'))
- module.exit_json(changed=True)
- else:
- module.exit_json(changed=False)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Failed to delete launch configuration", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- name=dict(required=True),
- image_id=dict(),
- instance_id=dict(),
- key_name=dict(),
- security_groups=dict(default=[], type='list'),
- user_data=dict(),
- user_data_path=dict(type='path'),
- kernel_id=dict(),
- volumes=dict(type='list'),
- instance_type=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- spot_price=dict(type='float'),
- ramdisk_id=dict(),
- instance_profile_name=dict(),
- ebs_optimized=dict(default=False, type='bool'),
- associate_public_ip_address=dict(type='bool', removed_in_version='2.14'),
- instance_monitoring=dict(default=False, type='bool'),
- assign_public_ip=dict(type='bool'),
- classic_link_vpc_security_groups=dict(type='list'),
- classic_link_vpc_id=dict(),
- vpc_id=dict(),
- placement_tenancy=dict(choices=['default', 'dedicated'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- mutually_exclusive=[['user_data', 'user_data_path']]
- )
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- connection = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoRegionError:
-        module.fail_json(msg="region must be specified as a parameter, in the AWS_DEFAULT_REGION environment variable, or in the boto configuration file")
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="unable to establish connection - " + str(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
- state = module.params.get('state')
-
- if state == 'present':
- create_launch_config(connection, module)
- elif state == 'absent':
- delete_launch_config(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_lc_find.py b/lib/ansible/modules/cloud/amazon/ec2_lc_find.py
deleted file mode 100644
index a972e7f052..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_lc_find.py
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/usr/bin/python
-# encoding: utf-8
-
-# (c) 2015, Jose Armesto <jose@armesto.net>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: ec2_lc_find
-short_description: Find AWS Autoscaling Launch Configurations
-description:
-  - Returns a list of matching Launch Configurations for a given name, along with other useful information.
-  - Results can be sorted and sliced.
-  - It depends on boto3.
- - Based on the work by Tom Bamford U(https://github.com/tombamford)
-
-version_added: "2.2"
-author: "Jose Armesto (@fiunchinho)"
-options:
- name_regex:
- description:
-      - A regular expression used to match Launch Configuration names.
-      - The pattern is compiled with Python's re module and anchored at the start of the name.
- required: True
- type: str
- sort_order:
- description:
- - Order in which to sort results.
- choices: ['ascending', 'descending']
- default: 'ascending'
- type: str
- limit:
- description:
- - How many results to show.
- - Corresponds to Python slice notation like list[:limit].
- type: int
-requirements:
- - "python >= 2.6"
- - boto3
-extends_documentation_fragment:
- - ec2
- - aws
-"""
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Search for the Launch Configurations that start with "app"
-- ec2_lc_find:
- name_regex: app.*
- sort_order: descending
- limit: 2
-'''
-
-RETURN = '''
-image_id:
-  description: AMI ID
- returned: when Launch Configuration was found
- type: str
- sample: "ami-0d75df7e"
-user_data:
- description: User data used to start instance
- returned: when Launch Configuration was found
- type: str
- sample: "ZXhwb3J0IENMT1VE"
-name:
- description: Name of the Launch Configuration
- returned: when Launch Configuration was found
- type: str
- sample: "myapp-v123"
-arn:
-  description: Amazon Resource Name (ARN) of the Launch Configuration
- returned: when Launch Configuration was found
- type: str
- sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject"
-instance_type:
-  description: Type of EC2 instance
- returned: when Launch Configuration was found
- type: str
- sample: "t2.small"
-created_time:
-  description: Creation date and time of the Launch Configuration
- returned: when Launch Configuration was found
- type: str
- sample: "2016-06-29T14:59:22.222000+00:00"
-ebs_optimized:
- description: Launch Configuration EBS optimized property
- returned: when Launch Configuration was found
- type: bool
- sample: False
-instance_monitoring:
- description: Launch Configuration instance monitoring property
- returned: when Launch Configuration was found
-  type: dict
- sample: {"Enabled": false}
-classic_link_vpc_security_groups:
- description: Launch Configuration classic link vpc security groups property
- returned: when Launch Configuration was found
- type: list
- sample: []
-block_device_mappings:
- description: Launch Configuration block device mappings property
- returned: when Launch Configuration was found
- type: list
- sample: []
-keyname:
-  description: Name of the SSH key pair used by the Launch Configuration
- returned: when Launch Configuration was found
- type: str
- sample: mykey
-security_groups:
- description: Launch Configuration security groups
- returned: when Launch Configuration was found
- type: list
- sample: []
-kernel_id:
- description: Launch Configuration kernel to use
- returned: when Launch Configuration was found
- type: str
- sample: ''
-ram_disk_id:
- description: Launch Configuration ram disk property
- returned: when Launch Configuration was found
- type: str
- sample: ''
-associate_public_address:
-  description: Whether a public IP address is assigned to instances
- returned: when Launch Configuration was found
- type: bool
- sample: True
-...
-'''
-import re
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
-
-
-def find_launch_configs(client, module):
- name_regex = module.params.get('name_regex')
- sort_order = module.params.get('sort_order')
- limit = module.params.get('limit')
-
- paginator = client.get_paginator('describe_launch_configurations')
-
- response_iterator = paginator.paginate(
- PaginationConfig={
- 'MaxItems': 1000,
- 'PageSize': 100
- }
- )
-
- results = []
-
- for response in response_iterator:
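-        # name_regex is applied with re.match, i.e. anchored at the start of the
-        # Launch Configuration name; use a pattern like ".*foo" to search anywhere.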
-        matcher = re.compile(name_regex)
-        response['LaunchConfigurations'] = [lc for lc in response['LaunchConfigurations']
-                                            if matcher.match(lc['LaunchConfigurationName'])]
-
- for lc in response['LaunchConfigurations']:
- data = {
- 'name': lc['LaunchConfigurationName'],
- 'arn': lc['LaunchConfigurationARN'],
- 'created_time': lc['CreatedTime'],
- 'user_data': lc['UserData'],
- 'instance_type': lc['InstanceType'],
- 'image_id': lc['ImageId'],
- 'ebs_optimized': lc['EbsOptimized'],
- 'instance_monitoring': lc['InstanceMonitoring'],
- 'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
- 'block_device_mappings': lc['BlockDeviceMappings'],
- 'keyname': lc['KeyName'],
- 'security_groups': lc['SecurityGroups'],
- 'kernel_id': lc['KernelId'],
- 'ram_disk_id': lc['RamdiskId'],
- 'associate_public_address': lc.get('AssociatePublicIpAddress', False),
- }
-
- results.append(data)
-
- results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))
-
- if limit:
- results = results[:int(limit)]
-
- module.exit_json(changed=False, results=results)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- name_regex=dict(required=True),
- sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
- limit=dict(required=False, type='int'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- )
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)
-
- client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)
- find_launch_configs(client, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_lc_info.py b/lib/ansible/modules/cloud/amazon/ec2_lc_info.py
deleted file mode 100644
index 7c963224e0..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_lc_info.py
+++ /dev/null
@@ -1,237 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_lc_info
-short_description: Gather information about AWS Autoscaling Launch Configurations.
-description:
- - Gather information about AWS Autoscaling Launch Configurations.
- - This module was called C(ec2_lc_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.3"
-author: "Loïc Latreille (@psykotox)"
-requirements: [ boto3 ]
-options:
- name:
- description:
-      - A name or a list of names to match.
- default: []
- type: list
- elements: str
- sort:
- description:
-      - Optional attribute with which to sort the results.
- choices: ['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']
- type: str
- sort_order:
- description:
- - Order in which to sort results.
-      - Only used when the I(sort) parameter is specified.
- choices: ['ascending', 'descending']
- default: 'ascending'
- type: str
- sort_start:
- description:
- - Which result to start with (when sorting).
- - Corresponds to Python slice notation.
- type: int
- sort_end:
- description:
- - Which result to end with (when sorting).
- - Corresponds to Python slice notation.
- type: int
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all launch configurations
-- ec2_lc_info:
-
-# Gather information about launch configuration with name "example"
-- ec2_lc_info:
- name: example
-
-# Gather information sorted by created_time from most recent to least recent
-- ec2_lc_info:
- sort: created_time
- sort_order: descending
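-
-# Gather information sorted by created_time, keeping only the second and third
-# results (a sketch of the sort_start/sort_end slice semantics; values are
-# illustrative)
-- ec2_lc_info:
-    sort: created_time
-    sort_start: 1
-    sort_end: 3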
-'''
-
-RETURN = '''
-block_device_mapping:
-  description: Block device mappings for the instances of the launch configuration
-  type: list
-  returned: always
-  sample: "[{
-        'device_name': '/dev/xvda',
-        'ebs': {
-            'delete_on_termination': true,
-            'volume_size': 8,
-            'volume_type': 'gp2'
-        }
-    }]"
-classic_link_vpc_security_groups:
-  description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id
-  type: list
-  returned: always
-  sample: []
-created_time:
- description: The creation date and time for the launch configuration
- type: str
- returned: always
- sample: "2016-05-27T13:47:44.216000+00:00"
-ebs_optimized:
-  description: EBS I/O optimized (true) or not (false)
-  type: bool
-  returned: always
-  sample: true
-image_id:
- description: ID of the Amazon Machine Image (AMI)
- type: str
- returned: always
- sample: "ami-12345678"
-instance_monitoring:
- description: Launched with detailed monitoring or not
- type: dict
- returned: always
- sample: "{
- 'enabled': true
- }"
-instance_type:
- description: Instance type
- type: str
- returned: always
- sample: "t2.micro"
-kernel_id:
- description: ID of the kernel associated with the AMI
- type: str
- returned: always
- sample:
-key_name:
- description: Name of the key pair
- type: str
- returned: always
- sample: "user_app"
-launch_configuration_arn:
- description: Amazon Resource Name (ARN) of the launch configuration
- type: str
- returned: always
- sample: "arn:aws:autoscaling:us-east-1:666612345678:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app"
-launch_configuration_name:
- description: Name of the launch configuration
- type: str
- returned: always
- sample: "lc-app"
-ramdisk_id:
- description: ID of the RAM disk associated with the AMI
- type: str
- returned: always
- sample:
-security_groups:
-  description: Security groups associated with the instances
- type: list
- returned: always
- sample: "[
- 'web'
- ]"
-user_data:
-  description: User data available to the instances
- type: str
- returned: always
- sample:
-'''
-
-try:
-    import boto3
-    from botocore.exceptions import ClientError
-except ImportError:
-    pass  # HAS_BOTO3 (imported below) will be False and is checked in main()
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
- get_aws_connection_info)
-
-
-def list_launch_configs(connection, module):
-
- launch_config_name = module.params.get("name")
- sort = module.params.get('sort')
- sort_order = module.params.get('sort_order')
- sort_start = module.params.get('sort_start')
- sort_end = module.params.get('sort_end')
-
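-    # build_full_result() walks every page returned by the
-    # describe_launch_configurations paginator and merges them into one response.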
- try:
- pg = connection.get_paginator('describe_launch_configurations')
- launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result()
- except ClientError as e:
- module.fail_json(msg=e.message)
-
- snaked_launch_configs = []
- for launch_config in launch_configs['LaunchConfigurations']:
- snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config))
-
- for launch_config in snaked_launch_configs:
- if 'CreatedTime' in launch_config:
- launch_config['CreatedTime'] = str(launch_config['CreatedTime'])
-
- if sort:
- snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
-
- if sort and sort_start and sort_end:
- snaked_launch_configs = snaked_launch_configs[sort_start:sort_end]
- elif sort and sort_start:
- snaked_launch_configs = snaked_launch_configs[sort_start:]
- elif sort and sort_end:
- snaked_launch_configs = snaked_launch_configs[:sort_end]
-
- module.exit_json(launch_configurations=snaked_launch_configs)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- name=dict(required=False, default=[], type='list'),
- sort=dict(required=False, default=None,
- choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
- sort_order=dict(required=False, default='ascending',
- choices=['ascending', 'descending']),
- sort_start=dict(required=False, type='int'),
- sort_end=dict(required=False, type='int'),
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec)
- if module._name == 'ec2_lc_facts':
- module.deprecate("The 'ec2_lc_facts' module has been renamed to 'ec2_lc_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-
- if region:
- connection = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_params)
- else:
- module.fail_json(msg="region must be specified")
-
- list_launch_configs(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py b/lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py
deleted file mode 100644
index b221ae49e3..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py
+++ /dev/null
@@ -1,410 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
-module: ec2_metric_alarm
-short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
-description:
- - Can create or delete AWS metric alarms.
- - Metrics you wish to alarm on must already exist.
-version_added: "1.6"
-author: "Zacharie Eakin (@Zeekin)"
-options:
- state:
- description:
- - Register or deregister the alarm.
- choices: ['present', 'absent']
- default: 'present'
- type: str
- name:
- description:
- - Unique name for the alarm.
- required: true
- type: str
- metric:
- description:
- - Name of the monitored metric (e.g. C(CPUUtilization)).
- - Metric must already exist.
- required: false
- type: str
- namespace:
- description:
-      - Name of the appropriate namespace (C(AWS/EC2), C(System/Linux), etc.), which determines the category it will appear under in CloudWatch.
- required: false
- type: str
- statistic:
- description:
- - Operation applied to the metric.
- - Works in conjunction with I(period) and I(evaluation_periods) to determine the comparison value.
- required: false
- choices: ['SampleCount','Average','Sum','Minimum','Maximum']
- type: str
- comparison:
- description:
-      - Determines how the threshold value is compared.
-      - Symbolic comparison operators have been deprecated, and will be removed in Ansible 2.14.
- required: false
- type: str
- choices:
- - 'GreaterThanOrEqualToThreshold'
- - 'GreaterThanThreshold'
- - 'LessThanThreshold'
- - 'LessThanOrEqualToThreshold'
- - '<='
- - '<'
- - '>='
- - '>'
- threshold:
- description:
- - Sets the min/max bound for triggering the alarm.
- required: false
- type: float
- period:
- description:
- - The time (in seconds) between metric evaluations.
- required: false
- type: int
- evaluation_periods:
- description:
-      - The number of periods over which the metric is evaluated before the alarm state is determined.
- required: false
- type: int
- unit:
- description:
- - The threshold's unit of measurement.
- required: false
- type: str
- choices:
- - 'Seconds'
- - 'Microseconds'
- - 'Milliseconds'
- - 'Bytes'
- - 'Kilobytes'
- - 'Megabytes'
- - 'Gigabytes'
- - 'Terabytes'
- - 'Bits'
- - 'Kilobits'
- - 'Megabits'
- - 'Gigabits'
- - 'Terabits'
- - 'Percent'
- - 'Count'
- - 'Bytes/Second'
- - 'Kilobytes/Second'
- - 'Megabytes/Second'
- - 'Gigabytes/Second'
- - 'Terabytes/Second'
- - 'Bits/Second'
- - 'Kilobits/Second'
- - 'Megabits/Second'
- - 'Gigabits/Second'
- - 'Terabits/Second'
- - 'Count/Second'
- - 'None'
- description:
- description:
- - A longer description of the alarm.
- required: false
- type: str
- dimensions:
- description:
- - A dictionary describing which metric the alarm is applied to.
- - 'For more information see the AWS documentation:'
- - U(https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension)
- required: false
- type: dict
- alarm_actions:
- description:
-      - A list of the ARNs of the action(s) to take when the alarm is in the C(alarm) status.
- required: false
- type: list
- elements: str
- insufficient_data_actions:
- description:
- - A list of the names of action(s) to take when the alarm is in the C(insufficient_data) status.
- required: false
- type: list
- elements: str
- ok_actions:
- description:
- - A list of the names of action(s) to take when the alarm is in the C(ok) status, denoted as Amazon Resource Name(s).
- required: false
- type: list
- elements: str
- treat_missing_data:
- description:
- - Sets how the alarm handles missing data points.
- required: false
- type: str
- choices:
- - 'breaching'
- - 'notBreaching'
- - 'ignore'
- - 'missing'
- default: 'missing'
- version_added: "2.10"
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-EXAMPLES = '''
- - name: create alarm
- ec2_metric_alarm:
- state: present
- region: ap-southeast-2
- name: "cpu-low"
- metric: "CPUUtilization"
- namespace: "AWS/EC2"
- statistic: Average
- comparison: "LessThanOrEqualToThreshold"
- threshold: 5.0
- period: 300
- evaluation_periods: 3
- unit: "Percent"
-        description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes"
- dimensions: {'InstanceId':'i-XXX'}
- alarm_actions: ["action1","action2"]
-
- - name: Create an alarm to recover a failed instance
- ec2_metric_alarm:
- state: present
- region: us-west-1
- name: "recover-instance"
- metric: "StatusCheckFailed_System"
- namespace: "AWS/EC2"
- statistic: "Minimum"
- comparison: ">="
- threshold: 1.0
- period: 60
- evaluation_periods: 2
- unit: "Count"
- description: "This will recover an instance when it fails"
- dimensions: {"InstanceId":'i-XXX'}
- alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"]
-
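-  - name: Alarm that treats missing data as breaching (a sketch; names and values are illustrative)
-    ec2_metric_alarm:
-      state: present
-      region: us-west-1
-      name: "cpu-low-missing-breaching"
-      metric: "CPUUtilization"
-      namespace: "AWS/EC2"
-      statistic: Average
-      comparison: "LessThanOrEqualToThreshold"
-      threshold: 5.0
-      period: 300
-      evaluation_periods: 3
-      unit: "Percent"
-      treat_missing_data: breaching
-      dimensions: {'InstanceId':'i-XXX'}
-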
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-
-try:
- from botocore.exceptions import ClientError
-except ImportError:
- pass # protected by AnsibleAWSModule
-
-
-def create_metric_alarm(connection, module):
-
- name = module.params.get('name')
- metric = module.params.get('metric')
- namespace = module.params.get('namespace')
- statistic = module.params.get('statistic')
- comparison = module.params.get('comparison')
- threshold = module.params.get('threshold')
- period = module.params.get('period')
- evaluation_periods = module.params.get('evaluation_periods')
- unit = module.params.get('unit')
- description = module.params.get('description')
- dimensions = module.params.get('dimensions')
- alarm_actions = module.params.get('alarm_actions')
- insufficient_data_actions = module.params.get('insufficient_data_actions')
- ok_actions = module.params.get('ok_actions')
- treat_missing_data = module.params.get('treat_missing_data')
-
- warnings = []
-
- alarms = connection.describe_alarms(AlarmNames=[name])
-
- comparisons = {'<=': 'LessThanOrEqualToThreshold',
- '<': 'LessThanThreshold',
- '>=': 'GreaterThanOrEqualToThreshold',
- '>': 'GreaterThanThreshold'}
- if comparison in ('<=', '<', '>', '>='):
- module.deprecate('Using the <=, <, > and >= operators for comparison has been deprecated. Please use LessThanOrEqualToThreshold, '
- 'LessThanThreshold, GreaterThanThreshold or GreaterThanOrEqualToThreshold instead.', version="2.14")
- comparison = comparisons[comparison]
-
- if not isinstance(dimensions, list):
- fixed_dimensions = []
- for key, value in dimensions.items():
- fixed_dimensions.append({'Name': key, 'Value': value})
- dimensions = fixed_dimensions
-
- if not alarms['MetricAlarms']:
- try:
- connection.put_metric_alarm(AlarmName=name,
- MetricName=metric,
- Namespace=namespace,
- Statistic=statistic,
- ComparisonOperator=comparison,
- Threshold=threshold,
- Period=period,
- EvaluationPeriods=evaluation_periods,
- Unit=unit,
- AlarmDescription=description,
- Dimensions=dimensions,
- AlarmActions=alarm_actions,
- InsufficientDataActions=insufficient_data_actions,
- OKActions=ok_actions,
- TreatMissingData=treat_missing_data)
- changed = True
- alarms = connection.describe_alarms(AlarmNames=[name])
- except ClientError as e:
- module.fail_json_aws(e)
-
- else:
- changed = False
- alarm = alarms['MetricAlarms'][0]
-
- # Workaround for alarms created before TreatMissingData was introduced
- if 'TreatMissingData' not in alarm.keys():
- alarm['TreatMissingData'] = 'missing'
-
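-        # Compare each configurable attribute with the existing alarm to detect
-        # drift; any difference marks the alarm as changed, and the merged
-        # configuration is pushed back via put_metric_alarm further below.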
- for key, value in {'MetricName': metric,
- 'Namespace': namespace,
- 'Statistic': statistic,
- 'ComparisonOperator': comparison,
- 'Threshold': threshold,
- 'Period': period,
- 'EvaluationPeriods': evaluation_periods,
- 'Unit': unit,
- 'AlarmDescription': description,
- 'Dimensions': dimensions,
- 'TreatMissingData': treat_missing_data}.items():
- try:
- if alarm[key] != value:
- changed = True
- except KeyError:
- if value is not None:
- changed = True
-
- alarm[key] = value
-
- for key, value in {'AlarmActions': alarm_actions,
- 'InsufficientDataActions': insufficient_data_actions,
- 'OKActions': ok_actions}.items():
- action = value or []
- if alarm[key] != action:
- changed = True
-            alarm[key] = action
-
- try:
- if changed:
- connection.put_metric_alarm(AlarmName=alarm['AlarmName'],
- MetricName=alarm['MetricName'],
- Namespace=alarm['Namespace'],
- Statistic=alarm['Statistic'],
- ComparisonOperator=alarm['ComparisonOperator'],
- Threshold=alarm['Threshold'],
- Period=alarm['Period'],
- EvaluationPeriods=alarm['EvaluationPeriods'],
- Unit=alarm['Unit'],
- AlarmDescription=alarm['AlarmDescription'],
- Dimensions=alarm['Dimensions'],
- AlarmActions=alarm['AlarmActions'],
- InsufficientDataActions=alarm['InsufficientDataActions'],
- OKActions=alarm['OKActions'],
- TreatMissingData=alarm['TreatMissingData'])
- except ClientError as e:
- module.fail_json_aws(e)
-
- result = alarms['MetricAlarms'][0]
- module.exit_json(changed=changed, warnings=warnings,
- name=result['AlarmName'],
- actions_enabled=result['ActionsEnabled'],
- alarm_actions=result['AlarmActions'],
- alarm_arn=result['AlarmArn'],
- comparison=result['ComparisonOperator'],
- description=result['AlarmDescription'],
- dimensions=result['Dimensions'],
- evaluation_periods=result['EvaluationPeriods'],
- insufficient_data_actions=result['InsufficientDataActions'],
- last_updated=result['AlarmConfigurationUpdatedTimestamp'],
- metric=result['MetricName'],
- namespace=result['Namespace'],
- ok_actions=result['OKActions'],
- period=result['Period'],
- state_reason=result['StateReason'],
- state_value=result['StateValue'],
- statistic=result['Statistic'],
- threshold=result['Threshold'],
- treat_missing_data=result['TreatMissingData'],
- unit=result['Unit'])
-
-
-def delete_metric_alarm(connection, module):
- name = module.params.get('name')
- alarms = connection.describe_alarms(AlarmNames=[name])
-
- if alarms['MetricAlarms']:
- try:
- connection.delete_alarms(AlarmNames=[name])
- module.exit_json(changed=True)
- except (ClientError) as e:
- module.fail_json_aws(e)
- else:
- module.exit_json(changed=False)
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True, type='str'),
- metric=dict(type='str'),
- namespace=dict(type='str'),
- statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
- comparison=dict(type='str', choices=['LessThanOrEqualToThreshold', 'LessThanThreshold', 'GreaterThanThreshold',
- 'GreaterThanOrEqualToThreshold', '<=', '<', '>', '>=']),
- threshold=dict(type='float'),
- period=dict(type='int'),
- unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes',
- 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count',
- 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second',
- 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second',
- 'Terabits/Second', 'Count/Second', 'None']),
- evaluation_periods=dict(type='int'),
- description=dict(type='str'),
- dimensions=dict(type='dict', default={}),
- alarm_actions=dict(type='list', default=[]),
- insufficient_data_actions=dict(type='list', default=[]),
- ok_actions=dict(type='list', default=[]),
- treat_missing_data=dict(type='str', choices=['breaching', 'notBreaching', 'ignore', 'missing'], default='missing'),
- state=dict(default='present', choices=['present', 'absent']),
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
-
- state = module.params.get('state')
-
- connection = module.client('cloudwatch')
-
- if state == 'present':
- create_metric_alarm(connection, module)
- elif state == 'absent':
- delete_metric_alarm(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_placement_group.py b/lib/ansible/modules/cloud/amazon/ec2_placement_group.py
deleted file mode 100644
index e59c02171a..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_placement_group.py
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_placement_group
-short_description: Create or delete an EC2 Placement Group
-description:
-  - Create an EC2 Placement Group; if a placement group with the given name
-    already exists, nothing is done. Alternatively, delete an existing placement
-    group; if the placement group is absent, nothing is done. See also
-    U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html).
-version_added: "2.5"
-author: "Brad Macpherson (@iiibrad)"
-options:
- name:
- description:
- - The name for the placement group.
- required: true
- type: str
- state:
- description:
- - Create or delete placement group.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- strategy:
- description:
-      - Placement group strategy. C(cluster) places instances into a
-        low-latency group in a single Availability Zone, while C(spread)
-        spreads instances across underlying hardware.
- default: cluster
- choices: [ 'cluster', 'spread' ]
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide
-# for details.
-
-# Create a placement group.
-- ec2_placement_group:
- name: my-cluster
- state: present
-
-# Create a Spread placement group.
-- ec2_placement_group:
- name: my-cluster
- state: present
- strategy: spread
-
-# Delete a placement group.
-- ec2_placement_group:
- name: my-cluster
- state: absent
-
-'''
-
-
-RETURN = '''
-placement_group:
- description: Placement group attributes
- returned: when state != absent
- type: complex
- contains:
- name:
- description: PG name
- type: str
- sample: my-cluster
- state:
- description: PG state
- type: str
- sample: "available"
- strategy:
- description: PG strategy
- type: str
- sample: "cluster"
-
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import AWSRetry
-try:
- from botocore.exceptions import (BotoCoreError, ClientError)
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-@AWSRetry.exponential_backoff()
-def get_placement_group_details(connection, module):
- name = module.params.get("name")
- try:
- response = connection.describe_placement_groups(
- Filters=[{
- "Name": "group-name",
- "Values": [name]
- }])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(
- e,
- msg="Couldn't find placement group named [%s]" % name)
-
- if len(response['PlacementGroups']) != 1:
- return None
- else:
- placement_group = response['PlacementGroups'][0]
- return {
- "name": placement_group['GroupName'],
- "state": placement_group['State'],
- "strategy": placement_group['Strategy'],
- }
-
-
-@AWSRetry.exponential_backoff()
-def create_placement_group(connection, module):
- name = module.params.get("name")
- strategy = module.params.get("strategy")
-
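-    # In check mode the request is sent with DryRun=True; AWS then responds
-    # with a DryRunOperation error, which is treated as success below.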
- try:
- connection.create_placement_group(
- GroupName=name, Strategy=strategy, DryRun=module.check_mode)
- except (BotoCoreError, ClientError) as e:
- if e.response['Error']['Code'] == "DryRunOperation":
- module.exit_json(changed=True, placement_group={
- "name": name,
- "state": 'DryRun',
- "strategy": strategy,
- })
- module.fail_json_aws(
- e,
- msg="Couldn't create placement group [%s]" % name)
-
- module.exit_json(changed=True,
- placement_group=get_placement_group_details(
- connection, module
- ))
-
-
-@AWSRetry.exponential_backoff()
-def delete_placement_group(connection, module):
- name = module.params.get("name")
-
- try:
- connection.delete_placement_group(
- GroupName=name, DryRun=module.check_mode)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(
- e,
- msg="Couldn't delete placement group [%s]" % name)
-
- module.exit_json(changed=True)
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True, type='str'),
- state=dict(default='present', choices=['present', 'absent']),
- strategy=dict(default='cluster', choices=['cluster', 'spread'])
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- connection = module.client('ec2')
-
- state = module.params.get("state")
-
- if state == 'present':
- placement_group = get_placement_group_details(connection, module)
- if placement_group is None:
- create_placement_group(connection, module)
- else:
- strategy = module.params.get("strategy")
- if placement_group['strategy'] == strategy:
- module.exit_json(
- changed=False, placement_group=placement_group)
- else:
- name = module.params.get("name")
- module.fail_json(
- msg=("Placement group '{}' exists, can't change strategy" +
- " from '{}' to '{}'").format(
- name,
- placement_group['strategy'],
- strategy))
-
- elif state == 'absent':
- placement_group = get_placement_group_details(connection, module)
- if placement_group is None:
- module.exit_json(changed=False)
- else:
- delete_placement_group(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_placement_group_info.py b/lib/ansible/modules/cloud/amazon/ec2_placement_group_info.py
deleted file mode 100644
index 84cf784a71..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_placement_group_info.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_placement_group_info
-short_description: List EC2 Placement Group(s) details
-description:
- - List details of EC2 Placement Group(s).
- - This module was called C(ec2_placement_group_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.5"
-author: "Brad Macpherson (@iiibrad)"
-options:
- names:
- description:
- - A list of names to filter on. If a listed group does not exist, there
- will be no corresponding entry in the result; no error will be raised.
- type: list
- elements: str
- required: false
- default: []
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details or the AWS region,
-# see the AWS Guide for details.
-
-# List all placement groups.
-- ec2_placement_group_info:
- register: all_ec2_placement_groups
-
-# List two placement groups.
-- ec2_placement_group_info:
- names:
- - my-cluster
- - my-other-cluster
- register: specific_ec2_placement_groups
-
-- debug: msg="{{ specific_ec2_placement_groups | json_query(\"[?name=='my-cluster']\") }}"
-
-'''
-
-
-RETURN = '''
-placement_groups:
- description: Placement group attributes
- returned: always
- type: complex
- contains:
- name:
- description: PG name
- type: str
- sample: my-cluster
- state:
- description: PG state
- type: str
- sample: "available"
- strategy:
- description: PG strategy
- type: str
- sample: "cluster"
-
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-try:
- from botocore.exceptions import (BotoCoreError, ClientError)
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def get_placement_groups_details(connection, module):
- names = module.params.get("names")
- try:
- if len(names) > 0:
- response = connection.describe_placement_groups(
- Filters=[{
- "Name": "group-name",
- "Values": names
- }])
- else:
- response = connection.describe_placement_groups()
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(
- e,
- msg="Couldn't find placement groups named [%s]" % names)
-
- results = []
- for placement_group in response['PlacementGroups']:
- results.append({
- "name": placement_group['GroupName'],
- "state": placement_group['State'],
- "strategy": placement_group['Strategy'],
- })
- return results
-
-
-def main():
- argument_spec = dict(
- names=dict(type='list', default=[])
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
- if module._module._name == 'ec2_placement_group_facts':
- module._module.deprecate("The 'ec2_placement_group_facts' module has been renamed to 'ec2_placement_group_info'", version='2.13')
-
- connection = module.client('ec2')
-
- placement_groups = get_placement_groups_details(connection, module)
- module.exit_json(changed=False, placement_groups=placement_groups)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py b/lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py
deleted file mode 100644
index 42241882ad..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
-module: ec2_scaling_policy
-short_description: Create or delete AWS scaling policies for Autoscaling groups
-description:
- - Can create or delete scaling policies for autoscaling groups.
- - Referenced autoscaling groups must already exist.
-version_added: "1.6"
-author: "Zacharie Eakin (@Zeekin)"
-options:
- state:
- description:
- - Register or deregister the policy.
- default: present
- choices: ['present', 'absent']
- type: str
- name:
- description:
- - Unique name for the scaling policy.
- required: true
- type: str
- asg_name:
- description:
- - Name of the associated autoscaling group.
- required: true
- type: str
- adjustment_type:
- description:
- - The type of change in capacity of the autoscaling group.
- choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
- type: str
- scaling_adjustment:
- description:
- - The amount by which the autoscaling group is adjusted by the policy.
- type: int
- min_adjustment_step:
- description:
- - Minimum amount of adjustment when policy is triggered.
- type: int
- cooldown:
- description:
-      - The minimum period of time (in seconds) between successive autoscaling actions.
- type: int
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-EXAMPLES = '''
-- ec2_scaling_policy:
- state: present
- region: US-XXX
- name: "scaledown-policy"
- adjustment_type: "ChangeInCapacity"
- asg_name: "slave-pool"
- scaling_adjustment: -1
- min_adjustment_step: 1
- cooldown: 300
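-
-# Remove the policy once it is no longer needed (a sketch reusing the
-# illustrative names from the example above)
-- ec2_scaling_policy:
-    state: absent
-    region: US-XXX
-    name: "scaledown-policy"
-    asg_name: "slave-pool"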
-'''
-
-try:
- import boto.ec2.autoscale
- import boto.exception
- from boto.ec2.autoscale import ScalingPolicy
- from boto.exception import BotoServerError
-except ImportError:
- pass # Taken care of by ec2.HAS_BOTO
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec,
- get_aws_connection_info)
-
-
-def create_scaling_policy(connection, module):
- sp_name = module.params.get('name')
- adjustment_type = module.params.get('adjustment_type')
- asg_name = module.params.get('asg_name')
- scaling_adjustment = module.params.get('scaling_adjustment')
- min_adjustment_step = module.params.get('min_adjustment_step')
- cooldown = module.params.get('cooldown')
-
- scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
-
- if not scalingPolicies:
- sp = ScalingPolicy(
- name=sp_name,
- adjustment_type=adjustment_type,
- as_name=asg_name,
- scaling_adjustment=scaling_adjustment,
- min_adjustment_step=min_adjustment_step,
- cooldown=cooldown)
-
- try:
- connection.create_scaling_policy(sp)
- policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
- module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
- cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
- except BotoServerError as e:
- module.fail_json(msg=str(e))
- else:
- policy = scalingPolicies[0]
- changed = False
-
- # min_adjustment_step attribute is only relevant if the adjustment_type
- # is set to percentage change in capacity, so it is a special case
- if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
- if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
- changed = True
-
- # set the min adjustment step in case the user decided to change their
- # adjustment type to percentage
- setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
-
- # check the remaining attributes
- for attr in ('adjustment_type', 'scaling_adjustment', 'cooldown'):
- if getattr(policy, attr) != module.params.get(attr):
- changed = True
- setattr(policy, attr, module.params.get(attr))
-
- try:
- if changed:
- connection.create_scaling_policy(policy)
- policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
- module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
- cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
- except BotoServerError as e:
- module.fail_json(msg=str(e))
-
-
-def delete_scaling_policy(connection, module):
- sp_name = module.params.get('name')
- asg_name = module.params.get('asg_name')
-
- scaling_policies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
-
- if scaling_policies:
- try:
- connection.delete_policy(sp_name, asg_name)
- module.exit_json(changed=True)
- except BotoServerError as e:
- module.fail_json(msg=str(e))
- else:
- module.exit_json(changed=False)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- name=dict(required=True, type='str'),
- adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
- asg_name=dict(required=True, type='str'),
- scaling_adjustment=dict(type='int'),
- min_adjustment_step=dict(type='int'),
- cooldown=dict(type='int'),
- state=dict(default='present', choices=['present', 'absent']),
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-
- state = module.params.get('state')
-
- try:
- connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- module.fail_json(msg=str(e))
-
- if state == 'present':
- create_scaling_policy(connection, module)
- elif state == 'absent':
- delete_scaling_policy(connection, module)
-
-
-if __name__ == '__main__':
- main()
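Aside: the create path of this removed module reduces to a handful of boto2 autoscale calls. A minimal sketch, assuming boto2 is available; the region, group name, and policy values below are illustrative, not taken from the module:

import boto.ec2.autoscale
from boto.ec2.autoscale import ScalingPolicy

conn = boto.ec2.autoscale.connect_to_region('us-east-1')

# Build the policy object; this mirrors the module's create branch.
policy = ScalingPolicy(
    name='scaledown-policy',
    adjustment_type='ChangeInCapacity',
    as_name='slave-pool',  # must be an existing autoscaling group
    scaling_adjustment=-1,
    cooldown=300)
conn.create_scaling_policy(policy)

# Re-read the policy to pick up the generated ARN, as the module did.
created = conn.get_all_policies(as_group='slave-pool',
                                policy_names=['scaledown-policy'])[0]
print(created.policy_arn)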
diff --git a/lib/ansible/modules/cloud/amazon/ec2_snapshot_copy.py b/lib/ansible/modules/cloud/amazon/ec2_snapshot_copy.py
deleted file mode 100644
index 1746a6f2ee..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_snapshot_copy.py
+++ /dev/null
@@ -1,201 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-
-DOCUMENTATION = '''
----
-module: ec2_snapshot_copy
-short_description: Copies an EC2 snapshot and returns the new Snapshot ID.
-description:
- - Copies an EC2 Snapshot from a source region to a destination region.
-version_added: "2.4"
-options:
- source_region:
- description:
- - The source region the Snapshot should be copied from.
- required: true
- type: str
- source_snapshot_id:
- description:
- - The ID of the Snapshot in source region that should be copied.
- required: true
- type: str
- description:
- description:
- - An optional human-readable string describing purpose of the new Snapshot.
- type: str
- encrypted:
- description:
- - Whether or not the destination Snapshot should be encrypted.
- type: bool
- default: 'no'
- kms_key_id:
- description:
- - KMS key id used to encrypt snapshot. If not specified, AWS defaults to C(alias/aws/ebs).
- type: str
- wait:
- description:
- - Wait for the copied Snapshot to be in the C(completed) state before returning.
- type: bool
- default: 'no'
- wait_timeout:
- version_added: "2.6"
- description:
- - How long before wait gives up, in seconds.
- default: 600
- type: int
- tags:
- description:
- - A hash/dictionary of tags to add to the new Snapshot; '{"key":"value"}' and '{"key":"value","key":"value"}'
- type: dict
-author: Deepak Kothandan (@Deepakkothandan) <deepak.kdy@gmail.com>
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - boto3
-'''
-
-EXAMPLES = '''
-# Basic Snapshot Copy
-- ec2_snapshot_copy:
- source_region: eu-central-1
- region: eu-west-1
- source_snapshot_id: snap-xxxxxxx
-
-# Copy Snapshot and wait until available
-- ec2_snapshot_copy:
- source_region: eu-central-1
- region: eu-west-1
- source_snapshot_id: snap-xxxxxxx
- wait: yes
- wait_timeout: 1200 # Default timeout is 600
- register: snapshot_id
-
-# Tagged Snapshot copy
-- ec2_snapshot_copy:
- source_region: eu-central-1
- region: eu-west-1
- source_snapshot_id: snap-xxxxxxx
- tags:
- Name: Snapshot-Name
-
-# Encrypted Snapshot copy
-- ec2_snapshot_copy:
- source_region: eu-central-1
- region: eu-west-1
- source_snapshot_id: snap-xxxxxxx
- encrypted: yes
-
-# Encrypted Snapshot copy with specified key
-- ec2_snapshot_copy:
- source_region: eu-central-1
- region: eu-west-1
- source_snapshot_id: snap-xxxxxxx
- encrypted: yes
- kms_key_id: arn:aws:kms:eu-central-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
-'''
-
-RETURN = '''
-snapshot_id:
- description: snapshot id of the newly created snapshot
- returned: when snapshot copy is successful
- type: str
- sample: "snap-e9095e8c"
-'''
-
-import traceback
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info, camel_dict_to_snake_dict)
-from ansible.module_utils._text import to_native
-
-try:
- import boto3
- from botocore.exceptions import ClientError, WaiterError
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-
-def copy_snapshot(module, ec2):
- """
- Copies an EC2 Snapshot to another region
-
- module : AnsibleModule object
- ec2: ec2 connection object
- """
-
- params = {
- 'SourceRegion': module.params.get('source_region'),
- 'SourceSnapshotId': module.params.get('source_snapshot_id'),
- 'Description': module.params.get('description')
- }
-
- if module.params.get('encrypted'):
- params['Encrypted'] = True
-
- if module.params.get('kms_key_id'):
- params['KmsKeyId'] = module.params.get('kms_key_id')
-
- try:
- snapshot_id = ec2.copy_snapshot(**params)['SnapshotId']
- if module.params.get('wait'):
- delay = 15
- # Add one to max_attempts because wait() increments
- # its counter before assessing it for time.sleep()
- max_attempts = (module.params.get('wait_timeout') // delay) + 1
- ec2.get_waiter('snapshot_completed').wait(
- SnapshotIds=[snapshot_id],
- WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts)
- )
- if module.params.get('tags'):
- ec2.create_tags(
- Resources=[snapshot_id],
- Tags=[{'Key': k, 'Value': v} for k, v in module.params.get('tags').items()]
- )
-
- except WaiterError as we:
- module.fail_json(msg='An error occurred waiting for the snapshot to become available. (%s)' % str(we), exception=traceback.format_exc())
- except ClientError as ce:
- module.fail_json(msg=str(ce), exception=traceback.format_exc(), **camel_dict_to_snake_dict(ce.response))
-
- module.exit_json(changed=True, snapshot_id=snapshot_id)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- source_region=dict(required=True),
- source_snapshot_id=dict(required=True),
- description=dict(default=''),
- encrypted=dict(type='bool', default=False, required=False),
- kms_key_id=dict(type='str', required=False),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=600),
- tags=dict(type='dict')))
-
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO3:
- module.fail_json(msg='botocore and boto3 are required.')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- client = boto3_conn(module, conn_type='client', resource='ec2',
- region=region, endpoint=ec2_url, **aws_connect_kwargs)
-
- copy_snapshot(module, client)
-
-
-if __name__ == '__main__':
- main()
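For reference, the underlying boto3 calls are straightforward. A minimal sketch of a cross-region copy using the same waiter the module used; the regions and snapshot ID are illustrative:

import boto3

ec2 = boto3.client('ec2', region_name='eu-west-1')  # destination region

snapshot_id = ec2.copy_snapshot(
    SourceRegion='eu-central-1',
    SourceSnapshotId='snap-0123456789abcdef0',
    Description='copied by hand')['SnapshotId']

# Same waiter the module used; Delay * MaxAttempts bounds the total wait
# (41 attempts at 15s matches the module's default 600s timeout).
ec2.get_waiter('snapshot_completed').wait(
    SnapshotIds=[snapshot_id],
    WaiterConfig={'Delay': 15, 'MaxAttempts': 41})
print(snapshot_id)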
diff --git a/lib/ansible/modules/cloud/amazon/ec2_transit_gateway.py b/lib/ansible/modules/cloud/amazon/ec2_transit_gateway.py
deleted file mode 100644
index 69eadef0e8..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_transit_gateway.py
+++ /dev/null
@@ -1,578 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
-module: ec2_transit_gateway
-short_description: Create and delete AWS Transit Gateways
-description:
- - Creates AWS Transit Gateways.
- - Deletes AWS Transit Gateways.
- - Updates tags on existing transit gateways.
-version_added: "2.8"
-requirements: [ 'botocore', 'boto3' ]
-options:
- asn:
- description:
- - A private Autonomous System Number (ASN) for the Amazon side of a BGP session.
- - The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs.
- type: int
- auto_associate:
- description:
- - Enable or disable automatic association with the default association route table.
- default: true
- type: bool
- auto_attach:
- description:
- - Enable or disable automatic acceptance of attachment requests.
- default: false
- type: bool
- auto_propagate:
- description:
- - Enable or disable automatic propagation of routes to the default propagation route table.
- default: true
- type: bool
- description:
- description:
- - The description of the transit gateway.
- type: str
- dns_support:
- description:
- - Whether to enable AWS DNS support.
- default: true
- type: bool
- purge_tags:
- description:
- - Whether to purge existing tags not included with tags argument.
- default: true
- type: bool
- state:
- description:
- - C(present) to ensure resource is created.
- - C(absent) to remove resource.
- default: present
- choices: [ "present", "absent"]
- type: str
- tags:
- description:
- - A dictionary of resource tags
- type: dict
- transit_gateway_id:
- description:
- - The ID of the transit gateway.
- type: str
- vpn_ecmp_support:
- description:
- - Enable or disable Equal Cost Multipath Protocol support.
- default: true
- type: bool
- wait:
- description:
- - Whether to wait for the transit gateway to reach the requested state before returning.
- default: true
- type: bool
- wait_timeout:
- description:
- - Number of seconds to wait for the transit gateway to reach the requested state.
- default: 300
- type: int
-
-author: "Bob Boldin (@BobBoldin)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Create a new transit gateway using defaults
- ec2_transit_gateway:
- state: present
- region: us-east-1
- description: personal-testing
- register: created_tgw
-
-- name: Create a new transit gateway with options
- ec2_transit_gateway:
- asn: 64514
- auto_associate: no
- auto_propagate: no
- dns_support: True
- description: "nonprod transit gateway"
- purge_tags: False
- state: present
- region: us-east-1
- tags:
- Name: nonprod transit gateway
- status: testing
-
-- name: Remove a transit gateway by description
- ec2_transit_gateway:
- state: absent
- region: us-east-1
- description: personal-testing
-
-- name: Remove a transit gateway by id
- ec2_transit_gateway:
- state: absent
- region: ap-southeast-2
- transit_gateway_id: tgw-3a9aa123
- register: deleted_tgw
-'''
-
-RETURN = '''
-transit_gateway:
- description: The attributes of the transit gateway.
- type: complex
- returned: I(state=present)
- contains:
- creation_time:
- description: The creation time of the transit gateway.
- returned: always
- type: str
- sample: "2019-03-06T17:13:51+00:00"
- description:
- description: The description of the transit gateway.
- returned: always
- type: str
- sample: my test tgw
- options:
- description: The options attributes of the transit gateway
- returned: always
- type: complex
- contains:
- amazon_side_asn:
- description:
- - A private Autonomous System Number (ASN) for the Amazon side of a BGP session.
- The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs.
- returned: always
- type: str
- sample: 64512
- auto_accept_shared_attachments:
- description: Indicates whether attachment requests are automatically accepted.
- returned: always
- type: str
- sample: disable
- default_route_table_association:
- description:
- - Indicates whether resource attachments are automatically
- associated with the default association route table.
- returned: always
- type: str
- sample: enable
- association_default_route_table_id:
- description: The ID of the default association route table.
- returned: when exists
- type: str
- sample: tgw-rtb-abc123444
- default_route_table_propagation:
- description:
- - Indicates whether resource attachments automatically
- propagate routes to the default propagation route table.
- returned: always
- type: str
- sample: disable
- propagation_default_route_table_id:
- description: The ID of the default propagation route table.
- returned: when exists
- type: str
- sample: tgw-rtb-def456777
- vpn_ecmp_support:
- description: Indicates whether Equal Cost Multipath Protocol support is enabled.
- returned: always
- type: str
- sample: enable
- dns_support:
- description: Indicates whether DNS support is enabled.
- returned: always
- type: str
- sample: enable
- owner_id:
- description: The account that owns the transit gateway.
- returned: always
- type: str
- sample: '123456789012'
- state:
- description: The state of the transit gateway.
- returned: always
- type: str
- sample: pending
- tags:
- description: A dictionary of resource tags
- returned: always
- type: dict
- sample:
- tags:
- Name: nonprod_tgw
- transit_gateway_arn:
- description: The Amazon Resource Name (ARN) of the transit gateway.
- returned: always
- type: str
- sample: "arn:aws:ec2:us-east-1:123456789012:transit-gateway/tgw-3a9aa123"
- transit_gateway_id:
- description: The ID of the transit gateway.
- returned: always
- type: str
- sample: tgw-3a9aa123
-'''
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except Exception:
- pass
- # handled by imported AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from time import sleep, time
-from ansible.module_utils._text import to_text
-from ansible.module_utils.ec2 import (
- ansible_dict_to_boto3_tag_list,
- ansible_dict_to_boto3_filter_list,
- AWSRetry,
- boto3_tag_list_to_ansible_dict,
- camel_dict_to_snake_dict,
- compare_aws_tags
-)
-
-
-class AnsibleEc2Tgw(object):
-
- def __init__(self, module, results):
- self._module = module
- self._results = results
- self._connection = self._module.client('ec2')
- self._check_mode = self._module.check_mode
-
- if not hasattr(self._connection, 'describe_transit_gateways'):
- self._module.fail_json(msg='transit gateway module requires boto3 >= 1.9.52')
-
- def process(self):
- """ Process the request based on state parameter .
- state = present will search for an existing tgw based and return the object data.
- if no object is found it will be created
-
- state = absent will attempt to remove the tgw however will fail if it still has
- attachments or associations
- """
- description = self._module.params.get('description')
- state = self._module.params.get('state', 'present')
- tgw_id = self._module.params.get('transit_gateway_id')
-
- if state == 'present':
- self.ensure_tgw_present(tgw_id, description)
- elif state == 'absent':
- self.ensure_tgw_absent(tgw_id, description)
-
- def wait_for_status(self, wait_timeout, tgw_id, status, skip_deleted=True):
- """
- Wait for the Transit Gateway to reach the specified status
- :param wait_timeout: Number of seconds to wait, until this timeout is reached.
- :param tgw_id: The ID of the transit gateway.
- :param status: The status to wait for.
- examples. status=available, status=deleted
- :param skip_deleted: ignore deleted transit gateways
- :return dict: transit gateway object
- """
- polling_increment_secs = 5
- wait_timeout = time() + wait_timeout
- status_achieved = False
- transit_gateway = dict()
-
- while wait_timeout > time():
- try:
- transit_gateway = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=skip_deleted)
-
- if transit_gateway:
- if self._check_mode:
- transit_gateway['state'] = status
-
- if transit_gateway.get('state') == status:
- status_achieved = True
- break
-
- elif transit_gateway.get('state') == 'failed':
- break
-
- else:
- sleep(polling_increment_secs)
-
- except ClientError as e:
- self._module.fail_json_aws(e)
-
- if not status_achieved:
- self._module.fail_json(
- msg="Wait time out reached, while waiting for results")
-
- return transit_gateway
-
- def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True):
- """ search for an existing tgw by either tgw_id or description
- :param tgw_id: The AWS id of the transit gateway
- :param description: The description of the transit gateway.
- :param skip_deleted: ignore deleted transit gateways
- :return dict: transit gateway object
- """
- filters = []
- if tgw_id:
- filters = ansible_dict_to_boto3_filter_list({'transit-gateway-id': tgw_id})
-
- try:
- response = AWSRetry.exponential_backoff()(self._connection.describe_transit_gateways)(Filters=filters)
- except (ClientError, BotoCoreError) as e:
- self._module.fail_json_aws(e)
-
- tgw = None
- tgws = []
-
- if len(response.get('TransitGateways', [])) == 1 and tgw_id:
- if (response['TransitGateways'][0]['State'] != 'deleted') or not skip_deleted:
- tgws.extend(response['TransitGateways'])
-
- for gateway in response.get('TransitGateways', []):
- if description == gateway['Description'] and gateway['State'] != 'deleted':
- tgws.append(gateway)
-
- if len(tgws) > 1:
- self._module.fail_json(
- msg='EC2 returned more than one transit Gateway for description {0}, aborting'.format(description))
- elif tgws:
- tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=['Tags'])
- tgw['tags'] = boto3_tag_list_to_ansible_dict(tgws[0]['Tags'])
-
- return tgw
-
- @staticmethod
- def enable_option_flag(flag):
- disabled = "disable"
- enabled = "enable"
- if flag:
- return enabled
- return disabled
-
- def create_tgw(self, description):
- """
- Create a transit gateway and optionally wait for status to become available.
-
- :param description: The description of the transit gateway.
- :return dict: transit gateway object
- """
- options = dict()
- wait = self._module.params.get('wait')
- wait_timeout = self._module.params.get('wait_timeout')
-
- if self._module.params.get('asn'):
- options['AmazonSideAsn'] = self._module.params.get('asn')
-
- options['AutoAcceptSharedAttachments'] = self.enable_option_flag(self._module.params.get('auto_attach'))
- options['DefaultRouteTableAssociation'] = self.enable_option_flag(self._module.params.get('auto_associate'))
- options['DefaultRouteTablePropagation'] = self.enable_option_flag(self._module.params.get('auto_propagate'))
- options['VpnEcmpSupport'] = self.enable_option_flag(self._module.params.get('vpn_ecmp_support'))
- options['DnsSupport'] = self.enable_option_flag(self._module.params.get('dns_support'))
-
- try:
- response = self._connection.create_transit_gateway(Description=description, Options=options)
- except (ClientError, BotoCoreError) as e:
- self._module.fail_json_aws(e)
-
- tgw_id = response['TransitGateway']['TransitGatewayId']
-
- if wait:
- result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="available")
- else:
- result = self.get_matching_tgw(tgw_id=tgw_id)
-
- self._results['msg'] = 'Transit gateway {0} created'.format(result['transit_gateway_id'])
-
- return result
-
- def delete_tgw(self, tgw_id):
- """
- Delete the transit gateway and optionally wait for its status to become deleted.
-
- :param tgw_id: The id of the transit gateway
- :return dict: transit gateway object
- """
- wait = self._module.params.get('wait')
- wait_timeout = self._module.params.get('wait_timeout')
-
- try:
- response = self._connection.delete_transit_gateway(TransitGatewayId=tgw_id)
- except (ClientError, BotoCoreError) as e:
- self._module.fail_json_aws(e)
-
- if wait:
- result = self.wait_for_status(wait_timeout=wait_timeout, tgw_id=tgw_id, status="deleted", skip_deleted=False)
- else:
- result = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=False)
-
- self._results['msg'] = 'Transit gateway {0} deleted'.format(tgw_id)
-
- return result
-
- def ensure_tags(self, tgw_id, tags, purge_tags):
- """
- Ensures tags are applied to the transit gateway. Optionally will remove any
- existing tags not in the tags argument if purge_tags is set to true
-
- :param tgw_id: The AWS id of the transit gateway
- :param tags: dict of tags to apply to the transit gateway.
- :param purge_tags: when true, existing tags not in the tags param are removed
- :return: true if tags were updated
- """
- tags_changed = False
- filters = ansible_dict_to_boto3_filter_list({'resource-id': tgw_id})
- try:
- cur_tags = self._connection.describe_tags(Filters=filters)
- except (ClientError, BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="Couldn't describe tags")
-
- to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
-
- if to_update:
- try:
- if not self._check_mode:
- AWSRetry.exponential_backoff()(self._connection.create_tags)(
- Resources=[tgw_id],
- Tags=ansible_dict_to_boto3_tag_list(to_update)
- )
- self._results['changed'] = True
- tags_changed = True
- except (ClientError, BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="Couldn't create tags {0} for resource {1}".format(
- ansible_dict_to_boto3_tag_list(to_update), tgw_id))
-
- if to_delete:
- try:
- if not self._check_mode:
- tags_list = []
- for key in to_delete:
- tags_list.append({'Key': key})
-
- AWSRetry.exponential_backoff()(self._connection.delete_tags)(
- Resources=[tgw_id],
- Tags=tags_list
- )
- self._results['changed'] = True
- tags_changed = True
- except (ClientError, BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="Couldn't delete tags {0} for resource {1}".format(
- ansible_dict_to_boto3_tag_list(to_delete), tgw_id))
-
- return tags_changed
-
- def ensure_tgw_present(self, tgw_id=None, description=None):
- """
- Creates a tgw if no match for the tgw_id or description is found.
- Updates the tgw tags if a matching one is found but its tags are not in sync.
-
- :param tgw_id: The AWS id of the transit gateway
- :param description: The description of the transit gateway.
- :return dict: transit gateway object
- """
- tgw = self.get_matching_tgw(tgw_id, description)
-
- if tgw is None:
- if self._check_mode:
- self._results['changed'] = True
- self._results['transit_gateway_id'] = None
- return self._results
-
- try:
- if not description:
- self._module.fail_json(msg="Failed to create Transit Gateway: description argument required")
- tgw = self.create_tgw(description)
- self._results['changed'] = True
- except (BotoCoreError, ClientError) as e:
- self._module.fail_json_aws(e, msg='Unable to create Transit Gateway')
-
- if self._module.params.get('tags') != tgw.get('tags'):
- stringed_tags_dict = dict((to_text(k), to_text(v)) for k, v in self._module.params.get('tags').items())
- if self.ensure_tags(tgw['transit_gateway_id'], stringed_tags_dict, self._module.params.get('purge_tags')):
- self._results['changed'] = True
-
- self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'])
-
- return self._results
-
- def ensure_tgw_absent(self, tgw_id=None, description=None):
- """
- Deletes the tgw if a single matching tgw is found that is not yet in deleted status.
-
- :param tgw_id: The AWS id of the transit gateway
- :param description: The description of the transit gateway.
- :return dict: transit gateway object
- """
- self._results['transit_gateway_id'] = None
- tgw = self.get_matching_tgw(tgw_id, description)
-
- if tgw is not None:
- if self._check_mode:
- self._results['changed'] = True
- return self._results
-
- try:
- tgw = self.delete_tgw(tgw_id=tgw['transit_gateway_id'])
- self._results['changed'] = True
- self._results['transit_gateway'] = self.get_matching_tgw(tgw_id=tgw['transit_gateway_id'],
- skip_deleted=False)
- except (BotoCoreError, ClientError) as e:
- self._module.fail_json_aws(e, msg='Unable to delete Transit Gateway')
-
- return self._results
-
-
-def setup_module_object():
- """
- merge argument spec and create Ansible module object
- :return: Ansible module object
- """
-
- argument_spec = dict(
- asn=dict(type='int'),
- auto_associate=dict(type='bool', default=True),
- auto_attach=dict(type='bool', default=False),
- auto_propagate=dict(type='bool', default=True),
- description=dict(type='str'),
- dns_support=dict(type='bool', default=True),
- purge_tags=dict(type='bool', default=True),
- state=dict(default='present', choices=['present', 'absent']),
- tags=dict(default=dict(), type='dict'),
- transit_gateway_id=dict(type='str'),
- vpn_ecmp_support=dict(type='bool', default=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=300)
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- required_one_of=[('description', 'transit_gateway_id')],
- supports_check_mode=True,
- )
-
- return module
-
-
-def main():
-
- module = setup_module_object()
-
- results = dict(
- changed=False
- )
-
- tgw_manager = AnsibleEc2Tgw(module=module, results=results)
- tgw_manager.process()
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
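For context, the create path is a single create_transit_gateway call with an Options dict of enable/disable strings, which is how the module mapped its boolean parameters. A minimal sketch; the description, ASN, and option values are illustrative:

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')

response = ec2.create_transit_gateway(
    Description='personal-testing',
    Options={
        'AmazonSideAsn': 64514,
        # Boolean module options map to 'enable'/'disable' strings.
        'AutoAcceptSharedAttachments': 'disable',
        'DefaultRouteTableAssociation': 'enable',
        'DefaultRouteTablePropagation': 'enable',
        'VpnEcmpSupport': 'enable',
        'DnsSupport': 'enable',
    })
print(response['TransitGateway']['TransitGatewayId'])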
diff --git a/lib/ansible/modules/cloud/amazon/ec2_transit_gateway_info.py b/lib/ansible/modules/cloud/amazon/ec2_transit_gateway_info.py
deleted file mode 100644
index 94f86ec11e..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_transit_gateway_info.py
+++ /dev/null
@@ -1,268 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'supported_by': 'community',
- 'status': ['preview']
-}
-
-DOCUMENTATION = '''
-module: ec2_transit_gateway_info
-short_description: Gather information about ec2 transit gateways in AWS
-description:
- - Gather information about ec2 transit gateways in AWS
-version_added: "2.8"
-author: "Bob Boldin (@BobBoldin)"
-requirements:
- - botocore
- - boto3
-options:
- transit_gateway_ids:
- description:
- - A list of transit gateway IDs to gather information for.
- version_added: "2.8"
- aliases: [transit_gateway_id]
- type: list
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGateways.html) for filters.
- type: dict
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather info about all transit gateways
-- ec2_transit_gateway_info:
-
-# Gather info about a particular transit gateway using filter transit gateway ID
-- ec2_transit_gateway_info:
- filters:
- transit-gateway-id: tgw-02c42332e6b7da829
-
-# Gather info about a particular transit gateway using multiple option filters
-- ec2_transit_gateway_info:
- filters:
- options.dns-support: enable
- options.vpn-ecmp-support: enable
-
-# Gather info about multiple transit gateways using module param
-- ec2_transit_gateway_info:
- transit_gateway_ids:
- - tgw-02c42332e6b7da829
- - tgw-03c53443d5a8cb716
-'''
-
-RETURN = '''
-transit_gateways:
- description: >
- Transit gateways that match the provided filters. Each element consists of a dict with all the information
- related to that transit gateway.
- returned: on success
- type: complex
- contains:
- creation_time:
- description: The creation time.
- returned: always
- type: str
- sample: "2019-02-05T16:19:58+00:00"
- description:
- description: The description of the transit gateway.
- returned: always
- type: str
- sample: "A transit gateway"
- options:
- description: A dictionary of the transit gateway options.
- returned: always
- type: complex
- contains:
- amazon_side_asn:
- description:
- - A private Autonomous System Number (ASN) for the Amazon
- side of a BGP session. The range is 64512 to 65534 for
- 16-bit ASNs and 4200000000 to 4294967294 for 32-bit ASNs.
- returned: always
- type: int
- sample: 64512
- auto_accept_shared_attachments:
- description:
- - Indicates whether attachment requests are automatically accepted.
- returned: always
- type: str
- sample: "enable"
- default_route_table_association:
- description:
- - Indicates whether resource attachments are automatically
- associated with the default association route table.
- returned: always
- type: str
- sample: "disable"
- association_default_route_table_id:
- description:
- - The ID of the default association route table.
- returned: when present
- type: str
- sample: "rtb-11223344"
- default_route_table_propagation:
- description:
- - Indicates whether resource attachments automatically
- propagate routes to the default propagation route table.
- returned: always
- type: str
- sample: "disable"
- dns_support:
- description:
- - Indicates whether DNS support is enabled.
- returned: always
- type: str
- sample: "enable"
- propagation_default_route_table_id:
- description:
- - The ID of the default propagation route table.
- returned: when present
- type: str
- sample: "rtb-11223344"
- vpn_ecmp_support:
- description:
- - Indicates whether Equal Cost Multipath Protocol support
- is enabled.
- returned: always
- type: str
- sample: "enable"
- owner_id:
- description: The AWS account ID that owns the transit gateway.
- returned: always
- type: str
- sample: "1234567654323"
- state:
- description: The state of the transit gateway.
- returned: always
- type: str
- sample: "available"
- tags:
- description: A dict of tags associated with the transit gateway.
- returned: always
- type: dict
- sample: '{
- "Name": "A sample TGW"
- }'
- transit_gateway_arn:
- description: The Amazon Resource Name (ARN) of the transit gateway.
- returned: always
- type: str
- sample: "arn:aws:ec2:us-west-2:1234567654323:transit-gateway/tgw-02c42332e6b7da829"
- transit_gateway_id:
- description: The ID of the transit gateway.
- returned: always
- type: str
- sample: "tgw-02c42332e6b7da829"
-'''
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except Exception:
- pass
- # handled by imported AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (
- AWSRetry,
- boto3_tag_list_to_ansible_dict,
- camel_dict_to_snake_dict,
- ansible_dict_to_boto3_filter_list
-)
-
-
-class AnsibleEc2TgwInfo(object):
-
- def __init__(self, module, results):
- self._module = module
- self._results = results
- self._connection = self._module.client('ec2')
- self._check_mode = self._module.check_mode
-
- if not hasattr(self._connection, 'describe_transit_gateways'):
- self._module.fail_json(msg='transit gateway module requires boto3 >= 1.9.52')
-
- @AWSRetry.exponential_backoff()
- def describe_transit_gateways(self):
- """
- Describe transit gateways.
-
- module : AnsibleAWSModule object
- connection : boto3 client connection object
- """
- # collect parameters
- filters = ansible_dict_to_boto3_filter_list(self._module.params['filters'])
- transit_gateway_ids = self._module.params['transit_gateway_ids']
-
- # init empty list for return vars
- transit_gateway_info = list()
-
- # Get the basic transit gateway info
- try:
- response = self._connection.describe_transit_gateways(
- TransitGatewayIds=transit_gateway_ids, Filters=filters)
- except ClientError as e:
- if e.response['Error']['Code'] == 'InvalidTransitGatewayID.NotFound':
- self._results['transit_gateways'] = []
- return
- raise
-
- for transit_gateway in response['TransitGateways']:
- transit_gateway_info.append(camel_dict_to_snake_dict(transit_gateway, ignore_list=['Tags']))
- # convert tag list to ansible dict
- transit_gateway_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(transit_gateway.get('Tags', []))
-
- self._results['transit_gateways'] = transit_gateway_info
- return
-
-
-def setup_module_object():
- """
- merge argument spec and create Ansible module object
- :return: Ansible module object
- """
-
- argument_spec = dict(
- transit_gateway_ids=dict(type='list', default=[], elements='str', aliases=['transit_gateway_id']),
- filters=dict(type='dict', default={})
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- return module
-
-
-def main():
-
- module = setup_module_object()
-
- results = dict(
- changed=False
- )
-
- tgwf_manager = AnsibleEc2TgwInfo(module=module, results=results)
- try:
- tgwf_manager.describe_transit_gateways()
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
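The info module above is a thin wrapper over describe_transit_gateways. A minimal sketch of the same call with one of the documented filters; the region and filter value are illustrative:

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')

response = ec2.describe_transit_gateways(
    Filters=[{'Name': 'options.dns-support', 'Values': ['enable']}])
for tgw in response['TransitGateways']:
    print(tgw['TransitGatewayId'], tgw['State'])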
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_egress_igw.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_egress_igw.py
deleted file mode 100644
index 5e9d587a7c..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_egress_igw.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_egress_igw
-short_description: Manage an AWS VPC Egress Only Internet gateway
-description:
- - Manage an AWS VPC Egress Only Internet gateway
-version_added: "2.5"
-author: Daniel Shepherd (@shepdelacreme)
-options:
- vpc_id:
- description:
- - The VPC ID for the VPC that this Egress Only Internet Gateway should be attached.
- required: true
- type: str
- state:
- description:
- - Create or delete the EIGW.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
- # Ensure that the VPC has an Egress-Only Internet Gateway.
- # The Egress-Only Internet Gateway ID can be accessed via {{eigw.gateway_id}} for use in other tasks.
-ec2_vpc_egress_igw:
- vpc_id: vpc-abcdefgh
- state: present
-register: eigw
-
-'''
-
-RETURN = '''
-gateway_id:
- description: The ID of the Egress Only Internet Gateway or Null.
- returned: always
- type: str
- sample: eigw-0e00cf111ba5bc11e
-vpc_id:
- description: The ID of the VPC to attach or detach gateway from.
- returned: always
- type: str
- sample: vpc-012345678
-'''
-
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def delete_eigw(module, conn, eigw_id):
- """
- Delete EIGW.
-
- module : AnsibleModule object
- conn : boto3 client connection object
- eigw_id : ID of the EIGW to delete
- """
- changed = False
-
- try:
- response = conn.delete_egress_only_internet_gateway(DryRun=module.check_mode, EgressOnlyInternetGatewayId=eigw_id)
- except botocore.exceptions.ClientError as e:
- # When boto3 method is run with DryRun=True it returns an error on success
- # We need to catch the error and return something valid
- if e.response.get('Error', {}).get('Code') == "DryRunOperation":
- changed = True
- else:
- module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id))
-
- if not module.check_mode:
- changed = response.get('ReturnCode', False)
-
- return changed
-
-
-def create_eigw(module, conn, vpc_id):
- """
- Create EIGW.
-
- module : AnsibleModule object
- conn : boto3 client connection object
- vpc_id : ID of the VPC we are operating on
- """
- gateway_id = None
- changed = False
-
- try:
- response = conn.create_egress_only_internet_gateway(DryRun=module.check_mode, VpcId=vpc_id)
- except botocore.exceptions.ClientError as e:
- # When boto3 method is run with DryRun=True it returns an error on success
- # We need to catch the error and return something valid
- if e.response.get('Error', {}).get('Code') == "DryRunOperation":
- changed = True
- elif e.response.get('Error', {}).get('Code') == "InvalidVpcID.NotFound":
- module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id))
- else:
- module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id))
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id))
-
- if not module.check_mode:
- gateway = response.get('EgressOnlyInternetGateway', {})
- state = gateway.get('Attachments', [{}])[0].get('State')
- gateway_id = gateway.get('EgressOnlyInternetGatewayId')
-
- if gateway_id and state in ('attached', 'attaching'):
- changed = True
- else:
- # EIGW gave back a bad attachment state or an invalid response so we error out
- module.fail_json(msg='Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. Bad or no state in response'.format(vpc_id),
- **camel_dict_to_snake_dict(response))
-
- return changed, gateway_id
-
-
-def describe_eigws(module, conn, vpc_id):
- """
- Describe EIGWs.
-
- module : AnsibleModule object
- conn : boto3 client connection object
- vpc_id : ID of the VPC we are operating on
- """
- gateway_id = None
-
- try:
- response = conn.describe_egress_only_internet_gateways()
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Could not get list of existing Egress-Only Internet Gateways")
-
- for eigw in response.get('EgressOnlyInternetGateways', []):
- for attachment in eigw.get('Attachments', []):
- if attachment.get('VpcId') == vpc_id and attachment.get('State') in ('attached', 'attaching'):
- gateway_id = eigw.get('EgressOnlyInternetGatewayId')
-
- return gateway_id
-
-
-def main():
- argument_spec = dict(
- vpc_id=dict(required=True),
- state=dict(default='present', choices=['present', 'absent'])
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
- connection = module.client('ec2')
-
- vpc_id = module.params.get('vpc_id')
- state = module.params.get('state')
-
- eigw_id = describe_eigws(module, connection, vpc_id)
-
- result = dict(gateway_id=eigw_id, vpc_id=vpc_id)
- changed = False
-
- if state == 'present' and not eigw_id:
- changed, result['gateway_id'] = create_eigw(module, connection, vpc_id)
- elif state == 'absent' and eigw_id:
- changed = delete_eigw(module, connection, eigw_id)
-
- module.exit_json(changed=changed, **result)
-
-
-if __name__ == '__main__':
- main()
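For reference, both module actions map onto single boto3 calls. A minimal sketch of create plus delete; the VPC ID is illustrative:

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')

gateway = ec2.create_egress_only_internet_gateway(
    VpcId='vpc-0123456789abcdef0')['EgressOnlyInternetGateway']
eigw_id = gateway['EgressOnlyInternetGatewayId']
print(eigw_id, gateway['Attachments'][0]['State'])

# Deletion is a single call keyed on the gateway ID.
ec2.delete_egress_only_internet_gateway(EgressOnlyInternetGatewayId=eigw_id)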
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint.py
deleted file mode 100644
index 3d6b3fb8f3..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint.py
+++ /dev/null
@@ -1,400 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
-module: ec2_vpc_endpoint
-short_description: Create and delete AWS VPC Endpoints.
-description:
- - Creates AWS VPC endpoints.
- - Deletes AWS VPC endpoints.
- - This module supports check mode.
-version_added: "2.4"
-requirements: [ boto3 ]
-options:
- vpc_id:
- description:
- - Required when creating a VPC endpoint.
- required: false
- type: str
- service:
- description:
- - An AWS supported vpc endpoint service. Use the M(ec2_vpc_endpoint_info)
- module to describe the supported endpoint services.
- - Required when creating an endpoint.
- required: false
- type: str
- policy:
- description:
- - A properly formatted json policy as string, see
- U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813).
- Cannot be used with I(policy_file).
- - Optional when creating an endpoint. If not provided, AWS will
- utilise a default policy which provides full access to the service.
- required: false
- type: json
- policy_file:
- description:
- - The path to the properly json formatted policy file, see
- U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813)
- on how to use it properly. Cannot be used with I(policy).
- - Optional when creating an endpoint. If not provided, AWS will
- utilise a default policy which provides full access to the service.
- required: false
- aliases: [ "policy_path" ]
- type: path
- state:
- description:
- - C(present) to ensure resource is created.
- - C(absent) to remove resource.
- required: false
- default: present
- choices: [ "present", "absent"]
- type: str
- wait:
- description:
- - When specified, waits for the endpoint to reach the available state when I(state=present).
- Unfortunately this is ignored for delete actions due to a difference in
- behaviour from AWS.
- required: false
- default: no
- type: bool
- wait_timeout:
- description:
- - Used in conjunction with wait. Number of seconds to wait for status.
- Unfortunately this is ignored for delete actions due to a difference in
- behaviour from AWS.
- required: false
- default: 320
- type: int
- route_table_ids:
- description:
- - List of one or more route table ids to attach to the endpoint. A route
- is added to the route table with the destination of the endpoint if
- provided.
- required: false
- type: list
- elements: str
- vpc_endpoint_id:
- description:
- - One or more vpc endpoint ids to remove from the AWS account
- required: false
- type: str
- client_token:
- description:
- - Optional client token to ensure idempotency
- required: false
- type: str
-author: Karen Cheng (@Etherdaemon)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create new vpc endpoint with a json template for policy
- ec2_vpc_endpoint:
- state: present
- region: ap-southeast-2
- vpc_id: vpc-12345678
- service: com.amazonaws.ap-southeast-2.s3
- policy: " {{ lookup( 'template', 'endpoint_policy.json.j2') }} "
- route_table_ids:
- - rtb-12345678
- - rtb-87654321
- register: new_vpc_endpoint
-
-- name: Create new vpc endpoint with the default policy
- ec2_vpc_endpoint:
- state: present
- region: ap-southeast-2
- vpc_id: vpc-12345678
- service: com.amazonaws.ap-southeast-2.s3
- route_table_ids:
- - rtb-12345678
- - rtb-87654321
- register: new_vpc_endpoint
-
-- name: Create new vpc endpoint with json file
- ec2_vpc_endpoint:
- state: present
- region: ap-southeast-2
- vpc_id: vpc-12345678
- service: com.amazonaws.ap-southeast-2.s3
- policy_file: "{{ role_path }}/files/endpoint_policy.json"
- route_table_ids:
- - rtb-12345678
- - rtb-87654321
- register: new_vpc_endpoint
-
-- name: Delete newly created vpc endpoint
- ec2_vpc_endpoint:
- state: absent
- vpc_endpoint_id: "{{ new_vpc_endpoint.result['VpcEndpointId'] }}"
- region: ap-southeast-2
-'''
-
-RETURN = '''
-endpoints:
- description: The resulting endpoints from the module call
- returned: success
- type: list
- sample: [
- {
- "creation_timestamp": "2017-02-20T05:04:15+00:00",
- "policy_document": {
- "Id": "Policy1450910922815",
- "Statement": [
- {
- "Action": "s3:*",
- "Effect": "Allow",
- "Principal": "*",
- "Resource": [
- "arn:aws:s3:::*/*",
- "arn:aws:s3:::*"
- ],
- "Sid": "Stmt1450910920641"
- }
- ],
- "Version": "2012-10-17"
- },
- "route_table_ids": [
- "rtb-abcd1234"
- ],
- "service_name": "com.amazonaws.ap-southeast-2.s3",
- "vpc_endpoint_id": "vpce-a1b2c3d4",
- "vpc_id": "vpc-abbad0d0"
- }
- ]
-'''
-
-import datetime
-import json
-import time
-import traceback
-
-try:
- import botocore
-except ImportError:
- pass # will be picked up by imported HAS_BOTO3
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (get_aws_connection_info, boto3_conn, ec2_argument_spec, HAS_BOTO3,
- camel_dict_to_snake_dict)
-from ansible.module_utils.six import string_types
-
-
-def date_handler(obj):
- return obj.isoformat() if hasattr(obj, 'isoformat') else obj
-
-
-def wait_for_status(client, module, resource_id, status):
- polling_increment_secs = 15
- max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
- status_achieved = False
-
- for x in range(0, max_retries):
- try:
- resource = get_endpoints(client, module, resource_id)['VpcEndpoints'][0]
- if resource['State'] == status:
- status_achieved = True
- break
- else:
- time.sleep(polling_increment_secs)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=str(e), exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- return status_achieved, resource
-
-
-def get_endpoints(client, module, resource_id=None):
- params = dict()
- if resource_id:
- params['VpcEndpointIds'] = [resource_id]
-
- result = json.loads(json.dumps(client.describe_vpc_endpoints(**params), default=date_handler))
- return result
-
-
-def setup_creation(client, module):
- vpc_id = module.params.get('vpc_id')
- service_name = module.params.get('service')
-
- if module.params.get('route_table_ids'):
- route_table_ids = module.params.get('route_table_ids')
- existing_endpoints = get_endpoints(client, module)
- for endpoint in existing_endpoints['VpcEndpoints']:
- if endpoint['VpcId'] == vpc_id and endpoint['ServiceName'] == service_name:
- sorted_endpoint_rt_ids = sorted(endpoint['RouteTableIds'])
- sorted_route_table_ids = sorted(route_table_ids)
- if sorted_endpoint_rt_ids == sorted_route_table_ids:
- return False, camel_dict_to_snake_dict(endpoint)
-
- changed, result = create_vpc_endpoint(client, module)
-
- return changed, json.loads(json.dumps(result, default=date_handler))
-
-
-def create_vpc_endpoint(client, module):
- params = dict()
- changed = False
- token_provided = False
- params['VpcId'] = module.params.get('vpc_id')
- params['ServiceName'] = module.params.get('service')
- params['DryRun'] = module.check_mode
-
- if module.params.get('route_table_ids'):
- params['RouteTableIds'] = module.params.get('route_table_ids')
-
- if module.params.get('client_token'):
- token_provided = True
- request_time = datetime.datetime.utcnow()
- params['ClientToken'] = module.params.get('client_token')
-
- policy = None
- if module.params.get('policy'):
- try:
- policy = json.loads(module.params.get('policy'))
- except ValueError as e:
- module.fail_json(msg=str(e), exception=traceback.format_exc())
-
- elif module.params.get('policy_file'):
- try:
- with open(module.params.get('policy_file'), 'r') as json_data:
- policy = json.load(json_data)
- except Exception as e:
- module.fail_json(msg=str(e), exception=traceback.format_exc())
-
- if policy:
- params['PolicyDocument'] = json.dumps(policy)
-
- try:
- changed = True
- result = camel_dict_to_snake_dict(client.create_vpc_endpoint(**params)['VpcEndpoint'])
- if token_provided and (request_time > result['creation_timestamp'].replace(tzinfo=None)):
- changed = False
- elif module.params.get('wait') and not module.check_mode:
- status_achieved, result = wait_for_status(client, module, result['vpc_endpoint_id'], 'available')
- if not status_achieved:
- module.fail_json(msg='Error waiting for vpc endpoint to become available - please check the AWS console')
- except botocore.exceptions.ClientError as e:
- if "DryRunOperation" in e.message:
- changed = True
- result = 'Would have created VPC Endpoint if not in check mode'
- elif "IdempotentParameterMismatch" in e.message:
- module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API")
- elif "RouteAlreadyExists" in e.message:
- module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API")
- else:
- module.fail_json(msg=str(e), exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- except Exception as e:
- module.fail_json(msg=str(e), exception=traceback.format_exc())
-
- return changed, result
-
-
-def setup_removal(client, module):
- params = dict()
- changed = False
- params['DryRun'] = module.check_mode
- if isinstance(module.params.get('vpc_endpoint_id'), string_types):
- params['VpcEndpointIds'] = [module.params.get('vpc_endpoint_id')]
- else:
- params['VpcEndpointIds'] = module.params.get('vpc_endpoint_id')
- try:
- result = client.delete_vpc_endpoints(**params)['Unsuccessful']
- if not module.check_mode and (result != []):
- module.fail_json(msg=result)
- except botocore.exceptions.ClientError as e:
- if "DryRunOperation" in e.message:
- changed = True
- result = 'Would have deleted VPC Endpoint if not in check mode'
- else:
- module.fail_json(msg=str(e), exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- except Exception as e:
- module.fail_json(msg=str(e), exception=traceback.format_exc())
- return changed, result
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- vpc_id=dict(),
- service=dict(),
- policy=dict(type='json'),
- policy_file=dict(type='path', aliases=['policy_path']),
- state=dict(default='present', choices=['present', 'absent']),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=320, required=False),
- route_table_ids=dict(type='list'),
- vpc_endpoint_id=dict(),
- client_token=dict(),
- )
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[['policy', 'policy_file']],
- required_if=[
- ['state', 'present', ['vpc_id', 'service']],
- ['state', 'absent', ['vpc_endpoint_id']],
- ]
- )
-
- # Validate Requirements
- if not HAS_BOTO3:
- module.fail_json(msg='botocore and boto3 are required for this module')
-
- state = module.params.get('state')
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- except NameError as e:
- # Getting around the get_aws_connection_info boto reliance for region
- if "global name 'boto' is not defined" in e.message:
- module.params['region'] = botocore.session.get_session().get_config_variable('region')
- if not module.params['region']:
- module.fail_json(msg="Error - no region provided")
- else:
- module.fail_json(msg="Can't retrieve connection information - " + str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg="Failed to connect to AWS due to wrong or missing credentials: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- # Ensure resource is present
- if state == 'present':
- (changed, results) = setup_creation(ec2, module)
- else:
- (changed, results) = setup_removal(ec2, module)
-
- module.exit_json(changed=changed, result=results)
-
-
-if __name__ == '__main__':
- main()
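For context, endpoint creation is one create_vpc_endpoint call; the module's extra logic is policy handling, idempotency checks, and waiting. A minimal sketch; the VPC, service, and route table IDs are illustrative:

import boto3

ec2 = boto3.client('ec2', region_name='ap-southeast-2')

endpoint = ec2.create_vpc_endpoint(
    VpcId='vpc-12345678',
    ServiceName='com.amazonaws.ap-southeast-2.s3',
    RouteTableIds=['rtb-12345678'])['VpcEndpoint']
print(endpoint['VpcEndpointId'], endpoint['State'])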
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint_info.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint_info.py
deleted file mode 100644
index 9f1c8f261f..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint_info.py
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/python
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: ec2_vpc_endpoint_info
- short_description: Retrieves AWS VPC endpoint details using AWS methods.
-description:
- - Gets various details related to AWS VPC Endpoints.
- - This module was called C(ec2_vpc_endpoint_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.4"
-requirements: [ boto3 ]
-options:
- query:
- description:
- - Specifies the query action to take. C(services) returns the supported
- AWS services that can be specified when creating an endpoint; C(endpoints) returns details of existing VPC endpoints.
- required: True
- choices:
- - services
- - endpoints
- type: str
- vpc_endpoint_ids:
- description:
- - Get details of specific endpoint IDs
- type: list
- elements: str
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpoints.html)
- for possible filters.
- type: dict
-author: Karen Cheng (@Etherdaemon)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
- # Simple example of listing all supported AWS services for VPC endpoints
-- name: List supported AWS endpoint services
- ec2_vpc_endpoint_info:
- query: services
- region: ap-southeast-2
- register: supported_endpoint_services
-
-- name: Get all endpoints in ap-southeast-2 region
- ec2_vpc_endpoint_info:
- query: endpoints
- region: ap-southeast-2
- register: existing_endpoints
-
-- name: Get all endpoints with specific filters
- ec2_vpc_endpoint_info:
- query: endpoints
- region: ap-southeast-2
- filters:
- vpc-id:
- - vpc-12345678
- - vpc-87654321
- vpc-endpoint-state:
- - available
- - pending
- register: existing_endpoints
-
-- name: Get details on specific endpoint
- ec2_vpc_endpoint_info:
- query: endpoints
- region: ap-southeast-2
- vpc_endpoint_ids:
- - vpce-12345678
- register: endpoint_details
-'''
-
-RETURN = '''
-service_names:
- description: AWS VPC endpoint service names
- returned: I(query) is C(services)
- type: list
- sample:
- service_names:
- - com.amazonaws.ap-southeast-2.s3
-vpc_endpoints:
- description:
- - A list of endpoints that match the query. Each endpoint has the keys creation_timestamp,
- policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id.
- returned: I(query) is C(endpoints)
- type: list
- sample:
- vpc_endpoints:
- - creation_timestamp: "2017-02-16T11:06:48+00:00"
- policy_document: >
- "{\"Version\":\"2012-10-17\",\"Id\":\"Policy1450910922815\",
- \"Statement\":[{\"Sid\":\"Stmt1450910920641\",\"Effect\":\"Allow\",
- \"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::*/*\",\"arn:aws:s3:::*\"]}]}"
- route_table_ids:
- - rtb-abcd1234
- service_name: "com.amazonaws.ap-southeast-2.s3"
- state: "available"
- vpc_endpoint_id: "vpce-abbad0d0"
- vpc_id: "vpc-1111ffff"
-'''
-
-import json
-
-try:
- import botocore
-except ImportError:
- pass # will be picked up from imported HAS_BOTO3
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (ec2_argument_spec, boto3_conn, get_aws_connection_info,
- ansible_dict_to_boto3_filter_list, HAS_BOTO3, camel_dict_to_snake_dict, AWSRetry)
-
-
-def date_handler(obj):
- return obj.isoformat() if hasattr(obj, 'isoformat') else obj
-
-
-@AWSRetry.exponential_backoff()
-def get_supported_services(client, module):
- results = list()
- params = dict()
- while True:
- response = client.describe_vpc_endpoint_services(**params)
- results.extend(response['ServiceNames'])
- if 'NextToken' in response:
- params['NextToken'] = response['NextToken']
- else:
- break
- return dict(service_names=results)
-
-
-@AWSRetry.exponential_backoff()
-def get_endpoints(client, module):
- results = list()
- params = dict()
- params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
- if module.params.get('vpc_endpoint_ids'):
- params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids')
- while True:
- response = client.describe_vpc_endpoints(**params)
- results.extend(response['VpcEndpoints'])
- if 'NextToken' in response:
- params['NextToken'] = response['NextToken']
- else:
- break
- try:
- results = json.loads(json.dumps(results, default=date_handler))
- except Exception as e:
- module.fail_json(msg=str(e))
- return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results])
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- query=dict(choices=['services', 'endpoints'], required=True),
- filters=dict(default={}, type='dict'),
- vpc_endpoint_ids=dict(type='list'),
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._name == 'ec2_vpc_endpoint_facts':
- module.deprecate("The 'ec2_vpc_endpoint_facts' module has been renamed to 'ec2_vpc_endpoint_info'", version='2.13')
-
- # Validate Requirements
- if not HAS_BOTO3:
- module.fail_json(msg='botocore and boto3 are required.')
-
- try:
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
- if region:
- connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
- else:
- module.fail_json(msg="region must be specified")
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg=str(e))
-
- invocations = {
- 'services': get_supported_services,
- 'endpoints': get_endpoints,
- }
- results = invocations[module.params.get('query')](connection, module)
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py
deleted file mode 100644
index 5198527af7..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_igw
-short_description: Manage an AWS VPC Internet gateway
-description:
- - Manage an AWS VPC Internet gateway
-version_added: "2.0"
-author: Robert Estelle (@erydo)
-options:
- vpc_id:
- description:
- - The VPC ID for the VPC in which to manage the Internet Gateway.
- required: true
- type: str
- tags:
- description:
- - "A dict of tags to apply to the internet gateway. Any tags currently applied to the internet gateway and not present here will be removed."
- aliases: [ 'resource_tags' ]
- version_added: "2.4"
- type: dict
- state:
- description:
- - Create or terminate the IGW
- default: present
- choices: [ 'present', 'absent' ]
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - botocore
- - boto3
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Ensure that the VPC has an Internet Gateway.
-# The Internet Gateway ID can be accessed via {{ igw.gateway_id }} for use in setting up NATs etc.
-ec2_vpc_igw:
- vpc_id: vpc-abcdefgh
- state: present
-register: igw
-
-'''
-
-RETURN = '''
-changed:
- description: If any changes have been made to the Internet Gateway.
- type: bool
- returned: always
- sample:
- changed: false
-gateway_id:
- description: The unique identifier for the Internet Gateway.
- type: str
- returned: I(state=present)
- sample:
- gateway_id: "igw-XXXXXXXX"
-tags:
- description: The tags associated with the Internet Gateway.
- type: dict
- returned: I(state=present)
- sample:
- tags:
- "Ansible": "Test"
-vpc_id:
- description: The VPC ID associated with the Internet Gateway.
- type: str
- returned: I(state=present)
- sample:
- vpc_id: "vpc-XXXXXXXX"
-'''
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.waiters import get_waiter
-from ansible.module_utils.ec2 import (
- AWSRetry,
- camel_dict_to_snake_dict,
- boto3_tag_list_to_ansible_dict,
- ansible_dict_to_boto3_filter_list,
- ansible_dict_to_boto3_tag_list,
- compare_aws_tags
-)
-from ansible.module_utils.six import string_types
-
-
-class AnsibleEc2Igw(object):
-
- def __init__(self, module, results):
- self._module = module
- self._results = results
- self._connection = self._module.client('ec2')
- self._check_mode = self._module.check_mode
-
- def process(self):
- vpc_id = self._module.params.get('vpc_id')
- state = self._module.params.get('state', 'present')
- tags = self._module.params.get('tags')
-
- if state == 'present':
- self.ensure_igw_present(vpc_id, tags)
- elif state == 'absent':
- self.ensure_igw_absent(vpc_id)
-
- def get_matching_igw(self, vpc_id):
- filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
- igws = []
- try:
- response = self._connection.describe_internet_gateways(Filters=filters)
- igws = response.get('InternetGateways', [])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e)
-
- igw = None
- if len(igws) > 1:
- self._module.fail_json(
- msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting'.format(vpc_id))
- elif igws:
- igw = camel_dict_to_snake_dict(igws[0])
-
- return igw
-
- def check_input_tags(self, tags):
- nonstring_tags = [k for k, v in tags.items() if not isinstance(v, string_types)]
- if nonstring_tags:
- self._module.fail_json(msg='One or more tags contain non-string values: {0}'.format(nonstring_tags))
-
- def ensure_tags(self, igw_id, tags, add_only):
- final_tags = []
-
- filters = ansible_dict_to_boto3_filter_list({'resource-id': igw_id, 'resource-type': 'internet-gateway'})
- cur_tags = None
- try:
- cur_tags = self._connection.describe_tags(Filters=filters)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="Couldn't describe tags")
-
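- # compare_aws_tags() yields the tags to set and the tag keys to remove;
- # when add_only is True, purge_tags is False and unmanaged tags are kept.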
- purge_tags = bool(not add_only)
- to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
- final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags'))
-
- if to_update:
- try:
- if self._check_mode:
- # update tags
- final_tags.update(to_update)
- else:
- AWSRetry.exponential_backoff()(self._connection.create_tags)(
- Resources=[igw_id],
- Tags=ansible_dict_to_boto3_tag_list(to_update)
- )
-
- self._results['changed'] = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="Couldn't create tags")
-
- if to_delete:
- try:
- if self._check_mode:
- # update tags
- for key in to_delete:
- del final_tags[key]
- else:
- tags_list = []
- for key in to_delete:
- tags_list.append({'Key': key})
-
- AWSRetry.exponential_backoff()(self._connection.delete_tags)(Resources=[igw_id], Tags=tags_list)
-
- self._results['changed'] = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="Couldn't delete tags")
-
- if not self._check_mode and (to_update or to_delete):
- try:
- response = self._connection.describe_tags(Filters=filters)
- final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags'))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="Couldn't describe tags")
-
- return final_tags
-
- @staticmethod
- def get_igw_info(igw):
- return {
- 'gateway_id': igw['internet_gateway_id'],
- 'tags': igw['tags'],
- 'vpc_id': igw['vpc_id']
- }
-
- def ensure_igw_absent(self, vpc_id):
- igw = self.get_matching_igw(vpc_id)
- if igw is None:
- return self._results
-
- if self._check_mode:
- self._results['changed'] = True
- return self._results
-
- try:
- self._results['changed'] = True
- self._connection.detach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id)
- self._connection.delete_internet_gateway(InternetGatewayId=igw['internet_gateway_id'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway")
-
- return self._results
-
- def ensure_igw_present(self, vpc_id, tags):
- self.check_input_tags(tags)
-
- igw = self.get_matching_igw(vpc_id)
-
- if igw is None:
- if self._check_mode:
- self._results['changed'] = True
- self._results['gateway_id'] = None
- return self._results
-
- try:
- response = self._connection.create_internet_gateway()
-
- # Ensure the gateway exists before trying to attach it or add tags
- waiter = get_waiter(self._connection, 'internet_gateway_exists')
- waiter.wait(InternetGatewayIds=[response['InternetGateway']['InternetGatewayId']])
-
- igw = camel_dict_to_snake_dict(response['InternetGateway'])
- self._connection.attach_internet_gateway(InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id)
- self._results['changed'] = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self._module.fail_json_aws(e, msg='Unable to create Internet Gateway')
-
- igw['vpc_id'] = vpc_id
-
- igw['tags'] = self.ensure_tags(igw_id=igw['internet_gateway_id'], tags=tags, add_only=False)
-
- igw_info = self.get_igw_info(igw)
- self._results.update(igw_info)
-
- return self._results
-
-
-def main():
- argument_spec = dict(
- vpc_id=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- tags=dict(default=dict(), required=False, type='dict', aliases=['resource_tags'])
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
- results = dict(
- changed=False
- )
- igw_manager = AnsibleEc2Igw(module=module, results=results)
- igw_manager.process()
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_igw_info.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_igw_info.py
deleted file mode 100644
index 11ee974ae5..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_igw_info.py
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_igw_info
-short_description: Gather information about internet gateways in AWS
-description:
- - Gather information about internet gateways in AWS.
- - This module was called C(ec2_vpc_igw_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.3"
-requirements: [ boto3 ]
-author: "Nick Aslanidis (@naslanidis)"
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInternetGateways.html) for possible filters.
- type: dict
- internet_gateway_ids:
- description:
- - Get details of specific Internet Gateway IDs. Provide this value as a list.
- type: list
- elements: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Gather information about all Internet Gateways for an account or profile
- ec2_vpc_igw_info:
- region: ap-southeast-2
- profile: production
- register: igw_info
-
-- name: Gather information about a filtered list of Internet Gateways
- ec2_vpc_igw_info:
- region: ap-southeast-2
- profile: production
- filters:
- "tag:Name": "igw-123"
- register: igw_info
-
-- name: Gather information about a specific internet gateway by InternetGatewayId
- ec2_vpc_igw_info:
- region: ap-southeast-2
- profile: production
- internet_gateway_ids:
- - igw-c1231234
- register: igw_info
-'''
-
-RETURN = '''
-internet_gateways:
- description: The internet gateways for the account.
- returned: always
- type: list
- sample: [
- {
- "attachments": [
- {
- "state": "available",
- "vpc_id": "vpc-02123b67"
- }
- ],
- "internet_gateway_id": "igw-2123634d",
- "tags": [
- {
- "key": "Name",
- "value": "test-vpc-20-igw"
- }
- ]
- }
- ]
-
-changed:
- description: True if listing the internet gateways succeeds.
- type: bool
- returned: always
- sample: "false"
-'''
-
-try:
- import botocore
-except ImportError:
- pass # will be captured by imported HAS_BOTO3
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
- camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, HAS_BOTO3)
-
-
-def get_internet_gateway_info(internet_gateway):
- internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'],
- 'Attachments': internet_gateway['Attachments'],
- 'Tags': internet_gateway['Tags']}
- return internet_gateway_info
-
-
-def list_internet_gateways(client, module):
- params = dict()
-
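- # Convert the Ansible-style filter dict into the boto3 Filters list format.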
- params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
-
- if module.params.get("internet_gateway_ids"):
- params['InternetGatewayIds'] = module.params.get("internet_gateway_ids")
-
- try:
- all_internet_gateways = client.describe_internet_gateways(**params)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=str(e))
-
- return [camel_dict_to_snake_dict(get_internet_gateway_info(igw))
- for igw in all_internet_gateways['InternetGateways']]
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- filters=dict(type='dict', default=dict()),
- internet_gateway_ids=dict(type='list', default=None)
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._name == 'ec2_vpc_igw_facts':
- module.deprecate("The 'ec2_vpc_igw_facts' module has been renamed to 'ec2_vpc_igw_info'", version='2.13')
-
- # Validate Requirements
- if not HAS_BOTO3:
- module.fail_json(msg='botocore and boto3 are required.')
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg="Can't authorize connection - " + str(e))
-
- # call your function here
- results = list_internet_gateways(connection, module)
-
- module.exit_json(internet_gateways=results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py
deleted file mode 100644
index 5e8ea95a04..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py
+++ /dev/null
@@ -1,634 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
-module: ec2_vpc_nacl
-short_description: Create and delete Network ACLs.
-description:
- - Read the AWS documentation for Network ACLs
- U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
-version_added: "2.2"
-options:
- name:
- description:
- - Tagged name identifying a network ACL.
- - One and only one of the I(name) or I(nacl_id) is required.
- required: false
- type: str
- nacl_id:
- description:
- - NACL id identifying a network ACL.
- - One and only one of the I(name) or I(nacl_id) is required.
- required: false
- version_added: "2.4"
- type: str
- vpc_id:
- description:
- - VPC id of the requesting VPC.
- - Required when I(state=present).
- required: false
- type: str
- subnets:
- description:
- - The list of subnets that should be associated with the network ACL.
- - Must be specified as a list.
- - Each subnet can be specified as subnet ID, or its tagged name.
- required: false
- type: list
- egress:
- description:
- - A list of rules for outgoing traffic. Each rule must be specified as a list.
- Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', '-1', 'all']),
- the rule action ('allow' or 'deny'), the CIDR of the IPv4 network range to allow or deny,
- the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the
- range for TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
- See examples.
- default: []
- required: false
- type: list
- ingress:
- description:
- - List of rules for incoming traffic. Each rule must be specified as a list.
- Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', '-1', 'all']),
- the rule action ('allow' or 'deny'), the CIDR of the IPv4 network range to allow or deny,
- the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the
- range for TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
- See examples.
- default: []
- required: false
- type: list
- tags:
- description:
- - Dictionary of tags to look for and apply when creating a network ACL.
- required: false
- type: dict
- state:
- description:
- - Creates or modifies an existing NACL
- - Deletes a NACL and reassociates subnets to the default NACL
- required: false
- type: str
- choices: ['present', 'absent']
- default: present
-author: Mike Mochan (@mmochan)
-extends_documentation_fragment:
- - aws
- - ec2
-requirements: [ botocore, boto3, json ]
-'''
-
-EXAMPLES = '''
-
-# Complete example to create and delete a network ACL
-# that allows SSH, HTTP and ICMP in, and all traffic out.
-- name: "Create and associate production DMZ network ACL with DMZ subnets"
- ec2_vpc_nacl:
- vpc_id: vpc-12345678
- name: prod-dmz-nacl
- region: ap-southeast-2
- subnets: ['prod-dmz-1', 'prod-dmz-2']
- tags:
- CostCode: CC1234
- Project: phoenix
- Description: production DMZ
- ingress:
- # rule no, protocol, allow/deny, cidr, icmp_type, icmp_code,
- # port from, port to
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- state: 'present'
-
-- name: "Remove the ingress and egress rules - defaults to deny all"
- ec2_vpc_nacl:
- vpc_id: vpc-12345678
- name: prod-dmz-nacl
- region: ap-southeast-2
- subnets:
- - prod-dmz-1
- - prod-dmz-2
- tags:
- CostCode: CC1234
- Project: phoenix
- Description: production DMZ
- state: present
-
-- name: "Remove the NACL subnet associations and tags"
- ec2_vpc_nacl:
- vpc_id: 'vpc-12345678'
- name: prod-dmz-nacl
- region: ap-southeast-2
- state: present
-
-- name: "Delete nacl and subnet associations"
- ec2_vpc_nacl:
- vpc_id: vpc-12345678
- name: prod-dmz-nacl
- state: absent
-
-- name: "Delete nacl by its id"
- ec2_vpc_nacl:
- nacl_id: acl-33b4ee5b
- state: absent
-'''
-RETURN = '''
-task:
- description: The result of the create or delete action.
- returned: success
- type: dict
-nacl_id:
- description: The id of the NACL (when creating or updating an ACL).
- returned: success
- type: str
- sample: acl-123456789abcdef01
-'''
-
-try:
- import botocore
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import AWSRetry
-
-# VPC-supported IANA protocol numbers
-# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
-PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }
-
-
-# Utility methods
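-# A rule entry carries ICMP type/code (rather than a port range) when it has
-# exactly six elements and its protocol is icmp.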
-def icmp_present(entry):
- if len(entry) == 6 and (entry[1] == 'icmp' or entry[1] == 1):
- return True
-
-
-def load_tags(module):
- tags = []
- if module.params.get('tags'):
- for name, value in module.params.get('tags').items():
- tags.append({'Key': name, 'Value': str(value)})
- tags.append({'Key': "Name", 'Value': module.params.get('name')})
- else:
- tags.append({'Key': "Name", 'Value': module.params.get('name')})
- return tags
-
-
-def subnets_removed(nacl_id, subnets, client, module):
- results = find_acl_by_id(nacl_id, client, module)
- associations = results['NetworkAcls'][0]['Associations']
- subnet_ids = [assoc['SubnetId'] for assoc in associations]
- return [subnet for subnet in subnet_ids if subnet not in subnets]
-
-
-def subnets_added(nacl_id, subnets, client, module):
- results = find_acl_by_id(nacl_id, client, module)
- associations = results['NetworkAcls'][0]['Associations']
- subnet_ids = [assoc['SubnetId'] for assoc in associations]
- return [subnet for subnet in subnets if subnet not in subnet_ids]
-
-
-def subnets_changed(nacl, client, module):
- changed = False
- vpc_id = module.params.get('vpc_id')
- nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
- subnets = subnets_to_associate(nacl, client, module)
- if not subnets:
- default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
- subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module)
- if subnets:
- replace_network_acl_association(default_nacl_id, subnets, client, module)
- changed = True
- return changed
- changed = False
- return changed
- subs_added = subnets_added(nacl_id, subnets, client, module)
- if subs_added:
- replace_network_acl_association(nacl_id, subs_added, client, module)
- changed = True
- subs_removed = subnets_removed(nacl_id, subnets, client, module)
- if subs_removed:
- default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
- replace_network_acl_association(default_nacl_id, subs_removed, client, module)
- changed = True
- return changed
-
-
-def nacls_changed(nacl, client, module):
- changed = False
- params = dict()
- params['egress'] = module.params.get('egress')
- params['ingress'] = module.params.get('ingress')
-
- nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
- nacl = describe_network_acl(client, module)
- entries = nacl['NetworkAcls'][0]['Entries']
- egress = [rule for rule in entries if rule['Egress'] is True and rule['RuleNumber'] < 32767]
- ingress = [rule for rule in entries if rule['Egress'] is False and rule['RuleNumber'] < 32767]
- if rules_changed(egress, params['egress'], True, nacl_id, client, module):
- changed = True
- if rules_changed(ingress, params['ingress'], False, nacl_id, client, module):
- changed = True
- return changed
-
-
-def tags_changed(nacl_id, client, module):
- changed = False
- tags = dict()
- if module.params.get('tags'):
- tags = module.params.get('tags')
- if module.params.get('name') and not tags.get('Name'):
- tags['Name'] = module.params['name']
- nacl = find_acl_by_id(nacl_id, client, module)
- if nacl['NetworkAcls']:
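- # Flatten both the current AWS tags and the requested tags into
- # [key, value, ...] lists so they can be compared order-insensitively.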
- nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']]
- nacl_tags = [item for sublist in nacl_values for item in sublist]
- tag_values = [[key, str(value)] for key, value in tags.items()]
- tags = [item for sublist in tag_values for item in sublist]
- if sorted(nacl_tags) == sorted(tags):
- changed = False
- return changed
- else:
- delete_tags(nacl_id, client, module)
- create_tags(nacl_id, client, module)
- changed = True
- return changed
- return changed
-
-
-def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
- changed = False
- rules = list()
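- # Build the desired rule set from the module params, then diff it against
- # what AWS reports: delete rules no longer requested and add new ones.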
- for entry in param_rules:
- rules.append(process_rule_entry(entry, Egress))
- if rules == aws_rules:
- return changed
- else:
- removed_rules = [x for x in aws_rules if x not in rules]
- if removed_rules:
- params = dict()
- for rule in removed_rules:
- params['NetworkAclId'] = nacl_id
- params['RuleNumber'] = rule['RuleNumber']
- params['Egress'] = Egress
- delete_network_acl_entry(params, client, module)
- changed = True
- added_rules = [x for x in rules if x not in aws_rules]
- if added_rules:
- for rule in added_rules:
- rule['NetworkAclId'] = nacl_id
- create_network_acl_entry(rule, client, module)
- changed = True
- return changed
-
-
-def process_rule_entry(entry, Egress):
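- # Positional rule layout:
- # [0] rule number, [1] protocol, [2] allow/deny, [3] CIDR,
- # [4] icmp type, [5] icmp code, [6] port from, [7] port to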
- params = dict()
- params['RuleNumber'] = entry[0]
- params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]])
- params['RuleAction'] = entry[2]
- params['Egress'] = Egress
- params['CidrBlock'] = entry[3]
- if icmp_present(entry):
- params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])}
- else:
- if entry[6] or entry[7]:
- params['PortRange'] = {"From": entry[6], 'To': entry[7]}
- return params
-
-
-def restore_default_associations(assoc_ids, default_nacl_id, client, module):
- if assoc_ids:
- params = dict()
- params['NetworkAclId'] = default_nacl_id[0]
- for assoc_id in assoc_ids:
- params['AssociationId'] = assoc_id
- restore_default_acl_association(params, client, module)
- return True
-
-
-def construct_acl_entries(nacl, client, module):
- for entry in module.params.get('ingress'):
- params = process_rule_entry(entry, Egress=False)
- params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
- create_network_acl_entry(params, client, module)
- for rule in module.params.get('egress'):
- params = process_rule_entry(rule, Egress=True)
- params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
- create_network_acl_entry(params, client, module)
-
-
-# Module invocations
-def setup_network_acl(client, module):
- changed = False
- nacl = describe_network_acl(client, module)
- if not nacl['NetworkAcls']:
- nacl = create_network_acl(module.params.get('vpc_id'), client, module)
- nacl_id = nacl['NetworkAcl']['NetworkAclId']
- create_tags(nacl_id, client, module)
- subnets = subnets_to_associate(nacl, client, module)
- replace_network_acl_association(nacl_id, subnets, client, module)
- construct_acl_entries(nacl, client, module)
- changed = True
- return (changed, nacl['NetworkAcl']['NetworkAclId'])
- else:
- changed = False
- nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
- changed |= subnets_changed(nacl, client, module)
- changed |= nacls_changed(nacl, client, module)
- changed |= tags_changed(nacl_id, client, module)
- return (changed, nacl_id)
-
-
-def remove_network_acl(client, module):
- changed = False
- result = dict()
- nacl = describe_network_acl(client, module)
- if nacl['NetworkAcls']:
- nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
- vpc_id = nacl['NetworkAcls'][0]['VpcId']
- associations = nacl['NetworkAcls'][0]['Associations']
- assoc_ids = [a['NetworkAclAssociationId'] for a in associations]
- default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)
- if not default_nacl_id:
- result = {vpc_id: "Default NACL ID not found - Check the VPC ID"}
- return changed, result
- if restore_default_associations(assoc_ids, default_nacl_id, client, module):
- delete_network_acl(nacl_id, client, module)
- changed = True
- result[nacl_id] = "Successfully deleted"
- return changed, result
- if not assoc_ids:
- delete_network_acl(nacl_id, client, module)
- changed = True
- result[nacl_id] = "Successfully deleted"
- return changed, result
- return changed, result
-
-
-# Boto3 client methods
-@AWSRetry.jittered_backoff()
-def _create_network_acl(client, *args, **kwargs):
- return client.create_network_acl(*args, **kwargs)
-
-
-def create_network_acl(vpc_id, client, module):
- try:
- if module.check_mode:
- nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000"))
- else:
- nacl = _create_network_acl(client, VpcId=vpc_id)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
- return nacl
-
-
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
-def _create_network_acl_entry(client, *args, **kwargs):
- return client.create_network_acl_entry(*args, **kwargs)
-
-
-def create_network_acl_entry(params, client, module):
- try:
- if not module.check_mode:
- _create_network_acl_entry(client, **params)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
-
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
-def _create_tags(client, *args, **kwargs):
- return client.create_tags(*args, **kwargs)
-
-
-def create_tags(nacl_id, client, module):
- try:
- delete_tags(nacl_id, client, module)
- if not module.check_mode:
- _create_tags(client, Resources=[nacl_id], Tags=load_tags(module))
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
-
-@AWSRetry.jittered_backoff()
-def _delete_network_acl(client, *args, **kwargs):
- return client.delete_network_acl(*args, **kwargs)
-
-
-def delete_network_acl(nacl_id, client, module):
- try:
- if not module.check_mode:
- _delete_network_acl(client, NetworkAclId=nacl_id)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
-
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
-def _delete_network_acl_entry(client, *args, **kwargs):
- return client.delete_network_acl_entry(*args, **kwargs)
-
-
-def delete_network_acl_entry(params, client, module):
- try:
- if not module.check_mode:
- _delete_network_acl_entry(client, **params)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
-
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
-def _delete_tags(client, *args, **kwargs):
- return client.delete_tags(*args, **kwargs)
-
-
-def delete_tags(nacl_id, client, module):
- try:
- if not module.check_mode:
- _delete_tags(client, Resources=[nacl_id])
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
-
-@AWSRetry.jittered_backoff()
-def _describe_network_acls(client, **kwargs):
- return client.describe_network_acls(**kwargs)
-
-
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
-def _describe_network_acls_retry_missing(client, **kwargs):
- return client.describe_network_acls(**kwargs)
-
-
-def describe_acl_associations(subnets, client, module):
- if not subnets:
- return []
- try:
- results = _describe_network_acls_retry_missing(client, Filters=[
- {'Name': 'association.subnet-id', 'Values': subnets}
- ])
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
- associations = results['NetworkAcls'][0]['Associations']
- return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets]
-
-
-def describe_network_acl(client, module):
- try:
- if module.params.get('nacl_id'):
- nacl = _describe_network_acls(client, Filters=[
- {'Name': 'network-acl-id', 'Values': [module.params.get('nacl_id')]}
- ])
- else:
- nacl = _describe_network_acls(client, Filters=[
- {'Name': 'tag:Name', 'Values': [module.params.get('name')]}
- ])
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
- return nacl
-
-
-def find_acl_by_id(nacl_id, client, module):
- try:
- return _describe_network_acls_retry_missing(client, NetworkAclIds=[nacl_id])
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
-
-def find_default_vpc_nacl(vpc_id, client, module):
- try:
- response = _describe_network_acls_retry_missing(client, Filters=[
- {'Name': 'vpc-id', 'Values': [vpc_id]}])
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
- nacls = response['NetworkAcls']
- return [n['NetworkAclId'] for n in nacls if n['IsDefault'] is True]
-
-
-def find_subnet_ids_by_nacl_id(nacl_id, client, module):
- try:
- results = _describe_network_acls_retry_missing(client, Filters=[
- {'Name': 'association.network-acl-id', 'Values': [nacl_id]}
- ])
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
- if results['NetworkAcls']:
- associations = results['NetworkAcls'][0]['Associations']
- return [s['SubnetId'] for s in associations if s['SubnetId']]
- else:
- return []
-
-
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
-def _replace_network_acl_association(client, *args, **kwargs):
- return client.replace_network_acl_association(*args, **kwargs)
-
-
-def replace_network_acl_association(nacl_id, subnets, client, module):
- params = dict()
- params['NetworkAclId'] = nacl_id
- for association in describe_acl_associations(subnets, client, module):
- params['AssociationId'] = association
- try:
- if not module.check_mode:
- _replace_network_acl_association(client, **params)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
-
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
-def _replace_network_acl_entry(client, *args, **kwargs):
- return client.replace_network_acl_entry(*args, **kwargs)
-
-
-def replace_network_acl_entry(entries, Egress, nacl_id, client, module):
- for entry in entries:
- params = entry
- params['NetworkAclId'] = nacl_id
- try:
- if not module.check_mode:
- _replace_network_acl_entry(client, **params)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
-
-
-def restore_default_acl_association(params, client, module):
- try:
- if not module.check_mode:
- _replace_network_acl_association(client, **params)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
-
-@AWSRetry.jittered_backoff()
-def _describe_subnets(client, *args, **kwargs):
- return client.describe_subnets(*args, **kwargs)
-
-
-def subnets_to_associate(nacl, client, module):
- params = list(module.params.get('subnets'))
- if not params:
- return []
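- # Subnets may be given as IDs ("subnet-...") or as Name tags; look up IDs
- # first, then fall back to tag:Name for anything still unresolved.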
- all_found = []
- if any(x.startswith("subnet-") for x in params):
- try:
- subnets = _describe_subnets(client, Filters=[
- {'Name': 'subnet-id', 'Values': params}])
- all_found.extend(subnets.get('Subnets', []))
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
- if len(params) != len(all_found):
- try:
- subnets = _describe_subnets(client, Filters=[
- {'Name': 'tag:Name', 'Values': params}])
- all_found.extend(subnets.get('Subnets', []))
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
- return list(set(s['SubnetId'] for s in all_found if s.get('SubnetId')))
-
-
-def main():
- argument_spec = dict(
- vpc_id=dict(),
- name=dict(),
- nacl_id=dict(),
- subnets=dict(required=False, type='list', default=list()),
- tags=dict(required=False, type='dict'),
- ingress=dict(required=False, type='list', default=list()),
- egress=dict(required=False, type='list', default=list()),
- state=dict(default='present', choices=['present', 'absent']),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_one_of=[['name', 'nacl_id']],
- required_if=[['state', 'present', ['vpc_id']]])
-
- state = module.params.get('state').lower()
-
- client = module.client('ec2')
-
- invocations = {
- "present": setup_network_acl,
- "absent": remove_network_acl
- }
- (changed, results) = invocations[state](client, module)
- module.exit_json(changed=changed, nacl_id=results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_info.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_info.py
deleted file mode 100644
index c643d23413..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_info.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/python
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_nacl_info
-short_description: Gather information about Network ACLs in an AWS VPC
-description:
- - Gather information about Network ACLs in an AWS VPC
- - This module was called C(ec2_vpc_nacl_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.2"
-author: "Brad Davidson (@brandond)"
-requirements: [ boto3 ]
-options:
- nacl_ids:
- description:
- - A list of Network ACL IDs to retrieve information about.
- required: false
- default: []
- aliases: [nacl_id]
- type: list
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
- U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter
- names and values are case sensitive.
- required: false
- default: {}
- type: dict
-notes:
- - By default, the module will return all Network ACLs.
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all Network ACLs:
-- name: Get All NACLs
- register: all_nacls
- ec2_vpc_nacl_info:
- region: us-west-2
-
-# Retrieve default Network ACLs:
-- name: Get Default NACLs
- register: default_nacls
- ec2_vpc_nacl_info:
- region: us-west-2
- filters:
- 'default': 'true'
-'''
-
-RETURN = '''
-nacls:
- description: Returns an array of complex objects as described below.
- returned: success
- type: complex
- contains:
- nacl_id:
- description: The ID of the Network Access Control List.
- returned: always
- type: str
- vpc_id:
- description: The ID of the VPC that the NACL is attached to.
- returned: always
- type: str
- is_default:
- description: True if the NACL is the default for its VPC.
- returned: always
- type: bool
- tags:
- description: A dict of tags associated with the NACL.
- returned: always
- type: dict
- subnets:
- description: A list of subnet IDs that are associated with the NACL.
- returned: always
- type: list
- elements: str
- ingress:
- description:
- - A list of NACL ingress rules with the following format.
- - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])"
- returned: always
- type: list
- elements: list
- sample: [[100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]]
- egress:
- description:
- - A list of NACL egress rules with the following format.
- - "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])"
- returned: always
- type: list
- elements: list
- sample: [[100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]]
-'''
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils._text import to_native
-from ansible.module_utils.ec2 import (AWSRetry, ansible_dict_to_boto3_filter_list,
- camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict)
-
-
-# VPC-supported IANA protocol numbers
-# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
-PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'}
-
-
-def list_ec2_vpc_nacls(connection, module):
-
- nacl_ids = module.params.get("nacl_ids")
- filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
-
- if nacl_ids is None:
- nacl_ids = []
-
- try:
- nacls = connection.describe_network_acls(aws_retry=True, NetworkAclIds=nacl_ids, Filters=filters)
- except ClientError as e:
- if e.response['Error']['Code'] == 'InvalidNetworkAclID.NotFound':
- module.fail_json(msg='Unable to describe ACL. NetworkAcl does not exist')
- module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids))
- except BotoCoreError as e:
- module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids))
-
- # Turn the boto3 result into ansible_friendly_snaked_names
- snaked_nacls = []
- for nacl in nacls['NetworkAcls']:
- snaked_nacls.append(camel_dict_to_snake_dict(nacl))
-
- # Turn the boto3 result into an ansible-friendly tag dictionary
- for nacl in snaked_nacls:
- if 'tags' in nacl:
- nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'], 'key', 'value')
- if 'entries' in nacl:
- nacl['egress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
- if entry['rule_number'] < 32767 and entry['egress']]
- nacl['ingress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
- if entry['rule_number'] < 32767 and not entry['egress']]
- del nacl['entries']
- if 'associations' in nacl:
- nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']]
- del nacl['associations']
- if 'network_acl_id' in nacl:
- nacl['nacl_id'] = nacl['network_acl_id']
- del nacl['network_acl_id']
-
- module.exit_json(nacls=snaked_nacls)
-
-
-def nacl_entry_to_list(entry):
-
- # entry list format
- # [ rule_num, protocol name or number, allow or deny, ipv4/6 cidr, icmp type, icmp code, port from, port to]
- elist = []
-
- elist.append(entry['rule_number'])
-
- if entry.get('protocol') in PROTOCOL_NAMES:
- elist.append(PROTOCOL_NAMES[entry['protocol']])
- else:
- elist.append(entry.get('protocol'))
-
- elist.append(entry['rule_action'])
-
- if entry.get('cidr_block'):
- elist.append(entry['cidr_block'])
- elif entry.get('ipv6_cidr_block'):
- elist.append(entry['ipv6_cidr_block'])
- else:
- elist.append(None)
-
- elist = elist + [None, None, None, None]
-
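- # Protocols 1 and 58 are ICMP/ICMPv6 and use type/code instead of ports.
- # Protocols without ports (anything other than tcp/udp/icmp*) match all
- # ports, represented here as 0-65535.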
- if entry['protocol'] in ('1', '58'):
- elist[4] = entry.get('icmp_type_code', {}).get('type')
- elist[5] = entry.get('icmp_type_code', {}).get('code')
-
- if entry['protocol'] not in ('1', '6', '17', '58'):
- elist[6] = 0
- elist[7] = 65535
- elif 'port_range' in entry:
- elist[6] = entry['port_range']['from']
- elist[7] = entry['port_range']['to']
-
- return elist
-
-
-def main():
-
- argument_spec = dict(
- nacl_ids=dict(default=[], type='list', aliases=['nacl_id']),
- filters=dict(default={}, type='dict'))
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._name == 'ec2_vpc_nacl_facts':
- module.deprecate("The 'ec2_vpc_nacl_facts' module has been renamed to 'ec2_vpc_nacl_info'", version='2.13')
-
- connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
-
- list_ec2_vpc_nacls(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py
deleted file mode 100644
index 7598b23266..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py
+++ /dev/null
@@ -1,1020 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_nat_gateway
-short_description: Manage AWS VPC NAT Gateways.
-description:
- - Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
-version_added: "2.2"
-requirements: [boto3, botocore]
-options:
- state:
- description:
- - Ensure NAT Gateway is present or absent.
- default: "present"
- choices: ["present", "absent"]
- type: str
- nat_gateway_id:
- description:
- - The id AWS dynamically allocates to the NAT Gateway on creation.
- This is required when I(state=absent).
- type: str
- subnet_id:
- description:
- - The id of the subnet to create the NAT Gateway in. This is required
- when I(state=present).
- type: str
- allocation_id:
- description:
- - The id of the elastic IP allocation. If neither this nor I(eip_address)
- is passed, an EIP is allocated for this NAT Gateway.
- type: str
- eip_address:
- description:
- - The elastic IP address of the EIP you want attached to this NAT Gateway.
- If neither this nor I(allocation_id) is passed,
- an EIP is allocated for this NAT Gateway.
- type: str
- if_exist_do_not_create:
- description:
- - If a NAT Gateway already exists in I(subnet_id), do not create a new one.
- required: false
- default: false
- type: bool
- release_eip:
- description:
- - Deallocate the EIP from the VPC.
- - Option is only valid with the absent state.
- - You should use this with the I(wait) option, since you cannot release an address while a delete operation is in progress.
- default: false
- type: bool
- wait:
- description:
- - Wait for operation to complete before returning.
- default: false
- type: bool
- wait_timeout:
- description:
- - How many seconds to wait for an operation to complete before timing out.
- default: 320
- type: int
- client_token:
- description:
- - Optional unique token to be used during create to ensure idempotency.
- When specifying this option, ensure you specify the eip_address parameter
- as well, otherwise any subsequent runs will fail.
- type: str
-author:
- - Allen Sanabria (@linuxdynasty)
- - Jon Hadfield (@jonhadfield)
- - Karen Cheng (@Etherdaemon)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create new nat gateway with client token.
- ec2_vpc_nat_gateway:
- state: present
- subnet_id: subnet-12345678
- eip_address: 52.1.1.1
- region: ap-southeast-2
- client_token: abcd-12345678
- register: new_nat_gateway
-
-- name: Create new nat gateway using an allocation-id.
- ec2_vpc_nat_gateway:
- state: present
- subnet_id: subnet-12345678
- allocation_id: eipalloc-12345678
- region: ap-southeast-2
- register: new_nat_gateway
-
-- name: Create new nat gateway, using an EIP address and wait for available status.
- ec2_vpc_nat_gateway:
- state: present
- subnet_id: subnet-12345678
- eip_address: 52.1.1.1
- wait: true
- region: ap-southeast-2
- register: new_nat_gateway
-
-- name: Create new nat gateway and allocate new EIP.
- ec2_vpc_nat_gateway:
- state: present
- subnet_id: subnet-12345678
- wait: true
- region: ap-southeast-2
- register: new_nat_gateway
-
-- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
- ec2_vpc_nat_gateway:
- state: present
- subnet_id: subnet-12345678
- wait: true
- region: ap-southeast-2
- if_exist_do_not_create: true
- register: new_nat_gateway
-
-- name: Delete nat gateway using discovered nat gateways from facts module.
- ec2_vpc_nat_gateway:
- state: absent
- region: ap-southeast-2
- wait: true
- nat_gateway_id: "{{ item.NatGatewayId }}"
- release_eip: true
- register: delete_nat_gateway_result
- loop: "{{ gateways_to_remove.result }}"
-
-- name: Delete nat gateway and wait for deleted status.
- ec2_vpc_nat_gateway:
- state: absent
- nat_gateway_id: nat-12345678
- wait: true
- wait_timeout: 500
- region: ap-southeast-2
-
-- name: Delete nat gateway and release EIP.
- ec2_vpc_nat_gateway:
- state: absent
- nat_gateway_id: nat-12345678
- release_eip: true
- wait: true
- wait_timeout: 300
- region: ap-southeast-2
-'''
-
-RETURN = '''
-create_time:
- description: The creation time of the NAT Gateway, in ISO 8601 format (UTC).
- returned: In all cases.
- type: str
- sample: "2016-03-05T05:19:20.282000+00:00'"
-nat_gateway_id:
- description: id of the VPC NAT Gateway
- returned: In all cases.
- type: str
- sample: "nat-0d1e3a878585988f8"
-subnet_id:
- description: id of the Subnet
- returned: In all cases.
- type: str
- sample: "subnet-12345"
-state:
- description: The current state of the NAT Gateway.
- returned: In all cases.
- type: str
- sample: "available"
-vpc_id:
- description: id of the VPC.
- returned: In all cases.
- type: str
- sample: "vpc-12345"
-nat_gateway_addresses:
- description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id.
- returned: In all cases.
- type: list
- sample: [
- {
- 'public_ip': '52.52.52.52',
- 'network_interface_id': 'eni-12345',
- 'private_ip': '10.0.0.100',
- 'allocation_id': 'eipalloc-12345'
- }
- ]
-'''
-
-import datetime
-import random
-import time
-
-try:
- import botocore
-except ImportError:
- pass # caught by imported HAS_BOTO3
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
- camel_dict_to_snake_dict, HAS_BOTO3)
-
-
-DRY_RUN_GATEWAYS = [
- {
- "nat_gateway_id": "nat-123456789",
- "subnet_id": "subnet-123456789",
- "nat_gateway_addresses": [
- {
- "public_ip": "55.55.55.55",
- "network_interface_id": "eni-1234567",
- "private_ip": "10.0.0.102",
- "allocation_id": "eipalloc-1234567"
- }
- ],
- "state": "available",
- "create_time": "2016-03-05T05:19:20.282000+00:00",
- "vpc_id": "vpc-12345678"
- }
-]
-
-DRY_RUN_ALLOCATION_UNCONVERTED = {
- 'Addresses': [
- {
- 'PublicIp': '55.55.55.55',
- 'Domain': 'vpc',
- 'AllocationId': 'eipalloc-1234567'
- }
- ]
-}
-
-DRY_RUN_MSGS = 'DryRun Mode:'
-
-
-def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None,
- states=None, check_mode=False):
- """Retrieve a list of NAT Gateways
- Args:
- client (botocore.client.EC2): Boto3 client
-
- Kwargs:
- subnet_id (str): The subnet_id the nat resides in.
- nat_gateway_id (str): The Amazon nat id.
- states (list): States available (pending, failed, available, deleting, and deleted)
- default=None
-
- Basic Usage:
- >>> client = boto3.client('ec2')
- >>> subnet_id = 'subnet-12345678'
- >>> get_nat_gateways(client, subnet_id)
- [
- true,
- "",
- {
- "nat_gateway_id": "nat-123456789",
- "subnet_id": "subnet-123456789",
- "nat_gateway_addresses": [
- {
- "public_ip": "55.55.55.55",
- "network_interface_id": "eni-1234567",
- "private_ip": "10.0.0.102",
- "allocation_id": "eipalloc-1234567"
- }
- ],
- "state": "deleted",
- "create_time": "2016-03-05T00:33:21.209000+00:00",
- "delete_time": "2016-03-05T00:36:37.329000+00:00",
- "vpc_id": "vpc-12345678"
- }
- ]
-
- Returns:
- Tuple (bool, str, list)
- """
- params = dict()
- err_msg = ""
- gateways_retrieved = False
- existing_gateways = list()
- if not states:
- states = ['available', 'pending']
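- # Query by explicit gateway id when one is given; otherwise filter by
- # subnet id and state.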
- if nat_gateway_id:
- params['NatGatewayIds'] = [nat_gateway_id]
- else:
- params['Filter'] = [
- {
- 'Name': 'subnet-id',
- 'Values': [subnet_id]
- },
- {
- 'Name': 'state',
- 'Values': states
- }
- ]
-
- try:
- if not check_mode:
- gateways = client.describe_nat_gateways(**params)['NatGateways']
- if gateways:
- for gw in gateways:
- existing_gateways.append(camel_dict_to_snake_dict(gw))
- gateways_retrieved = True
- else:
- gateways_retrieved = True
- if nat_gateway_id:
- if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id:
- existing_gateways = DRY_RUN_GATEWAYS
- elif subnet_id:
- if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id:
- existing_gateways = DRY_RUN_GATEWAYS
- err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS)
-
- except botocore.exceptions.ClientError as e:
- err_msg = str(e)
-
- return gateways_retrieved, err_msg, existing_gateways
-
-
-def wait_for_status(client, wait_timeout, nat_gateway_id, status,
- check_mode=False):
- """Wait for the NAT Gateway to reach a status
- Args:
- client (botocore.client.EC2): Boto3 client
- wait_timeout (int): Number of seconds to wait, until this timeout is reached.
- nat_gateway_id (str): The Amazon nat id.
- status (str): The status to wait for.
- examples. status=available, status=deleted
-
- Basic Usage:
- >>> client = boto3.client('ec2')
- >>> nat_gateway_id = 'nat-123456789'
- >>> wait_for_status(client, 500, nat_gateway_id, 'available')
- [
- true,
- "",
- {
- "nat_gateway_id": "nat-123456789",
- "subnet_id": "subnet-1234567",
- "nat_gateway_addresses": [
- {
- "public_ip": "55.55.55.55",
- "network_interface_id": "eni-1234567",
- "private_ip": "10.0.0.102",
- "allocation_id": "eipalloc-12345678"
- }
- ],
- "state": "deleted",
- "create_time": "2016-03-05T00:33:21.209000+00:00",
- "delete_time": "2016-03-05T00:36:37.329000+00:00",
- "vpc_id": "vpc-12345677"
- }
- ]
-
- Returns:
- Tuple (bool, str, dict)
- """
- polling_increment_secs = 5
- wait_timeout = time.time() + wait_timeout
- status_achieved = False
- nat_gateway = dict()
- states = ['pending', 'failed', 'available', 'deleting', 'deleted']
- err_msg = ""
-
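- # Poll until the gateway reaches the requested state, reports a failure,
- # or the timeout expires.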
- while wait_timeout > time.time():
- try:
- gws_retrieved, err_msg, nat_gateways = (
- get_nat_gateways(
- client, nat_gateway_id=nat_gateway_id,
- states=states, check_mode=check_mode
- )
- )
- if gws_retrieved and nat_gateways:
- nat_gateway = nat_gateways[0]
- if check_mode:
- nat_gateway['state'] = status
-
- if nat_gateway.get('state') == status:
- status_achieved = True
- break
-
- elif nat_gateway.get('state') == 'failed':
- err_msg = nat_gateway.get('failure_message')
- break
-
- elif nat_gateway.get('state') == 'pending':
- if 'failure_message' in nat_gateway:
- err_msg = nat_gateway.get('failure_message')
- status_achieved = False
- break
-
- else:
- time.sleep(polling_increment_secs)
-
- except botocore.exceptions.ClientError as e:
- err_msg = str(e)
-
- if not status_achieved:
- err_msg = "Wait time out reached, while waiting for results"
-
- return status_achieved, err_msg, nat_gateway
-
-
-def gateway_in_subnet_exists(client, subnet_id, allocation_id=None,
- check_mode=False):
- """Retrieve all NAT Gateways for a subnet.
- Args:
- subnet_id (str): The subnet_id the nat resides in.
-
- Kwargs:
- allocation_id (str): The EIP Amazon identifier.
- default = None
-
- Basic Usage:
- >>> client = boto3.client('ec2')
- >>> subnet_id = 'subnet-1234567'
- >>> allocation_id = 'eipalloc-1234567'
- >>> gateway_in_subnet_exists(client, subnet_id, allocation_id)
- (
- [
- {
- "nat_gateway_id": "nat-123456789",
- "subnet_id": "subnet-123456789",
- "nat_gateway_addresses": [
- {
- "public_ip": "55.55.55.55",
- "network_interface_id": "eni-1234567",
- "private_ip": "10.0.0.102",
- "allocation_id": "eipalloc-1234567"
- }
- ],
- "state": "deleted",
- "create_time": "2016-03-05T00:33:21.209000+00:00",
- "delete_time": "2016-03-05T00:36:37.329000+00:00",
- "vpc_id": "vpc-1234567"
- }
- ],
- False
- )
-
- Returns:
- Tuple (list, bool)
- """
- allocation_id_exists = False
- gateways = []
- states = ['available', 'pending']
- gws_retrieved, err_msg, gws = (
- get_nat_gateways(
- client, subnet_id, states=states, check_mode=check_mode
- )
- )
- if not gws_retrieved:
- return gateways, allocation_id_exists
- for gw in gws:
- for address in gw['nat_gateway_addresses']:
- if allocation_id:
- if address.get('allocation_id') == allocation_id:
- allocation_id_exists = True
- gateways.append(gw)
- else:
- gateways.append(gw)
-
- return gateways, allocation_id_exists
-
-
-def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
- """Release an EIP from your EIP Pool
- Args:
- client (botocore.client.EC2): Boto3 client
- eip_address (str): The Elastic IP Address of the EIP.
-
- Kwargs:
- check_mode (bool): if set to true, do not run anything and
- falsify the results.
-
- Basic Usage:
- >>> client = boto3.client('ec2')
- >>> eip_address = '52.87.29.36'
- >>> get_eip_allocation_id_by_address(client, eip_address)
- 'eipalloc-36014da3'
-
- Returns:
- Tuple (str, str)
- """
- params = {
- 'PublicIps': [eip_address],
- }
- allocation_id = None
- err_msg = ""
- try:
- if not check_mode:
- allocations = client.describe_addresses(**params)['Addresses']
- if len(allocations) == 1:
- allocation = allocations[0]
- else:
- allocation = None
- else:
- dry_run_eip = (
- DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp']
- )
- if dry_run_eip == eip_address:
- allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]
- else:
- allocation = None
- if allocation:
- if allocation.get('Domain') != 'vpc':
- err_msg = (
- "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
- .format(eip_address)
- )
- else:
- allocation_id = allocation.get('AllocationId')
- else:
- err_msg = (
- "EIP {0} does not exist".format(eip_address)
- )
-
- except botocore.exceptions.ClientError as e:
- err_msg = str(e)
-
- return allocation_id, err_msg
-
-
-def allocate_eip_address(client, check_mode=False):
- """Release an EIP from your EIP Pool
- Args:
- client (botocore.client.EC2): Boto3 client
-
- Kwargs:
- check_mode (bool): if set to true, do not run anything and
- falsify the results.
-
- Basic Usage:
- >>> client = boto3.client('ec2')
- >>> allocate_eip_address(client)
- True
-
- Returns:
- Tuple (bool, str, str)
- """
- ip_allocated = False
- new_eip = None
- err_msg = ''
- params = {
- 'Domain': 'vpc',
- }
- try:
- if check_mode:
- ip_allocated = True
- random_numbers = (
- ''.join(str(x) for x in random.sample(range(0, 9), 7))
- )
- new_eip = 'eipalloc-{0}'.format(random_numbers)
- else:
- new_eip = client.allocate_address(**params)['AllocationId']
- ip_allocated = True
- err_msg = 'eipalloc id {0} created'.format(new_eip)
-
- except botocore.exceptions.ClientError as e:
- err_msg = str(e)
-
- return ip_allocated, err_msg, new_eip
-
-
-def release_address(client, allocation_id, check_mode=False):
- """Release an EIP from your EIP Pool
- Args:
- client (botocore.client.EC2): Boto3 client
- allocation_id (str): The eip Amazon identifier.
-
- Kwargs:
-        check_mode (bool): if set to true, do not make any changes and
-            return simulated results instead.
-
- Basic Usage:
- >>> client = boto3.client('ec2')
- >>> allocation_id = "eipalloc-123456"
- >>> release_address(client, allocation_id)
-        (True, '')
-
- Returns:
-        Tuple (bool, str)
- """
- err_msg = ''
- if check_mode:
- return True, ''
-
- ip_released = False
- try:
- client.describe_addresses(AllocationIds=[allocation_id])
- except botocore.exceptions.ClientError as e:
- # IP address likely already released
- # Happens with gateway in 'deleted' state that
- # still lists associations
- return True, str(e)
- try:
- client.release_address(AllocationId=allocation_id)
- ip_released = True
- except botocore.exceptions.ClientError as e:
- err_msg = str(e)
-
- return ip_released, err_msg
-
-
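-# A minimal usage sketch (illustrative only) of how the three EIP helpers
-# above compose, assuming `client` is an already-configured boto3 EC2 client:
-#
-#     ok, msg, alloc_id = allocate_eip_address(client)
-#     if ok:
-#         found_id, err = get_eip_allocation_id_by_address(client, '52.87.29.36')
-#         released, err = release_address(client, alloc_id)
-#
-# Each helper reports failures through its returned error message rather than
-# raising, so callers inspect the returned status/err_msg instead of catching
-# exceptions.
-
-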
-def create(client, subnet_id, allocation_id, client_token=None,
- wait=False, wait_timeout=0, if_exist_do_not_create=False,
- check_mode=False):
- """Create an Amazon NAT Gateway.
- Args:
- client (botocore.client.EC2): Boto3 client
- subnet_id (str): The subnet_id the nat resides in.
- allocation_id (str): The eip Amazon identifier.
-
- Kwargs:
-        if_exist_do_not_create (bool): if a NAT gateway already exists in this
-            subnet, then do not create another one.
-            default = False
-        wait (bool): Wait for the NAT gateway to be in the available state before returning.
-            default = False
-        wait_timeout (int): Number of seconds to wait, until this timeout is reached.
-            default = 0
-        client_token (str): Optional unique token to make creation idempotent.
-            default = None
-
- Basic Usage:
- >>> client = boto3.client('ec2')
- >>> subnet_id = 'subnet-1234567'
- >>> allocation_id = 'eipalloc-1234567'
- >>> create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
-        [
-            true,
-            true,
-            "",
-            {
-                "nat_gateway_id": "nat-123456789",
-                "subnet_id": "subnet-1234567",
-                "nat_gateway_addresses": [
-                    {
-                        "public_ip": "55.55.55.55",
-                        "network_interface_id": "eni-1234567",
-                        "private_ip": "10.0.0.102",
-                        "allocation_id": "eipalloc-1234567"
-                    }
-                ],
-                "state": "available",
-                "create_time": "2016-03-05T00:33:21.209000+00:00",
-                "vpc_id": "vpc-1234567"
-            }
-        ]
-
- Returns:
-        Tuple (bool, bool, str, dict)
- """
- params = {
- 'SubnetId': subnet_id,
- 'AllocationId': allocation_id
- }
- request_time = datetime.datetime.utcnow()
- changed = False
- success = False
- token_provided = False
- err_msg = ""
-
- if client_token:
- token_provided = True
- params['ClientToken'] = client_token
-
- try:
- if not check_mode:
- result = camel_dict_to_snake_dict(client.create_nat_gateway(**params)["NatGateway"])
- else:
- result = DRY_RUN_GATEWAYS[0]
- result['create_time'] = datetime.datetime.utcnow()
- result['nat_gateway_addresses'][0]['allocation_id'] = allocation_id
- result['subnet_id'] = subnet_id
-
- success = True
- changed = True
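-        # If a client token was supplied and the returned gateway was created
-        # before this request was made, the token matched an existing gateway,
-        # so nothing new was actually created.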
- create_time = result['create_time'].replace(tzinfo=None)
- if token_provided and (request_time > create_time):
- changed = False
- elif wait:
- success, err_msg, result = (
- wait_for_status(
- client, wait_timeout, result['nat_gateway_id'], 'available',
- check_mode=check_mode
- )
- )
- if success:
- err_msg = (
- 'NAT gateway {0} created'.format(result['nat_gateway_id'])
- )
-
- except botocore.exceptions.ClientError as e:
-        if "IdempotentParameterMismatch" in str(e):
- err_msg = (
- 'NAT Gateway does not support update and token has already been provided: ' + str(e)
- )
- else:
- err_msg = str(e)
- success = False
- changed = False
- result = None
-
- return success, changed, err_msg, result
-
-
-def pre_create(client, subnet_id, allocation_id=None, eip_address=None,
- if_exist_do_not_create=False, wait=False, wait_timeout=0,
- client_token=None, check_mode=False):
- """Create an Amazon NAT Gateway.
- Args:
- client (botocore.client.EC2): Boto3 client
- subnet_id (str): The subnet_id the nat resides in.
-
- Kwargs:
- allocation_id (str): The EIP Amazon identifier.
- default = None
- eip_address (str): The Elastic IP Address of the EIP.
- default = None
-        if_exist_do_not_create (bool): if a NAT gateway already exists in this
-            subnet, then do not create another one.
-            default = False
-        wait (bool): Wait for the NAT gateway to be in the available state before returning.
-            default = False
-        wait_timeout (int): Number of seconds to wait, until this timeout is reached.
-            default = 0
-        client_token (str): Optional unique token to make creation idempotent.
-            default = None
-
- Basic Usage:
- >>> client = boto3.client('ec2')
- >>> subnet_id = 'subnet-w4t12897'
- >>> allocation_id = 'eipalloc-36014da3'
- >>> pre_create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
-        [
-            true,
-            true,
-            "",
-            {
-                "nat_gateway_id": "nat-03835afb6e31df79b",
-                "subnet_id": "subnet-w4t12897",
-                "nat_gateway_addresses": [
-                    {
-                        "public_ip": "52.87.29.36",
-                        "network_interface_id": "eni-5579742d",
-                        "private_ip": "10.0.0.102",
-                        "allocation_id": "eipalloc-36014da3"
-                    }
-                ],
-                "state": "available",
-                "create_time": "2016-03-05T00:33:21.209000+00:00",
-                "vpc_id": "vpc-w68571b5"
-            }
-        ]
-
- Returns:
-        Tuple (bool, bool, str, dict)
- """
- success = False
- changed = False
- err_msg = ""
- results = list()
-
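-    # Resolve which EIP the new gateway will use: reuse an existing gateway
-    # (or allocate a fresh EIP) when neither identifier was supplied, or map a
-    # supplied eip_address to its allocation_id before checking for an
-    # existing gateway.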
- if not allocation_id and not eip_address:
- existing_gateways, allocation_id_exists = (
- gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)
- )
-
- if len(existing_gateways) > 0 and if_exist_do_not_create:
- success = True
- changed = False
- results = existing_gateways[0]
- err_msg = (
- 'NAT Gateway {0} already exists in subnet_id {1}'
- .format(
- existing_gateways[0]['nat_gateway_id'], subnet_id
- )
- )
- return success, changed, err_msg, results
- else:
- success, err_msg, allocation_id = (
- allocate_eip_address(client, check_mode=check_mode)
- )
- if not success:
-                return success, False, err_msg, dict()
-
- elif eip_address or allocation_id:
- if eip_address and not allocation_id:
- allocation_id, err_msg = (
- get_eip_allocation_id_by_address(
- client, eip_address, check_mode=check_mode
- )
- )
- if not allocation_id:
- success = False
- changed = False
- return success, changed, err_msg, dict()
-
- existing_gateways, allocation_id_exists = (
- gateway_in_subnet_exists(
- client, subnet_id, allocation_id, check_mode=check_mode
- )
- )
- if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
- success = True
- changed = False
- results = existing_gateways[0]
- err_msg = (
- 'NAT Gateway {0} already exists in subnet_id {1}'
- .format(
- existing_gateways[0]['nat_gateway_id'], subnet_id
- )
- )
- return success, changed, err_msg, results
-
- success, changed, err_msg, results = create(
- client, subnet_id, allocation_id, client_token,
- wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode
- )
-
- return success, changed, err_msg, results
-
-
-def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
- release_eip=False, check_mode=False):
- """Delete an Amazon NAT Gateway.
- Args:
- client (botocore.client.EC2): Boto3 client
- nat_gateway_id (str): The Amazon nat id.
-
- Kwargs:
- wait (bool): Wait for the nat to be in the deleted state before returning.
- wait_timeout (int): Number of seconds to wait, until this timeout is reached.
- release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc.
-
- Basic Usage:
- >>> client = boto3.client('ec2')
- >>> nat_gw_id = 'nat-03835afb6e31df79b'
- >>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True)
-        [
-            true,
-            true,
-            "",
-            {
-                "nat_gateway_id": "nat-03835afb6e31df79b",
-                "subnet_id": "subnet-w4t12897",
-                "nat_gateway_addresses": [
-                    {
-                        "public_ip": "52.87.29.36",
-                        "network_interface_id": "eni-5579742d",
-                        "private_ip": "10.0.0.102",
-                        "allocation_id": "eipalloc-36014da3"
-                    }
-                ],
-                "state": "deleted",
-                "create_time": "2016-03-05T00:33:21.209000+00:00",
-                "delete_time": "2016-03-05T00:36:37.329000+00:00",
-                "vpc_id": "vpc-w68571b5"
-            }
-        ]
-
- Returns:
-        Tuple (bool, bool, str, dict)
- """
- params = {
- 'NatGatewayId': nat_gateway_id
- }
-    success = False
-    changed = False
-    err_msg = ""
-    results = list()
-    allocation_id = None
-    states = ['pending', 'available']
- try:
- exist, err_msg, gw = (
- get_nat_gateways(
- client, nat_gateway_id=nat_gateway_id,
- states=states, check_mode=check_mode
- )
- )
- if exist and len(gw) == 1:
- results = gw[0]
- if not check_mode:
- client.delete_nat_gateway(**params)
-
- allocation_id = (
- results['nat_gateway_addresses'][0]['allocation_id']
- )
- changed = True
- success = True
- err_msg = (
- 'NAT gateway {0} is in a deleting state. Delete was successful'
- .format(nat_gateway_id)
- )
-
- if wait:
- status_achieved, err_msg, results = (
- wait_for_status(
- client, wait_timeout, nat_gateway_id, 'deleted',
- check_mode=check_mode
- )
- )
- if status_achieved:
- err_msg = (
- 'NAT gateway {0} was deleted successfully'
- .format(nat_gateway_id)
- )
-
- except botocore.exceptions.ClientError as e:
- err_msg = str(e)
-
-    # Only try to release the EIP if the gateway was found above and its
-    # allocation_id was captured; otherwise there is nothing to release.
-    if release_eip and allocation_id:
- eip_released, eip_err = (
- release_address(client, allocation_id, check_mode)
- )
- if not eip_released:
- err_msg = (
- "{0}: Failed to release EIP {1}: {2}"
- .format(err_msg, allocation_id, eip_err)
- )
- success = False
-
- return success, changed, err_msg, results
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- subnet_id=dict(type='str'),
- eip_address=dict(type='str'),
- allocation_id=dict(type='str'),
- if_exist_do_not_create=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent']),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=320, required=False),
- release_eip=dict(type='bool', default=False),
- nat_gateway_id=dict(type='str'),
- client_token=dict(type='str'),
- )
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[
- ['allocation_id', 'eip_address']
- ],
- required_if=[['state', 'absent', ['nat_gateway_id']],
- ['state', 'present', ['subnet_id']]]
- )
-
- # Validate Requirements
- if not HAS_BOTO3:
- module.fail_json(msg='botocore/boto3 is required.')
-
- state = module.params.get('state').lower()
- check_mode = module.check_mode
- subnet_id = module.params.get('subnet_id')
- allocation_id = module.params.get('allocation_id')
- eip_address = module.params.get('eip_address')
- nat_gateway_id = module.params.get('nat_gateway_id')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- release_eip = module.params.get('release_eip')
- client_token = module.params.get('client_token')
- if_exist_do_not_create = module.params.get('if_exist_do_not_create')
-
- try:
- region, ec2_url, aws_connect_kwargs = (
- get_aws_connection_info(module, boto3=True)
- )
- client = (
- boto3_conn(
- module, conn_type='client', resource='ec2',
- region=region, endpoint=ec2_url, **aws_connect_kwargs
- )
- )
- except botocore.exceptions.ClientError as e:
-        module.fail_json(msg="Boto3 Client Error - " + str(e))
-
- changed = False
- err_msg = ''
-
- if state == 'present':
- success, changed, err_msg, results = (
- pre_create(
- client, subnet_id, allocation_id, eip_address,
- if_exist_do_not_create, wait, wait_timeout,
- client_token, check_mode=check_mode
- )
- )
- else:
- success, changed, err_msg, results = (
- remove(
- client, nat_gateway_id, wait, wait_timeout, release_eip,
- check_mode=check_mode
- )
- )
-
- if not success:
- module.fail_json(
- msg=err_msg, success=success, changed=changed
- )
- else:
- module.exit_json(
- msg=err_msg, success=success, changed=changed, **results
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway_info.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway_info.py
deleted file mode 100644
index 6ecb27b588..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway_info.py
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
-module: ec2_vpc_nat_gateway_info
-short_description: Retrieves AWS VPC managed NAT gateway details using AWS methods
-description:
- - Gets various details related to AWS VPC Managed Nat Gateways
- - This module was called C(ec2_vpc_nat_gateway_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.3"
-requirements: [ boto3 ]
-options:
- nat_gateway_ids:
- description:
- - List of specific nat gateway IDs to fetch details for.
- type: list
- elements: str
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNatGateways.html)
- for possible filters.
- type: dict
-author: Karen Cheng (@Etherdaemon)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Simple example of listing all nat gateways
-- name: List all managed nat gateways in ap-southeast-2
- ec2_vpc_nat_gateway_info:
- region: ap-southeast-2
- register: all_ngws
-
-- name: Debugging the result
- debug:
- msg: "{{ all_ngws.result }}"
-
-- name: Get details on specific nat gateways
- ec2_vpc_nat_gateway_info:
- nat_gateway_ids:
- - nat-1234567891234567
- - nat-7654321987654321
- region: ap-southeast-2
- register: specific_ngws
-
-- name: Get all nat gateways with specific filters
- ec2_vpc_nat_gateway_info:
- region: ap-southeast-2
- filters:
- state: ['pending']
- register: pending_ngws
-
-- name: Get nat gateways with specific filter
- ec2_vpc_nat_gateway_info:
- region: ap-southeast-2
- filters:
- subnet-id: subnet-12345678
- state: ['available']
- register: existing_nat_gateways
-'''
-
-RETURN = '''
-result:
- description: The result of the describe, converted to ansible snake case style.
- See http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways for the response.
- returned: success
- type: list
-'''
-
-import json
-
-try:
- import botocore
-except ImportError:
- pass # will be detected by imported HAS_BOTO3
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
- camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, HAS_BOTO3)
-
-
-def date_handler(obj):
- return obj.isoformat() if hasattr(obj, 'isoformat') else obj
-
-
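-# describe_nat_gateways returns datetime objects, which json cannot serialize
-# directly; round-tripping through json with date_handler turns them into ISO
-# 8601 strings. A minimal sketch of the idea:
-#
-#     >>> import datetime, json
-#     >>> json.dumps({'t': datetime.datetime(2016, 3, 5)}, default=date_handler)
-#     '{"t": "2016-03-05T00:00:00"}'
-
-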
-def get_nat_gateways(client, module, nat_gateway_id=None):
- params = dict()
- nat_gateways = list()
-
- params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
- params['NatGatewayIds'] = module.params.get('nat_gateway_ids')
-
- try:
- result = json.loads(json.dumps(client.describe_nat_gateways(**params), default=date_handler))
- except Exception as e:
-        module.fail_json(msg=str(e))
-
- for gateway in result['NatGateways']:
- # Turn the boto3 result into ansible_friendly_snaked_names
- converted_gateway = camel_dict_to_snake_dict(gateway)
- if 'tags' in converted_gateway:
- # Turn the boto3 result into ansible friendly tag dictionary
- converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags'])
-
- nat_gateways.append(converted_gateway)
-
- return nat_gateways
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- filters=dict(default={}, type='dict'),
- nat_gateway_ids=dict(default=[], type='list'),
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
- if module._name == 'ec2_vpc_nat_gateway_facts':
- module.deprecate("The 'ec2_vpc_nat_gateway_facts' module has been renamed to 'ec2_vpc_nat_gateway_info'", version='2.13')
-
- # Validate Requirements
- if not HAS_BOTO3:
- module.fail_json(msg='botocore/boto3 is required.')
-
- try:
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
- if region:
- connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
- else:
- module.fail_json(msg="region must be specified")
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg=str(e))
-
- results = get_nat_gateways(connection, module)
-
- module.exit_json(result=results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py
deleted file mode 100644
index fff0a97528..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py
+++ /dev/null
@@ -1,448 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
-module: ec2_vpc_peer
-short_description: Create, delete, accept, and reject VPC peering connections between two VPCs
-description:
- - Read the AWS documentation for VPC Peering Connections
- U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html).
-version_added: "2.2"
-options:
- vpc_id:
- description:
- - VPC id of the requesting VPC.
- required: false
- type: str
- peering_id:
- description:
- - Peering connection id.
- required: false
- type: str
- peer_region:
- description:
- - Region of the accepting VPC.
- required: false
- version_added: '2.5'
- type: str
- peer_vpc_id:
- description:
- - VPC id of the accepting VPC.
- required: false
- type: str
- peer_owner_id:
- description:
- - The AWS account number for cross account peering.
- required: false
- type: str
- tags:
- description:
- - Dictionary of tags to look for and apply when creating a Peering Connection.
- required: false
- type: dict
- state:
- description:
- - Create, delete, accept, reject a peering connection.
- required: false
- default: present
- choices: ['present', 'absent', 'accept', 'reject']
- type: str
-author: Mike Mochan (@mmochan)
-extends_documentation_fragment:
- - aws
- - ec2
-requirements: [ botocore, boto3, json ]
-'''
-
-EXAMPLES = '''
-# Complete example to create and accept a local peering connection.
-- name: Create local account VPC peering Connection
- ec2_vpc_peer:
- region: ap-southeast-2
- vpc_id: vpc-12345678
- peer_vpc_id: vpc-87654321
- state: present
- tags:
- Name: Peering connection for VPC 21 to VPC 22
- CostCode: CC1234
- Project: phoenix
- register: vpc_peer
-
-- name: Accept local VPC peering request
- ec2_vpc_peer:
- region: ap-southeast-2
- peering_id: "{{ vpc_peer.peering_id }}"
- state: accept
- register: action_peer
-
-# Complete example to delete a local peering connection.
-- name: Create local account VPC peering Connection
- ec2_vpc_peer:
- region: ap-southeast-2
- vpc_id: vpc-12345678
- peer_vpc_id: vpc-87654321
- state: present
- tags:
- Name: Peering connection for VPC 21 to VPC 22
- CostCode: CC1234
- Project: phoenix
- register: vpc_peer
-
-- name: delete a local VPC peering Connection
- ec2_vpc_peer:
- region: ap-southeast-2
- peering_id: "{{ vpc_peer.peering_id }}"
- state: absent
- register: vpc_peer
-
-# Complete example to create and accept a cross account peering connection.
-- name: Create cross account VPC peering Connection
- ec2_vpc_peer:
- region: ap-southeast-2
- vpc_id: vpc-12345678
- peer_vpc_id: vpc-12345678
- peer_owner_id: 123456789102
- state: present
- tags:
- Name: Peering connection for VPC 21 to VPC 22
- CostCode: CC1234
- Project: phoenix
- register: vpc_peer
-
-- name: Accept peering connection from remote account
- ec2_vpc_peer:
- region: ap-southeast-2
- peering_id: "{{ vpc_peer.peering_id }}"
- profile: bot03_profile_for_cross_account
- state: accept
- register: vpc_peer
-
-# Complete example to create and accept an intra-region peering connection.
-- name: Create intra-region VPC peering Connection
- ec2_vpc_peer:
- region: us-east-1
- vpc_id: vpc-12345678
- peer_vpc_id: vpc-87654321
- peer_region: us-west-2
- state: present
- tags:
- Name: Peering connection for us-east-1 VPC to us-west-2 VPC
- CostCode: CC1234
- Project: phoenix
- register: vpc_peer
-
-- name: Accept peering connection from peer region
- ec2_vpc_peer:
- region: us-west-2
- peering_id: "{{ vpc_peer.peering_id }}"
- state: accept
- register: vpc_peer
-
-# Complete example to create and reject a local peering connection.
-- name: Create local account VPC peering Connection
- ec2_vpc_peer:
- region: ap-southeast-2
- vpc_id: vpc-12345678
- peer_vpc_id: vpc-87654321
- state: present
- tags:
- Name: Peering connection for VPC 21 to VPC 22
- CostCode: CC1234
- Project: phoenix
- register: vpc_peer
-
-- name: Reject a local VPC peering Connection
- ec2_vpc_peer:
- region: ap-southeast-2
- peering_id: "{{ vpc_peer.peering_id }}"
- state: reject
-
-# Complete example to create and accept a cross account peering connection.
-- name: Create cross account VPC peering Connection
- ec2_vpc_peer:
- region: ap-southeast-2
- vpc_id: vpc-12345678
- peer_vpc_id: vpc-12345678
- peer_owner_id: 123456789102
- state: present
- tags:
- Name: Peering connection for VPC 21 to VPC 22
- CostCode: CC1234
- Project: phoenix
- register: vpc_peer
-
-- name: Accept a cross account VPC peering connection request
- ec2_vpc_peer:
- region: ap-southeast-2
- peering_id: "{{ vpc_peer.peering_id }}"
- profile: bot03_profile_for_cross_account
- state: accept
- tags:
- Name: Peering connection for VPC 21 to VPC 22
- CostCode: CC1234
- Project: phoenix
-
-# Complete example to create and reject a cross account peering connection.
-- name: Create cross account VPC peering Connection
- ec2_vpc_peer:
- region: ap-southeast-2
- vpc_id: vpc-12345678
- peer_vpc_id: vpc-12345678
- peer_owner_id: 123456789102
- state: present
- tags:
- Name: Peering connection for VPC 21 to VPC 22
- CostCode: CC1234
- Project: phoenix
- register: vpc_peer
-
-- name: Reject a cross account VPC peering Connection
- ec2_vpc_peer:
- region: ap-southeast-2
- peering_id: "{{ vpc_peer.peering_id }}"
- profile: bot03_profile_for_cross_account
- state: reject
-
-'''
-RETURN = '''
-task:
- description: The result of the create, accept, reject or delete action.
- returned: success
- type: dict
-'''
-
-try:
- import botocore
-except ImportError:
- pass # caught by imported HAS_BOTO3
-
-import distutils.version
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info, HAS_BOTO3
-from ansible.module_utils.aws.core import is_boto3_error_code
-
-
-def tags_changed(pcx_id, client, module):
- changed = False
- tags = dict()
- if module.params.get('tags'):
- tags = module.params.get('tags')
- pcx = find_pcx_by_id(pcx_id, client, module)
- if pcx['VpcPeeringConnections']:
- pcx_values = [t.values() for t in pcx['VpcPeeringConnections'][0]['Tags']]
- pcx_tags = [item for sublist in pcx_values for item in sublist]
- tag_values = [[key, str(value)] for key, value in tags.items()]
- tags = [item for sublist in tag_values for item in sublist]
- if sorted(pcx_tags) == sorted(tags):
- changed = False
- elif tags:
- delete_tags(pcx_id, client, module)
- create_tags(pcx_id, client, module)
- changed = True
- return changed
-
-
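-# tags_changed above compares tags by flattening both the AWS-style tag list
-# and the module's tag dict into flat [key, value, key, value, ...] lists and
-# comparing them sorted. A sketch of the flattening, with made-up data:
-#
-#     >>> pcx_values = [t.values() for t in [{'Key': 'Name', 'Value': 'peer'}]]
-#     >>> sorted(item for sublist in pcx_values for item in sublist)
-#     ['Name', 'peer']
-
-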
-def describe_peering_connections(params, client):
- result = client.describe_vpc_peering_connections(
- Filters=[
- {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['VpcId']]},
- {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]}
- ]
- )
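-    # An empty result may just mean the peering was initiated from the other
-    # side, so retry with the requester/accepter roles swapped.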
- if result['VpcPeeringConnections'] == []:
- result = client.describe_vpc_peering_connections(
- Filters=[
- {'Name': 'requester-vpc-info.vpc-id', 'Values': [params['PeerVpcId']]},
- {'Name': 'accepter-vpc-info.vpc-id', 'Values': [params['VpcId']]}
- ]
- )
- return result
-
-
-def is_active(peering_conn):
- return peering_conn['Status']['Code'] == 'active'
-
-
-def is_pending(peering_conn):
- return peering_conn['Status']['Code'] == 'pending-acceptance'
-
-
-def create_peer_connection(client, module):
- changed = False
- params = dict()
- params['VpcId'] = module.params.get('vpc_id')
- params['PeerVpcId'] = module.params.get('peer_vpc_id')
- if module.params.get('peer_region'):
- if distutils.version.StrictVersion(botocore.__version__) < distutils.version.StrictVersion('1.8.6'):
- module.fail_json(msg="specifying peer_region parameter requires botocore >= 1.8.6")
- params['PeerRegion'] = module.params.get('peer_region')
- if module.params.get('peer_owner_id'):
- params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
- peering_conns = describe_peering_connections(params, client)
- for peering_conn in peering_conns['VpcPeeringConnections']:
- pcx_id = peering_conn['VpcPeeringConnectionId']
- if tags_changed(pcx_id, client, module):
- changed = True
- if is_active(peering_conn):
- return (changed, peering_conn['VpcPeeringConnectionId'])
- if is_pending(peering_conn):
- return (changed, peering_conn['VpcPeeringConnectionId'])
- try:
- peering_conn = client.create_vpc_peering_connection(**params)
- pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']
- if module.params.get('tags'):
- create_tags(pcx_id, client, module)
- changed = True
- return (changed, peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=str(e))
-
-
-def remove_peer_connection(client, module):
- pcx_id = module.params.get('peering_id')
- if not pcx_id:
- params = dict()
- params['VpcId'] = module.params.get('vpc_id')
- params['PeerVpcId'] = module.params.get('peer_vpc_id')
- params['PeerRegion'] = module.params.get('peer_region')
- if module.params.get('peer_owner_id'):
- params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
- peering_conns = describe_peering_connections(params, client)
- if not peering_conns:
- module.exit_json(changed=False)
- else:
- pcx_id = peering_conns['VpcPeeringConnections'][0]['VpcPeeringConnectionId']
-
- try:
- params = dict()
- params['VpcPeeringConnectionId'] = pcx_id
- client.delete_vpc_peering_connection(**params)
- module.exit_json(changed=True)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=str(e))
-
-
-def peer_status(client, module):
- params = dict()
- params['VpcPeeringConnectionIds'] = [module.params.get('peering_id')]
- try:
- vpc_peering_connection = client.describe_vpc_peering_connections(**params)
- return vpc_peering_connection['VpcPeeringConnections'][0]['Status']['Code']
- except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e: # pylint: disable=duplicate-except
- module.fail_json(msg='Malformed connection ID: {0}'.format(e), traceback=traceback.format_exc())
- except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
- module.fail_json(msg='Error while describing peering connection by peering_id: {0}'.format(e), traceback=traceback.format_exc())
-
-
-def accept_reject(state, client, module):
- changed = False
- params = dict()
- params['VpcPeeringConnectionId'] = module.params.get('peering_id')
- if peer_status(client, module) != 'active':
- try:
- if state == 'accept':
- client.accept_vpc_peering_connection(**params)
- else:
- client.reject_vpc_peering_connection(**params)
- if module.params.get('tags'):
- create_tags(params['VpcPeeringConnectionId'], client, module)
- changed = True
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=str(e))
- if tags_changed(params['VpcPeeringConnectionId'], client, module):
- changed = True
- return changed, params['VpcPeeringConnectionId']
-
-
-def load_tags(module):
- tags = []
- if module.params.get('tags'):
- for name, value in module.params.get('tags').items():
- tags.append({'Key': name, 'Value': str(value)})
- return tags
-
-
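-# load_tags converts the module's tag dict into the list-of-dicts shape the
-# EC2 tagging APIs expect, stringifying values. Illustrative mapping:
-#
-#     {'Name': 'peer', 'CostCode': 1234}
-#         -> [{'Key': 'Name', 'Value': 'peer'}, {'Key': 'CostCode', 'Value': '1234'}]
-
-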
-def create_tags(pcx_id, client, module):
- try:
- delete_tags(pcx_id, client, module)
- client.create_tags(Resources=[pcx_id], Tags=load_tags(module))
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=str(e))
-
-
-def delete_tags(pcx_id, client, module):
- try:
- client.delete_tags(Resources=[pcx_id])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=str(e))
-
-
-def find_pcx_by_id(pcx_id, client, module):
- try:
- return client.describe_vpc_peering_connections(VpcPeeringConnectionIds=[pcx_id])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=str(e))
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- vpc_id=dict(),
- peer_vpc_id=dict(),
- peer_region=dict(),
- peering_id=dict(),
- peer_owner_id=dict(),
- tags=dict(required=False, type='dict'),
- profile=dict(),
- state=dict(default='present', choices=['present', 'absent', 'accept', 'reject'])
- )
- )
- required_if = [
- ('state', 'present', ['vpc_id', 'peer_vpc_id']),
- ('state', 'accept', ['peering_id']),
- ('state', 'reject', ['peering_id'])
- ]
-
- module = AnsibleModule(argument_spec=argument_spec, required_if=required_if)
-
- if not HAS_BOTO3:
- module.fail_json(msg='json, botocore and boto3 are required.')
- state = module.params.get('state')
- peering_id = module.params.get('peering_id')
- vpc_id = module.params.get('vpc_id')
- peer_vpc_id = module.params.get('peer_vpc_id')
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- client = boto3_conn(module, conn_type='client', resource='ec2',
- region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg="Can't authorize connection - " + str(e))
-
- if state == 'present':
- (changed, results) = create_peer_connection(client, module)
- module.exit_json(changed=changed, peering_id=results)
- elif state == 'absent':
- if not peering_id and (not vpc_id or not peer_vpc_id):
- module.fail_json(msg='state is absent but one of the following is missing: peering_id or [vpc_id, peer_vpc_id]')
-
- remove_peer_connection(client, module)
- else:
- (changed, results) = accept_reject(state, client, module)
- module.exit_json(changed=changed, peering_id=results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_peering_info.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_peering_info.py
deleted file mode 100644
index 13a22a3e3a..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_peering_info.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: ec2_vpc_peering_info
-short_description: Retrieves AWS VPC peering details using AWS methods
-description:
- - Gets various details related to AWS VPC Peers
- - This module was called C(ec2_vpc_peering_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.4"
-requirements: [ boto3 ]
-options:
- peer_connection_ids:
- description:
- - List of specific VPC peer IDs to get details for.
- type: list
- elements: str
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcPeeringConnections.html)
- for possible filters.
- type: dict
-author: Karen Cheng (@Etherdaemon)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Simple example of listing all VPC Peers
-- name: List all vpc peers
- ec2_vpc_peering_info:
- region: ap-southeast-2
- register: all_vpc_peers
-
-- name: Debugging the result
- debug:
- msg: "{{ all_vpc_peers.result }}"
-
-- name: Get details on specific VPC peer
- ec2_vpc_peering_info:
- peer_connection_ids:
- - pcx-12345678
- - pcx-87654321
- region: ap-southeast-2
- register: all_vpc_peers
-
-- name: Get all vpc peers with specific filters
- ec2_vpc_peering_info:
- region: ap-southeast-2
- filters:
- status-code: ['pending-acceptance']
- register: pending_vpc_peers
-'''
-
-RETURN = '''
-result:
- description: The result of the describe.
- returned: success
- type: list
-'''
-
-import json
-
-try:
- import botocore
-except ImportError:
- pass # will be picked up by imported HAS_BOTO3
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (boto3_tag_list_to_ansible_dict,
- ec2_argument_spec, boto3_conn, get_aws_connection_info,
- ansible_dict_to_boto3_filter_list, HAS_BOTO3, camel_dict_to_snake_dict)
-
-
-def date_handler(obj):
- return obj.isoformat() if hasattr(obj, 'isoformat') else obj
-
-
-def get_vpc_peers(client, module):
- params = dict()
- params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
- if module.params.get('peer_connection_ids'):
- params['VpcPeeringConnectionIds'] = module.params.get('peer_connection_ids')
- try:
- result = json.loads(json.dumps(client.describe_vpc_peering_connections(**params), default=date_handler))
- except Exception as e:
-        module.fail_json(msg=str(e))
-
- return result['VpcPeeringConnections']
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- filters=dict(default=dict(), type='dict'),
- peer_connection_ids=dict(default=None, type='list'),
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
- if module._name == 'ec2_vpc_peering_facts':
- module.deprecate("The 'ec2_vpc_peering_facts' module has been renamed to 'ec2_vpc_peering_info'", version='2.13')
-
- # Validate Requirements
- if not HAS_BOTO3:
- module.fail_json(msg='botocore and boto3 are required.')
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- except NameError as e:
- # Getting around the get_aws_connection_info boto reliance for region
-        if "global name 'boto' is not defined" in str(e):
- module.params['region'] = botocore.session.get_session().get_config_variable('region')
- if not module.params['region']:
- module.fail_json(msg="Error - no region provided")
- else:
- module.fail_json(msg="Can't retrieve connection information - " + str(e))
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg=str(e))
-
- # Turn the boto3 result in to ansible friendly_snaked_names
- results = [camel_dict_to_snake_dict(peer) for peer in get_vpc_peers(ec2, module)]
-
- # Turn the boto3 result in to ansible friendly tag dictionary
- for peer in results:
- peer['tags'] = boto3_tag_list_to_ansible_dict(peer.get('tags', []))
-
- module.exit_json(result=results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py
deleted file mode 100644
index 96c9b2d04d..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py
+++ /dev/null
@@ -1,750 +0,0 @@
-#!/usr/bin/python
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_route_table
-short_description: Manage route tables for AWS virtual private clouds
-description:
- - Manage route tables for AWS virtual private clouds
-version_added: "2.0"
-author:
-- Robert Estelle (@erydo)
-- Rob White (@wimnat)
-- Will Thames (@willthames)
-options:
- lookup:
- description: Look up route table by either tags or by route table ID. Non-unique tag lookup will fail.
- If no tags are specified then no lookup for an existing route table is performed and a new
- route table will be created. To change tags of a route table you must look up by id.
- default: tag
- choices: [ 'tag', 'id' ]
- type: str
- propagating_vgw_ids:
- description: Enable route propagation from virtual gateways specified by ID.
- type: list
- elements: str
- purge_routes:
- version_added: "2.3"
- description: Purge existing routes that are not found in routes.
- type: bool
- default: 'yes'
- purge_subnets:
- version_added: "2.3"
- description: Purge existing subnets that are not found in subnets. Ignored unless the subnets option is supplied.
-    default: 'yes'
- type: bool
- purge_tags:
- version_added: "2.5"
- description: Purge existing tags that are not found in route table.
- type: bool
- default: 'no'
- route_table_id:
- description:
- - The ID of the route table to update or delete.
- - Required when I(lookup=id).
- type: str
- routes:
- description: List of routes in the route table.
- Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id',
- 'instance_id', 'network_interface_id', or 'vpc_peering_connection_id'.
- If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'.
- Routes are required for present states.
- type: list
- elements: dict
- state:
- description: Create or destroy the VPC route table.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- subnets:
- description: An array of subnets to add to this route table. Subnets may be specified
- by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'.
- type: list
- elements: str
- tags:
- description: >
- A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 }). Tags are
- used to uniquely identify route tables within a VPC when the route_table_id is not supplied.
- aliases: [ "resource_tags" ]
- type: dict
- vpc_id:
- description:
- - VPC ID of the VPC in which to create the route table.
- - Required when I(state=present) or I(lookup=tag).
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Basic creation example:
-- name: Set up public subnet route table
- ec2_vpc_route_table:
- vpc_id: vpc-1245678
- region: us-west-1
- tags:
- Name: Public
- subnets:
- - "{{ jumpbox_subnet.subnet.id }}"
- - "{{ frontend_subnet.subnet.id }}"
- - "{{ vpn_subnet.subnet_id }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- register: public_route_table
-
-- name: Set up NAT-protected route table
- ec2_vpc_route_table:
- vpc_id: vpc-1245678
- region: us-west-1
- tags:
- Name: Internal
- subnets:
- - "{{ application_subnet.subnet.id }}"
- - 'Database Subnet'
- - '10.0.0.0/8'
- routes:
- - dest: 0.0.0.0/0
- instance_id: "{{ nat.instance_id }}"
- register: nat_route_table
-
-- name: delete route table
- ec2_vpc_route_table:
- vpc_id: vpc-1245678
- region: us-west-1
- route_table_id: "{{ route_table.id }}"
- lookup: id
- state: absent
-'''
-
-RETURN = '''
-route_table:
- description: Route Table result
- returned: always
- type: complex
- contains:
- associations:
- description: List of subnets associated with the route table
- returned: always
- type: complex
- contains:
- main:
- description: Whether this is the main route table
- returned: always
- type: bool
- sample: false
- route_table_association_id:
- description: ID of association between route table and subnet
- returned: always
- type: str
- sample: rtbassoc-ab47cfc3
- route_table_id:
- description: ID of the route table
- returned: always
- type: str
- sample: rtb-bf779ed7
- subnet_id:
- description: ID of the subnet
- returned: always
- type: str
- sample: subnet-82055af9
- id:
- description: ID of the route table (same as route_table_id for backwards compatibility)
- returned: always
- type: str
- sample: rtb-bf779ed7
- propagating_vgws:
- description: List of Virtual Private Gateways propagating routes
- returned: always
- type: list
- sample: []
- route_table_id:
- description: ID of the route table
- returned: always
- type: str
- sample: rtb-bf779ed7
- routes:
- description: List of routes in the route table
- returned: always
- type: complex
- contains:
- destination_cidr_block:
- description: CIDR block of destination
- returned: always
- type: str
- sample: 10.228.228.0/22
- gateway_id:
- description: ID of the gateway
- returned: when gateway is local or internet gateway
- type: str
- sample: local
- instance_id:
- description: ID of a NAT instance
- returned: when the route is via an EC2 instance
- type: str
- sample: i-abcd123456789
- instance_owner_id:
- description: AWS account owning the NAT instance
- returned: when the route is via an EC2 instance
- type: str
- sample: 123456789012
- nat_gateway_id:
- description: ID of the NAT gateway
- returned: when the route is via a NAT gateway
- type: str
- sample: local
- origin:
- description: mechanism through which the route is in the table
- returned: always
- type: str
- sample: CreateRouteTable
- state:
- description: state of the route
- returned: always
- type: str
- sample: active
- tags:
- description: Tags applied to the route table
- returned: always
- type: dict
- sample:
- Name: Public route table
- Public: 'true'
- vpc_id:
- description: ID for the VPC in which the route lives
- returned: always
- type: str
- sample: vpc-6e2d2407
-'''
-
-import re
-from time import sleep
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.waiters import get_waiter
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, snake_dict_to_camel_dict
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
-from ansible.module_utils.ec2 import compare_aws_tags, AWSRetry
-
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$')
-SUBNET_RE = re.compile(r'^subnet-[A-Za-z0-9]+$')
-ROUTE_TABLE_RE = re.compile(r'^rtb-[A-Za-z0-9]+$')
-
-
-@AWSRetry.exponential_backoff()
-def describe_subnets_with_backoff(connection, **params):
- return connection.describe_subnets(**params)['Subnets']
-
-
-def find_subnets(connection, module, vpc_id, identified_subnets):
- """
- Finds a list of subnets, each identified either by a raw ID, a unique
- 'Name' tag, or a CIDR such as 10.0.0.0/8.
-
- Note that this function is duplicated in other ec2 modules, and should
- potentially be moved into a shared module_utils
- """
- subnet_ids = []
- subnet_names = []
- subnet_cidrs = []
- for subnet in (identified_subnets or []):
- if re.match(SUBNET_RE, subnet):
- subnet_ids.append(subnet)
- elif re.match(CIDR_RE, subnet):
- subnet_cidrs.append(subnet)
- else:
- subnet_names.append(subnet)
-
- subnets_by_id = []
- if subnet_ids:
- filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
- try:
- subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids)
-
- subnets_by_cidr = []
- if subnet_cidrs:
- filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr': subnet_cidrs})
- try:
- subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs)
-
- subnets_by_name = []
- if subnet_names:
- filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'tag:Name': subnet_names})
- try:
- subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names)
-
- for name in subnet_names:
- matching_count = len([1 for s in subnets_by_name for t in s.get('Tags', []) if t['Key'] == 'Name' and t['Value'] == name])
- if matching_count == 0:
- module.fail_json(msg='Subnet named "{0}" does not exist'.format(name))
- elif matching_count > 1:
- module.fail_json(msg='Multiple subnets named "{0}"'.format(name))
-
- return subnets_by_id + subnets_by_cidr + subnets_by_name
-
-
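-# find_subnets classifies each requested subnet by shape before querying:
-# e.g. (illustrative) 'subnet-abc123' is looked up by ID, '10.0.1.0/24' by
-# CIDR, and anything else by its Name tag.
-
-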
-def find_igw(connection, module, vpc_id):
- """
- Finds the Internet gateway for the given VPC ID.
- """
- filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id})
- try:
- igw = connection.describe_internet_gateways(Filters=filters)['InternetGateways']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='No IGW found for VPC {0}'.format(vpc_id))
- if len(igw) == 1:
- return igw[0]['InternetGatewayId']
- elif len(igw) == 0:
- module.fail_json(msg='No IGWs found for VPC {0}'.format(vpc_id))
- else:
- module.fail_json(msg='Multiple IGWs found for VPC {0}'.format(vpc_id))
-
-
-@AWSRetry.exponential_backoff()
-def describe_tags_with_backoff(connection, resource_id):
- filters = ansible_dict_to_boto3_filter_list({'resource-id': resource_id})
- paginator = connection.get_paginator('describe_tags')
- tags = paginator.paginate(Filters=filters).build_full_result()['Tags']
- return boto3_tag_list_to_ansible_dict(tags)
-
-
-def tags_match(match_tags, candidate_tags):
- return all((k in candidate_tags and candidate_tags[k] == v
- for k, v in match_tags.items()))
-
-
-def ensure_tags(connection=None, module=None, resource_id=None, tags=None, purge_tags=None, check_mode=None):
- try:
- cur_tags = describe_tags_with_backoff(connection, resource_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to list tags for VPC')
-
- to_add, to_delete = compare_aws_tags(cur_tags, tags, purge_tags)
-
- if not to_add and not to_delete:
- return {'changed': False, 'tags': cur_tags}
-    if check_mode:
-        if not purge_tags:
-            # dict.update() mutates in place and returns None, so merge the
-            # requested tags into cur_tags instead of assigning the result.
-            cur_tags.update(tags)
-            tags = cur_tags
-        return {'changed': True, 'tags': tags}
-
- if to_delete:
- try:
- connection.delete_tags(Resources=[resource_id], Tags=[{'Key': k} for k in to_delete])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete tags")
- if to_add:
- try:
- connection.create_tags(Resources=[resource_id], Tags=ansible_dict_to_boto3_tag_list(to_add))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create tags")
-
- try:
- latest_tags = describe_tags_with_backoff(connection, resource_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to list tags for VPC')
- return {'changed': True, 'tags': latest_tags}
-
-
-@AWSRetry.exponential_backoff()
-def describe_route_tables_with_backoff(connection, **params):
- try:
- return connection.describe_route_tables(**params)['RouteTables']
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'InvalidRouteTableID.NotFound':
- return None
- else:
- raise
-
-
-def get_route_table_by_id(connection, module, route_table_id):
-
- route_table = None
- try:
- route_tables = describe_route_tables_with_backoff(connection, RouteTableIds=[route_table_id])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get route table")
- if route_tables:
- route_table = route_tables[0]
-
- return route_table
-
-
-def get_route_table_by_tags(connection, module, vpc_id, tags):
- count = 0
- route_table = None
- filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id})
- try:
- route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get route table")
- for table in route_tables:
- this_tags = describe_tags_with_backoff(connection, table['RouteTableId'])
- if tags_match(tags, this_tags):
- route_table = table
- count += 1
-
- if count > 1:
- module.fail_json(msg="Tags provided do not identify a unique route table")
- else:
- return route_table
-
-
-def route_spec_matches_route(route_spec, route):
- if route_spec.get('GatewayId') and 'nat-' in route_spec['GatewayId']:
- route_spec['NatGatewayId'] = route_spec.pop('GatewayId')
- if route_spec.get('GatewayId') and 'vpce-' in route_spec['GatewayId']:
- if route_spec.get('DestinationCidrBlock', '').startswith('pl-'):
- route_spec['DestinationPrefixListId'] = route_spec.pop('DestinationCidrBlock')
-
- return set(route_spec.items()).issubset(route.items())
-
-
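-# Subset matching via set(dict.items()): a spec matches a route when every
-# key/value pair in the spec also appears in the route. Sketch with made-up
-# values:
-#
-#     >>> spec = {'DestinationCidrBlock': '10.0.0.0/16'}
-#     >>> route = {'DestinationCidrBlock': '10.0.0.0/16', 'GatewayId': 'local'}
-#     >>> set(spec.items()).issubset(route.items())
-#     True
-
-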
-def route_spec_matches_route_cidr(route_spec, route):
- return route_spec['DestinationCidrBlock'] == route.get('DestinationCidrBlock')
-
-
-def rename_key(d, old_key, new_key):
- d[new_key] = d.pop(old_key)
-
-
-def index_of_matching_route(route_spec, routes_to_match):
- for i, route in enumerate(routes_to_match):
- if route_spec_matches_route(route_spec, route):
- return "exact", i
- elif 'Origin' in route_spec and route_spec['Origin'] != 'EnableVgwRoutePropagation':
- if route_spec_matches_route_cidr(route_spec, route):
- return "replace", i
-
-
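-# index_of_matching_route yields one of three outcomes: ("exact", i) when all
-# of the spec's key/value pairs are present in an existing route, ("replace",
-# i) when the spec carries a non-propagation Origin and only the destination
-# CIDR matches, or None (implicit fall-through) when the route must be created.
-
-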
-def ensure_routes(connection=None, module=None, route_table=None, route_specs=None,
- propagating_vgw_ids=None, check_mode=None, purge_routes=None):
- routes_to_match = [route for route in route_table['Routes']]
- route_specs_to_create = []
- route_specs_to_recreate = []
- for route_spec in route_specs:
- match = index_of_matching_route(route_spec, routes_to_match)
- if match is None:
- if route_spec.get('DestinationCidrBlock'):
- route_specs_to_create.append(route_spec)
- else:
- module.warn("Skipping creating {0} because it has no destination cidr block. "
- "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec))
- else:
- if match[0] == "replace":
- if route_spec.get('DestinationCidrBlock'):
- route_specs_to_recreate.append(route_spec)
- else:
- module.warn("Skipping recreating route {0} because it has no destination cidr block.".format(route_spec))
- del routes_to_match[match[1]]
-
- routes_to_delete = []
- if purge_routes:
- for r in routes_to_match:
- if not r.get('DestinationCidrBlock'):
- module.warn("Skipping purging route {0} because it has no destination cidr block. "
- "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(r))
- continue
- if r['Origin'] == 'CreateRoute':
- routes_to_delete.append(r)
-
- changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate)
- if changed and not check_mode:
- for route in routes_to_delete:
- try:
- connection.delete_route(RouteTableId=route_table['RouteTableId'], DestinationCidrBlock=route['DestinationCidrBlock'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete route")
-
- for route_spec in route_specs_to_recreate:
- try:
- connection.replace_route(RouteTableId=route_table['RouteTableId'],
- **route_spec)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't recreate route")
-
- for route_spec in route_specs_to_create:
- try:
- connection.create_route(RouteTableId=route_table['RouteTableId'],
- **route_spec)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create route")
-
- return {'changed': bool(changed)}
-
-
-def ensure_subnet_association(connection=None, module=None, vpc_id=None, route_table_id=None, subnet_id=None,
- check_mode=None):
- filters = ansible_dict_to_boto3_filter_list({'association.subnet-id': subnet_id, 'vpc-id': vpc_id})
- try:
- route_tables = describe_route_tables_with_backoff(connection, Filters=filters)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get route tables")
- for route_table in route_tables:
- if route_table['RouteTableId'] is None:
- continue
- for a in route_table['Associations']:
- if a['Main']:
- continue
- if a['SubnetId'] == subnet_id:
- if route_table['RouteTableId'] == route_table_id:
- return {'changed': False, 'association_id': a['RouteTableAssociationId']}
- else:
- if check_mode:
- return {'changed': True}
- try:
- connection.disassociate_route_table(AssociationId=a['RouteTableAssociationId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")
-
- try:
-        association_id = connection.associate_route_table(RouteTableId=route_table_id, SubnetId=subnet_id)['AssociationId']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't associate subnet with route table")
- return {'changed': True, 'association_id': association_id}
-
-
-def ensure_subnet_associations(connection=None, module=None, route_table=None, subnets=None,
- check_mode=None, purge_subnets=None):
- current_association_ids = [a['RouteTableAssociationId'] for a in route_table['Associations'] if not a['Main']]
- new_association_ids = []
- changed = False
- for subnet in subnets:
- result = ensure_subnet_association(connection=connection, module=module, vpc_id=route_table['VpcId'],
- route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId'], check_mode=check_mode)
- changed = changed or result['changed']
- if changed and check_mode:
- return {'changed': True}
- new_association_ids.append(result['association_id'])
-
- if purge_subnets:
- to_delete = [a_id for a_id in current_association_ids
- if a_id not in new_association_ids]
-
- for a_id in to_delete:
- changed = True
- if not check_mode:
- try:
- connection.disassociate_route_table(AssociationId=a_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table")
-
- return {'changed': changed}
-
-
-def ensure_propagation(connection=None, module=None, route_table=None, propagating_vgw_ids=None,
- check_mode=None):
- changed = False
- gateways = [gateway['GatewayId'] for gateway in route_table['PropagatingVgws']]
- to_add = set(propagating_vgw_ids) - set(gateways)
- if to_add:
- changed = True
- if not check_mode:
- for vgw_id in to_add:
- try:
- connection.enable_vgw_route_propagation(RouteTableId=route_table['RouteTableId'],
- GatewayId=vgw_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't enable route propagation")
-
- return {'changed': changed}
-
-
-def ensure_route_table_absent(connection, module):
-
- lookup = module.params.get('lookup')
- route_table_id = module.params.get('route_table_id')
- tags = module.params.get('tags')
- vpc_id = module.params.get('vpc_id')
- purge_subnets = module.params.get('purge_subnets')
-
- if lookup == 'tag':
- if tags is not None:
- route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
- else:
- route_table = None
- elif lookup == 'id':
- route_table = get_route_table_by_id(connection, module, route_table_id)
-
- if route_table is None:
- return {'changed': False}
-
- # disassociate subnets before deleting route table
- if not module.check_mode:
- ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
- subnets=[], check_mode=False, purge_subnets=purge_subnets)
- try:
- connection.delete_route_table(RouteTableId=route_table['RouteTableId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error deleting route table")
-
- return {'changed': True}
-
-
-def get_route_table_info(connection, module, route_table):
- result = get_route_table_by_id(connection, module, route_table['RouteTableId'])
- try:
- result['Tags'] = describe_tags_with_backoff(connection, route_table['RouteTableId'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get tags for route table")
- result = camel_dict_to_snake_dict(result, ignore_list=['Tags'])
- # backwards compatibility
- result['id'] = result['route_table_id']
- return result
-
-
-def create_route_spec(connection, module, vpc_id):
- routes = module.params.get('routes')
-
- for route_spec in routes:
- rename_key(route_spec, 'dest', 'destination_cidr_block')
-
- if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw':
- igw = find_igw(connection, module, vpc_id)
- route_spec['gateway_id'] = igw
- if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'):
- rename_key(route_spec, 'gateway_id', 'nat_gateway_id')
-
- return snake_dict_to_camel_dict(routes, capitalize_first=True)
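-
-# Illustration (not part of the original module): a task route spec such as
-#   routes: [{dest: 10.0.0.0/16, gateway_id: igw}]
-# is renamed to use destination_cidr_block, the 'igw' shorthand is resolved to
-# the VPC's internet gateway ID, and the spec is camelized for boto3:
-#   [{'DestinationCidrBlock': '10.0.0.0/16', 'GatewayId': 'igw-xxxxxxxx'}]
-# A gateway_id starting with 'nat-' is renamed to nat_gateway_id instead.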
-
-
-def ensure_route_table_present(connection, module):
-
- lookup = module.params.get('lookup')
- propagating_vgw_ids = module.params.get('propagating_vgw_ids')
- purge_routes = module.params.get('purge_routes')
- purge_subnets = module.params.get('purge_subnets')
- purge_tags = module.params.get('purge_tags')
- route_table_id = module.params.get('route_table_id')
- subnets = module.params.get('subnets')
- tags = module.params.get('tags')
- vpc_id = module.params.get('vpc_id')
- routes = create_route_spec(connection, module, vpc_id)
-
- changed = False
- tags_valid = False
-
- if lookup == 'tag':
- if tags is not None:
- try:
- route_table = get_route_table_by_tags(connection, module, vpc_id, tags)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error finding route table with lookup 'tag'")
- else:
- route_table = None
- elif lookup == 'id':
- try:
- route_table = get_route_table_by_id(connection, module, route_table_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error finding route table with lookup 'id'")
-
- # If no route table returned then create new route table
- if route_table is None:
- changed = True
- if not module.check_mode:
- try:
- route_table = connection.create_route_table(VpcId=vpc_id)['RouteTable']
- # try to wait for route table to be present before moving on
- get_waiter(
- connection, 'route_table_exists'
- ).wait(
- RouteTableIds=[route_table['RouteTableId']],
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Error creating route table")
- else:
- route_table = {"id": "rtb-xxxxxxxx", "route_table_id": "rtb-xxxxxxxx", "vpc_id": vpc_id}
- module.exit_json(changed=changed, route_table=route_table)
-
- if routes is not None:
- result = ensure_routes(connection=connection, module=module, route_table=route_table,
- route_specs=routes, propagating_vgw_ids=propagating_vgw_ids,
- check_mode=module.check_mode, purge_routes=purge_routes)
- changed = changed or result['changed']
-
- if propagating_vgw_ids is not None:
- result = ensure_propagation(connection=connection, module=module, route_table=route_table,
- propagating_vgw_ids=propagating_vgw_ids, check_mode=module.check_mode)
- changed = changed or result['changed']
-
- if not tags_valid and tags is not None:
- result = ensure_tags(connection=connection, module=module, resource_id=route_table['RouteTableId'], tags=tags,
- purge_tags=purge_tags, check_mode=module.check_mode)
- route_table['Tags'] = result['tags']
- changed = changed or result['changed']
-
- if subnets is not None:
- associated_subnets = find_subnets(connection, module, vpc_id, subnets)
-
- result = ensure_subnet_associations(connection=connection, module=module, route_table=route_table,
- subnets=associated_subnets, check_mode=module.check_mode,
- purge_subnets=purge_subnets)
- changed = changed or result['changed']
-
- if changed:
- # pause to allow route table routes/subnets/associations to be updated before exiting with final state
- sleep(5)
- module.exit_json(changed=changed, route_table=get_route_table_info(connection, module, route_table))
-
-
-def main():
- argument_spec = dict(
- lookup=dict(default='tag', choices=['tag', 'id']),
- propagating_vgw_ids=dict(type='list'),
- purge_routes=dict(default=True, type='bool'),
- purge_subnets=dict(default=True, type='bool'),
- purge_tags=dict(default=False, type='bool'),
- route_table_id=dict(),
- routes=dict(default=[], type='list'),
- state=dict(default='present', choices=['present', 'absent']),
- subnets=dict(type='list'),
- tags=dict(type='dict', aliases=['resource_tags']),
- vpc_id=dict()
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[['lookup', 'id', ['route_table_id']],
- ['lookup', 'tag', ['vpc_id']],
- ['state', 'present', ['vpc_id']]],
- supports_check_mode=True)
-
- connection = module.client('ec2')
-
- state = module.params.get('state')
-
- if state == 'present':
- result = ensure_route_table_present(connection, module)
- elif state == 'absent':
- result = ensure_route_table_absent(connection, module)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_info.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_info.py
deleted file mode 100644
index c3b4004608..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_info.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_route_table_info
-short_description: Gather information about EC2 VPC route tables in AWS
-description:
-    - Gather information about EC2 VPC route tables in AWS.
- - This module was called C(ec2_vpc_route_table_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.0"
-author: "Rob White (@wimnat)"
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
- type: dict
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all VPC route tables
-- ec2_vpc_route_table_info:
-
-# Gather information about a particular VPC route table using route table ID
-- ec2_vpc_route_table_info:
- filters:
- route-table-id: rtb-00112233
-
-# Gather information about any VPC route table with a tag key Name and value Example
-- ec2_vpc_route_table_info:
- filters:
- "tag:Name": Example
-
-# Gather information about any VPC route table within VPC with ID vpc-abcdef00
-- ec2_vpc_route_table_info:
- filters:
- vpc-id: vpc-abcdef00
-
-'''
-
-try:
- import boto.vpc
- from boto.exception import BotoServerError
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
-
-
-def get_route_table_info(route_table):
-
- # Add any routes to array
- routes = []
- associations = []
- for route in route_table.routes:
- routes.append(route.__dict__)
- for association in route_table.associations:
- associations.append(association.__dict__)
-
- route_table_info = {'id': route_table.id,
- 'routes': routes,
- 'associations': associations,
- 'tags': route_table.tags,
- 'vpc_id': route_table.vpc_id
- }
-
- return route_table_info
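-
-# Illustration (not part of the original module): the dict returned above has
-# the shape {'id': 'rtb-xxxxxxxx', 'routes': [...], 'associations': [...],
-# 'tags': {...}, 'vpc_id': 'vpc-xxxxxxxx'}; routes and associations are the
-# raw __dict__ of the corresponding boto objects.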
-
-
-def list_ec2_vpc_route_tables(connection, module):
-
- filters = module.params.get("filters")
- route_table_dict_array = []
-
- try:
- all_route_tables = connection.get_all_route_tables(filters=filters)
- except BotoServerError as e:
- module.fail_json(msg=e.message)
-
- for route_table in all_route_tables:
- route_table_dict_array.append(get_route_table_info(route_table))
-
- module.exit_json(route_tables=route_table_dict_array)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- filters=dict(default=None, type='dict')
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
- if module._name == 'ec2_vpc_route_table_facts':
- module.deprecate("The 'ec2_vpc_route_table_facts' module has been renamed to 'ec2_vpc_route_table_info'", version='2.13')
-
- if not HAS_BOTO:
-        module.fail_json(msg='boto is required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-
- if region:
- try:
- connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- module.fail_json(msg=str(e))
- else:
- module.fail_json(msg="region must be specified")
-
- list_ec2_vpc_route_tables(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py
deleted file mode 100644
index 6bcc007c7f..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py
+++ /dev/null
@@ -1,581 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
-module: ec2_vpc_vgw
-short_description: Create and delete AWS VPN Virtual Gateways
-description:
-  - Creates AWS VPN Virtual Gateways.
-  - Deletes AWS VPN Virtual Gateways.
-  - Attaches Virtual Gateways to VPCs.
-  - Detaches Virtual Gateways from VPCs.
-version_added: "2.2"
-requirements: [ boto3 ]
-options:
- state:
- description:
- - present to ensure resource is created.
- - absent to remove resource
- default: present
- choices: [ "present", "absent"]
- type: str
- name:
- description:
-      - Name of the VGW to be created or deleted.
- type: str
- type:
- description:
-      - Type of the virtual gateway to be created.
- choices: [ "ipsec.1" ]
- default: "ipsec.1"
- type: str
- vpn_gateway_id:
- description:
-      - VPN gateway ID of an existing virtual gateway.
- type: str
- vpc_id:
- description:
-      - The VPC ID of a VPC to attach or detach.
- type: str
- asn:
- description:
-      - The BGP ASN of the Amazon side.
- version_added: "2.6"
- type: int
- wait_timeout:
- description:
-      - Number of seconds to wait for status during VPC attach and detach.
- default: 320
- type: int
- tags:
- description:
-      - Dictionary of resource tags.
- aliases: [ "resource_tags" ]
- type: dict
-author: Nick Aslanidis (@naslanidis)
-extends_documentation_fragment:
- - ec2
- - aws
-'''
-
-EXAMPLES = '''
-- name: Create a new vgw attached to a specific VPC
- ec2_vpc_vgw:
- state: present
- region: ap-southeast-2
- profile: personal
- vpc_id: vpc-12345678
- name: personal-testing
- type: ipsec.1
- register: created_vgw
-
-- name: Create a new unattached vgw
- ec2_vpc_vgw:
- state: present
- region: ap-southeast-2
- profile: personal
- name: personal-testing
- type: ipsec.1
- tags:
- environment: production
- owner: ABC
- register: created_vgw
-
-- name: Remove an existing vgw using the name
- ec2_vpc_vgw:
- state: absent
- region: ap-southeast-2
- profile: personal
- name: personal-testing
- type: ipsec.1
- register: deleted_vgw
-
-- name: Remove an existing vgw using the vpn_gateway_id
- ec2_vpc_vgw:
- state: absent
- region: ap-southeast-2
- profile: personal
- vpn_gateway_id: vgw-3a9aa123
- register: deleted_vgw
-'''
-
-RETURN = '''
-result:
- description: The result of the create, or delete action.
- returned: success
- type: dict
-'''
-
-import time
-import traceback
-
-try:
- import botocore
- import boto3
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.aws.core import is_boto3_error_code
-from ansible.module_utils.aws.waiters import get_waiter
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info, AWSRetry
-from ansible.module_utils._text import to_native
-
-
-def get_vgw_info(vgws):
- if not isinstance(vgws, list):
- return
-
- for vgw in vgws:
- vgw_info = {
- 'id': vgw['VpnGatewayId'],
- 'type': vgw['Type'],
- 'state': vgw['State'],
- 'vpc_id': None,
- 'tags': dict()
- }
-
- for tag in vgw['Tags']:
- vgw_info['tags'][tag['Key']] = tag['Value']
-
- if len(vgw['VpcAttachments']) != 0 and vgw['VpcAttachments'][0]['State'] == 'attached':
- vgw_info['vpc_id'] = vgw['VpcAttachments'][0]['VpcId']
-
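-        # Note: this return statement sits inside the for loop, so only the
-        # first gateway in the list is described; in this module find_vgw()
-        # is called for a single gateway ID, so the list has one element.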
- return vgw_info
-
-
-def wait_for_status(client, module, vpn_gateway_id, status):
- polling_increment_secs = 15
- max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
- status_achieved = False
-
- for x in range(0, max_retries):
- try:
- response = find_vgw(client, module, vpn_gateway_id)
- if response[0]['VpcAttachments'][0]['State'] == status:
- status_achieved = True
- break
- else:
- time.sleep(polling_increment_secs)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- result = response
- return status_achieved, result
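-
-# Illustration (not part of the original module): wait_for_status() polls every
-# 15 seconds, so the default wait_timeout of 320 seconds allows up to
-# 320 // 15 = 21 polls before giving up.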
-
-
-def attach_vgw(client, module, vpn_gateway_id):
- params = dict()
- params['VpcId'] = module.params.get('vpc_id')
-
- try:
- # Immediately after a detachment, the EC2 API sometimes will report the VpnGateways[0].State
- # as available several seconds before actually permitting a new attachment.
- # So we catch and retry that error. See https://github.com/ansible/ansible/issues/53185
- response = AWSRetry.jittered_backoff(retries=5,
- catch_extra_error_codes=['InvalidParameterValue']
- )(client.attach_vpn_gateway)(VpnGatewayId=vpn_gateway_id,
- VpcId=params['VpcId'])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached')
- if not status_achieved:
- module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console')
-
- result = response
- return result
-
-
-def detach_vgw(client, module, vpn_gateway_id, vpc_id=None):
- params = dict()
- params['VpcId'] = module.params.get('vpc_id')
-
- if vpc_id:
- try:
- response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- else:
- try:
- response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=params['VpcId'])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached')
- if not status_achieved:
- module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console')
-
- result = response
- return result
-
-
-def create_vgw(client, module):
- params = dict()
- params['Type'] = module.params.get('type')
- if module.params.get('asn'):
- params['AmazonSideAsn'] = module.params.get('asn')
-
- try:
- response = client.create_vpn_gateway(**params)
- get_waiter(
- client, 'vpn_gateway_exists'
- ).wait(
- VpnGatewayIds=[response['VpnGateway']['VpnGatewayId']]
- )
- except botocore.exceptions.WaiterError as e:
- module.fail_json(msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId']),
- exception=traceback.format_exc())
- except is_boto3_error_code('VpnGatewayLimitExceeded'):
- module.fail_json(msg="Too many VPN gateways exist in this account.", exception=traceback.format_exc())
- except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- result = response
- return result
-
-
-def delete_vgw(client, module, vpn_gateway_id):
-
- try:
- response = client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- # return the deleted VpnGatewayId as this is not included in the above response
- result = vpn_gateway_id
- return result
-
-
-def create_tags(client, module, vpn_gateway_id):
- params = dict()
-
- try:
- response = client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module))
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- result = response
- return result
-
-
-def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None):
- params = dict()
-
- if tags_to_delete:
- try:
- response = client.delete_tags(Resources=[vpn_gateway_id], Tags=tags_to_delete)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- else:
- try:
- response = client.delete_tags(Resources=[vpn_gateway_id])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- result = response
- return result
-
-
-def load_tags(module):
- tags = []
-
- if module.params.get('tags'):
- for name, value in module.params.get('tags').items():
- tags.append({'Key': name, 'Value': str(value)})
- tags.append({'Key': "Name", 'Value': module.params.get('name')})
- else:
- tags.append({'Key': "Name", 'Value': module.params.get('name')})
- return tags
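-
-# Illustration (not part of the original module): with tags={'environment': 'production'}
-# and name='personal-testing', load_tags() returns
-#   [{'Key': 'environment', 'Value': 'production'},
-#    {'Key': 'Name', 'Value': 'personal-testing'}]
-# The Name tag is always appended, whether or not other tags were supplied.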
-
-
-def find_tags(client, module, resource_id=None):
-
- if resource_id:
- try:
- response = client.describe_tags(Filters=[
- {'Name': 'resource-id', 'Values': [resource_id]}
- ])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- result = response
- return result
-
-
-def check_tags(client, module, existing_vgw, vpn_gateway_id):
- params = dict()
- params['Tags'] = module.params.get('tags')
- vgw = existing_vgw
- changed = False
- tags_list = {}
-
- # format tags for comparison
- for tags in existing_vgw[0]['Tags']:
- if tags['Key'] != 'Name':
- tags_list[tags['Key']] = tags['Value']
-
- # if existing tags don't match the tags arg, delete existing and recreate with new list
- if params['Tags'] is not None and tags_list != params['Tags']:
- delete_tags(client, module, vpn_gateway_id)
- create_tags(client, module, vpn_gateway_id)
- vgw = find_vgw(client, module)
- changed = True
-
- # if no tag args are supplied, delete any existing tags with the exception of the name tag
- if params['Tags'] is None and tags_list != {}:
- tags_to_delete = []
- for tags in existing_vgw[0]['Tags']:
- if tags['Key'] != 'Name':
- tags_to_delete.append(tags)
-
- delete_tags(client, module, vpn_gateway_id, tags_to_delete)
- vgw = find_vgw(client, module)
- changed = True
-
- return vgw, changed
-
-
-def find_vpc(client, module):
- params = dict()
- params['vpc_id'] = module.params.get('vpc_id')
-
- if params['vpc_id']:
- try:
- response = client.describe_vpcs(VpcIds=[params['vpc_id']])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- result = response
- return result
-
-
-def find_vgw(client, module, vpn_gateway_id=None):
- params = dict()
- if vpn_gateway_id:
- params['VpnGatewayIds'] = vpn_gateway_id
- else:
- params['Filters'] = [
- {'Name': 'type', 'Values': [module.params.get('type')]},
- {'Name': 'tag:Name', 'Values': [module.params.get('name')]},
- ]
- if module.params.get('state') == 'present':
- params['Filters'].append({'Name': 'state', 'Values': ['pending', 'available']})
- try:
- response = client.describe_vpn_gateways(**params)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- return sorted(response['VpnGateways'], key=lambda k: k['VpnGatewayId'])
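-
-# Illustration (not part of the original module): when no vpn_gateway_id is
-# given, find_vgw() filters by the task arguments, e.g. for
-# name='personal-testing' with state='present':
-#   [{'Name': 'type', 'Values': ['ipsec.1']},
-#    {'Name': 'tag:Name', 'Values': ['personal-testing']},
-#    {'Name': 'state', 'Values': ['pending', 'available']}]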
-
-
-def ensure_vgw_present(client, module):
-
- # If an existing vgw name and type matches our args, then a match is considered to have been
- # found and we will not create another vgw.
-
- changed = False
- params = dict()
- result = dict()
- params['Name'] = module.params.get('name')
- params['VpcId'] = module.params.get('vpc_id')
- params['Type'] = module.params.get('type')
- params['Tags'] = module.params.get('tags')
- params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
-
- # check that the vpc_id exists. If not, an exception is thrown
- if params['VpcId']:
- vpc = find_vpc(client, module)
-
- # check if a gateway matching our module args already exists
- existing_vgw = find_vgw(client, module)
-
- if existing_vgw != []:
- vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
- vgw, changed = check_tags(client, module, existing_vgw, vpn_gateway_id)
-
- # if a vpc_id was provided, check if it exists and if it's attached
- if params['VpcId']:
-
- current_vpc_attachments = existing_vgw[0]['VpcAttachments']
-
- if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached':
- if current_vpc_attachments[0]['VpcId'] != params['VpcId'] or current_vpc_attachments[0]['State'] != 'attached':
- # detach the existing vpc from the virtual gateway
- vpc_to_detach = current_vpc_attachments[0]['VpcId']
- detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
- get_waiter(client, 'vpn_gateway_detached').wait(VpnGatewayIds=[vpn_gateway_id])
- attached_vgw = attach_vgw(client, module, vpn_gateway_id)
- changed = True
- else:
- # attach the vgw to the supplied vpc
- attached_vgw = attach_vgw(client, module, vpn_gateway_id)
- changed = True
-
- # if params['VpcId'] is not provided, check the vgw is attached to a vpc. if so, detach it.
- else:
- existing_vgw = find_vgw(client, module, [vpn_gateway_id])
-
- if existing_vgw[0]['VpcAttachments'] != []:
- if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
- # detach the vpc from the vgw
- vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
- detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
- changed = True
-
- else:
- # create a new vgw
- new_vgw = create_vgw(client, module)
- changed = True
- vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId']
-
- # tag the new virtual gateway
- create_tags(client, module, vpn_gateway_id)
-
- # if a vpc-id was supplied, attempt to attach it to the vgw
- if params['VpcId']:
- attached_vgw = attach_vgw(client, module, vpn_gateway_id)
- changed = True
-
- # return current state of the vgw
- vgw = find_vgw(client, module, [vpn_gateway_id])
- result = get_vgw_info(vgw)
- return changed, result
-
-
-def ensure_vgw_absent(client, module):
-
- # If an existing vgw name and type matches our args, then a match is considered to have been
- # found and we will take steps to delete it.
-
- changed = False
- params = dict()
- result = dict()
- params['Name'] = module.params.get('name')
- params['VpcId'] = module.params.get('vpc_id')
- params['Type'] = module.params.get('type')
- params['Tags'] = module.params.get('tags')
- params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')
-
- # check if a gateway matching our module args already exists
- if params['VpnGatewayIds']:
- existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']])
- if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted':
- existing_vgw = existing_vgw_with_id
- if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
- if params['VpcId']:
- if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
- module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
-
- else:
- # detach the vpc from the vgw
- detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId'])
- deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
- changed = True
-
- else:
- # attempt to detach any attached vpcs
- vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
- detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach)
- deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
- changed = True
-
- else:
-                # no VPCs are attached, so attempt to delete the vgw
- deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
- changed = True
-
- else:
- changed = False
- deleted_vgw = "Nothing to do"
-
- else:
- # Check that a name and type argument has been supplied if no vgw-id
- if not module.params.get('name') or not module.params.get('type'):
-        module.fail_json(msg='A name and type are required when no vgw-id is supplied and state is \'absent\'')
-
- existing_vgw = find_vgw(client, module)
- if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
- vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
- if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
- if params['VpcId']:
- if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
- module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')
-
- else:
- # detach the vpc from the vgw
- detach_vgw(client, module, vpn_gateway_id, params['VpcId'])
-
- # now that the vpc has been detached, delete the vgw
- deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
- changed = True
-
- else:
- # attempt to detach any attached vpcs
- vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
- detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
- changed = True
-
- # now that the vpc has been detached, delete the vgw
- deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
-
- else:
-            # no VPCs are attached, so attempt to delete the vgw
- deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
- changed = True
-
- else:
- changed = False
- deleted_vgw = None
-
- result = deleted_vgw
- return changed, result
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(default='present', choices=['present', 'absent']),
- name=dict(),
- vpn_gateway_id=dict(),
- vpc_id=dict(),
- asn=dict(type='int'),
- wait_timeout=dict(type='int', default=320),
- type=dict(default='ipsec.1', choices=['ipsec.1']),
- tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
- )
- )
- module = AnsibleModule(argument_spec=argument_spec,
- required_if=[['state', 'present', ['name']]])
-
- if not HAS_BOTO3:
-        module.fail_json(msg='boto3 is required for this module.')
-
- state = module.params.get('state').lower()
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg="Can't authorize connection - %s" % to_native(e), exception=traceback.format_exc())
-
- if state == 'present':
- (changed, results) = ensure_vgw_present(client, module)
- else:
- (changed, results) = ensure_vgw_absent(client, module)
- module.exit_json(changed=changed, vgw=results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_info.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_info.py
deleted file mode 100644
index 77d1eaea6a..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_info.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_vgw_info
-short_description: Gather information about virtual gateways in AWS
-description:
- - Gather information about virtual gateways in AWS.
- - This module was called C(ec2_vpc_vgw_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.3"
-requirements: [ boto3 ]
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnGateways.html) for possible filters.
- type: dict
- vpn_gateway_ids:
- description:
- - Get details of a specific Virtual Gateway ID. This value should be provided as a list.
- type: list
- elements: str
-author: "Nick Aslanidis (@naslanidis)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Gather information about all virtual gateways for an account or profile
- ec2_vpc_vgw_info:
- region: ap-southeast-2
- profile: production
- register: vgw_info
-
-- name: Gather information about a filtered list of Virtual Gateways
- ec2_vpc_vgw_info:
- region: ap-southeast-2
- profile: production
- filters:
- "tag:Name": "main-virt-gateway"
- register: vgw_info
-
-- name: Gather information about a specific virtual gateway by VpnGatewayIds
- ec2_vpc_vgw_info:
- region: ap-southeast-2
- profile: production
- vpn_gateway_ids: vgw-c432f6a7
- register: vgw_info
-'''
-
-RETURN = '''
-virtual_gateways:
- description: The virtual gateways for the account.
- returned: always
- type: list
- sample: [
- {
- "state": "available",
- "tags": [
- {
- "key": "Name",
- "value": "TEST-VGW"
- }
- ],
- "type": "ipsec.1",
- "vpc_attachments": [
- {
- "state": "attached",
- "vpc_id": "vpc-22a93c74"
- }
- ],
- "vpn_gateway_id": "vgw-23e3d64e"
- }
- ]
-
-changed:
- description: True if listing the virtual gateways succeeds.
- returned: always
- type: bool
- sample: "false"
-'''
-import traceback
-
-try:
- import botocore
-except ImportError:
- pass # will be captured by imported HAS_BOTO3
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
- camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, HAS_BOTO3)
-
-
-def get_virtual_gateway_info(virtual_gateway):
- virtual_gateway_info = {'VpnGatewayId': virtual_gateway['VpnGatewayId'],
- 'State': virtual_gateway['State'],
- 'Type': virtual_gateway['Type'],
- 'VpcAttachments': virtual_gateway['VpcAttachments'],
- 'Tags': virtual_gateway.get('Tags', [])}
- return virtual_gateway_info
-
-
-def list_virtual_gateways(client, module):
- params = dict()
-
- params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
- params['DryRun'] = module.check_mode
-
- if module.params.get("vpn_gateway_ids"):
- params['VpnGatewayIds'] = module.params.get("vpn_gateway_ids")
-
- try:
- all_virtual_gateways = client.describe_vpn_gateways(**params)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg=str(e), exception=traceback.format_exc())
-
- return [camel_dict_to_snake_dict(get_virtual_gateway_info(vgw))
- for vgw in all_virtual_gateways['VpnGateways']]
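-
-# Illustration (not part of the original module): ansible_dict_to_boto3_filter_list()
-# converts the task's filters dict into the list form boto3 expects, e.g.
-#   {'tag:Name': 'main-virt-gateway'}
-# becomes
-#   [{'Name': 'tag:Name', 'Values': ['main-virt-gateway']}]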
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- filters=dict(type='dict', default=dict()),
- vpn_gateway_ids=dict(type='list', default=None)
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._name == 'ec2_vpc_vgw_facts':
- module.deprecate("The 'ec2_vpc_vgw_facts' module has been renamed to 'ec2_vpc_vgw_info'", version='2.13')
-
- # Validate Requirements
- if not HAS_BOTO3:
-        module.fail_json(msg='boto3 is required for this module.')
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg="Can't authorize connection - " + str(e))
-
-    # gather information about the virtual gateways
- results = list_virtual_gateways(connection, module)
-
- module.exit_json(virtual_gateways=results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_vpn.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_vpn.py
deleted file mode 100644
index 29d65326ff..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_vpn.py
+++ /dev/null
@@ -1,783 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: ec2_vpc_vpn
-short_description: Create, modify, and delete EC2 VPN connections
-description:
- - This module creates, modifies, and deletes VPN connections. Idempotence is achieved by using the filters
- option or specifying the VPN connection identifier.
-version_added: "2.4"
-extends_documentation_fragment:
- - ec2
- - aws
-requirements: ['boto3', 'botocore']
-author: "Sloane Hertel (@s-hertel)"
-options:
- state:
- description:
- - The desired state of the VPN connection.
- choices: ['present', 'absent']
- default: present
- required: no
- type: str
- customer_gateway_id:
- description:
- - The ID of the customer gateway.
- type: str
- connection_type:
- description:
- - The type of VPN connection.
- - At this time only 'ipsec.1' is supported.
- default: ipsec.1
- type: str
- vpn_gateway_id:
- description:
- - The ID of the virtual private gateway.
- type: str
- vpn_connection_id:
- description:
- - The ID of the VPN connection. Required to modify or delete a connection if the filters option does not provide a unique match.
- type: str
- tags:
- description:
- - Tags to attach to the VPN connection.
- type: dict
- purge_tags:
- description:
-      - Whether or not to delete VPN connection tags that are associated with the connection but not specified in the task.
- type: bool
- default: false
- static_only:
- description:
- - Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.
- default: False
- type: bool
- required: no
- tunnel_options:
- description:
- - An optional list object containing no more than two dict members, each of which may contain 'TunnelInsideCidr'
- and/or 'PreSharedKey' keys with appropriate string values. AWS defaults will apply in absence of either of
- the aforementioned keys.
- required: no
- version_added: "2.5"
- type: list
- elements: dict
- suboptions:
- TunnelInsideCidr:
- type: str
- description: The range of inside IP addresses for the tunnel.
- PreSharedKey:
- type: str
- description: The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.
- filters:
- description:
- - An alternative to using vpn_connection_id. If multiple matches are found, vpn_connection_id is required.
- If one of the following suboptions is a list of items to filter by, only one item needs to match to find the VPN
- that correlates. e.g. if the filter 'cidr' is ['194.168.2.0/24', '192.168.2.0/24'] and the VPN route only has the
- destination cidr block of '192.168.2.0/24' it will be found with this filter (assuming there are not multiple
- VPNs that are matched). Another example, if the filter 'vpn' is equal to ['vpn-ccf7e7ad', 'vpn-cb0ae2a2'] and one
-        of the VPNs has the state deleted (exists but is unmodifiable) and the other exists and is not deleted,
- it will be found via this filter. See examples.
- suboptions:
- cgw-config:
- description:
- - The customer gateway configuration of the VPN as a string (in the format of the return value) or a list of those strings.
- static-routes-only:
- description:
- - The type of routing; true or false.
- cidr:
- description:
- - The destination cidr of the VPN's route as a string or a list of those strings.
- bgp:
- description:
- - The BGP ASN number associated with a BGP device. Only works if the connection is attached.
- This filtering option is currently not working.
- vpn:
- description:
- - The VPN connection id as a string or a list of those strings.
- vgw:
- description:
- - The virtual private gateway as a string or a list of those strings.
- tag-keys:
- description:
- - The key of a tag as a string or a list of those strings.
- tag-values:
- description:
- - The value of a tag as a string or a list of those strings.
- tags:
- description:
- - A dict of key value pairs.
- cgw:
- description:
- - The customer gateway id as a string or a list of those strings.
- type: dict
- routes:
- description:
- - Routes to add to the connection.
- type: list
- elements: str
- purge_routes:
- description:
-      - Whether or not to delete VPN connection routes that are not specified in the task.
- type: bool
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- default: 600
- type: int
- required: false
- version_added: "2.8"
- delay:
- description:
-      - The time to wait, in seconds, before checking the operation again.
- required: false
- type: int
- default: 15
- version_added: "2.8"
-"""
-
-EXAMPLES = """
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
-
-- name: create a VPN connection
- ec2_vpc_vpn:
- state: present
- vpn_gateway_id: vgw-XXXXXXXX
- customer_gateway_id: cgw-XXXXXXXX
-
-- name: modify VPN connection tags
- ec2_vpc_vpn:
- state: present
- vpn_connection_id: vpn-XXXXXXXX
- tags:
- Name: ansible-tag-1
- Other: ansible-tag-2
-
-- name: delete a connection
- ec2_vpc_vpn:
- vpn_connection_id: vpn-XXXXXXXX
- state: absent
-
-- name: modify VPN tags (identifying VPN by filters)
- ec2_vpc_vpn:
- state: present
- filters:
- cidr: 194.168.1.0/24
- tag-keys:
- - Ansible
- - Other
- tags:
- New: Tag
- purge_tags: true
- static_only: true
-
-- name: set up VPN with tunnel options utilizing 'TunnelInsideCidr' only
- ec2_vpc_vpn:
- state: present
- filters:
- vpn: vpn-XXXXXXXX
- static_only: true
- tunnel_options:
- -
- TunnelInsideCidr: '169.254.100.1/30'
- -
- TunnelInsideCidr: '169.254.100.5/30'
-
-- name: add routes and remove any preexisting ones
- ec2_vpc_vpn:
- state: present
- filters:
- vpn: vpn-XXXXXXXX
- routes:
- - 195.168.2.0/24
- - 196.168.2.0/24
- purge_routes: true
-
-- name: remove all routes
- ec2_vpc_vpn:
- state: present
- vpn_connection_id: vpn-XXXXXXXX
- routes: []
- purge_routes: true
-
-- name: delete a VPN identified by filters
- ec2_vpc_vpn:
- state: absent
- filters:
- tags:
- Ansible: Tag
-"""
-
-RETURN = """
-changed:
- description: If the VPN connection has changed.
- type: bool
- returned: always
- sample:
- changed: true
-customer_gateway_configuration:
- description: The configuration of the VPN connection.
- returned: I(state=present)
- type: str
-customer_gateway_id:
- description: The customer gateway connected via the connection.
- type: str
- returned: I(state=present)
- sample:
- customer_gateway_id: cgw-1220c87b
-vpn_gateway_id:
- description: The virtual private gateway connected via the connection.
- type: str
- returned: I(state=present)
- sample:
- vpn_gateway_id: vgw-cb0ae2a2
-options:
- description: The VPN connection options (currently only containing static_routes_only).
- type: complex
- returned: I(state=present)
- contains:
- static_routes_only:
- description: If the VPN connection only allows static routes.
- returned: I(state=present)
- type: str
- sample:
- static_routes_only: true
-routes:
- description: The routes of the VPN connection.
- type: list
- returned: I(state=present)
- sample:
- routes: [{
- 'destination_cidr_block': '192.168.1.0/24',
- 'state': 'available'
- }]
-state:
- description: The status of the VPN connection.
- type: str
- returned: I(state=present)
- sample:
- state: available
-tags:
- description: The tags associated with the connection.
- type: dict
- returned: I(state=present)
- sample:
- tags:
- name: ansible-test
- other: tag
-type:
- description: The type of VPN connection (currently only ipsec.1 is available).
- type: str
- returned: I(state=present)
- sample:
- type: "ipsec.1"
-vgw_telemetry:
- type: list
- returned: I(state=present)
- description: The telemetry for the VPN tunnel.
- sample:
- vgw_telemetry: [{
- 'outside_ip_address': 'string',
- 'status': 'up',
- 'last_status_change': datetime(2015, 1, 1),
- 'status_message': 'string',
- 'accepted_route_count': 123
- }]
-vpn_connection_id:
- description: The identifier for the VPN connection.
- type: str
- returned: I(state=present)
- sample:
- vpn_connection_id: vpn-781e0e19
-"""
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils._text import to_text
-from ansible.module_utils.ec2 import (
- camel_dict_to_snake_dict,
- boto3_tag_list_to_ansible_dict,
- compare_aws_tags,
- ansible_dict_to_boto3_tag_list,
-)
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError, WaiterError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-
-class VPNConnectionException(Exception):
- def __init__(self, msg, exception=None):
- self.msg = msg
- self.exception = exception
-
-
-def find_connection(connection, module_params, vpn_connection_id=None):
- ''' Looks for a unique VPN connection. Uses find_connection_response() to return the connection found, None,
- or raise an error if there were multiple viable connections. '''
-
- filters = module_params.get('filters')
-
- # vpn_connection_id may be provided via module option; takes precedence over any filter values
- if not vpn_connection_id and module_params.get('vpn_connection_id'):
- vpn_connection_id = module_params.get('vpn_connection_id')
-
- if not isinstance(vpn_connection_id, list) and vpn_connection_id:
- vpn_connection_id = [to_text(vpn_connection_id)]
- elif isinstance(vpn_connection_id, list):
- vpn_connection_id = [to_text(connection) for connection in vpn_connection_id]
-
- formatted_filter = []
- # if vpn_connection_id is provided it will take precedence over any filters since it is a unique identifier
- if not vpn_connection_id:
- formatted_filter = create_filter(module_params, provided_filters=filters)
-
- # see if there is a unique matching connection
- try:
- if vpn_connection_id:
- existing_conn = connection.describe_vpn_connections(VpnConnectionIds=vpn_connection_id,
- Filters=formatted_filter)
- else:
- existing_conn = connection.describe_vpn_connections(Filters=formatted_filter)
- except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed while describing VPN connection.",
- exception=e)
-
- return find_connection_response(connections=existing_conn)
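-
-# Illustration (not part of the original module): vpn_connection_id takes
-# precedence, so a task that sets both vpn_connection_id and filters is looked
-# up by ID alone and the filters are ignored.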
-
-
-def add_routes(connection, vpn_connection_id, routes_to_add):
- for route in routes_to_add:
- try:
- connection.create_vpn_connection_route(VpnConnectionId=vpn_connection_id,
- DestinationCidrBlock=route)
- except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id),
- exception=e)
-
-
-def remove_routes(connection, vpn_connection_id, routes_to_remove):
- for route in routes_to_remove:
- try:
- connection.delete_vpn_connection_route(VpnConnectionId=vpn_connection_id,
- DestinationCidrBlock=route)
- except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id),
- exception=e)
-
-
-def create_filter(module_params, provided_filters):
- """ Creates a filter using the user-specified parameters and unmodifiable options that may have been specified in the task """
- boto3ify_filter = {'cgw-config': 'customer-gateway-configuration',
- 'static-routes-only': 'option.static-routes-only',
- 'cidr': 'route.destination-cidr-block',
- 'bgp': 'bgp-asn',
- 'vpn': 'vpn-connection-id',
- 'vgw': 'vpn-gateway-id',
- 'tag-keys': 'tag-key',
- 'tag-values': 'tag-value',
- 'tags': 'tag',
- 'cgw': 'customer-gateway-id'}
-
- # unmodifiable options and their filter name counterpart
- param_to_filter = {"customer_gateway_id": "customer-gateway-id",
- "vpn_gateway_id": "vpn-gateway-id",
- "vpn_connection_id": "vpn-connection-id"}
-
- flat_filter_dict = {}
- formatted_filter = []
-
- for raw_param in dict(provided_filters):
-
- # fix filter names to be recognized by boto3
- if raw_param in boto3ify_filter:
- param = boto3ify_filter[raw_param]
- provided_filters[param] = provided_filters.pop(raw_param)
- elif raw_param in list(boto3ify_filter.items()):
- param = raw_param
- else:
- raise VPNConnectionException(msg="{0} is not a valid filter.".format(raw_param))
-
- # reformat filters with special formats
- if param == 'tag':
- for key in provided_filters[param]:
- formatted_key = 'tag:' + key
- if isinstance(provided_filters[param][key], list):
- flat_filter_dict[formatted_key] = str(provided_filters[param][key])
- else:
- flat_filter_dict[formatted_key] = [str(provided_filters[param][key])]
- elif param == 'option.static-routes-only':
- flat_filter_dict[param] = [str(provided_filters[param]).lower()]
- else:
- if isinstance(provided_filters[param], list):
- flat_filter_dict[param] = provided_filters[param]
- else:
- flat_filter_dict[param] = [str(provided_filters[param])]
-
- # if customer_gateway, vpn_gateway, or vpn_connection was specified in the task but not the filter, add it
- for param in param_to_filter:
- if param_to_filter[param] not in flat_filter_dict and module_params.get(param):
- flat_filter_dict[param_to_filter[param]] = [module_params.get(param)]
-
- # change the flat dict into something boto3 will understand
- formatted_filter = [{'Name': key, 'Values': value} for key, value in flat_filter_dict.items()]
-
- return formatted_filter
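-
-# Illustration (not part of the original module): create_filter() maps the
-# module's friendly filter names onto the EC2 API filter names, so
-#   filters={'cidr': '194.168.1.0/24', 'tags': {'Ansible': 'Tag'}}
-# becomes
-#   [{'Name': 'route.destination-cidr-block', 'Values': ['194.168.1.0/24']},
-#    {'Name': 'tag:Ansible', 'Values': ['Tag']}]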
-
-
-def find_connection_response(connections=None):
- """ Determine if there is a viable unique match in the connections described. Returns the unique VPN connection if one is found,
- returns None if the connection does not exist, raise an error if multiple matches are found. """
-
- # Found no connections
- if not connections or 'VpnConnections' not in connections:
- return None
-
- # Too many results
- elif connections and len(connections['VpnConnections']) > 1:
- viable = []
- for each in connections['VpnConnections']:
- # deleted connections are not modifiable
- if each['State'] not in ("deleted", "deleting"):
- viable.append(each)
- if len(viable) == 1:
- # Found one viable result; return unique match
- return viable[0]
- elif len(viable) == 0:
-            # All matches were already deleted; treat as no match so a new connection can be created
- return None
- else:
- raise VPNConnectionException(msg="More than one matching VPN connection was found. "
- "To modify or delete a VPN please specify vpn_connection_id or add filters.")
-
- # Found unique match
- elif connections and len(connections['VpnConnections']) == 1:
- # deleted connections are not modifiable
- if connections['VpnConnections'][0]['State'] not in ("deleted", "deleting"):
- return connections['VpnConnections'][0]
-
-
-def create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type, max_attempts, delay, tunnel_options=None):
- """ Creates a VPN connection """
-
- options = {'StaticRoutesOnly': static_only}
- if tunnel_options and len(tunnel_options) <= 2:
- t_opt = []
- for m in tunnel_options:
- # See Boto3 docs regarding 'create_vpn_connection'
- # tunnel options for allowed 'TunnelOptions' keys.
- if not isinstance(m, dict):
- raise TypeError("non-dict list member")
- t_opt.append(m)
- if t_opt:
- options['TunnelOptions'] = t_opt
-
- if not (customer_gateway_id and vpn_gateway_id):
- raise VPNConnectionException(msg="No matching connection was found. To create a new connection you must provide "
- "both vpn_gateway_id and customer_gateway_id.")
- try:
- vpn = connection.create_vpn_connection(Type=connection_type,
- CustomerGatewayId=customer_gateway_id,
- VpnGatewayId=vpn_gateway_id,
- Options=options)
- connection.get_waiter('vpn_connection_available').wait(
- VpnConnectionIds=[vpn['VpnConnection']['VpnConnectionId']],
- WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
- )
- except WaiterError as e:
- raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be available".format(vpn['VpnConnection']['VpnConnectionId']),
- exception=e)
- except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed to create VPN connection",
- exception=e)
-
- return vpn['VpnConnection']
-
-
-def delete_connection(connection, vpn_connection_id, delay, max_attempts):
- """ Deletes a VPN connection """
- try:
- connection.delete_vpn_connection(VpnConnectionId=vpn_connection_id)
- connection.get_waiter('vpn_connection_deleted').wait(
- VpnConnectionIds=[vpn_connection_id],
- WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}
- )
- except WaiterError as e:
- raise VPNConnectionException(msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id),
- exception=e)
- except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id),
- exception=e)
-
-
-def add_tags(connection, vpn_connection_id, add):
- try:
- connection.create_tags(Resources=[vpn_connection_id],
- Tags=add)
- except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add),
- exception=e)
-
-
-def remove_tags(connection, vpn_connection_id, remove):
- # format tags since they are a list in the format ['tag1', 'tag2', 'tag3']
- key_dict_list = [{'Key': tag} for tag in remove]
- try:
- connection.delete_tags(Resources=[vpn_connection_id],
- Tags=key_dict_list)
- except (BotoCoreError, ClientError) as e:
- raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove),
- exception=e)
-
-
-def check_for_update(connection, module_params, vpn_connection_id):
- """ Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change. """
- tags = module_params.get('tags')
- routes = module_params.get('routes')
- purge_tags = module_params.get('purge_tags')
- purge_routes = module_params.get('purge_routes')
-
- vpn_connection = find_connection(connection, module_params, vpn_connection_id=vpn_connection_id)
- current_attrs = camel_dict_to_snake_dict(vpn_connection)
-
- # Initialize changes dict
- changes = {'tags_to_add': [],
- 'tags_to_remove': [],
- 'routes_to_add': [],
- 'routes_to_remove': []}
-
- # Get changes to tags
- current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get('tags', []), u'key', u'value')
- tags_to_add, changes['tags_to_remove'] = compare_aws_tags(current_tags, tags, purge_tags)
- changes['tags_to_add'] = ansible_dict_to_boto3_tag_list(tags_to_add)
- # Get changes to routes
- if 'Routes' in vpn_connection:
- current_routes = [route['DestinationCidrBlock'] for route in vpn_connection['Routes']]
- if purge_routes:
- changes['routes_to_remove'] = [old_route for old_route in current_routes if old_route not in routes]
- changes['routes_to_add'] = [new_route for new_route in routes if new_route not in current_routes]
-
- # Check if nonmodifiable attributes are attempted to be modified
- for attribute in current_attrs:
- if attribute in ("tags", "routes", "state"):
- continue
- elif attribute == 'options':
- will_be = module_params.get('static_only', None)
- is_now = bool(current_attrs[attribute]['static_routes_only'])
- attribute = 'static_only'
- elif attribute == 'type':
- will_be = module_params.get("connection_type", None)
- is_now = current_attrs[attribute]
- else:
- is_now = current_attrs[attribute]
- will_be = module_params.get(attribute, None)
-
- if will_be is not None and to_text(will_be) != to_text(is_now):
- raise VPNConnectionException(msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN "
- "connection attributes are tags and routes. The value you tried to change it to "
- "is {2}.".format(attribute, is_now, will_be))
-
- return changes
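-
-# Illustration (not part of the original module): check_for_update() returns a
-# dict of pending changes, for example
-#   {'tags_to_add': [{'Key': 'New', 'Value': 'Tag'}],
-#    'tags_to_remove': ['Other'],
-#    'routes_to_add': ['196.168.2.0/24'],
-#    'routes_to_remove': []}
-# which make_changes() then applies to the connection.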
-
-
-def make_changes(connection, vpn_connection_id, changes):
- """ changes is a dict with the keys 'tags_to_add', 'tags_to_remove', 'routes_to_add', 'routes_to_remove',
- the values of which are lists (generated by check_for_update()).
- """
- changed = False
-
- if changes['tags_to_add']:
- changed = True
- add_tags(connection, vpn_connection_id, changes['tags_to_add'])
-
- if changes['tags_to_remove']:
- changed = True
- remove_tags(connection, vpn_connection_id, changes['tags_to_remove'])
-
- if changes['routes_to_add']:
- changed = True
- add_routes(connection, vpn_connection_id, changes['routes_to_add'])
-
- if changes['routes_to_remove']:
- changed = True
- remove_routes(connection, vpn_connection_id, changes['routes_to_remove'])
-
- return changed
-
-
-def get_check_mode_results(connection, module_params, vpn_connection_id=None, current_state=None):
- """ Returns the changes that would be made to a VPN Connection """
- state = module_params.get('state')
- if state == 'absent':
- if vpn_connection_id:
- return True, {}
- else:
- return False, {}
-
- changed = False
- results = {'customer_gateway_configuration': '',
- 'customer_gateway_id': module_params.get('customer_gateway_id'),
- 'vpn_gateway_id': module_params.get('vpn_gateway_id'),
- 'options': {'static_routes_only': module_params.get('static_only')},
- 'routes': [module_params.get('routes')]}
-
- # get combined current tags and tags to set
- present_tags = module_params.get('tags')
- if current_state and 'Tags' in current_state:
- current_tags = boto3_tag_list_to_ansible_dict(current_state['Tags'])
- if module_params.get('purge_tags'):
- if current_tags != present_tags:
- changed = True
- elif current_tags != present_tags:
- if not set(present_tags.keys()) < set(current_tags.keys()):
- changed = True
- # add preexisting tags that new tags didn't overwrite
- present_tags.update((tag, current_tags[tag]) for tag in current_tags if tag not in present_tags)
- elif current_tags.keys() == present_tags.keys() and set(present_tags.values()) != set(current_tags.values()):
- changed = True
- elif module_params.get('tags'):
- changed = True
- if present_tags:
- results['tags'] = present_tags
-
- # get combined current routes and routes to add
- present_routes = module_params.get('routes')
- if current_state and 'Routes' in current_state:
- current_routes = [route['DestinationCidrBlock'] for route in current_state['Routes']]
- if module_params.get('purge_routes'):
- if set(current_routes) != set(present_routes):
- changed = True
- elif set(present_routes) != set(current_routes):
- if not set(present_routes) < set(current_routes):
- changed = True
- present_routes.extend([route for route in current_routes if route not in present_routes])
- elif module_params.get('routes'):
- changed = True
- results['routes'] = [{"destination_cidr_block": cidr, "state": "available"} for cidr in present_routes]
-
- # return the vpn_connection_id if it's known
- if vpn_connection_id:
- results['vpn_connection_id'] = vpn_connection_id
- else:
- changed = True
- results['vpn_connection_id'] = 'vpn-XXXXXXXX'
-
- return changed, results
-
-
-def ensure_present(connection, module_params, check_mode=False):
- """ Creates and adds tags to a VPN connection. If the connection already exists update tags. """
- vpn_connection = find_connection(connection, module_params)
- changed = False
- delay = module_params.get('delay')
- max_attempts = module_params.get('wait_timeout') // delay
-
- # No match but vpn_connection_id was specified.
- if not vpn_connection and module_params.get('vpn_connection_id'):
- raise VPNConnectionException(msg="There is no VPN connection available or pending with that id. Did you delete it?")
-
- # Unique match was found. Check if attributes provided differ.
- elif vpn_connection:
- vpn_connection_id = vpn_connection['VpnConnectionId']
- # check_for_update returns a dict with the keys tags_to_add, tags_to_remove, routes_to_add, routes_to_remove
- changes = check_for_update(connection, module_params, vpn_connection_id)
- if check_mode:
- return get_check_mode_results(connection, module_params, vpn_connection_id, current_state=vpn_connection)
- changed = make_changes(connection, vpn_connection_id, changes)
-
- # No match was found. Create and tag a connection and add routes.
- else:
- changed = True
- if check_mode:
- return get_check_mode_results(connection, module_params)
- vpn_connection = create_connection(connection,
- customer_gateway_id=module_params.get('customer_gateway_id'),
- static_only=module_params.get('static_only'),
- vpn_gateway_id=module_params.get('vpn_gateway_id'),
- connection_type=module_params.get('connection_type'),
- tunnel_options=module_params.get('tunnel_options'),
- max_attempts=max_attempts,
- delay=delay)
- changes = check_for_update(connection, module_params, vpn_connection['VpnConnectionId'])
- make_changes(connection, vpn_connection['VpnConnectionId'], changes)
-
-    # fetch the latest state if a change has been made, and normalize the tags output before returning
- if vpn_connection:
- vpn_connection = find_connection(connection, module_params, vpn_connection['VpnConnectionId'])
- if 'Tags' in vpn_connection:
- vpn_connection['Tags'] = boto3_tag_list_to_ansible_dict(vpn_connection['Tags'])
-
- return changed, vpn_connection
-
-
-def ensure_absent(connection, module_params, check_mode=False):
- """ Deletes a VPN connection if it exists. """
- vpn_connection = find_connection(connection, module_params)
-
- if check_mode:
- return get_check_mode_results(connection, module_params, vpn_connection['VpnConnectionId'] if vpn_connection else None)
-
- delay = module_params.get('delay')
- max_attempts = module_params.get('wait_timeout') // delay
-
- if vpn_connection:
- delete_connection(connection, vpn_connection['VpnConnectionId'], delay=delay, max_attempts=max_attempts)
- changed = True
- else:
- changed = False
-
- return changed, {}
-
-
-def main():
- argument_spec = dict(
- state=dict(type='str', default='present', choices=['present', 'absent']),
- filters=dict(type='dict', default={}),
- vpn_gateway_id=dict(type='str'),
- tags=dict(default={}, type='dict'),
- connection_type=dict(default='ipsec.1', type='str'),
- tunnel_options=dict(no_log=True, type='list', default=[]),
- static_only=dict(default=False, type='bool'),
- customer_gateway_id=dict(type='str'),
- vpn_connection_id=dict(type='str'),
- purge_tags=dict(type='bool', default=False),
- routes=dict(type='list', default=[]),
- purge_routes=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=600),
- delay=dict(type='int', default=15),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True)
- connection = module.client('ec2')
-
- state = module.params.get('state')
- parameters = dict(module.params)
-
- try:
- if state == 'present':
- changed, response = ensure_present(connection, parameters, module.check_mode)
- elif state == 'absent':
- changed, response = ensure_absent(connection, parameters, module.check_mode)
- except VPNConnectionException as e:
- if e.exception:
- module.fail_json_aws(e.exception, msg=e.msg)
- else:
- module.fail_json(msg=e.msg)
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
-
-
-if __name__ == '__main__':
- main()
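
The check-mode path above predicts the tag result without calling AWS: with purge_tags the desired tags replace the current set wholesale, otherwise preexisting tags that the play does not overwrite are carried forward. A simplified standalone sketch of that merge (plain dicts, no AWS calls; the changed computation is deliberately simpler than the module's):

def merge_tags(current, desired, purge):
    # Purge semantics: desired replaces current outright.
    if purge:
        return current != desired, dict(desired)
    # Merge semantics: keep preexisting tags, let desired values win on collisions.
    merged = dict(current)
    merged.update(desired)
    return merged != current, merged

# merge_tags({'Name': 'a', 'Env': 'dev'}, {'Env': 'prod'}, purge=False)
# -> (True, {'Name': 'a', 'Env': 'prod'})
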
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_vpn_info.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_vpn_info.py
deleted file mode 100644
index dbc706fc55..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_vpn_info.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'status': ['preview'],
- 'supported_by': 'community',
- 'metadata_version': '1.1'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_vpc_vpn_info
-short_description: Gather information about VPN Connections in AWS.
-description:
- - Gather information about VPN Connections in AWS.
- - This module was called C(ec2_vpc_vpn_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.6"
-requirements: [ boto3 ]
-author: Madhura Naniwadekar (@Madhura-CSI)
-options:
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters.
- required: false
- type: dict
- vpn_connection_ids:
- description:
-      - Get details of one or more specific VPN connections by ID. This value should be provided as a list.
- required: false
- type: list
- elements: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-- name: Gather information about all vpn connections
- ec2_vpc_vpn_info:
-
-- name: Gather information about a filtered list of vpn connections, based on tags
- ec2_vpc_vpn_info:
- filters:
- "tag:Name": test-connection
- register: vpn_conn_info
-
-- name: Gather information about vpn connections, filtering by VPN gateway ID
- ec2_vpc_vpn_info:
- filters:
- vpn-gateway-id: vgw-cbe66beb
- register: vpn_conn_info
-'''
-
-RETURN = '''
-vpn_connections:
- description: List of one or more VPN Connections.
- returned: always
- type: complex
- contains:
- category:
- description: The category of the VPN connection.
- returned: always
- type: str
- sample: VPN
-    customer_gateway_configuration:
- description: The configuration information for the VPN connection's customer gateway (in the native XML format).
- returned: always
- type: str
- customer_gateway_id:
- description: The ID of the customer gateway at your end of the VPN connection.
- returned: always
- type: str
- sample: cgw-17a53c37
- options:
- description: The VPN connection options.
- returned: always
- type: dict
- sample: {
- "static_routes_only": false
- }
- routes:
- description: List of static routes associated with the VPN connection.
- returned: always
- type: complex
- contains:
- destination_cidr_block:
- description: The CIDR block associated with the local subnet of the customer data center.
- returned: always
- type: str
- sample: 10.0.0.0/16
- state:
- description: The current state of the static route.
- returned: always
- type: str
- sample: available
- state:
- description: The current state of the VPN connection.
- returned: always
- type: str
- sample: available
- tags:
- description: Any tags assigned to the VPN connection.
- returned: always
- type: dict
- sample: {
- "Name": "test-conn"
- }
- type:
- description: The type of VPN connection.
- returned: always
- type: str
- sample: ipsec.1
- vgw_telemetry:
- description: Information about the VPN tunnel.
- returned: always
- type: complex
- contains:
- accepted_route_count:
- description: The number of accepted routes.
- returned: always
- type: int
- sample: 0
- last_status_change:
- description: The date and time of the last change in status.
- returned: always
- type: str
- sample: "2018-02-09T14:35:27+00:00"
- outside_ip_address:
- description: The Internet-routable IP address of the virtual private gateway's outside interface.
- returned: always
- type: str
- sample: 13.127.79.191
- status:
- description: The status of the VPN tunnel.
- returned: always
- type: str
- sample: DOWN
- status_message:
- description: If an error occurs, a description of the error.
- returned: always
- type: str
- sample: IPSEC IS DOWN
- certificate_arn:
- description: The Amazon Resource Name of the virtual private gateway tunnel endpoint certificate.
- returned: when a private certificate is used for authentication
- type: str
- sample: "arn:aws:acm:us-east-1:123456789101:certificate/c544d8ce-20b8-4fff-98b0-example"
- vpn_connection_id:
- description: The ID of the VPN connection.
- returned: always
- type: str
- sample: vpn-f700d5c0
- vpn_gateway_id:
- description: The ID of the virtual private gateway at the AWS side of the VPN connection.
- returned: always
- type: str
- sample: vgw-cbe56bfb
-'''
-
-import json
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
-
-
-def date_handler(obj):
- return obj.isoformat() if hasattr(obj, 'isoformat') else obj
-
-
-def list_vpn_connections(connection, module):
- params = dict()
-
- params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
- params['VpnConnectionIds'] = module.params.get('vpn_connection_ids')
-
- try:
- result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler))
- except ValueError as e:
- module.fail_json_aws(e, msg="Cannot validate JSON data")
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Could not describe customer gateways")
- snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result['VpnConnections']]
- if snaked_vpn_connections:
- for vpn_connection in snaked_vpn_connections:
- vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', []))
- module.exit_json(changed=False, vpn_connections=snaked_vpn_connections)
-
-
-def main():
-
- argument_spec = dict(
- vpn_connection_ids=dict(default=[], type='list'),
- filters=dict(default={}, type='dict')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- mutually_exclusive=[['vpn_connection_ids', 'filters']],
- supports_check_mode=True)
- if module._module._name == 'ec2_vpc_vpn_facts':
- module._module.deprecate("The 'ec2_vpc_vpn_facts' module has been renamed to 'ec2_vpc_vpn_info'", version='2.13')
-
- connection = module.client('ec2')
-
- list_vpn_connections(connection, module)
-
-
-if __name__ == '__main__':
- main()
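
The json.loads(json.dumps(..., default=date_handler)) round-trip in list_vpn_connections is a common trick for coercing the datetime objects in a boto3 response into ISO 8601 strings before further processing. A self-contained sketch (the response dict is a made-up stand-in for describe_vpn_connections output):

import json
from datetime import datetime, timezone

def date_handler(obj):
    # json.dumps calls this for anything it cannot serialize; datetimes become ISO 8601 text.
    return obj.isoformat() if hasattr(obj, 'isoformat') else obj

response = {'VpnConnections': [{'VpnConnectionId': 'vpn-0123456789abcdef0',
                                'CreationTime': datetime(2020, 3, 9, tzinfo=timezone.utc)}]}
cleaned = json.loads(json.dumps(response, default=date_handler))
# cleaned['VpnConnections'][0]['CreationTime'] == '2020-03-09T00:00:00+00:00'
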
diff --git a/lib/ansible/modules/cloud/amazon/ec2_win_password.py b/lib/ansible/modules/cloud/amazon/ec2_win_password.py
deleted file mode 100644
index 4cb0c5003d..0000000000
--- a/lib/ansible/modules/cloud/amazon/ec2_win_password.py
+++ /dev/null
@@ -1,208 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ec2_win_password
-short_description: Gets the default administrator password for EC2 Windows instances
-description:
- - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. C(i-XXXXXXX)).
- - This module has a dependency on python-boto.
-version_added: "2.0"
-author: "Rick Mendes (@rickmendes)"
-options:
- instance_id:
- description:
- - The instance id to get the password data from.
- required: true
- type: str
- key_file:
- description:
- - Path to the file containing the key pair used on the instance.
- - Conflicts with I(key_data).
- required: false
- type: path
- key_data:
- version_added: "2.8"
- description:
- - The private key (usually stored in vault).
-      - Conflicts with I(key_file).
- required: false
- type: str
- key_passphrase:
- version_added: "2.0"
- description:
-      - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to
-        convert your password-protected keys if they do not use DES or 3DES, for example C(openssl rsa -in current_key -out new_key -des3).
- type: str
- wait:
- version_added: "2.0"
- description:
- - Whether or not to wait for the password to be available before returning.
- type: bool
- default: false
- wait_timeout:
- version_added: "2.0"
- description:
- - Number of seconds to wait before giving up.
- default: 120
- type: int
-
-extends_documentation_fragment:
- - aws
- - ec2
-
-requirements:
- - cryptography
-
-notes:
- - As of Ansible 2.4, this module requires the python cryptography module rather than the
- older pycrypto module.
-'''
-
-EXAMPLES = '''
-# Example of getting a password
-- name: get the Administrator password
- ec2_win_password:
- profile: my-boto-profile
- instance_id: i-XXXXXX
- region: us-east-1
- key_file: "~/aws-creds/my_test_key.pem"
-
-# Example of getting a password using a variable
-- name: get the Administrator password
- ec2_win_password:
- profile: my-boto-profile
- instance_id: i-XXXXXX
- region: us-east-1
- key_data: "{{ ec2_private_key }}"
-
-# Example of getting a password with a password protected key
-- name: get the Administrator password
- ec2_win_password:
- profile: my-boto-profile
- instance_id: i-XXXXXX
- region: us-east-1
- key_file: "~/aws-creds/my_protected_test_key.pem"
- key_passphrase: "secret"
-
-# Example of waiting for a password
-- name: get the Administrator password
- ec2_win_password:
- profile: my-boto-profile
- instance_id: i-XXXXXX
- region: us-east-1
- key_file: "~/aws-creds/my_test_key.pem"
- wait: yes
- wait_timeout: 45
-'''
-
-import datetime
-import time
-from base64 import b64decode
-
-try:
- from cryptography.hazmat.backends import default_backend
- from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
- from cryptography.hazmat.primitives.serialization import load_pem_private_key
- HAS_CRYPTOGRAPHY = True
-except ImportError:
- HAS_CRYPTOGRAPHY = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
-from ansible.module_utils._text import to_bytes
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- instance_id=dict(required=True),
- key_file=dict(required=False, default=None, type='path'),
- key_passphrase=dict(no_log=True, default=None, required=False),
- key_data=dict(no_log=True, default=None, required=False),
- wait=dict(type='bool', default=False, required=False),
- wait_timeout=dict(default=120, required=False, type='int'),
- )
- )
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO:
- module.fail_json(msg='Boto required for this module.')
-
- if not HAS_CRYPTOGRAPHY:
- module.fail_json(msg='cryptography package required for this module.')
-
- instance_id = module.params.get('instance_id')
- key_file = module.params.get('key_file')
- key_data = module.params.get('key_data')
- if module.params.get('key_passphrase') is None:
- b_key_passphrase = None
- else:
- b_key_passphrase = to_bytes(module.params.get('key_passphrase'), errors='surrogate_or_strict')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- ec2 = ec2_connect(module)
-
- if wait:
- start = datetime.datetime.now()
- end = start + datetime.timedelta(seconds=wait_timeout)
-
- while datetime.datetime.now() < end:
- data = ec2.get_password_data(instance_id)
- decoded = b64decode(data)
- if not decoded:
- time.sleep(5)
- else:
- break
- else:
- data = ec2.get_password_data(instance_id)
- decoded = b64decode(data)
-
- if wait and datetime.datetime.now() >= end:
- module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)
-
- if key_file is not None and key_data is None:
- try:
- with open(key_file, 'rb') as f:
- key = load_pem_private_key(f.read(), b_key_passphrase, default_backend())
- except IOError as e:
- # Handle bad files
- module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
- except (ValueError, TypeError) as e:
- # Handle issues loading key
- module.fail_json(msg="unable to parse key file")
-    elif key_data is not None and key_file is None:
-        try:
-            key = load_pem_private_key(to_bytes(key_data, errors='surrogate_or_strict'), b_key_passphrase, default_backend())
-        except (ValueError, TypeError) as e:
-            module.fail_json(msg="unable to parse key data")
-
- try:
- decrypted = key.decrypt(decoded, PKCS1v15())
- except ValueError as e:
- decrypted = None
-
- if decrypted is None:
- module.exit_json(win_password='', changed=False)
- else:
- if wait:
- elapsed = datetime.datetime.now() - start
- module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
- else:
- module.exit_json(win_password=decrypted, changed=True)
-
-
-if __name__ == '__main__':
- main()
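
The decryption step is the heart of this module: EC2 hands back the Administrator password base64-encoded and RSA-encrypted with the instance's key pair, so recovery is a b64decode followed by a PKCS#1 v1.5 decrypt with the private key. A minimal sketch using the same cryptography primitives (the key path and password_data value are placeholders):

from base64 import b64decode

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from cryptography.hazmat.primitives.serialization import load_pem_private_key

def decrypt_password_data(password_data, key_pem, passphrase=None):
    # password_data: base64 text from get_password_data(); key_pem: PEM-encoded private key bytes.
    key = load_pem_private_key(key_pem, passphrase, default_backend())
    return key.decrypt(b64decode(password_data), PKCS1v15())

# with open('my_test_key.pem', 'rb') as f:  # placeholder path
#     print(decrypt_password_data(password_data, f.read()))
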
diff --git a/lib/ansible/modules/cloud/amazon/ecs_attribute.py b/lib/ansible/modules/cloud/amazon/ecs_attribute.py
deleted file mode 100644
index 3d5ec3646b..0000000000
--- a/lib/ansible/modules/cloud/amazon/ecs_attribute.py
+++ /dev/null
@@ -1,311 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: ecs_attribute
-short_description: Manage ECS container instance attributes
-description:
- - Create, update or delete ECS container instance attributes.
-version_added: "2.4"
-author: Andrej Svenke (@anryko)
-requirements: [ botocore, boto3 ]
-options:
- cluster:
- description:
- - The short name or full Amazon Resource Name (ARN) of the cluster
- that contains the resource to apply attributes.
- required: true
- type: str
- state:
- description:
- - The desired state of the attributes.
- required: false
- default: present
- choices: ['present', 'absent']
- type: str
- attributes:
- description:
- - List of attributes.
- required: true
- type: list
- elements: dict
- suboptions:
- name:
- description:
- - The name of the attribute. Up to 128 letters (uppercase and lowercase),
- numbers, hyphens, underscores, and periods are allowed.
- required: true
- type: str
- value:
- description:
- - The value of the attribute. Up to 128 letters (uppercase and lowercase),
- numbers, hyphens, underscores, periods, at signs (@), forward slashes, colons,
- and spaces are allowed.
- required: false
- type: str
- ec2_instance_id:
- description:
- - EC2 instance ID of ECS cluster container instance.
- required: true
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Set attributes
-- ecs_attribute:
- state: present
- cluster: test-cluster
- ec2_instance_id: "{{ ec2_id }}"
- attributes:
- - flavor: test
- - migrated
- delegate_to: localhost
-
-# Delete attributes
-- ecs_attribute:
- state: absent
- cluster: test-cluster
- ec2_instance_id: "{{ ec2_id }}"
- attributes:
- - flavor: test
- - migrated
- delegate_to: localhost
-'''
-
-RETURN = '''
-attributes:
- description: attributes
- type: complex
- returned: always
- contains:
- cluster:
- description: cluster name
- type: str
- ec2_instance_id:
- description: ec2 instance id of ecs container instance
- type: str
- attributes:
- description: list of attributes
- type: list
- elements: dict
- contains:
- name:
- description: name of the attribute
- type: str
- value:
- description: value of the attribute
- returned: if present
- type: str
-'''
-
-try:
- import boto3
- from botocore.exceptions import ClientError, EndpointConnectionError
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
-
-
-class EcsAttributes(object):
- """Handles ECS Cluster Attribute"""
-
- def __init__(self, module, attributes):
- self.module = module
- self.attributes = attributes if self._validate_attrs(attributes) else self._parse_attrs(attributes)
-
- def __bool__(self):
- return bool(self.attributes)
-
- __nonzero__ = __bool__
-
- def __iter__(self):
- return iter(self.attributes)
-
- @staticmethod
- def _validate_attrs(attrs):
- return all(tuple(attr.keys()) in (('name', 'value'), ('value', 'name')) for attr in attrs)
-
- def _parse_attrs(self, attrs):
- attrs_parsed = []
- for attr in attrs:
- if isinstance(attr, dict):
- if len(attr) != 1:
- self.module.fail_json(msg="Incorrect attribute format - %s" % str(attr))
- name, value = list(attr.items())[0]
- attrs_parsed.append({'name': name, 'value': value})
- elif isinstance(attr, str):
- attrs_parsed.append({'name': attr, 'value': None})
- else:
- self.module.fail_json(msg="Incorrect attributes format - %s" % str(attrs))
-
- return attrs_parsed
-
- def _setup_attr_obj(self, ecs_arn, name, value=None, skip_value=False):
- attr_obj = {'targetType': 'container-instance',
- 'targetId': ecs_arn,
- 'name': name}
- if not skip_value and value is not None:
- attr_obj['value'] = value
-
- return attr_obj
-
- def get_for_ecs_arn(self, ecs_arn, skip_value=False):
- """
- Returns list of attribute dicts ready to be passed to boto3
- attributes put/delete methods.
- """
- return [self._setup_attr_obj(ecs_arn, skip_value=skip_value, **attr) for attr in self.attributes]
-
- def diff(self, attrs):
- """
- Returns EcsAttributes Object containing attributes which are present
- in self but are absent in passed attrs (EcsAttributes Object).
- """
- attrs_diff = [attr for attr in self.attributes if attr not in attrs]
- return EcsAttributes(self.module, attrs_diff)
-
-
-class Ec2EcsInstance(object):
- """Handle ECS Cluster Remote Operations"""
-
- def __init__(self, module, cluster, ec2_id):
- self.module = module
- self.cluster = cluster
- self.ec2_id = ec2_id
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- if not region:
- module.fail_json(msg=("Region must be specified as a parameter,"
- " in EC2_REGION or AWS_REGION environment"
- " variables or in boto configuration file"))
- self.ecs = boto3_conn(module, conn_type='client', resource='ecs',
- region=region, endpoint=ec2_url, **aws_connect_kwargs)
-
- self.ecs_arn = self._get_ecs_arn()
-
- def _get_ecs_arn(self):
- try:
- ecs_instances_arns = self.ecs.list_container_instances(cluster=self.cluster)['containerInstanceArns']
- ec2_instances = self.ecs.describe_container_instances(cluster=self.cluster,
- containerInstances=ecs_instances_arns)['containerInstances']
- except (ClientError, EndpointConnectionError) as e:
- self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e))
-
- try:
- ecs_arn = next(inst for inst in ec2_instances
- if inst['ec2InstanceId'] == self.ec2_id)['containerInstanceArn']
- except StopIteration:
- self.module.fail_json(msg="EC2 instance Id not found in ECS cluster - %s" % str(self.cluster))
-
- return ecs_arn
-
- def attrs_put(self, attrs):
- """Puts attributes on ECS container instance"""
- try:
- self.ecs.put_attributes(cluster=self.cluster,
- attributes=attrs.get_for_ecs_arn(self.ecs_arn))
- except ClientError as e:
- self.module.fail_json(msg=str(e))
-
- def attrs_delete(self, attrs):
- """Deletes attributes from ECS container instance."""
- try:
- self.ecs.delete_attributes(cluster=self.cluster,
- attributes=attrs.get_for_ecs_arn(self.ecs_arn, skip_value=True))
- except ClientError as e:
- self.module.fail_json(msg=str(e))
-
- def attrs_get_by_name(self, attrs):
- """
- Returns EcsAttributes object containing attributes from ECS container instance with names
- matching to attrs.attributes (EcsAttributes Object).
- """
- attr_objs = [{'targetType': 'container-instance', 'attributeName': attr['name']}
- for attr in attrs]
-
- try:
- matched_ecs_targets = [attr_found for attr_obj in attr_objs
- for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)['attributes']]
- except ClientError as e:
- self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e))
-
- matched_objs = [target for target in matched_ecs_targets
- if target['targetId'] == self.ecs_arn]
-
- results = [{'name': match['name'], 'value': match.get('value', None)}
- for match in matched_objs]
-
- return EcsAttributes(self.module, results)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(required=False, default='present', choices=['present', 'absent']),
- cluster=dict(required=True, type='str'),
- ec2_instance_id=dict(required=True, type='str'),
- attributes=dict(required=True, type='list'),
- ))
-
- required_together = [['cluster', 'ec2_instance_id', 'attributes']]
-
- module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True,
- required_together=required_together)
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required.')
-
- cluster = module.params['cluster']
- ec2_instance_id = module.params['ec2_instance_id']
- attributes = module.params['attributes']
-
- conti = Ec2EcsInstance(module, cluster, ec2_instance_id)
- attrs = EcsAttributes(module, attributes)
-
- results = {'changed': False,
- 'attributes': [
- {'cluster': cluster,
- 'ec2_instance_id': ec2_instance_id,
- 'attributes': attributes}
- ]}
-
- attrs_present = conti.attrs_get_by_name(attrs)
-
- if module.params['state'] == 'present':
- attrs_diff = attrs.diff(attrs_present)
- if not attrs_diff:
- module.exit_json(**results)
-
- conti.attrs_put(attrs_diff)
- results['changed'] = True
-
- elif module.params['state'] == 'absent':
- if not attrs_present:
- module.exit_json(**results)
-
- conti.attrs_delete(attrs_present)
- results['changed'] = True
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
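
Idempotency in this module rests on EcsAttributes.diff(): only desired attributes missing from the instance are put, and only attributes actually present are deleted. The same list-of-dicts set difference can be sketched without the class machinery:

def attrs_diff(desired, present):
    # Attributes (name/value dicts) in desired that the instance does not already carry.
    return [attr for attr in desired if attr not in present]

desired = [{'name': 'flavor', 'value': 'test'}, {'name': 'migrated', 'value': None}]
present = [{'name': 'flavor', 'value': 'test'}]
# attrs_diff(desired, present) -> [{'name': 'migrated', 'value': None}]
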
diff --git a/lib/ansible/modules/cloud/amazon/ecs_cluster.py b/lib/ansible/modules/cloud/amazon/ecs_cluster.py
deleted file mode 100644
index 355e74551e..0000000000
--- a/lib/ansible/modules/cloud/amazon/ecs_cluster.py
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ecs_cluster
-short_description: Create or terminate ECS clusters.
-notes:
- - When deleting a cluster, the information returned is the state of the cluster prior to deletion.
-  - With I(state=has_instances), the module waits for the cluster to have instances registered to it.
-description:
-  - Creates or terminates ECS clusters.
-version_added: "2.0"
-author: Mark Chance (@Java1Guy)
-requirements: [ boto3 ]
-options:
- state:
- description:
- - The desired state of the cluster.
- required: true
- choices: ['present', 'absent', 'has_instances']
- type: str
- name:
- description:
- - The cluster name.
- required: true
- type: str
- delay:
- description:
- - Number of seconds to wait.
- required: false
- type: int
- default: 10
- repeat:
- description:
- - The number of times to wait for the cluster to have an instance.
- required: false
- type: int
- default: 10
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Cluster creation
-- ecs_cluster:
- name: default
- state: present
-
-# Cluster deletion
-- ecs_cluster:
- name: default
- state: absent
-
-- name: Wait for register
- ecs_cluster:
- name: "{{ new_cluster }}"
- state: has_instances
- delay: 10
- repeat: 10
- register: task_output
-
-'''
-RETURN = '''
-activeServicesCount:
-    description: how many services are active in this cluster (0 for a new cluster)
-    returned: always
-    type: int
-clusterArn:
-    description: the ARN of the cluster just created
-    type: str
-    returned: always
-    sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok
-clusterName:
-    description: name of the cluster just created (should match the input argument)
-    type: str
-    returned: always
-    sample: test-cluster-mfshcdok
-pendingTasksCount:
-    description: how many tasks are waiting to run in this cluster (0 for a new cluster)
-    returned: always
-    type: int
-registeredContainerInstancesCount:
-    description: how many container instances are available in this cluster (0 for a new cluster)
-    returned: always
-    type: int
-runningTasksCount:
-    description: how many tasks are running in this cluster (0 for a new cluster)
-    returned: always
-    type: int
-status:
- description: the status of the new cluster
- returned: always
- type: str
- sample: ACTIVE
-'''
-import time
-
-try:
- import boto3
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
-
-
-class EcsClusterManager:
- """Handles ECS Clusters"""
-
- def __init__(self, module):
- self.module = module
-
- # self.ecs = boto3.client('ecs')
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- self.ecs = boto3_conn(module, conn_type='client', resource='ecs',
- region=region, endpoint=ec2_url, **aws_connect_kwargs)
-
- def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
- for c in array_of_clusters:
- if c[field_name].endswith(cluster_name):
- return c
- return None
-
- def describe_cluster(self, cluster_name):
- response = self.ecs.describe_clusters(clusters=[
- cluster_name
- ])
- if len(response['failures']) > 0:
- c = self.find_in_array(response['failures'], cluster_name, 'arn')
- if c and c['reason'] == 'MISSING':
- return None
- # fall thru and look through found ones
- if len(response['clusters']) > 0:
- c = self.find_in_array(response['clusters'], cluster_name)
- if c:
- return c
- raise Exception("Unknown problem describing cluster %s." % cluster_name)
-
- def create_cluster(self, clusterName='default'):
- response = self.ecs.create_cluster(clusterName=clusterName)
- return response['cluster']
-
- def delete_cluster(self, clusterName):
- return self.ecs.delete_cluster(cluster=clusterName)
-
-
-def main():
-
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(required=True, choices=['present', 'absent', 'has_instances']),
- name=dict(required=True, type='str'),
- delay=dict(required=False, type='int', default=10),
- repeat=dict(required=False, type='int', default=10)
- ))
- required_together = [['state', 'name']]
-
- module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required.')
-
- cluster_mgr = EcsClusterManager(module)
- try:
- existing = cluster_mgr.describe_cluster(module.params['name'])
- except Exception as e:
- module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))
-
- results = dict(changed=False)
- if module.params['state'] == 'present':
- if existing and 'status' in existing and existing['status'] == "ACTIVE":
- results['cluster'] = existing
- else:
- if not module.check_mode:
- # doesn't exist. create it.
- results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
- results['changed'] = True
-
- # delete the cluster
- elif module.params['state'] == 'absent':
- if not existing:
- pass
- else:
- # it exists, so we should delete it and mark changed.
- # return info about the cluster deleted
- results['cluster'] = existing
- if 'status' in existing and existing['status'] == "INACTIVE":
- results['changed'] = False
- else:
- if not module.check_mode:
- cluster_mgr.delete_cluster(module.params['name'])
- results['changed'] = True
- elif module.params['state'] == 'has_instances':
- if not existing:
- module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
- return
-        # the cluster exists; poll until it has instances registered,
-        # failing if none appear within the configured retries.
- delay = module.params['delay']
- repeat = module.params['repeat']
- time.sleep(delay)
- count = 0
- for i in range(repeat):
- existing = cluster_mgr.describe_cluster(module.params['name'])
- count = existing['registeredContainerInstancesCount']
- if count > 0:
- results['changed'] = True
- break
- time.sleep(delay)
-            if count == 0 and i == repeat - 1:
- module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
- return
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
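
The has_instances state is a fixed-interval poll: sleep delay seconds, re-describe the cluster, and give up after repeat rounds. Extracted into a generic helper (names are illustrative, and unlike the loop above this sketch checks before the first sleep):

import time

def wait_for(predicate, delay=10, repeat=10):
    # Poll predicate() every delay seconds; True once it holds, False after repeat tries.
    for _ in range(repeat):
        if predicate():
            return True
        time.sleep(delay)
    return False

# wait_for(lambda: describe_cluster()['registeredContainerInstancesCount'] > 0)
# where describe_cluster() is a stand-in for EcsClusterManager.describe_cluster
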
diff --git a/lib/ansible/modules/cloud/amazon/ecs_ecr.py b/lib/ansible/modules/cloud/amazon/ecs_ecr.py
deleted file mode 100644
index 3eb70c6d3c..0000000000
--- a/lib/ansible/modules/cloud/amazon/ecs_ecr.py
+++ /dev/null
@@ -1,531 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: ecs_ecr
-version_added: "2.3"
-short_description: Manage Elastic Container Registry repositories
-description:
- - Manage Elastic Container Registry repositories.
-requirements: [ boto3 ]
-options:
- name:
- description:
- - The name of the repository.
- required: true
- type: str
- registry_id:
- description:
- - AWS account id associated with the registry.
- - If not specified, the default registry is assumed.
- required: false
- type: str
- policy:
- description:
- - JSON or dict that represents the new policy.
- required: false
- type: json
- force_set_policy:
- description:
- - If I(force_set_policy=false), it prevents setting a policy that would prevent you from
- setting another policy in the future.
- required: false
- default: false
- type: bool
- purge_policy:
- description:
- - If yes, remove the policy from the repository.
-      - Alias C(delete_policy) has been deprecated and will be removed in Ansible 2.14.
- required: false
- default: false
- type: bool
- aliases: [ delete_policy ]
- image_tag_mutability:
- description:
-      - Configure whether the repository should be mutable (i.e. an existing tag can be overwritten) or not.
- required: false
- choices: [mutable, immutable]
- default: 'mutable'
- version_added: '2.10'
- type: str
- lifecycle_policy:
- description:
-      - JSON or dict that represents the new lifecycle policy.
- required: false
- version_added: '2.10'
- type: json
- purge_lifecycle_policy:
- description:
-      - If yes, remove the lifecycle policy from the repository.
- required: false
- default: false
- version_added: '2.10'
- type: bool
- state:
- description:
- - Create or destroy the repository.
- required: false
- choices: [present, absent]
- default: 'present'
- type: str
-author:
- - David M. Lee (@leedm777)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# If the repository does not exist, it is created. If it does exist, this
-# does not affect any policies already on it.
-- name: ecr-repo
- ecs_ecr: name=super/cool
-
-- name: destroy-ecr-repo
- ecs_ecr: name=old/busted state=absent
-
-- name: Cross account ecr-repo
- ecs_ecr: registry_id=999999999999 name=cross/account
-
-- name: set-policy as object
- ecs_ecr:
- name: needs-policy-object
- policy:
- Version: '2008-10-17'
- Statement:
- - Sid: read-only
- Effect: Allow
- Principal:
- AWS: '{{ read_only_arn }}'
- Action:
- - ecr:GetDownloadUrlForLayer
- - ecr:BatchGetImage
- - ecr:BatchCheckLayerAvailability
-
-- name: set-policy as string
- ecs_ecr:
- name: needs-policy-string
- policy: "{{ lookup('template', 'policy.json.j2') }}"
-
-- name: delete-policy
- ecs_ecr:
- name: needs-no-policy
- purge_policy: yes
-
-- name: create immutable ecr-repo
- ecs_ecr:
- name: super/cool
- image_tag_mutability: immutable
-
-- name: set-lifecycle-policy
- ecs_ecr:
- name: needs-lifecycle-policy
- lifecycle_policy:
- rules:
- - rulePriority: 1
- description: new policy
- selection:
- tagStatus: untagged
- countType: sinceImagePushed
- countUnit: days
- countNumber: 365
- action:
- type: expire
-
-- name: purge-lifecycle-policy
- ecs_ecr:
- name: needs-no-lifecycle-policy
- purge_lifecycle_policy: true
-'''
-
-RETURN = '''
-state:
- type: str
- description: The asserted state of the repository (present, absent)
- returned: always
-created:
- type: bool
- description: If true, the repository was created
- returned: always
-name:
- type: str
- description: The name of the repository
- returned: "when state == 'absent'"
-repository:
- type: dict
- description: The created or updated repository
- returned: "when state == 'present'"
- sample:
- createdAt: '2017-01-17T08:41:32-06:00'
- registryId: '999999999999'
- repositoryArn: arn:aws:ecr:us-east-1:999999999999:repository/ecr-test-1484664090
- repositoryName: ecr-test-1484664090
- repositoryUri: 999999999999.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090
-'''
-
-import json
-import traceback
-
-try:
- from botocore.exceptions import ClientError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import boto_exception, compare_policies, sort_json_policy_dict
-from ansible.module_utils.six import string_types
-
-
-def build_kwargs(registry_id):
- """
- Builds a kwargs dict which may contain the optional registryId.
-
- :param registry_id: Optional string containing the registryId.
- :return: kwargs dict with registryId, if given
- """
- if not registry_id:
- return dict()
- else:
- return dict(registryId=registry_id)
-
-
-class EcsEcr:
- def __init__(self, module):
- self.ecr = module.client('ecr')
- self.sts = module.client('sts')
- self.check_mode = module.check_mode
- self.changed = False
- self.skipped = False
-
- def get_repository(self, registry_id, name):
- try:
- res = self.ecr.describe_repositories(
- repositoryNames=[name], **build_kwargs(registry_id))
- repos = res.get('repositories')
- return repos and repos[0]
- except ClientError as err:
- code = err.response['Error'].get('Code', 'Unknown')
- if code == 'RepositoryNotFoundException':
- return None
- raise
-
- def get_repository_policy(self, registry_id, name):
- try:
- res = self.ecr.get_repository_policy(
- repositoryName=name, **build_kwargs(registry_id))
- text = res.get('policyText')
- return text and json.loads(text)
- except ClientError as err:
- code = err.response['Error'].get('Code', 'Unknown')
- if code == 'RepositoryPolicyNotFoundException':
- return None
- raise
-
- def create_repository(self, registry_id, name, image_tag_mutability):
- if registry_id:
- default_registry_id = self.sts.get_caller_identity().get('Account')
- if registry_id != default_registry_id:
-                raise Exception('Cannot create repository in registry {0}. '
-                                'Would be created in {1} instead.'.format(registry_id, default_registry_id))
-
- if not self.check_mode:
- repo = self.ecr.create_repository(
- repositoryName=name,
- imageTagMutability=image_tag_mutability).get('repository')
- self.changed = True
- return repo
- else:
- self.skipped = True
- return dict(repositoryName=name)
-
- def set_repository_policy(self, registry_id, name, policy_text, force):
- if not self.check_mode:
- policy = self.ecr.set_repository_policy(
- repositoryName=name,
- policyText=policy_text,
- force=force,
- **build_kwargs(registry_id))
- self.changed = True
- return policy
- else:
- self.skipped = True
- if self.get_repository(registry_id, name) is None:
- printable = name
- if registry_id:
- printable = '{0}:{1}'.format(registry_id, name)
- raise Exception(
- 'could not find repository {0}'.format(printable))
- return
-
- def delete_repository(self, registry_id, name):
- if not self.check_mode:
- repo = self.ecr.delete_repository(
- repositoryName=name, **build_kwargs(registry_id))
- self.changed = True
- return repo
- else:
- repo = self.get_repository(registry_id, name)
- if repo:
- self.skipped = True
- return repo
- return None
-
- def delete_repository_policy(self, registry_id, name):
- if not self.check_mode:
- policy = self.ecr.delete_repository_policy(
- repositoryName=name, **build_kwargs(registry_id))
- self.changed = True
- return policy
- else:
- policy = self.get_repository_policy(registry_id, name)
- if policy:
- self.skipped = True
- return policy
- return None
-
- def put_image_tag_mutability(self, registry_id, name, new_mutability_configuration):
- repo = self.get_repository(registry_id, name)
- current_mutability_configuration = repo.get('imageTagMutability')
-
- if current_mutability_configuration != new_mutability_configuration:
- if not self.check_mode:
- self.ecr.put_image_tag_mutability(
- repositoryName=name,
- imageTagMutability=new_mutability_configuration,
- **build_kwargs(registry_id))
- else:
- self.skipped = True
- self.changed = True
-
- repo['imageTagMutability'] = new_mutability_configuration
- return repo
-
- def get_lifecycle_policy(self, registry_id, name):
- try:
- res = self.ecr.get_lifecycle_policy(
- repositoryName=name, **build_kwargs(registry_id))
- text = res.get('lifecyclePolicyText')
- return text and json.loads(text)
- except ClientError as err:
- code = err.response['Error'].get('Code', 'Unknown')
- if code == 'LifecyclePolicyNotFoundException':
- return None
- raise
-
- def put_lifecycle_policy(self, registry_id, name, policy_text):
- if not self.check_mode:
- policy = self.ecr.put_lifecycle_policy(
- repositoryName=name,
- lifecyclePolicyText=policy_text,
- **build_kwargs(registry_id))
- self.changed = True
- return policy
- else:
- self.skipped = True
- if self.get_repository(registry_id, name) is None:
- printable = name
- if registry_id:
- printable = '{0}:{1}'.format(registry_id, name)
- raise Exception(
- 'could not find repository {0}'.format(printable))
- return
-
- def purge_lifecycle_policy(self, registry_id, name):
- if not self.check_mode:
- policy = self.ecr.delete_lifecycle_policy(
- repositoryName=name, **build_kwargs(registry_id))
- self.changed = True
- return policy
- else:
- policy = self.get_lifecycle_policy(registry_id, name)
- if policy:
- self.skipped = True
- return policy
- return None
-
-
-def sort_lists_of_strings(policy):
- for statement_index in range(0, len(policy.get('Statement', []))):
- for key in policy['Statement'][statement_index]:
- value = policy['Statement'][statement_index][key]
- if isinstance(value, list) and all(isinstance(item, string_types) for item in value):
- policy['Statement'][statement_index][key] = sorted(value)
- return policy
-
-
-def run(ecr, params):
-    # type: (EcsEcr, dict) -> Tuple[bool, dict]
- result = {}
- try:
- name = params['name']
- state = params['state']
- policy_text = params['policy']
- purge_policy = params['purge_policy']
- registry_id = params['registry_id']
- force_set_policy = params['force_set_policy']
- image_tag_mutability = params['image_tag_mutability'].upper()
- lifecycle_policy_text = params['lifecycle_policy']
- purge_lifecycle_policy = params['purge_lifecycle_policy']
-
- # Parse policies, if they are given
- try:
- policy = policy_text and json.loads(policy_text)
- except ValueError:
- result['policy'] = policy_text
- result['msg'] = 'Could not parse policy'
- return False, result
-
- try:
- lifecycle_policy = \
- lifecycle_policy_text and json.loads(lifecycle_policy_text)
- except ValueError:
- result['lifecycle_policy'] = lifecycle_policy_text
- result['msg'] = 'Could not parse lifecycle_policy'
- return False, result
-
- result['state'] = state
- result['created'] = False
-
- repo = ecr.get_repository(registry_id, name)
-
- if state == 'present':
- result['created'] = False
-
- if not repo:
- repo = ecr.create_repository(registry_id, name, image_tag_mutability)
- result['changed'] = True
- result['created'] = True
- else:
- repo = ecr.put_image_tag_mutability(registry_id, name, image_tag_mutability)
- result['repository'] = repo
-
- if purge_lifecycle_policy:
- original_lifecycle_policy = \
- ecr.get_lifecycle_policy(registry_id, name)
-
- result['lifecycle_policy'] = None
-
- if original_lifecycle_policy:
- ecr.purge_lifecycle_policy(registry_id, name)
- result['changed'] = True
-
- elif lifecycle_policy_text is not None:
- try:
- lifecycle_policy = sort_json_policy_dict(lifecycle_policy)
- result['lifecycle_policy'] = lifecycle_policy
-
- original_lifecycle_policy = ecr.get_lifecycle_policy(
- registry_id, name)
-
- if original_lifecycle_policy:
- original_lifecycle_policy = sort_json_policy_dict(
- original_lifecycle_policy)
-
- if original_lifecycle_policy != lifecycle_policy:
- ecr.put_lifecycle_policy(registry_id, name,
- lifecycle_policy_text)
- result['changed'] = True
- except Exception:
- # Some failure w/ the policy. It's helpful to know what the
- # policy is.
- result['lifecycle_policy'] = lifecycle_policy_text
- raise
-
- if purge_policy:
- original_policy = ecr.get_repository_policy(registry_id, name)
-
- result['policy'] = None
-
- if original_policy:
- ecr.delete_repository_policy(registry_id, name)
- result['changed'] = True
-
- elif policy_text is not None:
- try:
- # Sort any lists containing only string types
- policy = sort_lists_of_strings(policy)
-
- result['policy'] = policy
-
- original_policy = ecr.get_repository_policy(
- registry_id, name)
- if original_policy:
- original_policy = sort_lists_of_strings(original_policy)
-
- if compare_policies(original_policy, policy):
- ecr.set_repository_policy(
- registry_id, name, policy_text, force_set_policy)
- result['changed'] = True
- except Exception:
- # Some failure w/ the policy. It's helpful to know what the
- # policy is.
- result['policy'] = policy_text
- raise
-
- elif state == 'absent':
- result['name'] = name
- if repo:
- ecr.delete_repository(registry_id, name)
- result['changed'] = True
-
- except Exception as err:
- msg = str(err)
- if isinstance(err, ClientError):
- msg = boto_exception(err)
- result['msg'] = msg
- result['exception'] = traceback.format_exc()
- return False, result
-
- if ecr.skipped:
- result['skipped'] = True
-
- if ecr.changed:
- result['changed'] = True
-
- return True, result
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- registry_id=dict(required=False),
- state=dict(required=False, choices=['present', 'absent'],
- default='present'),
- force_set_policy=dict(required=False, type='bool', default=False),
- policy=dict(required=False, type='json'),
- image_tag_mutability=dict(required=False, choices=['mutable', 'immutable'],
- default='mutable'),
- purge_policy=dict(required=False, type='bool', aliases=['delete_policy'],
- deprecated_aliases=[dict(name='delete_policy', version='2.14')]),
- lifecycle_policy=dict(required=False, type='json'),
- purge_lifecycle_policy=dict(required=False, type='bool')
- )
- mutually_exclusive = [
- ['policy', 'purge_policy'],
- ['lifecycle_policy', 'purge_lifecycle_policy']]
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive)
-
- ecr = EcsEcr(module)
- passed, result = run(ecr, module.params)
-
- if passed:
- module.exit_json(**result)
- else:
- module.fail_json(**result)
-
-
-if __name__ == '__main__':
- main()
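
A subtle point in run() is that AWS may return the members of a policy statement's lists in any order, so both the stored and the desired policy are passed through sort_lists_of_strings before comparison; without that normalization every run would report a change. A compact restatement for Python 3 (str in place of six's string_types):

def sort_lists_of_strings(policy):
    # Sort every all-string list inside each statement so comparison is order-insensitive.
    for statement in policy.get('Statement', []):
        for key, value in statement.items():
            if isinstance(value, list) and all(isinstance(v, str) for v in value):
                statement[key] = sorted(value)
    return policy

a = {'Statement': [{'Action': ['ecr:BatchGetImage', 'ecr:GetDownloadUrlForLayer']}]}
b = {'Statement': [{'Action': ['ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage']}]}
# sort_lists_of_strings(a) == sort_lists_of_strings(b) -> True
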
diff --git a/lib/ansible/modules/cloud/amazon/ecs_service.py b/lib/ansible/modules/cloud/amazon/ecs_service.py
deleted file mode 100644
index 11e5bb3f38..0000000000
--- a/lib/ansible/modules/cloud/amazon/ecs_service.py
+++ /dev/null
@@ -1,850 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ecs_service
-short_description: Create, terminate, start or stop a service in ECS
-description:
-    - Creates or terminates ECS services.
-notes:
-    - The service role specified must be assumable (i.e. have a trust relationship for the ECS service, ecs.amazonaws.com).
- - For details of the parameters and returns see U(https://boto3.readthedocs.io/en/latest/reference/services/ecs.html).
- - An IAM role must have been previously created.
-version_added: "2.1"
-author:
- - "Mark Chance (@Java1Guy)"
- - "Darek Kaczynski (@kaczynskid)"
- - "Stephane Maarek (@simplesteph)"
- - "Zac Blazic (@zacblazic)"
-
-requirements: [ json, botocore, boto3 ]
-options:
- state:
- description:
- - The desired state of the service.
- required: true
- choices: ["present", "absent", "deleting"]
- type: str
- name:
- description:
- - The name of the service.
- required: true
- type: str
- cluster:
- description:
- - The name of the cluster in which the service exists.
- required: false
- type: str
- task_definition:
- description:
- - The task definition the service will run.
- - This parameter is required when I(state=present).
- required: false
- type: str
- load_balancers:
- description:
- - The list of ELBs defined for this service.
- required: false
- type: list
- elements: str
- desired_count:
- description:
- - The count of how many instances of the service.
- - This parameter is required when I(state=present).
- required: false
- type: int
- client_token:
- description:
- - Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.
- required: false
- type: str
- role:
- description:
- - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer
- on your behalf.
- - This parameter is only required if you are using a load balancer with your service in a network mode other than C(awsvpc).
- required: false
- type: str
- delay:
- description:
- - The time to wait before checking that the service is available.
- required: false
- default: 10
- type: int
- repeat:
- description:
- - The number of times to check that the service is available.
- required: false
- default: 10
- type: int
- force_new_deployment:
- description:
- - Force deployment of service even if there are no changes.
- required: false
- version_added: 2.8
- type: bool
- deployment_configuration:
- description:
- - Optional parameters that control the deployment_configuration.
- - Format is '{"maximum_percent":<integer>, "minimum_healthy_percent":<integer>}
- required: false
- version_added: 2.3
- type: dict
- suboptions:
- maximum_percent:
- type: int
- description: Upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment.
- minimum_healthy_percent:
- type: int
- description: A lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment.
-    placement_constraints:
-        description:
-          - The placement constraints for the tasks in the service.
-        required: false
-        version_added: 2.4
-        type: list
-        elements: dict
-        suboptions:
-            type:
-                description: The type of constraint.
-                type: str
-            expression:
-                description: A cluster query language expression to apply to the constraint.
-                type: str
-    placement_strategy:
-        description:
-          - The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service.
-        required: false
-        version_added: 2.4
-        type: list
-        elements: dict
-        suboptions:
-            type:
-                description: The type of placement strategy.
-                type: str
-            field:
-                description: The field to apply the placement strategy against.
-                type: str
- network_configuration:
- description:
- - Network configuration of the service. Only applicable for task definitions created with I(network_mode=awsvpc).
- - I(assign_public_ip) requires botocore >= 1.8.4
- type: dict
- suboptions:
- subnets:
- description:
-          - A list of subnet IDs to associate with the task.
- version_added: 2.6
- type: list
- elements: str
- security_groups:
- description:
-          - A list of security group names or group IDs to associate with the task.
- version_added: 2.6
- type: list
- elements: str
- assign_public_ip:
- description:
- - Whether the task's elastic network interface receives a public IP address.
- - This option requires botocore >= 1.8.4.
- type: bool
- version_added: 2.7
- launch_type:
- description:
- - The launch type on which to run your service.
- required: false
- version_added: 2.7
- choices: ["EC2", "FARGATE"]
- type: str
- health_check_grace_period_seconds:
- description:
- - Seconds to wait before health checking the freshly added/updated services.
- - This option requires botocore >= 1.8.20.
- required: false
- version_added: 2.8
- type: int
- service_registries:
- description:
- - Describes service discovery registries this service will register with.
- type: list
- elements: dict
- required: false
- version_added: 2.8
- suboptions:
- container_name:
- description:
-          - The container name for service discovery registration.
- type: str
- container_port:
- description:
-          - The container port for service discovery registration.
- type: int
- arn:
- description:
-          - The service discovery registry ARN.
- type: str
- scheduling_strategy:
- description:
-      - The scheduling strategy. Defaults to C(REPLICA) if not given, to preserve previous behavior.
- required: false
- version_added: 2.8
- choices: ["DAEMON", "REPLICA"]
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Basic provisioning example
-- ecs_service:
- state: present
- name: console-test-service
- cluster: new_cluster
- task_definition: 'new_cluster-task:1'
- desired_count: 0
-
-- name: create ECS service on VPC network
- ecs_service:
- state: present
- name: console-test-service
- cluster: new_cluster
- task_definition: 'new_cluster-task:1'
- desired_count: 0
- network_configuration:
- subnets:
- - subnet-abcd1234
- security_groups:
- - sg-aaaa1111
- - my_security_group
-
-# Simple example to delete
-- ecs_service:
- name: default
- state: absent
- cluster: new_cluster
-
-# With custom deployment configuration (added in version 2.3), placement constraints and strategy (added in version 2.4)
-- ecs_service:
- state: present
- name: test-service
- cluster: test-cluster
- task_definition: test-task-definition
- desired_count: 3
- deployment_configuration:
- minimum_healthy_percent: 75
- maximum_percent: 150
- placement_constraints:
- - type: memberOf
- expression: 'attribute:flavor==test'
- placement_strategy:
- - type: binpack
- field: memory
-'''
-
-RETURN = '''
-service:
- description: Details of created service.
- returned: when creating a service
- type: complex
- contains:
- clusterArn:
-      description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
- returned: always
- type: str
- desiredCount:
- description: The desired number of instantiations of the task definition to keep running on the service.
- returned: always
- type: int
- loadBalancers:
- description: A list of load balancer objects
- returned: always
- type: complex
- contains:
- loadBalancerName:
-          description: The name of the load balancer.
- returned: always
- type: str
- containerName:
- description: The name of the container to associate with the load balancer.
- returned: always
- type: str
- containerPort:
- description: The port on the container to associate with the load balancer.
- returned: always
- type: int
- pendingCount:
- description: The number of tasks in the cluster that are in the PENDING state.
- returned: always
- type: int
- runningCount:
- description: The number of tasks in the cluster that are in the RUNNING state.
- returned: always
- type: int
- serviceArn:
- description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
- of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example,
-        arn:aws:ecs:region:012345678910:service/my-service.
- returned: always
- type: str
- serviceName:
- description: A user-generated string used to identify the service
- returned: always
- type: str
- status:
- description: The valid values are ACTIVE, DRAINING, or INACTIVE.
- returned: always
- type: str
- taskDefinition:
- description: The ARN of a task definition to use for tasks in the service.
- returned: always
- type: str
- deployments:
- description: list of service deployments
- returned: always
- type: list
- elements: dict
- deploymentConfiguration:
- description: dictionary of deploymentConfiguration
- returned: always
- type: complex
- contains:
- maximumPercent:
- description: maximumPercent param
- returned: always
- type: int
- minimumHealthyPercent:
- description: minimumHealthyPercent param
- returned: always
- type: int
- events:
- description: list of service events
- returned: always
- type: list
- elements: dict
- placementConstraints:
- description: List of placement constraints objects
- returned: always
- type: list
- elements: dict
- contains:
- type:
- description: The type of constraint. Valid values are distinctInstance and memberOf.
- returned: always
- type: str
- expression:
- description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if the constraint type is
- distinctInstance.
- returned: always
- type: str
- placementStrategy:
- description: List of placement strategy objects
- returned: always
- type: list
- elements: dict
- contains:
- type:
- description: The type of placement strategy. Valid values are random, spread and binpack.
- returned: always
- type: str
- field:
- description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId
- (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance,
- such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY.
- returned: always
- type: str
-
-ansible_facts:
- description: Facts about deleted service.
- returned: when deleting a service
- type: complex
- contains:
- service:
- description: Details of deleted service.
- returned: when service existed and was deleted
- type: complex
- contains:
- clusterArn:
- description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
- returned: always
- type: str
- desiredCount:
- description: The desired number of instantiations of the task definition to keep running on the service.
- returned: always
- type: int
- loadBalancers:
- description: A list of load balancer objects
- returned: always
- type: complex
- contains:
- loadBalancerName:
- description: The name of the load balancer.
- returned: always
- type: str
- containerName:
- description: The name of the container to associate with the load balancer.
- returned: always
- type: str
- containerPort:
- description: The port on the container to associate with the load balancer.
- returned: always
- type: int
- pendingCount:
- description: The number of tasks in the cluster that are in the PENDING state.
- returned: always
- type: int
- runningCount:
- description: The number of tasks in the cluster that are in the RUNNING state.
- returned: always
- type: int
- serviceArn:
- description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
- of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example,
- arn:aws:ecs:region:012345678910:service/my-service.
- returned: always
- type: str
- serviceName:
- description: A user-generated string used to identify the service.
- returned: always
- type: str
- status:
- description: The status of the service. Valid values are ACTIVE, DRAINING, or INACTIVE.
- returned: always
- type: str
- taskDefinition:
- description: The ARN of a task definition to use for tasks in the service.
- returned: always
- type: str
- deployments:
- description: list of service deployments
- returned: always
- type: list
- elements: dict
- deploymentConfiguration:
- description: Dictionary of deployment configuration parameters.
- returned: always
- type: complex
- contains:
- maximumPercent:
- description: Upper limit, as a percentage of desiredCount, on the number of running tasks during a deployment.
- returned: always
- type: int
- minimumHealthyPercent:
- description: Lower limit, as a percentage of desiredCount, on the number of healthy running tasks during a deployment.
- returned: always
- type: int
- events:
- description: list of service events
- returned: always
- type: list
- elements: dict
- placementConstraints:
- description: List of placement constraints objects
- returned: always
- type: list
- elements: dict
- contains:
- type:
- description: The type of constraint. Valid values are distinctInstance and memberOf.
- returned: always
- type: str
- expression:
- description: A cluster query language expression to apply to the constraint. Note you cannot specify an expression if
- the constraint type is distinctInstance.
- returned: always
- type: str
- placementStrategy:
- description: List of placement strategy objects
- returned: always
- type: list
- elements: dict
- contains:
- type:
- description: The type of placement strategy. Valid values are random, spread and binpack.
- returned: always
- type: str
- field:
- description: The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId
- (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance,
- such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are CPU and MEMORY.
- returned: always
- type: str
-'''
-import time
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import snake_dict_to_camel_dict, map_complex_type, get_ec2_security_group_ids_from_names
-
-DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
-    'maximum_percent': 'int',
-    'minimum_healthy_percent': 'int'
-}
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-class EcsServiceManager:
- """Handles ECS Services"""
-
- def __init__(self, module):
- self.module = module
- self.ecs = module.client('ecs')
- self.ec2 = module.client('ec2')
-
- def format_network_configuration(self, network_config):
- result = dict()
- if network_config['subnets'] is not None:
- result['subnets'] = network_config['subnets']
- else:
- self.module.fail_json(msg="Network configuration must include subnets")
- if network_config['security_groups'] is not None:
- groups = network_config['security_groups']
- if any(not sg.startswith('sg-') for sg in groups):
- try:
- vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
- groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't look up security groups")
- result['securityGroups'] = groups
- if network_config['assign_public_ip'] is not None:
- if self.module.botocore_at_least('1.8.4'):
- if network_config['assign_public_ip'] is True:
- result['assignPublicIp'] = "ENABLED"
- else:
- result['assignPublicIp'] = "DISABLED"
- else:
- self.module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use assign_public_ip in network_configuration')
- return dict(awsvpcConfiguration=result)
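-
-    # Illustrative sketch (not part of the original module logic): given a
-    # hypothetical playbook input such as
-    #   {'subnets': ['subnet-abcd1234'], 'security_groups': ['my_security_group'],
-    #    'assign_public_ip': True}
-    # format_network_configuration() resolves group names to IDs and returns the
-    # boto3 shape that create_service()/update_service() expect:
-    #   {'awsvpcConfiguration': {'subnets': ['subnet-abcd1234'],
-    #                            'securityGroups': ['sg-aaaa1111'],
-    #                            'assignPublicIp': 'ENABLED'}}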
-
- def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
- for c in array_of_services:
- if c[field_name].endswith(service_name):
- return c
- return None
-
- def describe_service(self, cluster_name, service_name):
- response = self.ecs.describe_services(
- cluster=cluster_name,
- services=[service_name])
- msg = ''
- if len(response['failures']) > 0:
-            c = self.find_in_array(response['failures'], service_name, 'arn')
-            if c:
-                msg += ", failure reason is " + c['reason']
-                if c['reason'] == 'MISSING':
-                    return None
- # fall thru and look through found ones
- if len(response['services']) > 0:
- c = self.find_in_array(response['services'], service_name)
- if c:
- return c
- raise Exception("Unknown problem describing service %s." % service_name)
-
- def is_matching_service(self, expected, existing):
- if expected['task_definition'] != existing['taskDefinition']:
- return False
-
- if (expected['load_balancers'] or []) != existing['loadBalancers']:
- return False
-
-        # expected is the module params. The DAEMON scheduling strategy keeps the
-        # desired count equal to the number of container instances, so skip the
-        # desired-count comparison when the strategy is DAEMON.
- if (expected['scheduling_strategy'] != 'DAEMON'):
- if (expected['desired_count'] or 0) != existing['desiredCount']:
- return False
-
- return True
-
- def create_service(self, service_name, cluster_name, task_definition, load_balancers,
- desired_count, client_token, role, deployment_configuration,
- placement_constraints, placement_strategy, health_check_grace_period_seconds,
- network_configuration, service_registries, launch_type, scheduling_strategy):
-
- params = dict(
- cluster=cluster_name,
- serviceName=service_name,
- taskDefinition=task_definition,
- loadBalancers=load_balancers,
- clientToken=client_token,
- role=role,
- deploymentConfiguration=deployment_configuration,
- placementConstraints=placement_constraints,
- placementStrategy=placement_strategy
- )
- if network_configuration:
- params['networkConfiguration'] = network_configuration
- if launch_type:
- params['launchType'] = launch_type
- if self.health_check_setable(params) and health_check_grace_period_seconds is not None:
- params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
- if service_registries:
- params['serviceRegistries'] = service_registries
- # desired count is not required if scheduling strategy is daemon
- if desired_count is not None:
- params['desiredCount'] = desired_count
-
- if scheduling_strategy:
- params['schedulingStrategy'] = scheduling_strategy
- response = self.ecs.create_service(**params)
- return self.jsonize(response['service'])
-
- def update_service(self, service_name, cluster_name, task_definition,
- desired_count, deployment_configuration, network_configuration,
- health_check_grace_period_seconds, force_new_deployment):
- params = dict(
- cluster=cluster_name,
- service=service_name,
- taskDefinition=task_definition,
- deploymentConfiguration=deployment_configuration)
- if network_configuration:
- params['networkConfiguration'] = network_configuration
- if force_new_deployment:
- params['forceNewDeployment'] = force_new_deployment
- if health_check_grace_period_seconds is not None:
- params['healthCheckGracePeriodSeconds'] = health_check_grace_period_seconds
- # desired count is not required if scheduling strategy is daemon
- if desired_count is not None:
- params['desiredCount'] = desired_count
-
- response = self.ecs.update_service(**params)
- return self.jsonize(response['service'])
-
- def jsonize(self, service):
- # some fields are datetime which is not JSON serializable
- # make them strings
- if 'createdAt' in service:
- service['createdAt'] = str(service['createdAt'])
- if 'deployments' in service:
- for d in service['deployments']:
- if 'createdAt' in d:
- d['createdAt'] = str(d['createdAt'])
- if 'updatedAt' in d:
- d['updatedAt'] = str(d['updatedAt'])
- if 'events' in service:
- for e in service['events']:
- if 'createdAt' in e:
- e['createdAt'] = str(e['createdAt'])
- return service
-
- def delete_service(self, service, cluster=None):
- return self.ecs.delete_service(cluster=cluster, service=service)
-
- def ecs_api_handles_network_configuration(self):
- # There doesn't seem to be a nice way to inspect botocore to look
- # for attributes (and networkConfiguration is not an explicit argument
- # to e.g. ecs.run_task, it's just passed as a keyword argument)
- return self.module.botocore_at_least('1.7.44')
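-
-    # A possible alternative to hard-coded version checks (a sketch only, not what
-    # this module shipped with; it assumes botocore's service-model introspection):
-    #
-    #   def ecs_api_handles(self, operation, param):
-    #       input_shape = self.ecs.meta.service_model.operation_model(operation).input_shape
-    #       return param in input_shape.members
-    #
-    # e.g. ecs_api_handles('CreateService', 'networkConfiguration') would report
-    # whether the installed botocore accepts that keyword.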
-
- def health_check_setable(self, params):
- load_balancers = params.get('loadBalancers', [])
- # check if botocore (and thus boto3) is new enough for using the healthCheckGracePeriodSeconds parameter
- return len(load_balancers) > 0 and self.module.botocore_at_least('1.8.20')
-
-
-def main():
- argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent', 'deleting']),
- name=dict(required=True, type='str'),
- cluster=dict(required=False, type='str'),
- task_definition=dict(required=False, type='str'),
- load_balancers=dict(required=False, default=[], type='list'),
- desired_count=dict(required=False, type='int'),
- client_token=dict(required=False, default='', type='str'),
- role=dict(required=False, default='', type='str'),
- delay=dict(required=False, type='int', default=10),
- repeat=dict(required=False, type='int', default=10),
- force_new_deployment=dict(required=False, default=False, type='bool'),
- deployment_configuration=dict(required=False, default={}, type='dict'),
- placement_constraints=dict(required=False, default=[], type='list'),
- placement_strategy=dict(required=False, default=[], type='list'),
- health_check_grace_period_seconds=dict(required=False, type='int'),
- network_configuration=dict(required=False, type='dict', options=dict(
- subnets=dict(type='list'),
- security_groups=dict(type='list'),
- assign_public_ip=dict(type='bool')
- )),
- launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
- service_registries=dict(required=False, type='list', default=[]),
- scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA'])
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_if=[('state', 'present', ['task_definition']),
- ('launch_type', 'FARGATE', ['network_configuration'])],
- required_together=[['load_balancers', 'role']])
-
- if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA':
- if module.params['desired_count'] is None:
- module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count')
-
- service_mgr = EcsServiceManager(module)
- if module.params['network_configuration']:
- if not service_mgr.ecs_api_handles_network_configuration():
- module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')
- network_configuration = service_mgr.format_network_configuration(module.params['network_configuration'])
- else:
- network_configuration = None
-
- deployment_configuration = map_complex_type(module.params['deployment_configuration'],
- DEPLOYMENT_CONFIGURATION_TYPE_MAP)
-
- deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration)
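-    # For illustration (hypothetical values): map_complex_type() coerces strings to
-    # the types declared in DEPLOYMENT_CONFIGURATION_TYPE_MAP, and
-    # snake_dict_to_camel_dict() renames the keys, so
-    #   {'minimum_healthy_percent': '75', 'maximum_percent': '150'}
-    # ends up as
-    #   {'minimumHealthyPercent': 75, 'maximumPercent': 150}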
- serviceRegistries = list(map(snake_dict_to_camel_dict, module.params['service_registries']))
-
- try:
- existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
- except Exception as e:
- module.fail_json(msg="Exception describing service '" + module.params['name'] + "' in cluster '" + module.params['cluster'] + "': " + str(e))
-
- results = dict(changed=False)
-
- if module.params['launch_type']:
- if not module.botocore_at_least('1.8.4'):
- module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type')
- if module.params['force_new_deployment']:
- if not module.botocore_at_least('1.8.4'):
- module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use force_new_deployment')
- if module.params['health_check_grace_period_seconds']:
- if not module.botocore_at_least('1.8.20'):
- module.fail_json(msg='botocore needs to be version 1.8.20 or higher to use health_check_grace_period_seconds')
-
- if module.params['state'] == 'present':
-
- matching = False
- update = False
-
- if existing and 'status' in existing and existing['status'] == "ACTIVE":
- if module.params['force_new_deployment']:
- update = True
- elif service_mgr.is_matching_service(module.params, existing):
- matching = True
- results['service'] = existing
- else:
- update = True
-
- if not matching:
- if not module.check_mode:
-
- role = module.params['role']
- clientToken = module.params['client_token']
-
- loadBalancers = []
- for loadBalancer in module.params['load_balancers']:
- if 'containerPort' in loadBalancer:
- loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
- loadBalancers.append(loadBalancer)
-
- if update:
-                # check the various parameters and botocore versions, and give a helpful error if botocore is not new enough for a feature
-
- if module.params['scheduling_strategy']:
- if not module.botocore_at_least('1.10.37'):
- module.fail_json(msg='botocore needs to be version 1.10.37 or higher to use scheduling_strategy')
- elif (existing['schedulingStrategy']) != module.params['scheduling_strategy']:
- module.fail_json(msg="It is not possible to update the scheduling strategy of an existing service")
-
- if module.params['service_registries']:
- if not module.botocore_at_least('1.9.15'):
- module.fail_json(msg='botocore needs to be version 1.9.15 or higher to use service_registries')
- elif (existing['serviceRegistries'] or []) != serviceRegistries:
- module.fail_json(msg="It is not possible to update the service registries of an existing service")
-
- if (existing['loadBalancers'] or []) != loadBalancers:
- module.fail_json(msg="It is not possible to update the load balancers of an existing service")
-
- # update required
- response = service_mgr.update_service(module.params['name'],
- module.params['cluster'],
- module.params['task_definition'],
- module.params['desired_count'],
- deploymentConfiguration,
- network_configuration,
- module.params['health_check_grace_period_seconds'],
- module.params['force_new_deployment'])
-
- else:
- try:
- response = service_mgr.create_service(module.params['name'],
- module.params['cluster'],
- module.params['task_definition'],
- loadBalancers,
- module.params['desired_count'],
- clientToken,
- role,
- deploymentConfiguration,
- module.params['placement_constraints'],
- module.params['placement_strategy'],
- module.params['health_check_grace_period_seconds'],
- network_configuration,
- serviceRegistries,
- module.params['launch_type'],
- module.params['scheduling_strategy']
- )
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e, msg="Couldn't create service")
-
- results['service'] = response
-
- results['changed'] = True
-
- elif module.params['state'] == 'absent':
-        if existing:
-            # the service exists, so delete it, mark changed, and return
-            # info about the deleted service
- del existing['deployments']
- del existing['events']
- results['ansible_facts'] = existing
- if 'status' in existing and existing['status'] == "INACTIVE":
- results['changed'] = False
- else:
- if not module.check_mode:
- try:
- service_mgr.delete_service(
- module.params['name'],
- module.params['cluster']
- )
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e, msg="Couldn't delete service")
- results['changed'] = True
-
- elif module.params['state'] == 'deleting':
- if not existing:
- module.fail_json(msg="Service '" + module.params['name'] + " not found.")
- return
-        # the service exists; poll until it becomes INACTIVE, checking up to
-        # repeat times with delay seconds between attempts
- delay = module.params['delay']
- repeat = module.params['repeat']
- time.sleep(delay)
- for i in range(repeat):
- existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
- status = existing['status']
- if status == "INACTIVE":
- results['changed'] = True
- break
- time.sleep(delay)
-            if i == repeat - 1:
- module.fail_json(msg="Service still not deleted after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
- return
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ecs_service_info.py b/lib/ansible/modules/cloud/amazon/ecs_service_info.py
deleted file mode 100644
index 4a7b99c690..0000000000
--- a/lib/ansible/modules/cloud/amazon/ecs_service_info.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ecs_service_info
-short_description: List or describe services in ECS
-description:
- - Lists or describes services in ECS.
- - This module was called C(ecs_service_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(ecs_service_info) module no longer returns C(ansible_facts)!
-version_added: "2.1"
-author:
- - "Mark Chance (@Java1Guy)"
- - "Darek Kaczynski (@kaczynskid)"
-requirements: [ json, botocore, boto3 ]
-options:
- details:
- description:
- - Set this to true if you want detailed information about the services.
- required: false
- default: false
- type: bool
- events:
- description:
- - Whether to return ECS service events. Only has an effect if I(details=true).
- required: false
- default: true
- type: bool
- version_added: "2.6"
- cluster:
- description:
- - The cluster ARN in which to list the services.
- required: false
- type: str
- service:
- description:
- - One or more services to get details for.
- required: false
- type: list
- elements: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Describe a single service
-- ecs_service_info:
- cluster: test-cluster
- service: console-test-service
- details: true
- register: output
-
-# List all services in a cluster
-- ecs_service_info:
- cluster: test-cluster
- register: output
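-
-# Describing several services at once (the service names here are illustrative)
-- ecs_service_info:
-    cluster: test-cluster
-    service:
-      - console-test-service
-      - another-test-service
-    details: true
-  register: output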
-'''
-
-RETURN = '''
-services:
- description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
- returned: success
- type: complex
- contains:
- clusterArn:
- description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
- returned: always
- type: str
- desiredCount:
- description: The desired number of instantiations of the task definition to keep running on the service.
- returned: always
- type: int
- loadBalancers:
- description: A list of load balancer objects
- returned: always
- type: complex
- contains:
- loadBalancerName:
- description: The name of the load balancer.
- returned: always
- type: str
- containerName:
- description: The name of the container to associate with the load balancer.
- returned: always
- type: str
- containerPort:
- description: The port on the container to associate with the load balancer.
- returned: always
- type: int
- pendingCount:
- description: The number of tasks in the cluster that are in the PENDING state.
- returned: always
- type: int
- runningCount:
- description: The number of tasks in the cluster that are in the RUNNING state.
- returned: always
- type: int
- serviceArn:
- description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region:012345678910:service/my-service.
- returned: always
- type: str
- serviceName:
- description: A user-generated string used to identify the service.
- returned: always
- type: str
- status:
- description: The status of the service. Valid values are ACTIVE, DRAINING, or INACTIVE.
- returned: always
- type: str
- taskDefinition:
- description: The ARN of a task definition to use for tasks in the service.
- returned: always
- type: str
- deployments:
- description: list of service deployments
- returned: always
- type: list
- elements: dict
- events:
- description: list of service events
- returned: when events is true
- type: list
- elements: dict
-''' # NOQA
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import AWSRetry
-
-
-class EcsServiceManager:
- """Handles ECS Services"""
-
- def __init__(self, module):
- self.module = module
- self.ecs = module.client('ecs')
-
- @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
- def list_services_with_backoff(self, **kwargs):
- paginator = self.ecs.get_paginator('list_services')
- try:
- return paginator.paginate(**kwargs).build_full_result()
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'ClusterNotFoundException':
- self.module.fail_json_aws(e, "Could not find cluster to list services")
- else:
- raise
-
- @AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
- def describe_services_with_backoff(self, **kwargs):
- return self.ecs.describe_services(**kwargs)
-
- def list_services(self, cluster):
- fn_args = dict()
-        if cluster:
- fn_args['cluster'] = cluster
- try:
- response = self.list_services_with_backoff(**fn_args)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't list ECS services")
- relevant_response = dict(services=response['serviceArns'])
- return relevant_response
-
- def describe_services(self, cluster, services):
- fn_args = dict()
-        if cluster:
- fn_args['cluster'] = cluster
- fn_args['services'] = services
- try:
- response = self.describe_services_with_backoff(**fn_args)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't describe ECS services")
- running_services = [self.extract_service_from(service) for service in response.get('services', [])]
- services_not_running = response.get('failures', [])
- return running_services, services_not_running
-
- def extract_service_from(self, service):
- # some fields are datetime which is not JSON serializable
- # make them strings
- if 'deployments' in service:
- for d in service['deployments']:
- if 'createdAt' in d:
- d['createdAt'] = str(d['createdAt'])
- if 'updatedAt' in d:
- d['updatedAt'] = str(d['updatedAt'])
- if 'events' in service:
- if not self.module.params['events']:
- del service['events']
- else:
- for e in service['events']:
- if 'createdAt' in e:
- e['createdAt'] = str(e['createdAt'])
- return service
-
-
-def chunks(l, n):
-    """Yield successive n-sized chunks from l.
-
-    https://stackoverflow.com/a/312464
-    """
- for i in range(0, len(l), n):
- yield l[i:i + n]
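-
-# For illustration: list(chunks(['a', 'b', 'c'], 2)) == [['a', 'b'], ['c']].
-# main() below relies on this because DescribeServices accepts at most ten
-# services per call.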
-
-
-def main():
-
- argument_spec = dict(
- details=dict(type='bool', default=False),
- events=dict(type='bool', default=True),
- cluster=dict(),
- service=dict(type='list')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- is_old_facts = module._name == 'ecs_service_facts'
- if is_old_facts:
- module.deprecate("The 'ecs_service_facts' module has been renamed to 'ecs_service_info', "
- "and the renamed one no longer returns ansible_facts", version='2.13')
-
- show_details = module.params.get('details')
-
- task_mgr = EcsServiceManager(module)
- if show_details:
- if module.params['service']:
- services = module.params['service']
- else:
- services = task_mgr.list_services(module.params['cluster'])['services']
- ecs_info = dict(services=[], services_not_running=[])
- for chunk in chunks(services, 10):
- running_services, services_not_running = task_mgr.describe_services(module.params['cluster'], chunk)
- ecs_info['services'].extend(running_services)
- ecs_info['services_not_running'].extend(services_not_running)
- else:
- ecs_info = task_mgr.list_services(module.params['cluster'])
-
- if is_old_facts:
- module.exit_json(changed=False, ansible_facts=ecs_info, **ecs_info)
- else:
- module.exit_json(changed=False, **ecs_info)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ecs_tag.py b/lib/ansible/modules/cloud/amazon/ecs_tag.py
deleted file mode 100644
index 0a2b639233..0000000000
--- a/lib/ansible/modules/cloud/amazon/ecs_tag.py
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2019, Michael Pechner <mikey@mikey.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = r'''
----
-module: ecs_tag
-short_description: create and remove tags on Amazon ECS resources
-notes:
- - none
-description:
- - Creates and removes tags for Amazon ECS resources.
- - Resources are referenced by their cluster name.
-version_added: '2.10'
-author:
- - Michael Pechner (@mpechner)
-requirements: [ boto3, botocore ]
-options:
- cluster_name:
- description:
- - The name of the cluster whose resources we are tagging.
- required: true
- type: str
- resource:
- description:
- - The ECS resource name.
- - Required unless I(resource_type=cluster).
- type: str
- resource_type:
- description:
- - The type of resource.
- default: cluster
- choices: ['cluster', 'task', 'service', 'task_definition', 'container']
- type: str
- state:
- description:
- - Whether the tags should be present or absent on the resource.
- default: present
- choices: ['present', 'absent']
- type: str
- tags:
- description:
- - A dictionary of tags to add or remove from the resource.
- - If the value provided for a tag is null and I(state=absent), the tag will be removed regardless of its current value.
- type: dict
- purge_tags:
- description:
- - Whether unspecified tags should be removed from the resource.
- - Note that when combined with I(state=absent), specified tags with non-matching values are not purged.
- type: bool
- default: false
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = r'''
-- name: Ensure tags are present on a resource
- ecs_tag:
- cluster_name: mycluster
- resource_type: cluster
- state: present
- tags:
- Name: ubervol
- env: prod
-
-- name: Remove the Env tag
- ecs_tag:
- cluster_name: mycluster
- resource_type: cluster
- tags:
- Env:
- state: absent
-
-- name: Remove the Env tag if it's currently 'development'
- ecs_tag:
- cluster_name: mycluster
- resource_type: cluster
- tags:
- Env: development
- state: absent
-
-- name: Remove all tags except for Name from a cluster
- ecs_tag:
- cluster_name: mycluster
- resource_type: cluster
- tags:
- Name: foo
- state: absent
- purge_tags: true
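-
-- name: Tag a service instead of the cluster (the service name is illustrative)
-  ecs_tag:
-    cluster_name: mycluster
-    resource: myservice
-    resource_type: service
-    state: present
-    tags:
-      team: devops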
-'''
-
-RETURN = r'''
-tags:
- description: A dict containing the tags on the resource.
- returned: always
- type: dict
-added_tags:
- description: A dict of tags that were added to the resource.
- returned: when tags were added
- type: dict
-removed_tags:
- description: A dict of tags that were removed from the resource.
- returned: when tags were removed
- type: dict
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-
-def get_tags(ecs, module, resource):
- try:
- return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)['tags'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))
-
-
-def get_arn(ecs, module, cluster_name, resource_type, resource):
-
- try:
- if resource_type == 'cluster':
- description = ecs.describe_clusters(clusters=[resource])
- resource_arn = description['clusters'][0]['clusterArn']
- elif resource_type == 'task':
- description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource])
- resource_arn = description['tasks'][0]['taskArn']
- elif resource_type == 'service':
- description = ecs.describe_services(cluster=cluster_name, services=[resource])
- resource_arn = description['services'][0]['serviceArn']
- elif resource_type == 'task_definition':
- description = ecs.describe_task_definition(taskDefinition=resource)
- resource_arn = description['taskDefinition']['taskDefinitionArn']
- elif resource_type == 'container':
-            description = ecs.describe_container_instances(cluster=cluster_name, containerInstances=[resource])
- resource_arn = description['containerInstances'][0]['containerInstanceArn']
- except (IndexError, KeyError):
- module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to find {0} {1}'.format(resource_type, resource))
-
- return resource_arn
-
-
-def main():
- argument_spec = dict(
- cluster_name=dict(required=True),
- resource=dict(required=False),
- tags=dict(type='dict'),
- purge_tags=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent']),
- resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container'])
- )
- required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]
-
- module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
-
- resource_type = module.params['resource_type']
- cluster_name = module.params['cluster_name']
- if resource_type == 'cluster':
- resource = cluster_name
- else:
- resource = module.params['resource']
- tags = module.params['tags']
- state = module.params['state']
- purge_tags = module.params['purge_tags']
-
- result = {'changed': False}
-
- ecs = module.client('ecs')
-
- resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource)
-
- current_tags = get_tags(ecs, module, resource_arn)
-
- add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
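-
-    # Illustration of compare_aws_tags() semantics (hypothetical values): with
-    # current_tags={'Name': 'vol', 'env': 'dev'}, tags={'env': 'prod'} and
-    # purge_tags=True it returns ({'env': 'prod'}, ['Name']) -- the tags to set
-    # and the keys of unspecified tags to purge.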
-
- remove_tags = {}
- if state == 'absent':
- for key in tags:
- if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
- remove_tags[key] = current_tags[key]
-
- for key in remove:
- remove_tags[key] = current_tags[key]
-
- if remove_tags:
- result['changed'] = True
- result['removed_tags'] = remove_tags
- if not module.check_mode:
- try:
- ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys()))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
-
- if state == 'present' and add_tags:
- result['changed'] = True
- result['added_tags'] = add_tags
- current_tags.update(add_tags)
- if not module.check_mode:
- try:
- tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name='key', tag_value_key_name='value')
- ecs.tag_resource(resourceArn=resource_arn, tags=tags)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
-
- result['tags'] = get_tags(ecs, module, resource_arn)
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ecs_task.py b/lib/ansible/modules/cloud/amazon/ecs_task.py
deleted file mode 100644
index a0060ce9d3..0000000000
--- a/lib/ansible/modules/cloud/amazon/ecs_task.py
+++ /dev/null
@@ -1,450 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ecs_task
-short_description: Run, start or stop a task in ecs
-description:
- - Creates or deletes instances of task definitions.
-version_added: "2.0"
-author: Mark Chance (@Java1Guy)
-requirements: [ json, botocore, boto3 ]
-options:
- operation:
- description:
- - Which task operation to execute.
- required: True
- choices: ['run', 'start', 'stop']
- type: str
- cluster:
- description:
- - The name of the cluster to run the task on.
- required: False
- type: str
- task_definition:
- description:
- - The task definition to start or run.
- required: False
- type: str
- overrides:
- description:
- - A dictionary of values to pass to the new instances.
- required: False
- type: dict
- count:
- description:
- - How many new instances to start.
- required: False
- type: int
- task:
- description:
- - The task to stop.
- required: False
- type: str
- container_instances:
- description:
- - The list of container instances on which to deploy the task.
- required: False
- type: list
- elements: str
- started_by:
- description:
- - A value showing who or what started the task (for informational purposes).
- required: False
- type: str
- network_configuration:
- description:
- - Network configuration of the task. Only applicable for task definitions created with I(network_mode=awsvpc).
- type: dict
- suboptions:
- subnets:
- description: A list of subnet IDs to which the task is attached.
- type: list
- elements: str
- security_groups:
- description: A list of group names or group IDs for the task.
- type: list
- elements: str
- version_added: 2.6
- launch_type:
- description:
- - The launch type on which to run your service.
- required: false
- version_added: 2.8
- choices: ["EC2", "FARGATE"]
- type: str
- tags:
- type: dict
- description:
- - Tags that will be added to ECS tasks on start and run.
- required: false
- version_added: "2.10"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Simple example of run task
-- name: Run task
- ecs_task:
- operation: run
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- count: 1
- started_by: ansible_user
- register: task_output
-
-# Simple example of start task
-
-- name: Start a task
- ecs_task:
- operation: start
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
- tags:
- resourceName: a_task_for_ansible_to_run
- type: long_running_task
- network: internal
- version: 1.4
- container_instances:
- - arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
- started_by: ansible_user
- network_configuration:
- subnets:
- - subnet-abcd1234
- security_groups:
- - sg-aaaa1111
- - my_security_group
- register: task_output
-
- - name: Run a task on Fargate
- ecs_task:
- operation: run
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
- started_by: ansible_user
- launch_type: FARGATE
- network_configuration:
- subnets:
- - subnet-abcd1234
- security_groups:
- - sg-aaaa1111
- - my_security_group
- register: task_output
-
-- name: Stop a task
- ecs_task:
- operation: stop
- cluster: console-sample-app-static-cluster
- task_definition: console-sample-app-static-taskdef
- task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
-'''
-RETURN = '''
-task:
- description: Details about the task that was started.
- returned: success
- type: complex
- contains:
- taskArn:
- description: The Amazon Resource Name (ARN) that identifies the task.
- returned: always
- type: str
- clusterArn:
- description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
- returned: only when details is true
- type: str
- taskDefinitionArn:
- description: The Amazon Resource Name (ARN) of the task definition.
- returned: only when details is true
- type: str
- containerInstanceArn:
- description: The Amazon Resource Name (ARN) of the container instance that hosts the task.
- returned: only when details is true
- type: str
- overrides:
- description: The container overrides set for this task.
- returned: only when details is true
- type: list
- elements: dict
- lastStatus:
- description: The last recorded status of the task.
- returned: only when details is true
- type: str
- desiredStatus:
- description: The desired status of the task.
- returned: only when details is true
- type: str
- containers:
- description: The container details.
- returned: only when details is true
- type: list
- elements: dict
- startedBy:
- description: The user who started the task.
- returned: only when details is true
- type: str
- stoppedReason:
- description: The reason why the task was stopped.
- returned: only when details is true
- type: str
- createdAt:
- description: The timestamp of when the task was created.
- returned: only when details is true
- type: str
- startedAt:
- description: The timestamp of when the task was started.
- returned: only when details is true
- type: str
- stoppedAt:
- description: The timestamp of when the task was stopped.
- returned: only when details is true
- type: str
- launchType:
- description: The launch type on which to run your task.
- returned: always
- type: str
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils.ec2 import get_ec2_security_group_ids_from_names, ansible_dict_to_boto3_tag_list
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-class EcsExecManager:
- """Handles ECS Tasks"""
-
- def __init__(self, module):
- self.module = module
- self.ecs = module.client('ecs')
- self.ec2 = module.client('ec2')
-
- def format_network_configuration(self, network_config):
- result = dict()
- if 'subnets' in network_config:
- result['subnets'] = network_config['subnets']
- else:
- self.module.fail_json(msg="Network configuration must include subnets")
- if 'security_groups' in network_config:
- groups = network_config['security_groups']
- if any(not sg.startswith('sg-') for sg in groups):
- try:
- vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
- groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't look up security groups")
- result['securityGroups'] = groups
- return dict(awsvpcConfiguration=result)
-
- def list_tasks(self, cluster_name, service_name, status):
- response = self.ecs.list_tasks(
- cluster=cluster_name,
- family=service_name,
- desiredStatus=status
- )
- if len(response['taskArns']) > 0:
- for c in response['taskArns']:
- if c.endswith(service_name):
- return c
- return None
-
- def run_task(self, cluster, task_definition, overrides, count, startedBy, launch_type, tags):
- if overrides is None:
- overrides = dict()
- params = dict(cluster=cluster, taskDefinition=task_definition,
- overrides=overrides, count=count, startedBy=startedBy)
- if self.module.params['network_configuration']:
- params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
- if launch_type:
- params['launchType'] = launch_type
- if tags:
- params['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
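-            # For illustration (hypothetical tags): ansible_dict_to_boto3_tag_list(
-            # {'env': 'prod'}, 'key', 'value') yields [{'key': 'env', 'value': 'prod'}],
-            # the shape that ecs.run_task() expects.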
-
- # TODO: need to check if long arn format enabled.
- try:
- response = self.ecs.run_task(**params)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't run task")
- # include tasks and failures
- return response['tasks']
-
- def start_task(self, cluster, task_definition, overrides, container_instances, startedBy, tags):
- args = dict()
- if cluster:
- args['cluster'] = cluster
- if task_definition:
- args['taskDefinition'] = task_definition
- if overrides:
- args['overrides'] = overrides
- if container_instances:
- args['containerInstances'] = container_instances
- if startedBy:
- args['startedBy'] = startedBy
- if self.module.params['network_configuration']:
- args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
- if tags:
- args['tags'] = ansible_dict_to_boto3_tag_list(tags, 'key', 'value')
- try:
- response = self.ecs.start_task(**args)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't start task")
- # include tasks and failures
- return response['tasks']
-
- def stop_task(self, cluster, task):
- response = self.ecs.stop_task(cluster=cluster, task=task)
- return response['task']
-
- def ecs_api_handles_launch_type(self):
- from distutils.version import LooseVersion
- # There doesn't seem to be a nice way to inspect botocore to look
- # for attributes (and networkConfiguration is not an explicit argument
- # to e.g. ecs.run_task, it's just passed as a keyword argument)
- return LooseVersion(botocore.__version__) >= LooseVersion('1.8.4')
-
- def ecs_task_long_format_enabled(self):
- account_support = self.ecs.list_account_settings(name='taskLongArnFormat', effectiveSettings=True)
- return account_support['settings'][0]['value'] == 'enabled'
-
- def ecs_api_handles_tags(self):
- from distutils.version import LooseVersion
- # There doesn't seem to be a nice way to inspect botocore to look
- # for attributes (and networkConfiguration is not an explicit argument
- # to e.g. ecs.run_task, it's just passed as a keyword argument)
- return LooseVersion(botocore.__version__) >= LooseVersion('1.12.46')
-
- def ecs_api_handles_network_configuration(self):
- from distutils.version import LooseVersion
- # There doesn't seem to be a nice way to inspect botocore to look
- # for attributes (and networkConfiguration is not an explicit argument
- # to e.g. ecs.run_task, it's just passed as a keyword argument)
- return LooseVersion(botocore.__version__) >= LooseVersion('1.7.44')
-
-
-def main():
- argument_spec = dict(
- operation=dict(required=True, choices=['run', 'start', 'stop']),
- cluster=dict(required=False, type='str'), # R S P
- task_definition=dict(required=False, type='str'), # R* S*
- overrides=dict(required=False, type='dict'), # R S
- count=dict(required=False, type='int'), # R
- task=dict(required=False, type='str'), # P*
- container_instances=dict(required=False, type='list'), # S*
- started_by=dict(required=False, type='str'), # R S
- network_configuration=dict(required=False, type='dict'),
- launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
- tags=dict(required=False, type='dict')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
- required_if=[('launch_type', 'FARGATE', ['network_configuration'])])
-
- # Validate Inputs
- if module.params['operation'] == 'run':
-        if module.params['task_definition'] is None:
- module.fail_json(msg="To run a task, a task_definition must be specified")
- task_to_list = module.params['task_definition']
- status_type = "RUNNING"
-
- if module.params['operation'] == 'start':
-        if module.params['task_definition'] is None:
-            module.fail_json(msg="To start a task, a task_definition must be specified")
-        if module.params['container_instances'] is None:
- module.fail_json(msg="To start a task, container instances must be specified")
- task_to_list = module.params['task']
- status_type = "RUNNING"
-
- if module.params['operation'] == 'stop':
-        if module.params['task'] is None:
-            module.fail_json(msg="To stop a task, a task must be specified")
-        if module.params['task_definition'] is None:
- module.fail_json(msg="To stop a task, a task definition must be specified")
- task_to_list = module.params['task_definition']
- status_type = "STOPPED"
-
- service_mgr = EcsExecManager(module)
-
- if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
- module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')
-
- if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type():
- module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type')
-
- if module.params['tags']:
- if not service_mgr.ecs_api_handles_tags():
- module.fail_json(msg=missing_required_lib("botocore >= 1.12.46", reason="to use tags"))
- if not service_mgr.ecs_task_long_format_enabled():
- module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags")
-
- existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)
-
- results = dict(changed=False)
- if module.params['operation'] == 'run':
- if existing:
- # TBD - validate the rest of the details
- results['task'] = existing
- else:
- if not module.check_mode:
- results['task'] = service_mgr.run_task(
- module.params['cluster'],
- module.params['task_definition'],
- module.params['overrides'],
- module.params['count'],
- module.params['started_by'],
- module.params['launch_type'],
- module.params['tags'],
- )
- results['changed'] = True
-
- elif module.params['operation'] == 'start':
- if existing:
- # TBD - validate the rest of the details
- results['task'] = existing
- else:
- if not module.check_mode:
- results['task'] = service_mgr.start_task(
- module.params['cluster'],
- module.params['task_definition'],
- module.params['overrides'],
- module.params['container_instances'],
- module.params['started_by'],
- module.params['tags'],
- )
- results['changed'] = True
-
- elif module.params['operation'] == 'stop':
- if existing:
- results['task'] = existing
- else:
- if not module.check_mode:
-                # no matching STOPPED task was found, so stop the running task
-                # and mark the result as changed
- results['task'] = service_mgr.stop_task(
- module.params['cluster'],
- module.params['task']
- )
- results['changed'] = True
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py b/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
deleted file mode 100644
index 77cdc52884..0000000000
--- a/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
+++ /dev/null
@@ -1,528 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ecs_taskdefinition
-short_description: register a task definition in ecs
-description:
- - Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS).
-version_added: "2.0"
-author: Mark Chance (@Java1Guy)
-requirements: [ json, botocore, boto3 ]
-options:
- state:
- description:
- - Whether the task definition should exist or be deleted.
- required: true
- choices: ['present', 'absent']
- type: str
- arn:
- description:
- - The ARN of the task definition to delete.
- required: false
- type: str
- family:
- description:
- - The name (family) to give the task definition.
- required: false
- type: str
- revision:
- description:
- - A revision number for the task definition.
- required: False
- type: int
- force_create:
- description:
- - Always create new task definition.
- required: False
- version_added: 2.5
- type: bool
- containers:
- description:
- - A list of container definitions.
- required: False
- type: list
- elements: str
- network_mode:
- description:
- - The Docker networking mode to use for the containers in the task.
- - C(awsvpc) mode was added in Ansible 2.5.
- - Windows containers must use I(network_mode=default), which will utilize docker NAT networking.
- - Setting I(network_mode=default) for a Linux container will use bridge mode.
- required: false
- default: bridge
- choices: [ 'default', 'bridge', 'host', 'none', 'awsvpc' ]
- version_added: 2.3
- type: str
- task_role_arn:
- description:
- - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted
- the permissions that are specified in this role.
- required: false
- version_added: 2.3
- type: str
- execution_role_arn:
- description:
- - The Amazon Resource Name (ARN) of the task execution role that the Amazon ECS container agent and the Docker daemon can assume.
- required: false
- version_added: 2.7
- type: str
- volumes:
- description:
- - A list of names of volumes to be attached.
- required: False
- type: list
- elements: dict
- suboptions:
- name:
- type: str
- description: The name of the volume.
- required: true
- launch_type:
- description:
- - The launch type on which to run your task.
- required: false
- version_added: 2.7
- type: str
- choices: ["EC2", "FARGATE"]
- cpu:
- description:
- - The number of cpu units used by the task. If using the EC2 launch type, this field is optional and any value can be used.
- - If using the Fargate launch type, this field is required and you must use one of C(256), C(512), C(1024), C(2048), C(4096).
- required: false
- version_added: 2.7
- type: str
- memory:
- description:
- - The amount (in MiB) of memory used by the task. If using the EC2 launch type, this field is optional and any value can be used.
- - If using the Fargate launch type, this field is required and is limited by the cpu.
- required: false
- version_added: 2.7
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Create task definition
- ecs_taskdefinition:
- containers:
- - name: simple-app
- cpu: 10
- essential: true
- image: "httpd:2.4"
- memory: 300
- mountPoints:
- - containerPath: /usr/local/apache2/htdocs
- sourceVolume: my-vol
- portMappings:
- - containerPort: 80
- hostPort: 80
- logConfiguration:
- logDriver: awslogs
- options:
- awslogs-group: /ecs/test-cluster-taskdef
- awslogs-region: us-west-2
- awslogs-stream-prefix: ecs
- - name: busybox
- command:
- - >
- /bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1><h2>Congratulations!
- </h2><p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom;
- cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
- cpu: 10
- entryPoint:
- - sh
- - "-c"
- essential: false
- image: busybox
- memory: 200
- volumesFrom:
- - sourceContainer: simple-app
- volumes:
- - name: my-vol
- family: test-cluster-taskdef
- state: present
- register: task_output
-
-- name: Create task definition
- ecs_taskdefinition:
- family: nginx
- containers:
- - name: nginx
- essential: true
- image: "nginx"
- portMappings:
- - containerPort: 8080
- hostPort: 8080
- cpu: 512
- memory: 1024
- state: present
-
-- name: Create task definition
- ecs_taskdefinition:
- family: nginx
- containers:
- - name: nginx
- essential: true
- image: "nginx"
- portMappings:
- - containerPort: 8080
- hostPort: 8080
- launch_type: FARGATE
- cpu: 512
- memory: 1024
- state: present
- network_mode: awsvpc
-
-# Create Task Definition with Environment Variables and Secrets
-- name: Create task definition
- ecs_taskdefinition:
- family: nginx
- containers:
- - name: nginx
- essential: true
- image: "nginx"
- environment:
- - name: "PORT"
- value: "8080"
- secrets:
- # For variables stored in Secrets Manager
- - name: "NGINX_HOST"
- valueFrom: "arn:aws:secretsmanager:us-west-2:123456789012:secret:nginx/NGINX_HOST"
- # For variables stored in Parameter Store
- - name: "API_KEY"
- valueFrom: "arn:aws:ssm:us-west-2:123456789012:parameter/nginx/API_KEY"
- launch_type: FARGATE
- cpu: 512
- memory: 1GB
- state: present
- network_mode: awsvpc
-'''
-RETURN = '''
-taskdefinition:
- description: A reflection of the input parameters.
- type: dict
- returned: always
-'''
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible.module_utils._text import to_text
-
-
-class EcsTaskManager:
- """Handles ECS Tasks"""
-
- def __init__(self, module):
- self.module = module
-
- self.ecs = module.client('ecs')
-
- def describe_task(self, task_name):
- try:
- response = self.ecs.describe_task_definition(taskDefinition=task_name)
- return response['taskDefinition']
- except botocore.exceptions.ClientError:
- return None
-
- def register_task(self, family, task_role_arn, execution_role_arn, network_mode, container_definitions, volumes, launch_type, cpu, memory):
- validated_containers = []
-
-        # Ensure the numeric parameters are ints, as required by boto
- for container in container_definitions:
- for param in ('memory', 'cpu', 'memoryReservation'):
- if param in container:
- container[param] = int(container[param])
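-
-            # e.g. (illustrative) a playbook-supplied container of
-            #   {'name': 'web', 'cpu': '10', 'memory': '300'}
-            # is coerced here to {'name': 'web', 'cpu': 10, 'memory': 300}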
-
- if 'portMappings' in container:
- for port_mapping in container['portMappings']:
- for port in ('hostPort', 'containerPort'):
- if port in port_mapping:
- port_mapping[port] = int(port_mapping[port])
- if network_mode == 'awsvpc' and 'hostPort' in port_mapping:
- if port_mapping['hostPort'] != port_mapping.get('containerPort'):
- self.module.fail_json(msg="In awsvpc network mode, host port must be set to the same as "
- "container port or not be set")
-
- validated_containers.append(container)
-
- params = dict(
- family=family,
- taskRoleArn=task_role_arn,
-            containerDefinitions=validated_containers,
- volumes=volumes
- )
- if network_mode != 'default':
- params['networkMode'] = network_mode
- if cpu:
- params['cpu'] = cpu
- if memory:
- params['memory'] = memory
- if launch_type:
- params['requiresCompatibilities'] = [launch_type]
- if execution_role_arn:
- params['executionRoleArn'] = execution_role_arn
-
- try:
- response = self.ecs.register_task_definition(**params)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
-
- return response['taskDefinition']
-
- def describe_task_definitions(self, family):
- data = {
- "taskDefinitionArns": [],
- "nextToken": None
- }
-
- def fetch():
- # Boto3 is weird about params passed, so only pass nextToken if we have a value
- params = {
- 'familyPrefix': family
- }
-
- if data['nextToken']:
- params['nextToken'] = data['nextToken']
-
- result = self.ecs.list_task_definitions(**params)
- data['taskDefinitionArns'] += result['taskDefinitionArns']
- data['nextToken'] = result.get('nextToken', None)
- return data['nextToken'] is not None
-
- # Fetch all the arns, possibly across multiple pages
- while fetch():
- pass
-
- # Return the full descriptions of the task definitions, sorted ascending by revision
- return list(
- sorted(
- [self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
- key=lambda td: td['revision']
- )
- )
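-
- # A paginator-based alternative sketch (not this module's code path):
- # list_task_definitions supports boto3 pagination, so the manual fetch()
- # loop above could be replaced with, for example:
- #
- # def list_arns_with_paginator(self, family):
- #     paginator = self.ecs.get_paginator('list_task_definitions')
- #     return paginator.paginate(familyPrefix=family).build_full_result()['taskDefinitionArns']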
-
- def deregister_task(self, taskArn):
- response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
- return response['taskDefinition']
-
-
-def main():
- argument_spec = dict(
- state=dict(required=True, choices=['present', 'absent']),
- arn=dict(required=False, type='str'),
- family=dict(required=False, type='str'),
- revision=dict(required=False, type='int'),
- force_create=dict(required=False, default=False, type='bool'),
- containers=dict(required=False, type='list'),
- network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'),
- task_role_arn=dict(required=False, default='', type='str'),
- execution_role_arn=dict(required=False, default='', type='str'),
- volumes=dict(required=False, type='list'),
- launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
- cpu=dict(),
- memory=dict(required=False, type='str')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_if=[('launch_type', 'FARGATE', ['cpu', 'memory'])]
- )
-
- task_to_describe = None
- task_mgr = EcsTaskManager(module)
- results = dict(changed=False)
-
- if module.params['launch_type']:
- if not module.botocore_at_least('1.8.4'):
- module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type')
-
- if module.params['execution_role_arn']:
- if not module.botocore_at_least('1.10.44'):
- module.fail_json(msg='botocore needs to be version 1.10.44 or higher to use execution_role_arn')
-
- if module.params['containers']:
- for container in module.params['containers']:
- for environment in container.get('environment', []):
- environment['value'] = to_text(environment['value'])
-
- if module.params['state'] == 'present':
- if 'containers' not in module.params or not module.params['containers']:
- module.fail_json(msg="To use task definitions, a list of containers must be specified")
-
- if 'family' not in module.params or not module.params['family']:
- module.fail_json(msg="To use task definitions, a family must be specified")
-
- network_mode = module.params['network_mode']
- launch_type = module.params['launch_type']
- if launch_type == 'FARGATE' and network_mode != 'awsvpc':
- module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc")
-
- family = module.params['family']
- existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])
-
- if 'revision' in module.params and module.params['revision']:
- # The definition specifies revision. We must guarantee that an active revision of that number will result from this.
- revision = int(module.params['revision'])
-
- # A revision has been explicitly specified. Attempt to locate a matching revision
- tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
- existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None
-
- if existing and existing['status'] != "ACTIVE":
- # We cannot reactivate an inactive revision
- module.fail_json(msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision))
- elif not existing:
- if not existing_definitions_in_family and revision != 1:
- module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
- elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
- module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
- (revision, existing_definitions_in_family[-1]['revision'] + 1))
- else:
- existing = None
-
- def _right_has_values_of_left(left, right):
- # Make sure the values are equivalent for everything left has
- for k, v in left.items():
- if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
- # We don't care about list ordering because ECS can change things
- if isinstance(v, list) and k in right:
- left_list = v
- right_list = right[k] or []
-
- if len(left_list) != len(right_list):
- return False
-
- for list_val in left_list:
- if list_val not in right_list:
- return False
- else:
- return False
-
- # Make sure right doesn't have anything that left doesn't
- for k, v in right.items():
- if v and k not in left:
- return False
-
- return True
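-
- # Illustrative semantics (values are hypothetical): empty or missing keys
- # compare as equivalent, and list comparison ignores order, e.g.:
- # _right_has_values_of_left({'image': 'nginx', 'links': []}, {'image': 'nginx'}) -> True
- # _right_has_values_of_left({'cpu': 10}, {'cpu': 20}) -> False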
-
- def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, existing_task_definition):
- if existing_task_definition['status'] != "ACTIVE":
- return None
-
- if requested_task_role_arn != existing_task_definition.get('taskRoleArn', ""):
- return None
-
- existing_volumes = existing_task_definition.get('volumes', []) or []
-
- if len(requested_volumes) != len(existing_volumes):
- # Nope.
- return None
-
- if len(requested_volumes) > 0:
- for requested_vol in requested_volumes:
- found = False
-
- for actual_vol in existing_volumes:
- if _right_has_values_of_left(requested_vol, actual_vol):
- found = True
- break
-
- if not found:
- return None
-
- existing_containers = existing_task_definition.get('containerDefinitions', []) or []
-
- if len(requested_containers) != len(existing_containers):
- # Nope.
- return None
-
- for requested_container in requested_containers:
- found = False
-
- for actual_container in existing_containers:
- if _right_has_values_of_left(requested_container, actual_container):
- found = True
- break
-
- if not found:
- return None
-
- return existing_task_definition
-
- # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
- for td in existing_definitions_in_family:
- requested_volumes = module.params['volumes'] or []
- requested_containers = module.params['containers'] or []
- requested_task_role_arn = module.params['task_role_arn']
- existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, td)
-
- if existing:
- break
-
- if existing and not module.params.get('force_create'):
- # Awesome. Have an existing one. Nothing to do.
- results['taskdefinition'] = existing
- else:
- if not module.check_mode:
- # No matching task definition exists (or force_create is set), so create one.
- volumes = module.params.get('volumes', []) or []
- results['taskdefinition'] = task_mgr.register_task(module.params['family'],
- module.params['task_role_arn'],
- module.params['execution_role_arn'],
- module.params['network_mode'],
- module.params['containers'],
- volumes,
- module.params['launch_type'],
- module.params['cpu'],
- module.params['memory'])
- results['changed'] = True
-
- elif module.params['state'] == 'absent':
- # When deregistering a task definition, either the ARN or the family and revision can be specified.
- if module.params['arn'] is not None:
- task_to_describe = module.params['arn']
- elif module.params['family'] is not None and module.params['revision'] is not None:
- task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
- else:
- module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")
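-
- # e.g. arn: "arn:aws:ecs:us-west-2:123456789012:task-definition/nginx:3",
- # or family: "nginx" with revision: 3, which describe_task_definition accepts as "nginx:3"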
-
- existing = task_mgr.describe_task(task_to_describe)
-
- if existing:
- # It exists, so we should delete it and mark changed. Return info about the task definition deleted
- results['taskdefinition'] = existing
- if 'status' in existing and existing['status'] == "INACTIVE":
- results['changed'] = False
- else:
- if not module.check_mode:
- task_mgr.deregister_task(task_to_describe)
- results['changed'] = True
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/ecs_taskdefinition_info.py b/lib/ansible/modules/cloud/amazon/ecs_taskdefinition_info.py
deleted file mode 100644
index 815339fa07..0000000000
--- a/lib/ansible/modules/cloud/amazon/ecs_taskdefinition_info.py
+++ /dev/null
@@ -1,334 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: ecs_taskdefinition_info
-short_description: Describe a task definition in ECS
-notes:
- - For details of the parameters and returns see
- U(http://boto3.readthedocs.io/en/latest/reference/services/ecs.html#ECS.Client.describe_task_definition)
- - This module was called C(ecs_taskdefinition_facts) before Ansible 2.9. The usage did not change.
-description:
- - Describes a task definition in ECS.
-version_added: "2.5"
-author:
- - Gustavo Maia (@gurumaia)
- - Mark Chance (@Java1Guy)
- - Darek Kaczynski (@kaczynskid)
-requirements: [ json, botocore, boto3 ]
-options:
- task_definition:
- description:
- - The name of the task definition to get details for
- required: true
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- ecs_taskdefinition_info:
- task_definition: test-td
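-
-# A sketch of consuming the registered result (keys follow the RETURN section):
-- ecs_taskdefinition_info:
- task_definition: test-td
- register: task_def
-
-- debug:
- msg: "{{ task_def.network_mode }}"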
-'''
-
-RETURN = '''
-container_definitions:
- description: Returns a list of complex objects representing the containers
- returned: success
- type: complex
- contains:
- name:
- description: The name of a container.
- returned: always
- type: str
- image:
- description: The image used to start a container.
- returned: always
- type: str
- cpu:
- description: The number of cpu units reserved for the container.
- returned: always
- type: int
- memoryReservation:
- description: The soft limit (in MiB) of memory to reserve for the container.
- returned: when present
- type: int
- links:
- description: Links to other containers.
- returned: when present
- type: str
- portMappings:
- description: The list of port mappings for the container.
- returned: always
- type: complex
- contains:
- containerPort:
- description: The port number on the container.
- returned: when present
- type: int
- hostPort:
- description: The port number on the container instance to reserve for your container.
- returned: when present
- type: int
- protocol:
- description: The protocol used for the port mapping.
- returned: when present
- type: str
- essential:
- description: Whether this is an essential container or not.
- returned: always
- type: bool
- entryPoint:
- description: The entry point that is passed to the container.
- returned: when present
- type: str
- command:
- description: The command that is passed to the container.
- returned: when present
- type: str
- environment:
- description: The environment variables to pass to a container.
- returned: always
- type: complex
- contains:
- name:
- description: The name of the environment variable.
- returned: when present
- type: str
- value:
- description: The value of the environment variable.
- returned: when present
- type: str
- mountPoints:
- description: The mount points for data volumes in your container.
- returned: always
- type: complex
- contains:
- sourceVolume:
- description: The name of the volume to mount.
- returned: when present
- type: str
- containerPath:
- description: The path on the container to mount the host volume at.
- returned: when present
- type: str
- readOnly:
- description: If this value is true, the container has read-only access to the volume.
- If this value is false, the container can write to the volume.
- returned: when present
- type: bool
- volumesFrom:
- description: Data volumes to mount from another container.
- returned: always
- type: complex
- contains:
- sourceContainer:
- description: The name of another container within the same task definition to mount volumes from.
- returned: when present
- type: str
- readOnly:
- description: If this value is true, the container has read-only access to the volume.
- If this value is false, the container can write to the volume.
- returned: when present
- type: bool
- hostname:
- description: The hostname to use for your container.
- returned: when present
- type: str
- user:
- description: The user name to use inside the container.
- returned: when present
- type: str
- workingDirectory:
- description: The working directory in which to run commands inside the container.
- returned: when present
- type: str
- disableNetworking:
- description: When this parameter is true, networking is disabled within the container.
- returned: when present
- type: bool
- privileged:
- description: When this parameter is true, the container is given elevated
- privileges on the host container instance (similar to the root user).
- returned: when present
- type: bool
- readonlyRootFilesystem:
- description: When this parameter is true, the container is given read-only access to its root file system.
- returned: when present
- type: bool
- dnsServers:
- description: A list of DNS servers that are presented to the container.
- returned: when present
- type: str
- dnsSearchDomains:
- description: A list of DNS search domains that are presented to the container.
- returned: when present
- type: str
- extraHosts:
- description: A list of hostnames and IP address mappings to append to the /etc/hosts file on the container.
- returned: when present
- type: complex
- contains:
- hostname:
- description: The hostname to use in the /etc/hosts entry.
- returned: when present
- type: str
- ipAddress:
- description: The IP address to use in the /etc/hosts entry.
- returned: when present
- type: str
- dockerSecurityOptions:
- description: A list of strings to provide custom labels for SELinux and AppArmor multi-level security systems.
- returned: when present
- type: str
- dockerLabels:
- description: A key/value map of labels to add to the container.
- returned: when present
- type: str
- ulimits:
- description: A list of ulimits to set in the container.
- returned: when present
- type: complex
- contains:
- name:
- description: The type of the ulimit.
- returned: when present
- type: str
- softLimit:
- description: The soft limit for the ulimit type.
- returned: when present
- type: int
- hardLimit:
- description: The hard limit for the ulimit type.
- returned: when present
- type: int
- logConfiguration:
- description: The log configuration specification for the container.
- returned: when present
- type: str
- options:
- description: The configuration options to send to the log driver.
- returned: when present
- type: str
-
-family:
- description: The family of your task definition, used as the definition name
- returned: always
- type: str
-task_definition_arn:
- description: ARN of the task definition
- returned: always
- type: str
-task_role_arn:
- description: The ARN of the IAM role that containers in this task can assume
- returned: when role is set
- type: str
-network_mode:
- description: Network mode for the containers
- returned: always
- type: str
-revision:
- description: Revision number that was queried
- returned: always
- type: int
-volumes:
- description: The list of volumes in a task
- returned: always
- type: complex
- contains:
- name:
- description: The name of the volume.
- returned: when present
- type: str
- host:
- description: The contents of the host parameter determine whether your data volume
- persists on the host container instance and where it is stored.
- returned: when present
- type: bool
- source_path:
- description: The path on the host container instance that is presented to the container.
- returned: when present
- type: str
-status:
- description: The status of the task definition
- returned: always
- type: str
-requires_attributes:
- description: The container instance attributes required by your task
- returned: when present
- type: complex
- contains:
- name:
- description: The name of the attribute.
- returned: when present
- type: str
- value:
- description: The value of the attribute.
- returned: when present
- type: str
- targetType:
- description: The type of the target with which to attach the attribute.
- returned: when present
- type: str
- targetId:
- description: The ID of the target.
- returned: when present
- type: str
-placement_constraints:
- description: A list of placement constraint objects to use for tasks
- returned: always
- type: complex
- contains:
- type:
- description: The type of constraint.
- returned: when present
- type: str
- expression:
- description: A cluster query language expression to apply to the constraint.
- returned: when present
- type: str
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def main():
- argument_spec = dict(
- task_definition=dict(required=True, type='str')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._name == 'ecs_taskdefinition_facts':
- module.deprecate("The 'ecs_taskdefinition_facts' module has been renamed to 'ecs_taskdefinition_info'", version='2.13')
-
- ecs = module.client('ecs')
-
- try:
- ecs_td = ecs.describe_task_definition(taskDefinition=module.params['task_definition'])['taskDefinition']
- except botocore.exceptions.ClientError:
- ecs_td = {}
-
- module.exit_json(changed=False, **camel_dict_to_snake_dict(ecs_td))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/efs.py b/lib/ansible/modules/cloud/amazon/efs.py
deleted file mode 100644
index f93527268a..0000000000
--- a/lib/ansible/modules/cloud/amazon/efs.py
+++ /dev/null
@@ -1,758 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: efs
-short_description: create and maintain EFS file systems
-description:
- - Module allows create, search and destroy Amazon EFS file systems.
-version_added: "2.2"
-requirements: [ boto3 ]
-author:
- - "Ryan Sydnor (@ryansydnor)"
- - "Artem Kazakov (@akazakov)"
-options:
- encrypt:
- description:
- - If I(encrypt=true), creates an encrypted file system. This cannot be modified after the file system is created.
- type: bool
- default: false
- version_added: 2.5
- kms_key_id:
- description:
- - The id of the AWS KMS CMK that will be used to protect the encrypted file system. This parameter is only
- required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for
- Amazon EFS is used. The key id can be a Key ID, Key ID ARN, Key Alias or Key Alias ARN.
- version_added: 2.5
- type: str
- purge_tags:
- description:
- - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter
- is not set then tags will not be modified.
- type: bool
- default: true
- version_added: 2.5
- state:
- description:
- - Whether the Amazon EFS file system should be created (C(present)) or destroyed (C(absent)).
- default: 'present'
- choices: ['present', 'absent']
- type: str
- name:
- description:
- - Creation Token of Amazon EFS file system. Required for create and update. Either name or ID required for delete.
- type: str
- id:
- description:
- - ID of Amazon EFS. Either name or ID required for delete.
- type: str
- performance_mode:
- description:
- - File system's performance mode to use. Only takes effect during creation.
- default: 'general_purpose'
- choices: ['general_purpose', 'max_io']
- type: str
- tags:
- description:
- - "List of tags of Amazon EFS. Should be defined as dictionary
- In case of 'present' state with list of tags and existing EFS (matched by 'name'), tags of EFS will be replaced with provided data."
- type: dict
- targets:
- description:
- - "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- This data may be modified for existing EFS using state 'present' and new list of mount targets."
- type: list
- elements: dict
- suboptions:
- subnet_id:
- required: true
- description: The ID of the subnet to add the mount target in.
- ip_address:
- type: str
- description: A valid IPv4 address within the address range of the specified subnet.
- security_groups:
- type: list
- elements: str
- description: List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as the subnet specified.
- throughput_mode:
- description:
- - The throughput_mode for the file system to be created.
- - Requires botocore >= 1.10.57
- choices: ['bursting', 'provisioned']
- version_added: 2.8
- type: str
- provisioned_throughput_in_mibps:
- description:
- - If the throughput_mode is provisioned, select the amount of throughput to provision in Mibps.
- - Requires botocore >= 1.10.57
- type: float
- version_added: 2.8
- wait:
- description:
- - "In case of 'present' state should wait for EFS 'available' life cycle state (of course, if current state not 'deleting' or 'deleted')
- In case of 'absent' state should wait for EFS 'deleted' life cycle state"
- type: bool
- default: false
- wait_timeout:
- description:
- - How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary.
- default: 0
- type: int
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# EFS provisioning
-- efs:
- state: present
- name: myTestEFS
- tags:
- Name: myTestNameTag
- purpose: file-storage
- targets:
- - subnet_id: subnet-748c5d03
- security_groups: [ "sg-1a2b3c4d" ]
-
-# Modifying EFS data
-- efs:
- state: present
- name: myTestEFS
- tags:
- name: myAnotherTestTag
- targets:
- - subnet_id: subnet-7654fdca
- security_groups: [ "sg-4c5d6f7a" ]
-
-# Deleting EFS
-- efs:
- state: absent
- name: myTestEFS
-'''
-
-RETURN = '''
-creation_time:
- description: timestamp of creation date
- returned: always
- type: str
- sample: "2015-11-16 07:30:57-05:00"
-creation_token:
- description: EFS creation token
- returned: always
- type: str
- sample: "console-88609e04-9a0e-4a2e-912c-feaa99509961"
-file_system_id:
- description: ID of the file system
- returned: always
- type: str
- sample: "fs-xxxxxxxx"
-life_cycle_state:
- description: state of the EFS file system
- returned: always
- type: str
- sample: "creating, available, deleting, deleted"
-mount_point:
- description: url of the file system with a leading dot, retained from when AWS EFS required a region suffix in the address
- returned: always
- type: str
- sample: ".fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/"
-filesystem_address:
- description: url of file system valid for use with mount
- returned: always
- type: str
- sample: "fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/"
-mount_targets:
- description: list of mount targets
- returned: always
- type: list
- sample:
- [
- {
- "file_system_id": "fs-a7ad440e",
- "ip_address": "172.31.17.173",
- "life_cycle_state": "available",
- "mount_target_id": "fsmt-d8907871",
- "network_interface_id": "eni-6e387e26",
- "owner_id": "740748460359",
- "security_groups": [
- "sg-a30b22c6"
- ],
- "subnet_id": "subnet-e265c895"
- },
- ...
- ]
-name:
- description: name of the file system
- returned: always
- type: str
- sample: "my-efs"
-number_of_mount_targets:
- description: the number of targets mounted
- returned: always
- type: int
- sample: 3
-owner_id:
- description: AWS account ID of EFS owner
- returned: always
- type: str
- sample: "XXXXXXXXXXXX"
-size_in_bytes:
- description: size of the file system in bytes as of a timestamp
- returned: always
- type: dict
- sample:
- {
- "timestamp": "2015-12-21 13:59:59-05:00",
- "value": 12288
- }
-performance_mode:
- description: performance mode of the file system
- returned: always
- type: str
- sample: "generalPurpose"
-tags:
- description: tags on the efs instance
- returned: always
- type: dict
- sample:
- {
- "name": "my-efs",
- "key": "Value"
- }
-
-'''
-
-from time import sleep
-from time import time as timestamp
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError as e:
- pass # Handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (compare_aws_tags, camel_dict_to_snake_dict,
- ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict)
-
-
-def _index_by_key(key, items):
- return dict((item[key], item) for item in items)
-
-
-class EFSConnection(object):
-
- DEFAULT_WAIT_TIMEOUT_SECONDS = 0
-
- STATE_CREATING = 'creating'
- STATE_AVAILABLE = 'available'
- STATE_DELETING = 'deleting'
- STATE_DELETED = 'deleted'
-
- def __init__(self, module):
- self.connection = module.client('efs')
- region = module.region
-
- self.module = module
- self.region = region
- self.wait = module.params.get('wait')
- self.wait_timeout = module.params.get('wait_timeout')
-
- def get_file_systems(self, **kwargs):
- """
- Returns generator of file systems including all attributes of FS
- """
- items = iterate_all(
- 'FileSystems',
- self.connection.describe_file_systems,
- **kwargs
- )
- for item in items:
- item['Name'] = item['CreationToken']
- item['CreationTime'] = str(item['CreationTime'])
- """
- When MountPoint was introduced, a network-path suffix had to be added before the address could be used.
- AWS has since updated EFS so that no suffix is needed; MountPoint is kept for backward compatibility,
- and the new FilesystemAddress value is introduced for direct use with other modules (e.g. mount).
- AWS documentation is available here:
- https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
- """
- item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
- item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
- if 'Timestamp' in item['SizeInBytes']:
- item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
- if item['LifeCycleState'] == self.STATE_AVAILABLE:
- item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
- item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
- else:
- item['Tags'] = {}
- item['MountTargets'] = []
- yield item
-
- def get_tags(self, **kwargs):
- """
- Returns tag list for selected instance of EFS
- """
- tags = self.connection.describe_tags(**kwargs)['Tags']
- return tags
-
- def get_mount_targets(self, **kwargs):
- """
- Returns mount targets for selected instance of EFS
- """
- targets = iterate_all(
- 'MountTargets',
- self.connection.describe_mount_targets,
- **kwargs
- )
- for target in targets:
- if target['LifeCycleState'] == self.STATE_AVAILABLE:
- target['SecurityGroups'] = list(self.get_security_groups(
- MountTargetId=target['MountTargetId']
- ))
- else:
- target['SecurityGroups'] = []
- yield target
-
- def get_security_groups(self, **kwargs):
- """
- Returns security groups for selected instance of EFS
- """
- return iterate_all(
- 'SecurityGroups',
- self.connection.describe_mount_target_security_groups,
- **kwargs
- )
-
- def get_file_system_id(self, name):
- """
- Returns ID of instance by instance name
- """
- info = first_or_default(iterate_all(
- 'FileSystems',
- self.connection.describe_file_systems,
- CreationToken=name
- ))
- return info and info['FileSystemId'] or None
-
- def get_file_system_state(self, name, file_system_id=None):
- """
- Returns state of filesystem by EFS id/name
- """
- info = first_or_default(iterate_all(
- 'FileSystems',
- self.connection.describe_file_systems,
- CreationToken=name,
- FileSystemId=file_system_id
- ))
- return info and info['LifeCycleState'] or self.STATE_DELETED
-
- def get_mount_targets_in_state(self, file_system_id, states=None):
- """
- Returns states of mount targets of selected EFS with selected state(s) (optional)
- """
- targets = iterate_all(
- 'MountTargets',
- self.connection.describe_mount_targets,
- FileSystemId=file_system_id
- )
-
- if states:
- if not isinstance(states, list):
- states = [states]
- targets = filter(lambda target: target['LifeCycleState'] in states, targets)
-
- return list(targets)
-
- def supports_provisioned_mode(self):
- """
- Ensure boto3 includes provisioned throughput mode feature
- """
- return hasattr(self.connection, 'update_file_system')
-
- def get_throughput_mode(self, **kwargs):
- """
- Returns throughput mode for selected EFS instance
- """
- info = first_or_default(iterate_all(
- 'FileSystems',
- self.connection.describe_file_systems,
- **kwargs
- ))
-
- return info and info['ThroughputMode'] or None
-
- def get_provisioned_throughput_in_mibps(self, **kwargs):
- """
- Returns provisioned throughput in Mibps for selected EFS instance
- """
- info = first_or_default(iterate_all(
- 'FileSystems',
- self.connection.describe_file_systems,
- **kwargs
- ))
- return info.get('ProvisionedThroughputInMibps', None)
-
- def create_file_system(self, name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps):
- """
- Creates new filesystem with selected name
- """
- changed = False
- state = self.get_file_system_state(name)
- params = {}
- params['CreationToken'] = name
- params['PerformanceMode'] = performance_mode
- if encrypt:
- params['Encrypted'] = encrypt
- if kms_key_id is not None:
- params['KmsKeyId'] = kms_key_id
- if throughput_mode:
- if self.supports_provisioned_mode():
- params['ThroughputMode'] = throughput_mode
- else:
- self.module.fail_json(msg="throughput_mode parameter requires botocore >= 1.10.57")
- if provisioned_throughput_in_mibps:
- if self.supports_provisioned_mode():
- params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps
- else:
- self.module.fail_json(msg="provisioned_throughput_in_mibps parameter requires botocore >= 1.10.57")
-
- if state in [self.STATE_DELETING, self.STATE_DELETED]:
- wait_for(
- lambda: self.get_file_system_state(name),
- self.STATE_DELETED
- )
- try:
- self.connection.create_file_system(**params)
- changed = True
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Unable to create file system.")
-
- # we always wait for the state to be available when creating.
- # if we try to take any actions on the file system before it's available
- # we'll throw errors
- wait_for(
- lambda: self.get_file_system_state(name),
- self.STATE_AVAILABLE,
- self.wait_timeout
- )
-
- return changed
-
- def update_file_system(self, name, throughput_mode, provisioned_throughput_in_mibps):
- """
- Update filesystem with new throughput settings
- """
- changed = False
- state = self.get_file_system_state(name)
- if state in [self.STATE_AVAILABLE, self.STATE_CREATING]:
- fs_id = self.get_file_system_id(name)
- current_mode = self.get_throughput_mode(FileSystemId=fs_id)
- current_throughput = self.get_provisioned_throughput_in_mibps(FileSystemId=fs_id)
- params = dict()
- if throughput_mode and throughput_mode != current_mode:
- params['ThroughputMode'] = throughput_mode
- if provisioned_throughput_in_mibps and provisioned_throughput_in_mibps != current_throughput:
- params['ProvisionedThroughputInMibps'] = provisioned_throughput_in_mibps
- if len(params) > 0:
- wait_for(
- lambda: self.get_file_system_state(name),
- self.STATE_AVAILABLE,
- self.wait_timeout
- )
- try:
- self.connection.update_file_system(FileSystemId=fs_id, **params)
- changed = True
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Unable to update file system.")
- return changed
-
- def converge_file_system(self, name, tags, purge_tags, targets, throughput_mode, provisioned_throughput_in_mibps):
- """
- Change attributes (mount targets and tags) of filesystem by name
- """
- result = False
- fs_id = self.get_file_system_id(name)
-
- if tags is not None:
- tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags)
-
- if tags_to_delete:
- try:
- self.connection.delete_tags(
- FileSystemId=fs_id,
- TagKeys=tags_to_delete
- )
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Unable to delete tags.")
-
- result = True
-
- if tags_need_modify:
- try:
- self.connection.create_tags(
- FileSystemId=fs_id,
- Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)
- )
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Unable to create tags.")
-
- result = True
-
- if targets is not None:
- incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
- wait_for(
- lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
- 0
- )
- current_targets = _index_by_key('SubnetId', self.get_mount_targets(FileSystemId=fs_id))
- targets = _index_by_key('SubnetId', targets)
-
- targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
- targets, True)
-
- # To modify a mount target it must be deleted and created again
- changed = [sid for sid in intersection if not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
- current_targets[sid], targets[sid])]
- targets_to_delete = list(targets_to_delete) + changed
- targets_to_create = list(targets_to_create) + changed
-
- if targets_to_delete:
- for sid in targets_to_delete:
- self.connection.delete_mount_target(
- MountTargetId=current_targets[sid]['MountTargetId']
- )
- wait_for(
- lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
- 0
- )
- result = True
-
- if targets_to_create:
- for sid in targets_to_create:
- self.connection.create_mount_target(
- FileSystemId=fs_id,
- **targets[sid]
- )
- wait_for(
- lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
- 0,
- self.wait_timeout
- )
- result = True
-
- # If no security groups were passed into the module, then do not change it.
- security_groups_to_update = [sid for sid in intersection if
- 'SecurityGroups' in targets[sid] and
- current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups']]
-
- if security_groups_to_update:
- for sid in security_groups_to_update:
- self.connection.modify_mount_target_security_groups(
- MountTargetId=current_targets[sid]['MountTargetId'],
- SecurityGroups=targets[sid].get('SecurityGroups', None)
- )
- result = True
-
- return result
-
- def delete_file_system(self, name, file_system_id=None):
- """
- Removes EFS instance by id/name
- """
- result = False
- state = self.get_file_system_state(name, file_system_id)
- if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
- wait_for(
- lambda: self.get_file_system_state(name),
- self.STATE_AVAILABLE
- )
- if not file_system_id:
- file_system_id = self.get_file_system_id(name)
- self.delete_mount_targets(file_system_id)
- self.connection.delete_file_system(FileSystemId=file_system_id)
- result = True
-
- if self.wait:
- wait_for(
- lambda: self.get_file_system_state(name),
- self.STATE_DELETED,
- self.wait_timeout
- )
-
- return result
-
- def delete_mount_targets(self, file_system_id):
- """
- Removes mount targets by EFS id
- """
- wait_for(
- lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
- 0
- )
-
- targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
- for target in targets:
- self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
-
- wait_for(
- lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
- 0
- )
-
- return len(targets) > 0
-
-
-def iterate_all(attr, map_method, **kwargs):
- """
- Creates an iterator over a paginated result set, following NextMarker and backing off on throttling
- """
- args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
- wait = 1
- while True:
- try:
- data = map_method(**args)
- for elm in data[attr]:
- yield elm
- if 'NextMarker' in data:
- args['Marker'] = data['NextMarker']
- continue
- break
- except ClientError as e:
- if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
- sleep(wait)
- wait = wait * 2
- continue
- else:
- raise
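-
-# Usage sketch ('efs_client' is a hypothetical boto3 EFS client): iterate over
-# every file system for a creation token, following NextMarker pages:
-# for fs in iterate_all('FileSystems', efs_client.describe_file_systems, CreationToken='my-efs'):
-#     print(fs['FileSystemId'])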
-
-
-def targets_equal(keys, a, b):
- """
- Compares two mount targets by the specified attributes
- """
- for key in keys:
- if key in b and a[key] != b[key]:
- return False
-
- return True
-
-
-def dict_diff(dict1, dict2, by_key=False):
- """
- Helper method to calculate difference of two dictionaries
- """
- keys1 = set(dict1.keys() if by_key else dict1.items())
- keys2 = set(dict2.keys() if by_key else dict2.items())
-
- intersection = keys1 & keys2
-
- return keys2 ^ intersection, intersection, keys1 ^ intersection
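-
-# Illustrative behaviour with by_key=True (subnet IDs are hypothetical):
-# dict_diff({'subnet-a': {}, 'subnet-b': {}}, {'subnet-b': {}, 'subnet-c': {}}, True)
-# -> ({'subnet-c'}, {'subnet-b'}, {'subnet-a'})  # (to create, unchanged, to delete)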
-
-
-def first_or_default(items, default=None):
- """
- Helper method to fetch first element of list (if exists)
- """
- for item in items:
- return item
- return default
-
-
-def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
- """
- Helper method to wait for desired value returned by callback method
- """
- wait_start = timestamp()
- while True:
- if callback() != value:
- if timeout != 0 and (timestamp() - wait_start > timeout):
- raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
- else:
- sleep(5)
- continue
- break
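-
-# Usage sketch ('conn' is assumed to be an EFSConnection): poll until the named
-# filesystem reports 'available', raising RuntimeError after 300 seconds:
-# wait_for(lambda: conn.get_file_system_state('my-efs'), 'available', 300)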
-
-
-def main():
- """
- Module action handler
- """
- argument_spec = dict(
- encrypt=dict(required=False, type="bool", default=False),
- state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
- kms_key_id=dict(required=False, type='str', default=None),
- purge_tags=dict(default=True, type='bool'),
- id=dict(required=False, type='str', default=None),
- name=dict(required=False, type='str', default=None),
- tags=dict(required=False, type="dict", default={}),
- targets=dict(required=False, type="list", default=[]),
- performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
- throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None),
- provisioned_throughput_in_mibps=dict(required=False, type='float'),
- wait=dict(required=False, type="bool", default=False),
- wait_timeout=dict(required=False, type="int", default=0)
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
-
- connection = EFSConnection(module)
-
- name = module.params.get('name')
- fs_id = module.params.get('id')
- tags = module.params.get('tags')
- target_translations = {
- 'ip_address': 'IpAddress',
- 'security_groups': 'SecurityGroups',
- 'subnet_id': 'SubnetId'
- }
- targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
- performance_mode_translations = {
- 'general_purpose': 'generalPurpose',
- 'max_io': 'maxIO'
- }
- encrypt = module.params.get('encrypt')
- kms_key_id = module.params.get('kms_key_id')
- performance_mode = performance_mode_translations[module.params.get('performance_mode')]
- purge_tags = module.params.get('purge_tags')
- throughput_mode = module.params.get('throughput_mode')
- provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps')
- state = str(module.params.get('state')).lower()
- changed = False
-
- if state == 'present':
- if not name:
- module.fail_json(msg='Name parameter is required for create')
-
- changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps)
- if connection.supports_provisioned_mode():
- changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed
- changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets,
- throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed
- result = first_or_default(connection.get_file_systems(CreationToken=name))
-
- elif state == 'absent':
- if not name and not fs_id:
- module.fail_json(msg='Either name or id parameter is required for delete')
-
- changed = connection.delete_file_system(name, fs_id)
- result = None
- if result:
- result = camel_dict_to_snake_dict(result)
- module.exit_json(changed=changed, efs=result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/efs_info.py b/lib/ansible/modules/cloud/amazon/efs_info.py
deleted file mode 100644
index 5238f041e0..0000000000
--- a/lib/ansible/modules/cloud/amazon/efs_info.py
+++ /dev/null
@@ -1,401 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: efs_info
-short_description: Get information about Amazon EFS file systems
-description:
- - This module can be used to search Amazon EFS file systems.
- - This module was called C(efs_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(efs_info) module no longer returns C(ansible_facts)!
-version_added: "2.2"
-requirements: [ boto3 ]
-author:
- - "Ryan Sydnor (@ryansydnor)"
-options:
- name:
- description:
- - Creation Token of Amazon EFS file system.
- aliases: [ creation_token ]
- type: str
- id:
- description:
- - ID of Amazon EFS.
- type: str
- tags:
- description:
- - Tags of the Amazon EFS file system, defined as a dictionary.
- type: dict
- targets:
- description:
- - List of targets on which to filter the returned results.
- - Result must match all of the specified targets, each of which can be a security group ID, a subnet ID or an IP address.
- type: list
- elements: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Find all existing efs
- efs_info:
- register: result
-
-- name: Find efs using id
- efs_info:
- id: fs-1234abcd
- register: result
-
-- name: Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
- efs_info:
- tags:
- Name: myTestNameTag
- targets:
- - subnet-1a2b3c4d
- - sg-4d3c2b1a
- register: result
-
-- debug:
- msg: "{{ result['efs'] }}"
-'''
-
-RETURN = '''
-creation_time:
- description: timestamp of creation date
- returned: always
- type: str
- sample: "2015-11-16 07:30:57-05:00"
-creation_token:
- description: EFS creation token
- returned: always
- type: str
- sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
-file_system_id:
- description: ID of the file system
- returned: always
- type: str
- sample: fs-xxxxxxxx
-life_cycle_state:
- description: state of the EFS file system
- returned: always
- type: str
- sample: creating, available, deleting, deleted
-mount_point:
- description: url of the file system with a leading dot, retained from when AWS EFS required a network suffix in the address
- returned: always
- type: str
- sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
-filesystem_address:
- description: url of file system
- returned: always
- type: str
- sample: fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
-mount_targets:
- description: list of mount targets
- returned: always
- type: list
- sample:
- [
- {
- "file_system_id": "fs-a7ad440e",
- "ip_address": "172.31.17.173",
- "life_cycle_state": "available",
- "mount_target_id": "fsmt-d8907871",
- "network_interface_id": "eni-6e387e26",
- "owner_id": "740748460359",
- "security_groups": [
- "sg-a30b22c6"
- ],
- "subnet_id": "subnet-e265c895"
- },
- ...
- ]
-name:
- description: name of the file system
- returned: always
- type: str
- sample: my-efs
-number_of_mount_targets:
- description: the number of targets mounted
- returned: always
- type: int
- sample: 3
-owner_id:
- description: AWS account ID of EFS owner
- returned: always
- type: str
- sample: XXXXXXXXXXXX
-size_in_bytes:
- description: size of the file system in bytes as of a timestamp
- returned: always
- type: dict
- sample:
- {
- "timestamp": "2015-12-21 13:59:59-05:00",
- "value": 12288
- }
-performance_mode:
- description: performance mode of the file system
- returned: always
- type: str
- sample: "generalPurpose"
-throughput_mode:
- description: mode of throughput for the file system
- returned: when botocore >= 1.10.57
- type: str
- sample: "bursting"
-provisioned_throughput_in_mibps:
- description: throughput provisioned in Mibps
- returned: when botocore >= 1.10.57 and throughput_mode is set to "provisioned"
- type: float
- sample: 15.0
-tags:
- description: tags on the efs instance
- returned: always
- type: dict
- sample:
- {
- "name": "my-efs",
- "key": "Value"
- }
-
-'''
-
-
-from collections import defaultdict
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import get_aws_connection_info, AWSRetry
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
-from ansible.module_utils._text import to_native
-
-
-class EFSConnection(object):
- STATE_CREATING = 'creating'
- STATE_AVAILABLE = 'available'
- STATE_DELETING = 'deleting'
- STATE_DELETED = 'deleted'
-
- def __init__(self, module):
- try:
- self.connection = module.client('efs')
- self.module = module
- except Exception as e:
- module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e))
-
- self.region = get_aws_connection_info(module, boto3=True)[0]
-
- @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
- def list_file_systems(self, **kwargs):
- """
- Returns generator of file systems including all attributes of FS
- """
- paginator = self.connection.get_paginator('describe_file_systems')
- return paginator.paginate(**kwargs).build_full_result()['FileSystems']
-
- @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
- def get_tags(self, file_system_id):
- """
- Returns tag list for selected instance of EFS
- """
- paginator = self.connection.get_paginator('describe_tags')
- return boto3_tag_list_to_ansible_dict(paginator.paginate(FileSystemId=file_system_id).build_full_result()['Tags'])
-
- @AWSRetry.exponential_backoff(catch_extra_error_codes=['ThrottlingException'])
- def get_mount_targets(self, file_system_id):
- """
- Returns mount targets for selected instance of EFS
- """
- paginator = self.connection.get_paginator('describe_mount_targets')
- return paginator.paginate(FileSystemId=file_system_id).build_full_result()['MountTargets']
-
- @AWSRetry.jittered_backoff(catch_extra_error_codes=['ThrottlingException'])
- def get_security_groups(self, mount_target_id):
- """
- Returns security groups for selected instance of EFS
- """
- return self.connection.describe_mount_target_security_groups(MountTargetId=mount_target_id)['SecurityGroups']
-
- def get_mount_targets_data(self, file_systems):
- for item in file_systems:
- if item['life_cycle_state'] == self.STATE_AVAILABLE:
- try:
- mount_targets = self.get_mount_targets(item['file_system_id'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't get EFS targets")
- for mt in mount_targets:
- item['mount_targets'].append(camel_dict_to_snake_dict(mt))
- return file_systems
-
- def get_security_groups_data(self, file_systems):
- for item in file_systems:
- if item['life_cycle_state'] == self.STATE_AVAILABLE:
- for target in item['mount_targets']:
- if target['life_cycle_state'] == self.STATE_AVAILABLE:
- try:
- target['security_groups'] = self.get_security_groups(target['mount_target_id'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't get EFS security groups")
- else:
- target['security_groups'] = []
- else:
- item['tags'] = {}
- item['mount_targets'] = []
- return file_systems
-
- def get_file_systems(self, file_system_id=None, creation_token=None):
- kwargs = dict()
- if file_system_id:
- kwargs['FileSystemId'] = file_system_id
- if creation_token:
- kwargs['CreationToken'] = creation_token
- try:
- file_systems = self.list_file_systems(**kwargs)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't get EFS file systems")
-
- results = list()
- for item in file_systems:
- item['CreationTime'] = str(item['CreationTime'])
- """
- When MountPoint was introduced, a network-path suffix had to be added before the address could be used.
- AWS has since updated EFS so that no suffix is needed; MountPoint is kept for backward compatibility,
- and the new FilesystemAddress variable is introduced for direct use with other modules (e.g. mount).
- AWS documentation is available here:
- U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html)
- """
- item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
- item['FilesystemAddress'] = '%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
-
- if 'Timestamp' in item['SizeInBytes']:
- item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
- result = camel_dict_to_snake_dict(item)
- result['tags'] = {}
- result['mount_targets'] = []
- # Set tags *after* doing camel to snake
- if result['life_cycle_state'] == self.STATE_AVAILABLE:
- try:
- result['tags'] = self.get_tags(result['file_system_id'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't get EFS tags")
- results.append(result)
- return results
-
-
-def prefix_to_attr(attr_id):
- """
- Helper method to convert ID prefix to mount target attribute
- """
- attr_by_prefix = {
- 'fsmt-': 'mount_target_id',
- 'subnet-': 'subnet_id',
- 'eni-': 'network_interface_id',
- 'sg-': 'security_groups'
- }
- return first_or_default([attr_name for (prefix, attr_name) in attr_by_prefix.items()
- if str(attr_id).startswith(prefix)], 'ip_address')
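-
-# e.g. prefix_to_attr('subnet-e265c895') -> 'subnet_id'
-# prefix_to_attr('172.31.17.173') -> 'ip_address' (the fallback for unprefixed values)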
-
-
-def first_or_default(items, default=None):
- """
- Helper method to fetch first element of list (if exists)
- """
- for item in items:
- return item
- return default
-
-
-def has_tags(available, required):
- """
- Helper method to determine if tag requested already exists
- """
- for key, value in required.items():
- if key not in available or value != available[key]:
- return False
- return True
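-
-# e.g. has_tags({'Name': 'my-efs', 'env': 'prod'}, {'Name': 'my-efs'}) -> True
-# (every required key must be present with an equal value; 'env' here is hypothetical)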
-
-
-def has_targets(available, required):
- """
- Helper method to determine if mount target requested already exists
- """
- grouped = group_list_of_dict(available)
- for (value, field) in required:
- if field not in grouped or value not in grouped[field]:
- return False
- return True
-
-
-def group_list_of_dict(array):
- """
- Helper method to group list of dict to dict with all possible values
- """
- result = defaultdict(list)
- for item in array:
- for key, value in item.items():
- result[key] += value if isinstance(value, list) else [value]
- return result
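-
-# e.g. group_list_of_dict([{'subnet_id': 'subnet-a', 'security_groups': ['sg-1']},
-#                          {'subnet_id': 'subnet-b', 'security_groups': ['sg-2']}])
-# -> {'subnet_id': ['subnet-a', 'subnet-b'], 'security_groups': ['sg-1', 'sg-2']} (a defaultdict)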
-
-
-def main():
- """
- Module action handler
- """
- argument_spec = dict(
- id=dict(),
- name=dict(aliases=['creation_token']),
- tags=dict(type="dict", default={}),
- targets=dict(type="list", default=[])
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True)
- is_old_facts = module._name == 'efs_facts'
- if is_old_facts:
- module.deprecate("The 'efs_facts' module has been renamed to 'efs_info', "
- "and the renamed one no longer returns ansible_facts", version='2.13')
-
- connection = EFSConnection(module)
-
- name = module.params.get('name')
- fs_id = module.params.get('id')
- tags = module.params.get('tags')
- targets = module.params.get('targets')
-
- file_systems_info = connection.get_file_systems(fs_id, name)
-
- if tags:
- file_systems_info = [item for item in file_systems_info if has_tags(item['tags'], tags)]
-
- file_systems_info = connection.get_mount_targets_data(file_systems_info)
- file_systems_info = connection.get_security_groups_data(file_systems_info)
-
- if targets:
- targets = [(item, prefix_to_attr(item)) for item in targets]
- file_systems_info = [item for item in file_systems_info if has_targets(item['mount_targets'], targets)]
-
- if is_old_facts:
- module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
- else:
- module.exit_json(changed=False, efs=file_systems_info)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elasticache.py b/lib/ansible/modules/cloud/amazon/elasticache.py
deleted file mode 100644
index 080fc77c7f..0000000000
--- a/lib/ansible/modules/cloud/amazon/elasticache.py
+++ /dev/null
@@ -1,562 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: elasticache
-short_description: Manage cache clusters in Amazon ElastiCache
-description:
- - Manage cache clusters in Amazon ElastiCache.
- - Returns information about the specified cache cluster.
-version_added: "1.4"
-requirements: [ boto3 ]
-author: "Jim Dalton (@jsdalton)"
-options:
- state:
- description:
- - C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed.
- - C(rebooted) will reboot the cluster, resulting in a momentary outage.
- choices: ['present', 'absent', 'rebooted']
- required: true
- type: str
- name:
- description:
- - The cache cluster identifier.
- required: true
- type: str
- engine:
- description:
- - Name of the cache engine to be used.
- - Supported values are C(redis) and C(memcached).
- default: memcached
- type: str
- cache_engine_version:
- description:
- - The version number of the cache engine.
- type: str
- node_type:
- description:
- - The compute and memory capacity of the nodes in the cache cluster.
- default: cache.t2.small
- type: str
- num_nodes:
- description:
- - The initial number of cache nodes that the cache cluster will have.
- - Required when I(state=present).
- type: int
- default: 1
- cache_port:
- description:
- - The port number on which each of the cache nodes will accept
- connections.
- type: int
- cache_parameter_group:
- description:
- - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group
- for the specified engine will be used.
- version_added: "2.0"
- aliases: [ 'parameter_group' ]
- type: str
- cache_subnet_group:
- description:
- - The subnet group name to associate with. Only use if inside a VPC.
- - Required if inside a VPC.
- version_added: "2.0"
- type: str
- security_group_ids:
- description:
- - A list of vpc security group IDs to associate with this cache cluster. Only use if inside a vpc.
- type: list
- elements: str
- version_added: "1.6"
- cache_security_groups:
- description:
- - A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a vpc.
- type: list
- elements: str
- zone:
- description:
- - The EC2 Availability Zone in which the cache cluster will be created.
- type: str
- wait:
- description:
- - Wait for cache cluster result before returning.
- type: bool
- default: true
- hard_modify:
- description:
- - Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state.
- type: bool
- default: false
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-EXAMPLES = """
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
-
-# Basic example
-- elasticache:
- name: "test-please-delete"
- state: present
- engine: memcached
- cache_engine_version: 1.4.14
- node_type: cache.m1.small
- num_nodes: 1
- cache_port: 11211
- cache_security_groups:
- - default
- zone: us-east-1d
-
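-# Redis cluster inside a VPC (identifiers are illustrative)
-- elasticache:
- name: "test-redis-vpc"
- state: present
- engine: redis
- node_type: cache.t2.small
- num_nodes: 1
- cache_subnet_group: my-cache-subnet-group
- security_group_ids:
- - sg-1a2b3c4d
-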
-
-# Ensure cache cluster is gone
-- elasticache:
- name: "test-please-delete"
- state: absent
-
-# Reboot cache cluster
-- elasticache:
- name: "test-please-delete"
- state: rebooted
-
-"""
-from time import sleep
-from traceback import format_exc
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, camel_dict_to_snake_dict
-
-try:
- import boto3
- import botocore
-except ImportError:
- pass # will be detected by imported HAS_BOTO3
-
-
-class ElastiCacheManager(object):
-
- """Handles elasticache creation and destruction"""
-
- EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
-
- def __init__(self, module, name, engine, cache_engine_version, node_type,
- num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
- cache_security_groups, security_group_ids, zone, wait,
- hard_modify, region, **aws_connect_kwargs):
- self.module = module
- self.name = name
- self.engine = engine.lower()
- self.cache_engine_version = cache_engine_version
- self.node_type = node_type
- self.num_nodes = num_nodes
- self.cache_port = cache_port
- self.cache_parameter_group = cache_parameter_group
- self.cache_subnet_group = cache_subnet_group
- self.cache_security_groups = cache_security_groups
- self.security_group_ids = security_group_ids
- self.zone = zone
- self.wait = wait
- self.hard_modify = hard_modify
-
- self.region = region
- self.aws_connect_kwargs = aws_connect_kwargs
-
- self.changed = False
- self.data = None
- self.status = 'gone'
- self.conn = self._get_elasticache_connection()
- self._refresh_data()
-
- def ensure_present(self):
- """Ensure cache cluster exists or create it if not"""
- if self.exists():
- self.sync()
- else:
- self.create()
-
- def ensure_absent(self):
- """Ensure cache cluster is gone or delete it if not"""
- self.delete()
-
- def ensure_rebooted(self):
- """Ensure cache cluster is gone or delete it if not"""
- self.reboot()
-
- def exists(self):
- """Check if cache cluster exists"""
- return self.status in self.EXIST_STATUSES
-
- def create(self):
- """Create an ElastiCache cluster"""
- if self.status == 'available':
- return
- if self.status in ['creating', 'rebooting', 'modifying']:
- if self.wait:
- self._wait_for_status('available')
- return
- if self.status == 'deleting':
- if self.wait:
- self._wait_for_status('gone')
- else:
- msg = "'%s' is currently deleting. Cannot create."
- self.module.fail_json(msg=msg % self.name)
-
- kwargs = dict(CacheClusterId=self.name,
- NumCacheNodes=self.num_nodes,
- CacheNodeType=self.node_type,
- Engine=self.engine,
- EngineVersion=self.cache_engine_version,
- CacheSecurityGroupNames=self.cache_security_groups,
- SecurityGroupIds=self.security_group_ids,
- CacheParameterGroupName=self.cache_parameter_group,
- CacheSubnetGroupName=self.cache_subnet_group)
- if self.cache_port is not None:
- kwargs['Port'] = self.cache_port
- if self.zone is not None:
- kwargs['PreferredAvailabilityZone'] = self.zone
-
- try:
- self.conn.create_cache_cluster(**kwargs)
-
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg=e.message, exception=format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- self._refresh_data()
-
- self.changed = True
- if self.wait:
- self._wait_for_status('available')
- return True
-
- def delete(self):
- """Destroy an ElastiCache cluster"""
- if self.status == 'gone':
- return
- if self.status == 'deleting':
- if self.wait:
- self._wait_for_status('gone')
- return
- if self.status in ['creating', 'rebooting', 'modifying']:
- if self.wait:
- self._wait_for_status('available')
- else:
- msg = "'%s' is currently %s. Cannot delete."
- self.module.fail_json(msg=msg % (self.name, self.status))
-
- try:
- response = self.conn.delete_cache_cluster(CacheClusterId=self.name)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg=e.message, exception=format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- cache_cluster_data = response['CacheCluster']
- self._refresh_data(cache_cluster_data)
-
- self.changed = True
- if self.wait:
- self._wait_for_status('gone')
-
- def sync(self):
- """Sync settings to cluster if required"""
- if not self.exists():
- msg = "'%s' is %s. Cannot sync."
- self.module.fail_json(msg=msg % (self.name, self.status))
-
- if self.status in ['creating', 'rebooting', 'modifying']:
- if self.wait:
- self._wait_for_status('available')
- else:
- # Cluster can only be synced if available. If we can't wait
- # for this, then just be done.
- return
-
- if self._requires_destroy_and_create():
- if not self.hard_modify:
- msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
- self.module.fail_json(msg=msg % self.name)
- if not self.wait:
- msg = "'%s' requires destructive modification. 'wait' must be set to true."
- self.module.fail_json(msg=msg % self.name)
- self.delete()
- self.create()
- return
-
- if self._requires_modification():
- self.modify()
-
- def modify(self):
- """Modify the cache cluster. Note it's only possible to modify a few select options."""
- nodes_to_remove = self._get_nodes_to_remove()
- try:
- self.conn.modify_cache_cluster(CacheClusterId=self.name,
- NumCacheNodes=self.num_nodes,
- CacheNodeIdsToRemove=nodes_to_remove,
- CacheSecurityGroupNames=self.cache_security_groups,
- CacheParameterGroupName=self.cache_parameter_group,
- SecurityGroupIds=self.security_group_ids,
- ApplyImmediately=True,
- EngineVersion=self.cache_engine_version)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg=e.message, exception=format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- self._refresh_data()
-
- self.changed = True
- if self.wait:
- self._wait_for_status('available')
-
- def reboot(self):
- """Reboot the cache cluster"""
- if not self.exists():
- msg = "'%s' is %s. Cannot reboot."
- self.module.fail_json(msg=msg % (self.name, self.status))
- if self.status == 'rebooting':
- return
- if self.status in ['creating', 'modifying']:
- if self.wait:
- self._wait_for_status('available')
- else:
- msg = "'%s' is currently %s. Cannot reboot."
- self.module.fail_json(msg=msg % (self.name, self.status))
-
- # Collect ALL nodes for reboot
- cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
- try:
- self.conn.reboot_cache_cluster(CacheClusterId=self.name,
- CacheNodeIdsToReboot=cache_node_ids)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json(msg=e.message, exception=format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- self._refresh_data()
-
- self.changed = True
- if self.wait:
- self._wait_for_status('available')
-
- def get_info(self):
- """Return basic info about the cache cluster"""
- info = {
- 'name': self.name,
- 'status': self.status
- }
- if self.data:
- info['data'] = self.data
- return info
-
- def _wait_for_status(self, awaited_status):
- """Wait for status to change from present status to awaited_status"""
- status_map = {
- 'creating': 'available',
- 'rebooting': 'available',
- 'modifying': 'available',
- 'deleting': 'gone'
- }
- if self.status == awaited_status:
- # No need to wait, we're already done
- return
- if status_map[self.status] != awaited_status:
- msg = "Invalid awaited status. '%s' cannot transition to '%s'"
- self.module.fail_json(msg=msg % (self.status, awaited_status))
-
- if awaited_status not in set(status_map.values()):
- msg = "'%s' is not a valid awaited status."
- self.module.fail_json(msg=msg % awaited_status)
-
- while True:
- sleep(1)
- self._refresh_data()
- if self.status == awaited_status:
- break
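Note that this loop polls once per second with no upper bound, so wait=true can block indefinitely if the cluster never reaches the awaited status. A bounded variant of the same loop inside this method (the 30-minute cap is an arbitrary illustration, not module behavior) could read:

from time import sleep, time

deadline = time() + 1800  # arbitrary 30-minute cap, not a module default
while time() < deadline:
    sleep(1)
    self._refresh_data()
    if self.status == awaited_status:
        break
else:
    # while/else: this branch runs only if the deadline expired without a break
    self.module.fail_json(msg="Timed out waiting for status '%s'" % awaited_status)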
-
- def _requires_modification(self):
- """Check if cluster requires (nondestructive) modification"""
- # Check modifiable data attributes
- modifiable_data = {
- 'NumCacheNodes': self.num_nodes,
- 'EngineVersion': self.cache_engine_version
- }
- for key, value in modifiable_data.items():
- if value is not None and value and self.data[key] != value:
- return True
-
- # Check cache security groups
- cache_security_groups = []
- for sg in self.data['CacheSecurityGroups']:
- cache_security_groups.append(sg['CacheSecurityGroupName'])
- if set(cache_security_groups) != set(self.cache_security_groups):
- return True
-
- # check vpc security groups
- if self.security_group_ids:
- vpc_security_groups = []
- security_groups = self.data['SecurityGroups'] or []
- for sg in security_groups:
- vpc_security_groups.append(sg['SecurityGroupId'])
- if set(vpc_security_groups) != set(self.security_group_ids):
- return True
-
- return False
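The set comparisons above make group membership order-insensitive; a quick illustration with made-up security group IDs:

# Order does not matter when comparing security-group membership.
current = ['sg-aaa11111', 'sg-bbb22222']
desired = ['sg-bbb22222', 'sg-aaa11111']
assert set(current) == set(desired)            # no modification required
assert set(current) != set(['sg-aaa11111'])    # a change would be required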
-
- def _requires_destroy_and_create(self):
- """
- Check whether a destroy and create is required to synchronize cluster.
- """
- unmodifiable_data = {
- 'node_type': self.data['CacheNodeType'],
- 'engine': self.data['Engine'],
- 'cache_port': self._get_port()
- }
- # Only check for modifications if zone is specified
- if self.zone is not None:
- unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
- for key, value in unmodifiable_data.items():
- if getattr(self, key) is not None and getattr(self, key) != value:
- return True
- return False
-
- def _get_elasticache_connection(self):
- """Get an elasticache connection"""
- region, ec2_url, aws_connect_params = get_aws_connection_info(self.module, boto3=True)
- if region:
- return boto3_conn(self.module, conn_type='client', resource='elasticache',
- region=region, endpoint=ec2_url, **aws_connect_params)
- else:
- self.module.fail_json(msg="region must be specified")
-
- def _get_port(self):
- """Get the port. Where this information is retrieved from is engine dependent."""
- if self.data['Engine'] == 'memcached':
- return self.data['ConfigurationEndpoint']['Port']
- elif self.data['Engine'] == 'redis':
- # Redis only supports a single node (presently) so just use
- # the first and only
- return self.data['CacheNodes'][0]['Endpoint']['Port']
-
- def _refresh_data(self, cache_cluster_data=None):
- """Refresh data about this cache cluster"""
-
- if cache_cluster_data is None:
- try:
- response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'CacheClusterNotFound':
- self.data = None
- self.status = 'gone'
- return
- else:
- self.module.fail_json(msg=e.message, exception=format_exc(),
- **camel_dict_to_snake_dict(e.response))
- cache_cluster_data = response['CacheClusters'][0]
- self.data = cache_cluster_data
- self.status = self.data['CacheClusterStatus']
-
- # The documentation for elasticache lies -- status on rebooting is set
- # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
- # here to make status checks etc. more sane.
- if self.status == 'rebooting cache cluster nodes':
- self.status = 'rebooting'
-
- def _get_nodes_to_remove(self):
- """If there are nodes to remove, it figures out which need to be removed"""
- num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
- if num_nodes_to_remove <= 0:
- return []
-
- if not self.hard_modify:
- msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
- self.module.fail_json(msg=msg % self.name)
-
- cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
- return cache_node_ids[-num_nodes_to_remove:]
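The tail slice above always drops the highest-numbered node IDs; a worked example with hypothetical IDs:

# Shrinking a 3-node cluster to 1 node removes the two highest node IDs.
cache_node_ids = ['0001', '0002', '0003']
num_nodes_to_remove = 3 - 1
assert cache_node_ids[-num_nodes_to_remove:] == ['0002', '0003']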
-
-
-def main():
- """ elasticache ansible module """
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(required=True, choices=['present', 'absent', 'rebooted']),
- name=dict(required=True),
- engine=dict(default='memcached'),
- cache_engine_version=dict(default=""),
- node_type=dict(default='cache.t2.small'),
- num_nodes=dict(default=1, type='int'),
- # alias for compat with the original PR 1950
- cache_parameter_group=dict(default="", aliases=['parameter_group']),
- cache_port=dict(type='int'),
- cache_subnet_group=dict(default=""),
- cache_security_groups=dict(default=[], type='list'),
- security_group_ids=dict(default=[], type='list'),
- zone=dict(),
- wait=dict(default=True, type='bool'),
- hard_modify=dict(type='bool')
- ))
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- )
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
-
- name = module.params['name']
- state = module.params['state']
- engine = module.params['engine']
- cache_engine_version = module.params['cache_engine_version']
- node_type = module.params['node_type']
- num_nodes = module.params['num_nodes']
- cache_port = module.params['cache_port']
- cache_subnet_group = module.params['cache_subnet_group']
- cache_security_groups = module.params['cache_security_groups']
- security_group_ids = module.params['security_group_ids']
- zone = module.params['zone']
- wait = module.params['wait']
- hard_modify = module.params['hard_modify']
- cache_parameter_group = module.params['cache_parameter_group']
-
- if cache_subnet_group and cache_security_groups:
- module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")
-
- if state == 'present' and not num_nodes:
- module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
-
- elasticache_manager = ElastiCacheManager(module, name, engine,
- cache_engine_version, node_type,
- num_nodes, cache_port,
- cache_parameter_group,
- cache_subnet_group,
- cache_security_groups,
- security_group_ids, zone, wait,
- hard_modify, region, **aws_connect_kwargs)
-
- if state == 'present':
- elasticache_manager.ensure_present()
- elif state == 'absent':
- elasticache_manager.ensure_absent()
- elif state == 'rebooted':
- elasticache_manager.ensure_rebooted()
-
- facts_result = dict(changed=elasticache_manager.changed,
- elasticache=elasticache_manager.get_info())
-
- module.exit_json(**facts_result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elasticache_info.py b/lib/ansible/modules/cloud/amazon/elasticache_info.py
deleted file mode 100644
index cd4a243810..0000000000
--- a/lib/ansible/modules/cloud/amazon/elasticache_info.py
+++ /dev/null
@@ -1,310 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: elasticache_info
-short_description: Retrieve information for AWS ElastiCache clusters
-description:
- - Retrieve information from AWS ElastiCache clusters.
- - This module was called C(elasticache_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.5"
-options:
- name:
- description:
- - The name of an ElastiCache cluster.
- type: str
-
-author:
- - Will Thames (@willthames)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: obtain all ElastiCache information
- elasticache_info:
-
-- name: obtain all information for a single ElastiCache cluster
- elasticache_info:
- name: test_elasticache
-'''
-
-RETURN = '''
-elasticache_clusters:
- description: List of ElastiCache clusters
- returned: always
- type: complex
- contains:
- auto_minor_version_upgrade:
- description: Whether to automatically upgrade to minor versions
- returned: always
- type: bool
- sample: true
- cache_cluster_create_time:
- description: Date and time cluster was created
- returned: always
- type: str
- sample: '2017-09-15T05:43:46.038000+00:00'
- cache_cluster_id:
- description: ID of the cache cluster
- returned: always
- type: str
- sample: abcd-1234-001
- cache_cluster_status:
- description: Status of ElastiCache cluster
- returned: always
- type: str
- sample: available
- cache_node_type:
- description: Instance type of ElastiCache nodes
- returned: always
- type: str
- sample: cache.t2.micro
- cache_nodes:
- description: List of ElastiCache nodes in the cluster
- returned: always
- type: complex
- contains:
- cache_node_create_time:
- description: Date and time node was created
- returned: always
- type: str
- sample: '2017-09-15T05:43:46.038000+00:00'
- cache_node_id:
- description: ID of the cache node
- returned: always
- type: str
- sample: '0001'
- cache_node_status:
- description: Status of the cache node
- returned: always
- type: str
- sample: available
- customer_availability_zone:
- description: Availability Zone in which the cache node was created
- returned: always
- type: str
- sample: ap-southeast-2b
- endpoint:
- description: Connection details for the cache node
- returned: always
- type: complex
- contains:
- address:
- description: URL of the cache node endpoint
- returned: always
- type: str
- sample: abcd-1234-001.bgiz2p.0001.apse2.cache.amazonaws.com
- port:
- description: Port of the cache node endpoint
- returned: always
- type: int
- sample: 6379
- parameter_group_status:
- description: Status of the Cache Parameter Group
- returned: always
- type: str
- sample: in-sync
- cache_parameter_group:
- description: Contents of the Cache Parameter Group
- returned: always
- type: complex
- contains:
- cache_node_ids_to_reboot:
- description: Cache nodes which need to be rebooted for parameter changes to be applied
- returned: always
- type: list
- sample: []
- cache_parameter_group_name:
- description: Name of the cache parameter group
- returned: always
- type: str
- sample: default.redis3.2
- parameter_apply_status:
- description: Status of parameter updates
- returned: always
- type: str
- sample: in-sync
- cache_security_groups:
- description: Security Groups used by the cache
- returned: always
- type: list
- sample:
- - 'sg-abcd1234'
- cache_subnet_group_name:
- description: ElastiCache Subnet Group used by the cache
- returned: always
- type: str
- sample: abcd-subnet-group
- client_download_landing_page:
- description: URL of client download web page
- returned: always
- type: str
- sample: 'https://console.aws.amazon.com/elasticache/home#client-download:'
- engine:
- description: Engine used by ElastiCache
- returned: always
- type: str
- sample: redis
- engine_version:
- description: Version of ElastiCache engine
- returned: always
- type: str
- sample: 3.2.4
- notification_configuration:
- description: Configuration of notifications
- returned: if notifications are enabled
- type: complex
- contains:
- topic_arn:
- description: ARN of notification destination topic
- returned: if notifications are enabled
- type: str
- sample: arn:aws:sns:*:123456789012:my_topic
- topic_name:
- description: Name of notification destination topic
- returned: if notifications are enabled
- type: str
- sample: MyTopic
- num_cache_nodes:
- description: Number of Cache Nodes
- returned: always
- type: int
- sample: 1
- pending_modified_values:
- description: Values that are pending modification
- returned: always
- type: complex
- contains: {}
- preferred_availability_zone:
- description: Preferred Availability Zone
- returned: always
- type: str
- sample: ap-southeast-2b
- preferred_maintenance_window:
- description: Time slot for preferred maintenance window
- returned: always
- type: str
- sample: sat:12:00-sat:13:00
- replication_group_id:
- description: Replication Group Id
- returned: always
- type: str
- sample: replication-001
- security_groups:
- description: List of Security Groups associated with ElastiCache
- returned: always
- type: complex
- contains:
- security_group_id:
- description: Security Group ID
- returned: always
- type: str
- sample: sg-abcd1234
- status:
- description: Status of Security Group
- returned: always
- type: str
- sample: active
- tags:
- description: Tags applied to the ElastiCache cluster
- returned: always
- type: complex
- contains: {}
- sample:
- Application: web
- Environment: test
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import get_aws_connection_info, camel_dict_to_snake_dict, AWSRetry, boto3_tag_list_to_ansible_dict
-
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-@AWSRetry.exponential_backoff()
-def describe_cache_clusters_with_backoff(client, cluster_id=None):
- paginator = client.get_paginator('describe_cache_clusters')
- params = dict(ShowCacheNodeInfo=True)
- if cluster_id:
- params['CacheClusterId'] = cluster_id
- try:
- response = paginator.paginate(**params).build_full_result()
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'CacheClusterNotFound':
- return []
- raise
- except botocore.exceptions.BotoCoreError:
- raise
- return response['CacheClusters']
-
-
-@AWSRetry.exponential_backoff()
-def get_elasticache_tags_with_backoff(client, cluster_id):
- return client.list_tags_for_resource(ResourceName=cluster_id)['TagList']
-
-
-def get_aws_account_id(module):
- try:
- client = module.client('sts')
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Can't authorize connection")
-
- try:
- return client.get_caller_identity()['Account']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain AWS account id")
-
-
-def get_elasticache_clusters(client, module):
- region = get_aws_connection_info(module, boto3=True)[0]
- try:
- clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name'))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't obtain cache cluster info")
-
- account_id = get_aws_account_id(module)
- results = []
- for cluster in clusters:
-
- cluster = camel_dict_to_snake_dict(cluster)
- arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id'])
- try:
- tags = get_elasticache_tags_with_backoff(client, arn)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get tags for cluster %s")
-
- cluster['tags'] = boto3_tag_list_to_ansible_dict(tags)
- results.append(cluster)
- return results
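For reference, the ARN template used above expands as follows (account ID and cluster ID are made up):

region, account_id, cluster_id = 'ap-southeast-2', '123456789012', 'abcd-1234-001'
arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster_id)
assert arn == 'arn:aws:elasticache:ap-southeast-2:123456789012:cluster:abcd-1234-001'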
-
-
-def main():
- argument_spec = dict(
- name=dict(required=False),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._name == 'elasticache_facts':
- module.deprecate("The 'elasticache_facts' module has been renamed to 'elasticache_info'", version='2.13')
-
- client = module.client('elasticache')
-
- module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elasticache_parameter_group.py b/lib/ansible/modules/cloud/amazon/elasticache_parameter_group.py
deleted file mode 100644
index 50951d24a0..0000000000
--- a/lib/ansible/modules/cloud/amazon/elasticache_parameter_group.py
+++ /dev/null
@@ -1,356 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: elasticache_parameter_group
-short_description: Manage cache parameter groups in Amazon ElastiCache
-description:
- - Manage cache parameter groups in Amazon ElastiCache.
- - Returns information about the specified cache parameter group.
-version_added: "2.3"
-author: "Sloane Hertel (@s-hertel)"
-extends_documentation_fragment:
- - aws
- - ec2
-requirements: [ boto3, botocore ]
-options:
- group_family:
- description:
- - The name of the cache parameter group family that the cache parameter group can be used with.
- Required when creating a cache parameter group.
- choices: ['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0']
- type: str
- name:
- description:
- - A user-specified name for the cache parameter group.
- required: yes
- type: str
- description:
- description:
- - A user-specified description for the cache parameter group.
- type: str
- state:
- description:
- - Idempotent actions that will create/modify, destroy, or reset a cache parameter group as needed.
- choices: ['present', 'absent', 'reset']
- required: true
- type: str
- values:
- description:
- - A user-specified dictionary of parameters to reset or modify for the cache parameter group.
- type: dict
-"""
-
-EXAMPLES = """
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
----
-- hosts: localhost
- connection: local
- tasks:
- - name: 'Create a test parameter group'
- elasticache_parameter_group:
- name: 'test-param-group'
- group_family: 'redis3.2'
- description: 'This is a cache parameter group'
- state: 'present'
- - name: 'Modify a test parameter group'
- elasticache_parameter_group:
- name: 'test-param-group'
- values:
- activerehashing: yes
- client-output-buffer-limit-normal-hard-limit: 4
- state: 'present'
- - name: 'Reset all modifiable parameters for the test parameter group'
- elasticache_parameter_group:
- name: 'test-param-group'
- state: reset
- - name: 'Delete a test parameter group'
- elasticache_parameter_group:
- name: 'test-param-group'
- state: 'absent'
-"""
-
-RETURN = """
-elasticache:
- description: cache parameter group information and response metadata
- returned: always
- type: dict
- sample:
- cache_parameter_group:
- cache_parameter_group_family: redis3.2
- cache_parameter_group_name: test-please-delete
- description: "initial description"
- response_metadata:
- http_headers:
- content-length: "562"
- content-type: text/xml
- date: "Mon, 06 Feb 2017 22:14:08 GMT"
- x-amzn-requestid: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1
- http_status_code: 200
- request_id: 947291f9-ecb9-11e6-85bd-3baa4eca2cc1
- retry_attempts: 0
-changed:
- description: if the cache parameter group has changed
- returned: always
- type: bool
- sample:
- changed: true
-"""
-
-# import module snippets
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict
-from ansible.module_utils._text import to_text
-from ansible.module_utils.six import string_types
-import traceback
-
-try:
- import boto3
- import botocore
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-
-def create(module, conn, name, group_family, description):
- """ Create ElastiCache parameter group. """
- try:
- response = conn.create_cache_parameter_group(CacheParameterGroupName=name, CacheParameterGroupFamily=group_family, Description=description)
- changed = True
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to create cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- return response, changed
-
-
-def delete(module, conn, name):
- """ Delete ElastiCache parameter group. """
- try:
- conn.delete_cache_parameter_group(CacheParameterGroupName=name)
- response = {}
- changed = True
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to delete cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- return response, changed
-
-
-def make_current_modifiable_param_dict(module, conn, name):
- """ Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}"""
- current_info = get_info(conn, name)
- if current_info is False:
- module.fail_json(msg="Could not connect to the cache parameter group %s." % name)
-
- parameters = current_info["Parameters"]
- modifiable_params = {}
-
- for param in parameters:
- if param["IsModifiable"]:
- modifiable_params[param["ParameterName"]] = [param.get("AllowedValues")]
- modifiable_params[param["ParameterName"]].append(param["DataType"])
- modifiable_params[param["ParameterName"]].append(param.get("ParameterValue"))
- return modifiable_params
-
-
-def check_valid_modification(module, values, modifiable_params):
- """ Check if the parameters and values in values are valid. """
- changed_with_update = False
-
- for parameter in values:
- new_value = values[parameter]
-
- # check valid modifiable parameters
- if parameter not in modifiable_params:
- module.fail_json(msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." % (parameter, modifiable_params.keys()))
-
- # check allowed datatype for modified parameters
- str_to_type = {"integer": int, "string": string_types}
- expected_type = str_to_type[modifiable_params[parameter][1]]
- if not isinstance(new_value, expected_type):
- if expected_type == str:
- if isinstance(new_value, bool):
- values[parameter] = "yes" if new_value else "no"
- else:
- values[parameter] = to_text(new_value)
- elif expected_type == int:
- if isinstance(new_value, bool):
- values[parameter] = 1 if new_value else 0
- else:
- module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
- (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
- else:
- module.fail_json(msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." %
- (new_value, type(new_value), parameter, modifiable_params[parameter][1]))
-
- # check allowed values for modifiable parameters
- choices = modifiable_params[parameter][0]
- if choices:
- if not (to_text(new_value) in choices or isinstance(new_value, int)):
- module.fail_json(msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." %
- (new_value, parameter, choices))
-
- # check if a new value is different from current value
- if to_text(values[parameter]) != modifiable_params[parameter][2]:
- changed_with_update = True
-
- return changed_with_update, values
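A condensed sketch of the boolean coercion rules applied above (the parameter types are illustrative):

new_value = True
# A string-typed parameter turns booleans into the engine's 'yes'/'no' form.
assert ('yes' if new_value else 'no') == 'yes'
# An integer-typed parameter turns booleans into 1/0.
assert (1 if new_value else 0) == 1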
-
-
-def check_changed_parameter_values(values, old_parameters, new_parameters):
- """ Checking if the new values are different than the old values. """
- changed_with_update = False
-
- # if the user specified parameters to reset, only check those for change
- if values:
- for parameter in values:
- if old_parameters[parameter] != new_parameters[parameter]:
- changed_with_update = True
- break
- # otherwise check all to find a change
- else:
- for parameter in old_parameters:
- if old_parameters[parameter] != new_parameters[parameter]:
- changed_with_update = True
- break
-
- return changed_with_update
-
-
-def modify(module, conn, name, values):
- """ Modify ElastiCache parameter group to reflect the new information if it differs from the current. """
- # compare the current group parameters with the user-specified values to see if this call will change the group
- format_parameters = []
- for key in values:
- value = to_text(values[key])
- format_parameters.append({'ParameterName': key, 'ParameterValue': value})
- try:
- response = conn.modify_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to modify cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- return response
-
-
-def reset(module, conn, name, values):
- """ Reset ElastiCache parameter group if the current information is different from the new information. """
- # used to compare with the reset parameters' dict to see if there have been changes
- old_parameters_dict = make_current_modifiable_param_dict(module, conn, name)
-
- format_parameters = []
-
- # determine whether to reset all or specific parameters
- if values:
- all_parameters = False
- format_parameters = []
- for key in values:
- value = to_text(values[key])
- format_parameters.append({'ParameterName': key, 'ParameterValue': value})
- else:
- all_parameters = True
-
- try:
- response = conn.reset_cache_parameter_group(CacheParameterGroupName=name, ParameterNameValues=format_parameters, ResetAllParameters=all_parameters)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to reset cache parameter group.", exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
- # determine changed
- new_parameters_dict = make_current_modifiable_param_dict(module, conn, name)
- changed = check_changed_parameter_values(values, old_parameters_dict, new_parameters_dict)
-
- return response, changed
-
-
-def get_info(conn, name):
- """ Gets info about the ElastiCache parameter group. Returns false if it doesn't exist or we don't have access. """
- try:
- data = conn.describe_cache_parameters(CacheParameterGroupName=name)
- return data
- except botocore.exceptions.ClientError as e:
- return False
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- group_family=dict(type='str', choices=['memcached1.4', 'memcached1.5', 'redis2.6', 'redis2.8', 'redis3.2', 'redis4.0', 'redis5.0']),
- name=dict(required=True, type='str'),
- description=dict(default='', type='str'),
- state=dict(required=True, choices=['present', 'absent', 'reset']),
- values=dict(type='dict'),
- )
- )
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- parameter_group_family = module.params.get('group_family')
- parameter_group_name = module.params.get('name')
- group_description = module.params.get('description')
- state = module.params.get('state')
- values = module.params.get('values')
-
- # Retrieve any AWS settings from the environment.
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- if not region:
- module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")
-
- connection = boto3_conn(module, conn_type='client',
- resource='elasticache', region=region,
- endpoint=ec2_url, **aws_connect_kwargs)
-
- exists = get_info(connection, parameter_group_name)
-
- # check that the needed requirements are available
- if state == 'present' and not (exists or parameter_group_family):
- module.fail_json(msg="Creating a group requires a family group.")
- elif state == 'reset' and not exists:
- module.fail_json(msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name)
-
- # Taking action
- changed = False
- if state == 'present':
- if exists:
- # confirm that the group exists without any actions
- if not values:
- response = exists
- changed = False
- # modify existing group
- else:
- modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
- changed, values = check_valid_modification(module, values, modifiable_params)
- response = modify(module, connection, parameter_group_name, values)
- # create group
- else:
- response, changed = create(module, connection, parameter_group_name, parameter_group_family, group_description)
- if values:
- modifiable_params = make_current_modifiable_param_dict(module, connection, parameter_group_name)
- changed, values = check_valid_modification(module, values, modifiable_params)
- response = modify(module, connection, parameter_group_name, values)
- elif state == 'absent':
- if exists:
- # delete group
- response, changed = delete(module, connection, parameter_group_name)
- else:
- response = {}
- changed = False
- elif state == 'reset':
- response, changed = reset(module, connection, parameter_group_name, values)
-
- facts_result = dict(changed=changed, elasticache=camel_dict_to_snake_dict(response))
-
- module.exit_json(**facts_result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elasticache_snapshot.py b/lib/ansible/modules/cloud/amazon/elasticache_snapshot.py
deleted file mode 100644
index 1883499394..0000000000
--- a/lib/ansible/modules/cloud/amazon/elasticache_snapshot.py
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: elasticache_snapshot
-short_description: Manage cache snapshots in Amazon ElastiCache
-description:
- - Manage cache snapshots in Amazon ElastiCache.
- - Returns information about the specified snapshot.
-version_added: "2.3"
-author: "Sloane Hertel (@s-hertel)"
-extends_documentation_fragment:
- - aws
- - ec2
-requirements: [ boto3, botocore ]
-options:
- name:
- description:
- - The name of the snapshot to create, copy, or delete.
- required: true
- type: str
- state:
- description:
- - Actions that will create, destroy, or copy a snapshot.
- required: true
- choices: ['present', 'absent', 'copy']
- type: str
- replication_id:
- description:
- - The name of the existing replication group to make the snapshot from.
- type: str
- cluster_id:
- description:
- - The name of an existing cache cluster in the replication group to make the snapshot from.
- type: str
- target:
- description:
- - The name of a snapshot copy.
- type: str
- bucket:
- description:
- - The S3 bucket to which the snapshot is exported.
- type: str
-"""
-
-EXAMPLES = """
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
----
-- hosts: localhost
- connection: local
- tasks:
- - name: 'Create a snapshot'
- elasticache_snapshot:
- name: 'test-snapshot'
- state: 'present'
- cluster_id: '{{ cluster }}'
- replication_id: '{{ replication }}'
-"""
-
-RETURN = """
-response_metadata:
- description: response metadata about the snapshot
- returned: always
- type: dict
- sample:
- http_headers:
- content-length: 1490
- content-type: text/xml
- date: Tue, 07 Feb 2017 16:43:04 GMT
- x-amzn-requestid: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
- http_status_code: 200
- request_id: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
- retry_attempts: 0
-snapshot:
- description: snapshot data
- returned: always
- type: dict
- sample:
- auto_minor_version_upgrade: true
- cache_cluster_create_time: 2017-02-01T17:43:58.261000+00:00
- cache_cluster_id: test-please-delete
- cache_node_type: cache.m1.small
- cache_parameter_group_name: default.redis3.2
- cache_subnet_group_name: default
- engine: redis
- engine_version: 3.2.4
- node_snapshots:
- cache_node_create_time: 2017-02-01T17:43:58.261000+00:00
- cache_node_id: 0001
- cache_size:
- num_cache_nodes: 1
- port: 11211
- preferred_availability_zone: us-east-1d
- preferred_maintenance_window: wed:03:00-wed:04:00
- snapshot_name: deletesnapshot
- snapshot_retention_limit: 0
- snapshot_source: manual
- snapshot_status: creating
- snapshot_window: 10:00-11:00
- vpc_id: vpc-c248fda4
-changed:
- description: if a snapshot has been created, deleted, or copied
- returned: always
- type: bool
- sample:
- changed: true
-"""
-
-import traceback
-
-try:
- import boto3
- import botocore
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict
-
-
-def create(module, connection, replication_id, cluster_id, name):
- """ Create an ElastiCache backup. """
- try:
- response = connection.create_snapshot(ReplicationGroupId=replication_id,
- CacheClusterId=cluster_id,
- SnapshotName=name)
- changed = True
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == "SnapshotAlreadyExistsFault":
- response = {}
- changed = False
- else:
- module.fail_json(msg="Unable to create the snapshot.", exception=traceback.format_exc())
- return response, changed
-
-
-def copy(module, connection, name, target, bucket):
- """ Copy an ElastiCache backup. """
- try:
- response = connection.copy_snapshot(SourceSnapshotName=name,
- TargetSnapshotName=target,
- TargetBucket=bucket)
- changed = True
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Unable to copy the snapshot.", exception=traceback.format_exc())
- return response, changed
-
-
-def delete(module, connection, name):
- """ Delete an ElastiCache backup. """
- try:
- response = connection.delete_snapshot(SnapshotName=name)
- changed = True
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == "SnapshotNotFoundFault":
- response = {}
- changed = False
- elif e.response['Error']['Code'] == "InvalidSnapshotState":
- module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion."
- "You may need to wait a few minutes.")
- else:
- module.fail_json(msg="Unable to delete the snapshot.", exception=traceback.format_exc())
- return response, changed
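The error-code dispatch above relies on botocore's structured ClientError response; a minimal standalone sketch of the same pattern (snapshot name and region are illustrative):

import boto3
import botocore

connection = boto3.client('elasticache', region_name='us-east-1')
try:
    connection.delete_snapshot(SnapshotName='no-such-snapshot')
except botocore.exceptions.ClientError as e:
    if e.response['Error']['Code'] == 'SnapshotNotFoundFault':
        pass  # a missing snapshot counts as already deleted
    else:
        raise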
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- name=dict(required=True, type='str'),
- state=dict(required=True, type='str', choices=['present', 'absent', 'copy']),
- replication_id=dict(type='str'),
- cluster_id=dict(type='str'),
- target=dict(type='str'),
- bucket=dict(type='str'),
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- name = module.params.get('name')
- state = module.params.get('state')
- replication_id = module.params.get('replication_id')
- cluster_id = module.params.get('cluster_id')
- target = module.params.get('target')
- bucket = module.params.get('bucket')
-
- # Retrieve any AWS settings from the environment.
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- if not region:
- module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
-
- connection = boto3_conn(module, conn_type='client',
- resource='elasticache', region=region,
- endpoint=ec2_url, **aws_connect_kwargs)
-
- changed = False
- response = {}
-
- if state == 'present':
- if not all((replication_id, cluster_id)):
- module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'")
- response, changed = create(module, connection, replication_id, cluster_id, name)
- elif state == 'absent':
- response, changed = delete(module, connection, name)
- elif state == 'copy':
- if not all((target, bucket)):
- module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.")
- response, changed = copy(module, connection, name, target, bucket)
-
- facts_result = dict(changed=changed, **camel_dict_to_snake_dict(response))
-
- module.exit_json(**facts_result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py b/lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py
deleted file mode 100644
index e1425416b5..0000000000
--- a/lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: elasticache_subnet_group
-version_added: "2.0"
-short_description: Manage ElastiCache subnet groups
-description:
- - Creates, modifies, and deletes ElastiCache subnet groups. This module has a dependency on python-boto >= 2.5.
-options:
- state:
- description:
- - Specifies whether the subnet group should be present or absent.
- required: true
- choices: [ 'present' , 'absent' ]
- type: str
- name:
- description:
- - ElastiCache subnet group identifier.
- required: true
- type: str
- description:
- description:
- - ElastiCache subnet group description. Only set when a new group is added.
- type: str
- subnets:
- description:
- - List of subnet IDs that make up the ElastiCache subnet group.
- type: list
- elements: str
-author: "Tim Mahoney (@timmahoney)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Add or change a subnet group
-- elasticache_subnet_group:
- state: present
- name: norwegian-blue
- description: My Fancy Ex Parrot Subnet Group
- subnets:
- - subnet-aaaaaaaa
- - subnet-bbbbbbbb
-
-# Remove a subnet group
-- elasticache_subnet_group:
- state: absent
- name: norwegian-blue
-'''
-
-try:
- import boto
- from boto.elasticache import connect_to_region
- from boto.exception import BotoServerError
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
- from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(required=True, choices=['present', 'absent']),
- name=dict(required=True),
- description=dict(required=False),
- subnets=dict(required=False, type='list'),
- )
- )
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- state = module.params.get('state')
- group_name = module.params.get('name').lower()
- group_description = module.params.get('description')
- group_subnets = module.params.get('subnets') or []
-
- if state == 'present':
- for required in ['name', 'description', 'subnets']:
- if not module.params.get(required):
- module.fail_json(msg=str("Parameter %s required for state='present'" % required))
- else:
- for not_allowed in ['description', 'subnets']:
- if module.params.get(not_allowed):
- module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))
-
- # Retrieve any AWS settings from the environment.
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
-
- if not region:
- module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
-
- """Get an elasticache connection"""
- try:
- conn = connect_to_region(region_name=region, **aws_connect_kwargs)
- except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg=e.message)
-
- try:
- changed = False
- exists = False
-
- try:
- matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100)
- exists = len(matching_groups) > 0
- except BotoServerError as e:
- if e.error_code != 'CacheSubnetGroupNotFoundFault':
- module.fail_json(msg=e.error_message)
-
- if state == 'absent':
- if exists:
- conn.delete_cache_subnet_group(group_name)
- changed = True
- else:
- if not exists:
- new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
- changed = True
- else:
- changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
- changed = True
-
- except BotoServerError as e:
- if e.error_message != 'No modifications were requested.':
- module.fail_json(msg=e.error_message)
- else:
- changed = False
-
- module.exit_json(changed=changed)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elb_application_lb.py b/lib/ansible/modules/cloud/amazon/elb_application_lb.py
deleted file mode 100644
index a14e4f70f1..0000000000
--- a/lib/ansible/modules/cloud/amazon/elb_application_lb.py
+++ /dev/null
@@ -1,659 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: elb_application_lb
-short_description: Manage an Application Load Balancer
-description:
- - Manage an AWS Application Elastic Load Balancer. See U(https://aws.amazon.com/blogs/aws/new-aws-application-load-balancer/) for details.
-version_added: "2.4"
-requirements: [ boto3 ]
-author: "Rob White (@wimnat)"
-options:
- access_logs_enabled:
- description:
- - Whether or not to enable access logs.
- - When set, I(access_logs_s3_bucket) must also be set.
- type: bool
- access_logs_s3_bucket:
- description:
- - The name of the S3 bucket for the access logs.
- - The bucket must exist in the same
- region as the load balancer and have a bucket policy that grants Elastic Load Balancing permission to write to the bucket.
- - Required if access logs in Amazon S3 are enabled.
- - When set, I(access_logs_enabled) must also be set.
- type: str
- access_logs_s3_prefix:
- description:
- - The prefix for the log location in the S3 bucket.
- - If you don't specify a prefix, the access logs are stored in the root of the bucket.
- - Cannot begin or end with a slash.
- type: str
- deletion_protection:
- description:
- - Indicates whether deletion protection for the ELB is enabled.
- default: no
- type: bool
- http2:
- description:
- - Indicates whether to enable HTTP2 routing.
- default: no
- type: bool
- version_added: 2.6
- idle_timeout:
- description:
- - The number of seconds to wait before an idle connection is closed.
- type: int
- listeners:
- description:
- - A list of dicts containing listeners to attach to the ELB. See the examples for details of the required dict. Note that listener keys
- are CamelCased.
- type: list
- suboptions:
- Port:
- description: The port on which the load balancer is listening.
- required: true
- type: int
- Protocol:
- description: The protocol for connections from clients to the load balancer.
- required: true
- type: str
- Certificates:
- description: The SSL server certificate.
- type: list
- suboptions:
- CertificateArn:
- description: The Amazon Resource Name (ARN) of the certificate.
- type: str
- SslPolicy:
- description: The security policy that defines which ciphers and protocols are supported.
- type: str
- DefaultActions:
- description: The default actions for the listener.
- required: true
- type: list
- suboptions:
- Type:
- description: The type of action.
- type: str
- TargetGroupArn:
- description: The Amazon Resource Name (ARN) of the target group.
- type: str
- Rules:
- type: list
- description:
- - A list of ALB Listener Rules.
- - 'For the complete documentation of possible Conditions and Actions please see the boto3 documentation:'
- - 'https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_rule'
- suboptions:
- Conditions:
- type: list
- description: Conditions which must be met for the actions to be applied.
- Priority:
- type: int
- description: The rule priority.
- Actions:
- type: list
- description: Actions to apply if all of the rule's conditions are met.
- name:
- description:
- - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric
- characters or hyphens, and must not begin or end with a hyphen.
- required: true
- type: str
- purge_listeners:
- description:
- - If yes, existing listeners will be purged from the ELB to match exactly what is defined by the I(listeners) parameter. If the I(listeners) parameter is
- not set then listeners will not be modified.
- default: yes
- type: bool
- purge_tags:
- description:
- - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then
- tags will not be modified.
- default: yes
- type: bool
- subnets:
- description:
- - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from
- at least two Availability Zones.
- - Required if I(state=present).
- type: list
- security_groups:
- description:
- - A list of the names or IDs of the security groups to assign to the load balancer.
- - Required if I(state=present).
- default: []
- type: list
- scheme:
- description:
- - Internet-facing or internal load balancer. An ELB scheme cannot be modified after creation.
- default: internet-facing
- choices: [ 'internet-facing', 'internal' ]
- type: str
- state:
- description:
- - Create or destroy the load balancer.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- tags:
- description:
- - A dictionary of one or more tags to assign to the load balancer.
- type: dict
- wait:
- description:
- - Wait for the load balancer to have a state of 'active' before completing. A status check is
- performed every 15 seconds until a successful state is reached. An error is returned after 40 failed checks.
- default: no
- type: bool
- version_added: 2.6
- wait_timeout:
- description:
- - The time in seconds to use in conjunction with I(wait).
- version_added: 2.6
- type: int
- purge_rules:
- description:
- - When set to no, keep the existing load balancer rules in place. Will modify and add, but will not delete.
- default: yes
- type: bool
- version_added: 2.7
-extends_documentation_fragment:
- - aws
- - ec2
-notes:
- - Listeners are matched based on port. If a listener's port is changed then a new listener will be created.
- - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created.
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create an ELB and attach a listener
-- elb_application_lb:
- name: myelb
- security_groups:
- - sg-12345678
- - my-sec-group
- subnets:
- - subnet-012345678
- - subnet-abcdef000
- listeners:
- - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
- Port: 80 # Required. The port on which the load balancer is listening.
- # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
- SslPolicy: ELBSecurityPolicy-2015-05
- Certificates: # The ARN of the certificate (only one certificate ARN should be provided)
- - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com
- DefaultActions:
- - Type: forward # Required.
- TargetGroupName: # Required. The name of the target group
- state: present
-
-# Create an ELB and attach a listener with logging enabled
-- elb_application_lb:
- access_logs_enabled: yes
- access_logs_s3_bucket: mybucket
- access_logs_s3_prefix: "logs"
- name: myelb
- security_groups:
- - sg-12345678
- - my-sec-group
- subnets:
- - subnet-012345678
- - subnet-abcdef000
- listeners:
- - Protocol: HTTP # Required. The protocol for connections from clients to the load balancer (HTTP or HTTPS) (case-sensitive).
- Port: 80 # Required. The port on which the load balancer is listening.
- # The security policy that defines which ciphers and protocols are supported. The default is the current predefined security policy.
- SslPolicy: ELBSecurityPolicy-2015-05
- Certificates: # The ARN of the certificate (only one certificate ARN should be provided)
- - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com
- DefaultActions:
- - Type: forward # Required.
- TargetGroupName: # Required. The name of the target group
- state: present
-
-# Create an ALB with listeners and rules
-- elb_application_lb:
- name: test-alb
- subnets:
- - subnet-12345678
- - subnet-87654321
- security_groups:
- - sg-12345678
- scheme: internal
- listeners:
- - Protocol: HTTPS
- Port: 443
- DefaultActions:
- - Type: forward
- TargetGroupName: test-target-group
- Certificates:
- - CertificateArn: arn:aws:iam::12345678987:server-certificate/test.domain.com
- SslPolicy: ELBSecurityPolicy-2015-05
- Rules:
- - Conditions:
- - Field: path-pattern
- Values:
- - '/test'
- Priority: '1'
- Actions:
- - TargetGroupName: test-target-group
- Type: forward
- - Conditions:
- - Field: path-pattern
- Values:
- - "/redirect-path/*"
- Priority: '2'
- Actions:
- - Type: redirect
- RedirectConfig:
- Host: "#{host}"
- Path: "/example/redir" # or /#{path}
- Port: "#{port}"
- Protocol: "#{protocol}"
- Query: "#{query}"
- StatusCode: "HTTP_302" # or HTTP_301
- - Conditions:
- - Field: path-pattern
- Values:
- - "/fixed-response-path/"
- Priority: '3'
- Actions:
- - Type: fixed-response
- FixedResponseConfig:
- ContentType: "text/plain"
- MessageBody: "This is the page you're looking for"
- StatusCode: "200"
- - Conditions:
- - Field: host-header
- Values:
- - "hostname.domain.com"
- - "alternate.domain.com"
- Priority: '4'
- Actions:
- - TargetGroupName: test-target-group
- Type: forward
- state: present
-
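-# Update listeners while keeping existing, unmatched listener rules in place (purge_rules: no).
-# A minimal sketch reusing the subnet, security group and target group names from the examples above.
-- elb_application_lb:
- name: test-alb
- subnets:
- - subnet-12345678
- - subnet-87654321
- security_groups:
- - sg-12345678
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: test-target-group
- purge_rules: no
- state: present
-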
-# Remove an ELB
-- elb_application_lb:
- name: myelb
- state: absent
-
-'''
-
-RETURN = '''
-access_logs_s3_bucket:
- description: The name of the S3 bucket for the access logs.
- returned: when state is present
- type: str
- sample: mys3bucket
-access_logs_s3_enabled:
- description: Indicates whether access logs stored in Amazon S3 are enabled.
- returned: when state is present
- type: str
- sample: true
-access_logs_s3_prefix:
- description: The prefix for the location in the S3 bucket.
- returned: when state is present
- type: str
- sample: my/logs
-availability_zones:
- description: The Availability Zones for the load balancer.
- returned: when state is present
- type: list
- sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]"
-canonical_hosted_zone_id:
- description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
- returned: when state is present
- type: str
- sample: ABCDEF12345678
-created_time:
- description: The date and time the load balancer was created.
- returned: when state is present
- type: str
- sample: "2015-02-12T02:14:02+00:00"
-deletion_protection_enabled:
- description: Indicates whether deletion protection is enabled.
- returned: when state is present
- type: str
- sample: true
-dns_name:
- description: The public DNS name of the load balancer.
- returned: when state is present
- type: str
- sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
-idle_timeout_timeout_seconds:
- description: The idle timeout value, in seconds.
- returned: when state is present
- type: int
- sample: 60
-ip_address_type:
- description: The type of IP addresses used by the subnets for the load balancer.
- returned: when state is present
- type: str
- sample: ipv4
-listeners:
- description: Information about the listeners.
- returned: when state is present
- type: complex
- contains:
- listener_arn:
- description: The Amazon Resource Name (ARN) of the listener.
- returned: when state is present
- type: str
- sample: ""
- load_balancer_arn:
- description: The Amazon Resource Name (ARN) of the load balancer.
- returned: when state is present
- type: str
- sample: ""
- port:
- description: The port on which the load balancer is listening.
- returned: when state is present
- type: int
- sample: 80
- protocol:
- description: The protocol for connections from clients to the load balancer.
- returned: when state is present
- type: str
- sample: HTTPS
- certificates:
- description: The SSL server certificate.
- returned: when state is present
- type: complex
- contains:
- certificate_arn:
- description: The Amazon Resource Name (ARN) of the certificate.
- returned: when state is present
- type: str
- sample: ""
- ssl_policy:
- description: The security policy that defines which ciphers and protocols are supported.
- returned: when state is present
- type: str
- sample: ""
- default_actions:
- description: The default actions for the listener.
- returned: when state is present
- type: complex
- contains:
- type:
- description: The type of action.
- returned: when state is present
- type: str
- sample: ""
- target_group_arn:
- description: The Amazon Resource Name (ARN) of the target group.
- returned: when state is present
- type: str
- sample: ""
-load_balancer_arn:
- description: The Amazon Resource Name (ARN) of the load balancer.
- returned: when state is present
- type: str
- sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
-load_balancer_name:
- description: The name of the load balancer.
- returned: when state is present
- type: str
- sample: my-elb
-routing_http2_enabled:
- description: Indicates whether HTTP/2 is enabled.
- returned: when state is present
- type: str
- sample: true
-scheme:
- description: Internet-facing or internal load balancer.
- returned: when state is present
- type: str
- sample: internal
-security_groups:
- description: The IDs of the security groups for the load balancer.
- returned: when state is present
- type: list
- sample: ['sg-0011223344']
-state:
- description: The state of the load balancer.
- returned: when state is present
- type: dict
- sample: "{'code': 'active'}"
-tags:
- description: The tags attached to the load balancer.
- returned: when state is present
- type: dict
- sample: "{
- 'Tag': 'Example'
- }"
-type:
- description: The type of load balancer.
- returned: when state is present
- type: str
- sample: application
-vpc_id:
- description: The ID of the VPC for the load balancer.
- returned: when state is present
- type: str
- sample: vpc-0011223344
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags
-
-from ansible.module_utils.aws.elbv2 import ApplicationLoadBalancer, ELBListeners, ELBListener, ELBListenerRules, ELBListenerRule
-from ansible.module_utils.aws.elb_utils import get_elb_listener_rules
-
-
-def create_or_update_elb(elb_obj):
- """Create ELB or modify main attributes. json_exit here"""
-
- if elb_obj.elb:
- # ELB exists so check subnets, security groups and tags match what has been passed
-
- # Subnets
- if not elb_obj.compare_subnets():
- elb_obj.modify_subnets()
-
- # Security Groups
- if not elb_obj.compare_security_groups():
- elb_obj.modify_security_groups()
-
- # Tags - only need to play with tags if tags parameter has been set to something
- if elb_obj.tags is not None:
-
- # Work out which tags need adding/updating and which need deleting
- tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']),
- boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags)
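- # compare_aws_tags() returns the tags that need setting and the tag keys to remove;
- # with purge_tags=False, extra existing tags are left in place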
- if tags_to_delete:
- elb_obj.delete_tags(tags_to_delete)
-
- # Add/update tags
- if tags_need_modify:
- elb_obj.modify_tags()
-
- else:
- # Create load balancer
- elb_obj.create_elb()
-
- # ELB attributes
- elb_obj.update_elb_attributes()
- elb_obj.modify_elb_attributes()
-
- # Listeners
- listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'])
-
- listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners()
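- # Listeners are matched on port (see the module notes): a changed port surfaces as one delete plus one add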
-
- # Delete listeners
- for listener_to_delete in listeners_to_delete:
- listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn'])
- listener_obj.delete()
- listeners_obj.changed = True
-
- # Add listeners
- for listener_to_add in listeners_to_add:
- listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn'])
- listener_obj.add()
- listeners_obj.changed = True
-
- # Modify listeners
- for listener_to_modify in listeners_to_modify:
- listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn'])
- listener_obj.modify()
- listeners_obj.changed = True
-
- # If listeners changed, mark ELB as changed
- if listeners_obj.changed:
- elb_obj.changed = True
-
- # Rules of each listener
- for listener in listeners_obj.listeners:
- if 'Rules' in listener:
- rules_obj = ELBListenerRules(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port'])
-
- rules_to_add, rules_to_modify, rules_to_delete = rules_obj.compare_rules()
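- # Rules are matched on priority (see the module notes): a changed priority surfaces as one delete plus one add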
-
- # Delete rules
- if elb_obj.module.params['purge_rules']:
- for rule in rules_to_delete:
- rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn)
- rule_obj.delete()
- elb_obj.changed = True
-
- # Add rules
- for rule in rules_to_add:
- rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn)
- rule_obj.create()
- elb_obj.changed = True
-
- # Modify rules
- for rule in rules_to_modify:
- rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn)
- rule_obj.modify()
- elb_obj.changed = True
-
- # Get the ELB again
- elb_obj.update()
-
- # Get the ELB listeners again
- listeners_obj.update()
-
- # Update the ELB attributes
- elb_obj.update_elb_attributes()
-
- # Convert to snake_case and merge in everything we want to return to the user
- snaked_elb = camel_dict_to_snake_dict(elb_obj.elb)
- snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes))
- snaked_elb['listeners'] = []
- for listener in listeners_obj.current_listeners:
- # For each listener, get listener rules
- listener['rules'] = get_elb_listener_rules(elb_obj.connection, elb_obj.module, listener['ListenerArn'])
- snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener))
-
- # Change tags to ansible friendly dict
- snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags'])
-
- elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb)
-
-
-def delete_elb(elb_obj):
-
- if elb_obj.elb:
- elb_obj.delete()
-
- elb_obj.module.exit_json(changed=elb_obj.changed)
-
-
-def main():
-
- argument_spec = dict(
- access_logs_enabled=dict(type='bool'),
- access_logs_s3_bucket=dict(type='str'),
- access_logs_s3_prefix=dict(type='str'),
- deletion_protection=dict(type='bool'),
- http2=dict(type='bool'),
- idle_timeout=dict(type='int'),
- listeners=dict(type='list',
- elements='dict',
- options=dict(
- Protocol=dict(type='str', required=True),
- Port=dict(type='int', required=True),
- SslPolicy=dict(type='str'),
- Certificates=dict(type='list'),
- DefaultActions=dict(type='list', required=True),
- Rules=dict(type='list')
- )
- ),
- name=dict(required=True, type='str'),
- purge_listeners=dict(default=True, type='bool'),
- purge_tags=dict(default=True, type='bool'),
- subnets=dict(type='list'),
- security_groups=dict(type='list'),
- scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
- state=dict(choices=['present', 'absent'], default='present'),
- tags=dict(type='dict'),
- wait_timeout=dict(type='int'),
- wait=dict(default=False, type='bool'),
- purge_rules=dict(default=True, type='bool')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[
- ('state', 'present', ['subnets', 'security_groups'])
- ],
- required_together=[
- ['access_logs_enabled', 'access_logs_s3_bucket']
- ]
- )
-
- # Quick check of listeners parameters
- listeners = module.params.get("listeners")
- if listeners is not None:
- for listener in listeners:
- if listener.get('Protocol') == 'HTTPS':
- if listener.get('SslPolicy') is None:
- module.fail_json(msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS")
-
- if listener.get('Certificates') is None:
- module.fail_json(msg="'Certificates' is a required listener dict key when Protocol = HTTPS")
-
- connection = module.client('elbv2')
- connection_ec2 = module.client('ec2')
-
- state = module.params.get("state")
-
- elb = ApplicationLoadBalancer(connection, connection_ec2, module)
-
- if state == 'present':
- create_or_update_elb(elb)
- else:
- delete_elb(elb)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elb_application_lb_info.py b/lib/ansible/modules/cloud/amazon/elb_application_lb_info.py
deleted file mode 100644
index f2f4c4502a..0000000000
--- a/lib/ansible/modules/cloud/amazon/elb_application_lb_info.py
+++ /dev/null
@@ -1,292 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: elb_application_lb_info
-short_description: Gather information about application ELBs in AWS
-description:
- - Gather information about application ELBs in AWS
- - This module was called C(elb_application_lb_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.4"
-requirements: [ boto3 ]
-author: Rob White (@wimnat)
-options:
- load_balancer_arns:
- description:
- - The Amazon Resource Names (ARNs) of the load balancers. You can specify up to 20 load balancers in a single call.
- required: false
- type: list
- names:
- description:
- - The names of the load balancers.
- required: false
- type: list
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all ALBs
-- elb_application_lb_info:
-
-# Gather information about a particular ALB given its ARN
-- elb_application_lb_info:
- load_balancer_arns:
- - "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
-
-# Gather information about ALBs named 'elb1' and 'elb2'
-- elb_application_lb_info:
- names:
- - elb1
- - elb2
-
-# Gather information about specific ALB
-- elb_application_lb_info:
- names: "alb-name"
- region: "aws-region"
- register: alb_info
-- debug:
- var: alb_info
-'''
-
-RETURN = '''
-load_balancers:
- description: A list of load balancers.
- returned: always
- type: complex
- contains:
- access_logs_s3_bucket:
- description: The name of the S3 bucket for the access logs.
- returned: when status is present
- type: str
- sample: mys3bucket
- access_logs_s3_enabled:
- description: Indicates whether access logs stored in Amazon S3 are enabled.
- returned: when status is present
- type: str
- sample: true
- access_logs_s3_prefix:
- description: The prefix for the location in the S3 bucket.
- returned: when status is present
- type: str
- sample: /my/logs
- availability_zones:
- description: The Availability Zones for the load balancer.
- returned: when status is present
- type: list
- sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]"
- canonical_hosted_zone_id:
- description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
- returned: when status is present
- type: str
- sample: ABCDEF12345678
- created_time:
- description: The date and time the load balancer was created.
- returned: when status is present
- type: str
- sample: "2015-02-12T02:14:02+00:00"
- deletion_protection_enabled:
- description: Indicates whether deletion protection is enabled.
- returned: when status is present
- type: str
- sample: true
- dns_name:
- description: The public DNS name of the load balancer.
- returned: when status is present
- type: str
- sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
- idle_timeout_timeout_seconds:
- description: The idle timeout value, in seconds.
- returned: when status is present
- type: str
- sample: 60
- ip_address_type:
- description: The type of IP addresses used by the subnets for the load balancer.
- returned: when status is present
- type: str
- sample: ipv4
- load_balancer_arn:
- description: The Amazon Resource Name (ARN) of the load balancer.
- returned: when status is present
- type: str
- sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
- load_balancer_name:
- description: The name of the load balancer.
- returned: when status is present
- type: str
- sample: my-elb
- scheme:
- description: Internet-facing or internal load balancer.
- returned: when status is present
- type: str
- sample: internal
- security_groups:
- description: The IDs of the security groups for the load balancer.
- returned: when status is present
- type: list
- sample: ['sg-0011223344']
- state:
- description: The state of the load balancer.
- returned: when status is present
- type: dict
- sample: "{'code': 'active'}"
- tags:
- description: The tags attached to the load balancer.
- returned: when status is present
- type: dict
- sample: "{
- 'Tag': 'Example'
- }"
- type:
- description: The type of load balancer.
- returned: when status is present
- type: str
- sample: application
- vpc_id:
- description: The ID of the VPC for the load balancer.
- returned: when status is present
- type: str
- sample: vpc-0011223344
-'''
-
-import traceback
-
-try:
- import boto3
- from botocore.exceptions import ClientError, NoCredentialsError
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
- ec2_argument_spec, get_aws_connection_info)
-
-
-def get_elb_listeners(connection, module, elb_arn):
-
- try:
- return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners']
- except ClientError as e:
- module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
-
-def get_listener_rules(connection, module, listener_arn):
-
- try:
- return connection.describe_rules(ListenerArn=listener_arn)['Rules']
- except ClientError as e:
- module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
-
-def get_load_balancer_attributes(connection, module, load_balancer_arn):
-
- try:
- load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes'])
- except ClientError as e:
- module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
- # Replace '.' with '_' in attribute key names to make them more Ansible-friendly
- for k, v in list(load_balancer_attributes.items()):
- load_balancer_attributes[k.replace('.', '_')] = v
- del load_balancer_attributes[k]
-
- return load_balancer_attributes
-
-
-def get_load_balancer_tags(connection, module, load_balancer_arn):
-
- try:
- return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags'])
- except ClientError as e:
- module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
-
-def list_load_balancers(connection, module):
-
- load_balancer_arns = module.params.get("load_balancer_arns")
- names = module.params.get("names")
-
- try:
- load_balancer_paginator = connection.get_paginator('describe_load_balancers')
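- # Use a paginator so results beyond the first page are included; build_full_result() merges all pages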
- if not load_balancer_arns and not names:
- load_balancers = load_balancer_paginator.paginate().build_full_result()
- if load_balancer_arns:
- load_balancers = load_balancer_paginator.paginate(LoadBalancerArns=load_balancer_arns).build_full_result()
- if names:
- load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result()
- except ClientError as e:
- if e.response['Error']['Code'] == 'LoadBalancerNotFound':
- module.exit_json(load_balancers=[])
- else:
- module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except NoCredentialsError as e:
- module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc())
-
- for load_balancer in load_balancers['LoadBalancers']:
- # Get the attributes for each elb
- load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn']))
-
- # Get the listeners for each elb
- load_balancer['listeners'] = get_elb_listeners(connection, module, load_balancer['LoadBalancerArn'])
-
- # For each listener, get listener rules
- for listener in load_balancer['listeners']:
- listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn'])
-
- # Turn the boto3 result into ansible-friendly snake_case names
- snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']]
-
- # Get tags for each load balancer
- for snaked_load_balancer in snaked_load_balancers:
- snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn'])
-
- module.exit_json(load_balancers=snaked_load_balancers)
-
-
-def main():
-
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- load_balancer_arns=dict(type='list'),
- names=dict(type='list')
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive=[['load_balancer_arns', 'names']],
- supports_check_mode=True
- )
- if module._name == 'elb_application_lb_facts':
- module.deprecate("The 'elb_application_lb_facts' module has been renamed to 'elb_application_lb_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-
- if region:
- connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
- else:
- module.fail_json(msg="region must be specified")
-
- list_load_balancers(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elb_classic_lb.py b/lib/ansible/modules/cloud/amazon/elb_classic_lb.py
deleted file mode 100644
index e51b376b82..0000000000
--- a/lib/ansible/modules/cloud/amazon/elb_classic_lb.py
+++ /dev/null
@@ -1,1365 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: elb_classic_lb
-description:
- - Creates or destroys an Amazon ELB and returns information about it.
- - Will be marked changed only when the state of the load balancer changes.
-short_description: Creates or destroys an Amazon ELB
-version_added: "2.4"
-author:
- - "Jim Dalton (@jsdalton)"
-options:
- state:
- description:
- - Create or destroy the ELB
- choices: ["present", "absent"]
- required: true
- type: str
- name:
- description:
- - The name of the ELB
- required: true
- type: str
- listeners:
- description:
- - List of ports/protocols for this ELB to listen on (see example)
- type: list
- purge_listeners:
- description:
- - Purge existing listeners on ELB that are not found in listeners
- type: bool
- default: 'yes'
- instance_ids:
- description:
- - List of instance ids to attach to this ELB
- version_added: "2.1"
- type: list
- purge_instance_ids:
- description:
- - Purge existing instance ids on ELB that are not found in instance_ids
- type: bool
- default: 'no'
- version_added: "2.1"
- zones:
- description:
- - List of availability zones to enable on this ELB
- type: list
- purge_zones:
- description:
- - Purge existing availability zones on ELB that are not found in zones
- type: bool
- default: 'no'
- security_group_ids:
- description:
- - A list of security group IDs to apply to the ELB
- version_added: "1.6"
- type: list
- security_group_names:
- description:
- - A list of security group names to apply to the ELB
- version_added: "2.0"
- type: list
- health_check:
- description:
- - An associative array of health check configuration settings (see example)
- type: dict
- access_logs:
- description:
- - An associative array of access logs configuration settings (see example)
- version_added: "2.0"
- type: dict
- subnets:
- description:
- - A list of VPC subnets to use when creating the ELB. I(zones) should be empty if using this.
- version_added: "1.7"
- type: list
- purge_subnets:
- description:
- - Purge existing subnets on the ELB that are not found in subnets
- type: bool
- default: 'no'
- version_added: "1.7"
- scheme:
- description:
- - The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
- Updating the scheme to a different value destroys and recreates the ELB, and requires the wait option to be set.
- choices: ["internal", "internet-facing"]
- default: 'internet-facing'
- version_added: "1.7"
- type: str
- validate_certs:
- description:
- - When set to C(no), SSL certificates will not be validated for boto versions >= 2.6.0.
- type: bool
- default: 'yes'
- version_added: "1.5"
- connection_draining_timeout:
- description:
- - Wait a specified timeout allowing connections to drain before terminating an instance
- version_added: "1.8"
- type: int
- idle_timeout:
- description:
- - ELB connections from clients and to servers are timed out after this amount of time
- version_added: "2.0"
- type: int
- cross_az_load_balancing:
- description:
- - Distribute load across all configured Availability Zones
- type: bool
- default: 'no'
- version_added: "1.8"
- stickiness:
- description:
- - An associative array of stickiness policy settings. The policy will be applied to all listeners (see example).
- version_added: "2.0"
- type: dict
- wait:
- description:
- - When specified, Ansible will check the status of the load balancer to ensure it has been successfully
- removed from AWS.
- type: bool
- default: 'no'
- version_added: "2.1"
- wait_timeout:
- description:
- - Used in conjunction with wait. Number of seconds to wait for the elb to be terminated.
- A maximum of 600 seconds (10 minutes) is allowed.
- default: 60
- version_added: "2.1"
- type: int
- tags:
- description:
- - An associative array of tags. To delete all tags, supply an empty dict.
- version_added: "2.1"
- type: dict
-
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-EXAMPLES = """
-# Note: None of these examples set aws_access_key, aws_secret_key, or region.
-# It is assumed that their matching environment variables are set.
-
-# Basic provisioning example (non-VPC)
-
-- elb_classic_lb:
- name: "test-please-delete"
- state: present
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http # options are http, https, ssl, tcp
- load_balancer_port: 80
- instance_port: 80
- proxy_protocol: True
- - protocol: https
- load_balancer_port: 443
- instance_protocol: http # optional, defaults to value of protocol setting
- instance_port: 80
- # ssl certificate required for https or ssl
- ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
- delegate_to: localhost
-
-# Internal ELB example
-
-- elb_classic_lb:
- name: "test-vpc"
- scheme: internal
- state: present
- instance_ids:
- - i-abcd1234
- purge_instance_ids: true
- subnets:
- - subnet-abcd1234
- - subnet-1a2b3c4d
- listeners:
- - protocol: http # options are http, https, ssl, tcp
- load_balancer_port: 80
- instance_port: 80
- delegate_to: localhost
-
-# Configure a health check and the access logs
-- elb_classic_lb:
- name: "test-please-delete"
- state: present
- zones:
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- health_check:
- ping_protocol: http # options are http, https, ssl, tcp
- ping_port: 80
- ping_path: "/index.html" # not required for tcp or ssl
- response_timeout: 5 # seconds
- interval: 30 # seconds
- unhealthy_threshold: 2
- healthy_threshold: 10
- access_logs:
- interval: 5 # minutes (defaults to 60)
- s3_location: "my-bucket" # This value is required if access_logs is set
- s3_prefix: "logs"
- delegate_to: localhost
-
-# Ensure ELB is gone
-- elb_classic_lb:
- name: "test-please-delete"
- state: absent
- delegate_to: localhost
-
-# Ensure ELB is gone and wait for check (for default timeout)
-- elb_classic_lb:
- name: "test-please-delete"
- state: absent
- wait: yes
- delegate_to: localhost
-
-# Ensure ELB is gone and wait for check with timeout value
-- elb_classic_lb:
- name: "test-please-delete"
- state: absent
- wait: yes
- wait_timeout: 600
- delegate_to: localhost
-
-# Normally, this module will purge any listeners that exist on the ELB
-# but aren't specified in the listeners parameter. If purge_listeners is
-# false it leaves them alone
-- elb_classic_lb:
- name: "test-please-delete"
- state: present
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- purge_listeners: no
- delegate_to: localhost
-
-# Normally, this module will leave availability zones that are enabled
-# on the ELB alone. If purge_zones is true, then any extraneous zones
-# will be removed
-- elb_classic_lb:
- name: "test-please-delete"
- state: present
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- purge_zones: yes
- delegate_to: localhost
-
-# Create an ELB and assign a list of subnets to it.
-- elb_classic_lb:
- state: present
- name: 'New ELB'
- security_group_ids: 'sg-123456,sg-67890'
- region: us-west-2
- subnets: 'subnet-123456,subnet-67890'
- purge_subnets: yes
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- delegate_to: localhost
-
-# Create an ELB with connection draining, increased idle timeout and cross availability
-# zone load balancing
-- elb_classic_lb:
- name: "New ELB"
- state: present
- connection_draining_timeout: 60
- idle_timeout: 300
- cross_az_load_balancing: "yes"
- region: us-east-1
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- delegate_to: localhost
-
-# Create an ELB with load balancer stickiness enabled
-- elb_classic_lb:
- name: "New ELB"
- state: present
- region: us-east-1
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- stickiness:
- type: loadbalancer
- enabled: yes
- expiration: 300
- delegate_to: localhost
-
-# Create an ELB with application stickiness enabled
-- elb_classic_lb:
- name: "New ELB"
- state: present
- region: us-east-1
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- stickiness:
- type: application
- enabled: yes
- cookie: SESSIONID
- delegate_to: localhost
-
-# Create an ELB and add tags
-- elb_classic_lb:
- name: "New ELB"
- state: present
- region: us-east-1
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- tags:
- Name: "New ELB"
- stack: "production"
- client: "Bob"
- delegate_to: localhost
-
-# Delete all tags from an ELB
-- elb_classic_lb:
- name: "New ELB"
- state: present
- region: us-east-1
- zones:
- - us-east-1a
- - us-east-1d
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- tags: {}
- delegate_to: localhost
-"""
-
-import random
-import time
-import traceback
-
-try:
- import boto
- import boto.ec2.elb
- import boto.ec2.elb.attributes
- import boto.vpc
- from boto.ec2.elb.healthcheck import HealthCheck
- from boto.ec2.tag import Tag
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import ec2_argument_spec, connect_to_aws, AnsibleAWSError, get_aws_connection_info
-from ansible.module_utils.six import string_types
-from ansible.module_utils._text import to_native
-
-
-def _throttleable_operation(max_retries):
- def _operation_wrapper(op):
- def _do_op(*args, **kwargs):
- retry = 0
- while True:
- try:
- return op(*args, **kwargs)
- except boto.exception.BotoServerError as e:
- if retry < max_retries and e.code in \
- ("Throttling", "RequestLimitExceeded"):
- retry = retry + 1
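- # Full-jitter exponential backoff: sleep a random interval up to 2**retry seconds, capped at 300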
- time.sleep(min(random.random() * (2 ** retry), 300))
- continue
- else:
- raise
- return _do_op
- return _operation_wrapper
-
-
-def _get_vpc_connection(module, region, aws_connect_params):
- try:
- return connect_to_aws(boto.vpc, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- module.fail_json(msg=str(e))
-
-
-_THROTTLING_RETRIES = 5
-
-
-class ElbManager(object):
- """Handles ELB creation and destruction"""
-
- def __init__(self, module, name, listeners=None, purge_listeners=None,
- zones=None, purge_zones=None, security_group_ids=None,
- health_check=None, subnets=None, purge_subnets=None,
- scheme="internet-facing", connection_draining_timeout=None,
- idle_timeout=None,
- cross_az_load_balancing=None, access_logs=None,
- stickiness=None, wait=None, wait_timeout=None, tags=None,
- region=None,
- instance_ids=None, purge_instance_ids=None, **aws_connect_params):
-
- self.module = module
- self.name = name
- self.listeners = listeners
- self.purge_listeners = purge_listeners
- self.instance_ids = instance_ids
- self.purge_instance_ids = purge_instance_ids
- self.zones = zones
- self.purge_zones = purge_zones
- self.security_group_ids = security_group_ids
- self.health_check = health_check
- self.subnets = subnets
- self.purge_subnets = purge_subnets
- self.scheme = scheme
- self.connection_draining_timeout = connection_draining_timeout
- self.idle_timeout = idle_timeout
- self.cross_az_load_balancing = cross_az_load_balancing
- self.access_logs = access_logs
- self.stickiness = stickiness
- self.wait = wait
- self.wait_timeout = wait_timeout
- self.tags = tags
-
- self.aws_connect_params = aws_connect_params
- self.region = region
-
- self.changed = False
- self.status = 'gone'
- self.elb_conn = self._get_elb_connection()
-
- try:
- self.elb = self._get_elb()
- except boto.exception.BotoServerError as e:
- module.fail_json(msg='unable to get all load balancers: %s' % e.message, exception=traceback.format_exc())
-
- self.ec2_conn = self._get_ec2_connection()
-
- @_throttleable_operation(_THROTTLING_RETRIES)
- def ensure_ok(self):
- """Create the ELB"""
- if not self.elb:
- # Zones and listeners will be added at creation
- self._create_elb()
- else:
- if self._get_scheme():
- # the only way to change the scheme is by recreating the resource
- self.ensure_gone()
- self._create_elb()
- else:
- self._set_zones()
- self._set_security_groups()
- self._set_elb_listeners()
- self._set_subnets()
- self._set_health_check()
- # boto has introduced support for some ELB attributes in
- # different versions, so we check first before trying to
- # set them to avoid errors
- if self._check_attribute_support('connection_draining'):
- self._set_connection_draining_timeout()
- if self._check_attribute_support('connecting_settings'):
- self._set_idle_timeout()
- if self._check_attribute_support('cross_zone_load_balancing'):
- self._set_cross_az_load_balancing()
- if self._check_attribute_support('access_log'):
- self._set_access_log()
- # add sticky options
- self.select_stickiness_policy()
-
- # ensure backend server policies are correct
- self._set_backend_policies()
- # set/remove instance ids
- self._set_instance_ids()
-
- self._set_tags()
-
- def ensure_gone(self):
- """Destroy the ELB"""
- if self.elb:
- self._delete_elb()
- if self.wait:
- elb_removed = self._wait_for_elb_removed()
- # Unfortunately even though the ELB itself is removed quickly
- # the interfaces take longer so reliant security groups cannot
- # be deleted until the interface has registered as removed.
- elb_interface_removed = self._wait_for_elb_interface_removed()
- if not (elb_removed and elb_interface_removed):
- self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
-
- def get_info(self):
- try:
- check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
- except Exception:
- check_elb = None
-
- if not check_elb:
- info = {
- 'name': self.name,
- 'status': self.status,
- 'region': self.region
- }
- else:
- try:
- lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
- except Exception:
- lb_cookie_policy = None
- try:
- app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
- except Exception:
- app_cookie_policy = None
-
- info = {
- 'name': check_elb.name,
- 'dns_name': check_elb.dns_name,
- 'zones': check_elb.availability_zones,
- 'security_group_ids': check_elb.security_groups,
- 'status': self.status,
- 'subnets': self.subnets,
- 'scheme': check_elb.scheme,
- 'hosted_zone_name': check_elb.canonical_hosted_zone_name,
- 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
- 'lb_cookie_policy': lb_cookie_policy,
- 'app_cookie_policy': app_cookie_policy,
- 'proxy_policy': self._get_proxy_protocol_policy(),
- 'backends': self._get_backend_policies(),
- 'instances': [instance.id for instance in check_elb.instances],
- 'out_of_service_count': 0,
- 'in_service_count': 0,
- 'unknown_instance_state_count': 0,
- 'region': self.region
- }
-
- # status of instances behind the ELB
- if info['instances']:
- info['instance_health'] = [dict(
- instance_id=instance_state.instance_id,
- reason_code=instance_state.reason_code,
- state=instance_state.state
- ) for instance_state in self.elb_conn.describe_instance_health(self.name)]
- else:
- info['instance_health'] = []
-
- # instance state counts: InService or OutOfService
- if info['instance_health']:
- for instance_state in info['instance_health']:
- if instance_state['state'] == "InService":
- info['in_service_count'] += 1
- elif instance_state['state'] == "OutOfService":
- info['out_of_service_count'] += 1
- else:
- info['unknown_instance_state_count'] += 1
-
- if check_elb.health_check:
- info['health_check'] = {
- 'target': check_elb.health_check.target,
- 'interval': check_elb.health_check.interval,
- 'timeout': check_elb.health_check.timeout,
- 'healthy_threshold': check_elb.health_check.healthy_threshold,
- 'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
- }
-
- if check_elb.listeners:
- info['listeners'] = [self._api_listener_as_tuple(l)
- for l in check_elb.listeners]
- elif self.status == 'created':
- # When creating a new ELB, listeners don't show in the
- # immediately returned result, so just include the
- # ones that were added
- info['listeners'] = [self._listener_as_tuple(l)
- for l in self.listeners]
- else:
- info['listeners'] = []
-
- if self._check_attribute_support('connection_draining'):
- info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout)
-
- if self._check_attribute_support('connecting_settings'):
- info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
-
- if self._check_attribute_support('cross_zone_load_balancing'):
- is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
- if is_cross_az_lb_enabled:
- info['cross_az_load_balancing'] = 'yes'
- else:
- info['cross_az_load_balancing'] = 'no'
-
- # return stickiness info?
-
- info['tags'] = self.tags
-
- return info
-
- @_throttleable_operation(_THROTTLING_RETRIES)
- def _wait_for_elb_removed(self):
- polling_increment_secs = 15
- max_retries = (self.wait_timeout // polling_increment_secs)
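- # Poll every 15 seconds until wait_timeout is exhausted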
- status_achieved = False
-
- for x in range(0, max_retries):
- try:
- self.elb_conn.get_all_lb_attributes(self.name)
- except Exception as e:
- if "LoadBalancerNotFound" in getattr(e, 'code', ''):
- status_achieved = True
- break
- else:
- time.sleep(polling_increment_secs)
-
- return status_achieved
-
- @_throttleable_operation(_THROTTLING_RETRIES)
- def _wait_for_elb_interface_removed(self):
- polling_increment_secs = 15
- max_retries = (self.wait_timeout // polling_increment_secs)
- status_achieved = False
-
- elb_interfaces = self.ec2_conn.get_all_network_interfaces(
- filters={'attachment.instance-owner-id': 'amazon-elb',
- 'description': 'ELB {0}'.format(self.name)})
-
- for x in range(0, max_retries):
- for interface in elb_interfaces:
- try:
- result = self.ec2_conn.get_all_network_interfaces(interface.id)
- if result == []:
- status_achieved = True
- break
- else:
- time.sleep(polling_increment_secs)
- except Exception as e:
- if 'InvalidNetworkInterfaceID' in getattr(e, 'code', ''):
- status_achieved = True
- break
- else:
- self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- return status_achieved
-
- @_throttleable_operation(_THROTTLING_RETRIES)
- def _get_elb(self):
- elbs = self.elb_conn.get_all_load_balancers()
- for elb in elbs:
- if self.name == elb.name:
- self.status = 'ok'
- return elb
-
- def _get_elb_connection(self):
- try:
- return connect_to_aws(boto.ec2.elb, self.region,
- **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- self.module.fail_json(msg=str(e))
-
- def _get_ec2_connection(self):
- try:
- return connect_to_aws(boto.ec2, self.region,
- **self.aws_connect_params)
- except Exception as e:
- self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- @_throttleable_operation(_THROTTLING_RETRIES)
- def _delete_elb(self):
- # True if succeeds, exception raised if not
- result = self.elb_conn.delete_load_balancer(name=self.name)
- if result:
- self.changed = True
- self.status = 'deleted'
-
- def _create_elb(self):
- listeners = [self._listener_as_tuple(l) for l in self.listeners]
- self.elb = self.elb_conn.create_load_balancer(name=self.name,
- zones=self.zones,
- security_groups=self.security_group_ids,
- complex_listeners=listeners,
- subnets=self.subnets,
- scheme=self.scheme)
- if self.elb:
- # HACK: Work around a boto bug in which the listeners attribute is
- # always set to the listeners argument to create_load_balancer, and
- # not the complex_listeners
- # We're not doing a self.elb = self._get_elb here because there
- # might be eventual consistency issues and it doesn't necessarily
- # make sense to wait until the ELB gets returned from the EC2 API.
- # This is necessary in the event we hit the throttling errors and
- # need to retry ensure_ok
- # See https://github.com/boto/boto/issues/3526
- self.elb.listeners = self.listeners
- self.changed = True
- self.status = 'created'
-
- def _create_elb_listeners(self, listeners):
- """Takes a list of listener tuples and creates them"""
- # True if succeeds, exception raised if not
- self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
- complex_listeners=listeners)
-
- def _delete_elb_listeners(self, listeners):
- """Takes a list of listener tuples and deletes them from the elb"""
- ports = [l[0] for l in listeners]
-
- # True if succeeds, exception raised if not
- self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
- ports)
-
- def _set_elb_listeners(self):
- """
- Creates listeners specified by self.listeners; overwrites existing
- listeners on these ports; removes extraneous listeners
- """
- listeners_to_add = []
- listeners_to_remove = []
- listeners_to_keep = []
-
- # Check for any listeners we need to create or overwrite
- for listener in self.listeners:
- listener_as_tuple = self._listener_as_tuple(listener)
-
- # First we loop through existing listeners to see if one is
- # already specified for this port
- existing_listener_found = None
- for existing_listener in self.elb.listeners:
- # Since ELB allows only one listener on each incoming port, a
- # single match on the incoming port is all we're looking for
- if existing_listener[0] == int(listener['load_balancer_port']):
- existing_listener_found = self._api_listener_as_tuple(existing_listener)
- break
-
- if existing_listener_found:
- # Does it match exactly?
- if listener_as_tuple != existing_listener_found:
- # The ports are the same but something else is different,
- # so we'll remove the existing one and add the new one
- listeners_to_remove.append(existing_listener_found)
- listeners_to_add.append(listener_as_tuple)
- else:
- # We already have this listener, so we're going to keep it
- listeners_to_keep.append(existing_listener_found)
- else:
- # We didn't find an existing listener, so just add the new one
- listeners_to_add.append(listener_as_tuple)
-
- # Check for any extraneous listeners we need to remove, if desired
- if self.purge_listeners:
- for existing_listener in self.elb.listeners:
- existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
- if existing_listener_tuple in listeners_to_remove:
- # Already queued for removal
- continue
- if existing_listener_tuple in listeners_to_keep:
- # Keep this one around
- continue
- # Since we're not already removing it and we don't need to keep
- # it, let's get rid of it
- listeners_to_remove.append(existing_listener_tuple)
-
- if listeners_to_remove:
- self._delete_elb_listeners(listeners_to_remove)
-
- if listeners_to_add:
- self._create_elb_listeners(listeners_to_add)
-
- def _api_listener_as_tuple(self, listener):
- """Adds ssl_certificate_id to ELB API tuple if present"""
- base_tuple = listener.get_complex_tuple()
- if listener.ssl_certificate_id and len(base_tuple) < 5:
- return base_tuple + (listener.ssl_certificate_id,)
- return base_tuple
-
- def _listener_as_tuple(self, listener):
- """Formats listener as a 4- or 5-tuples, in the order specified by the
- ELB API"""
- # N.B. string manipulations on protocols below (str(), upper()) are to
- # ensure the format matches output from the ELB API
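- # e.g. {'load_balancer_port': 443, 'instance_port': 80, 'protocol': 'https'}
- # -> (443, 80, 'HTTPS', 'HTTPS'); a 5th element is appended when ssl_certificate_id is set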
- listener_list = [
- int(listener['load_balancer_port']),
- int(listener['instance_port']),
- str(listener['protocol'].upper()),
- ]
-
- # Instance protocol is not required by ELB API; it defaults to match
- # load balancer protocol. We'll mimic that behavior here
- if 'instance_protocol' in listener:
- listener_list.append(str(listener['instance_protocol'].upper()))
- else:
- listener_list.append(str(listener['protocol'].upper()))
-
- if 'ssl_certificate_id' in listener:
- listener_list.append(str(listener['ssl_certificate_id']))
-
- return tuple(listener_list)
-
- def _enable_zones(self, zones):
- try:
- self.elb.enable_zones(zones)
- except boto.exception.BotoServerError as e:
- self.module.fail_json(msg='unable to enable zones: %s' % e.message, exception=traceback.format_exc())
-
- self.changed = True
-
- def _disable_zones(self, zones):
- try:
- self.elb.disable_zones(zones)
- except boto.exception.BotoServerError as e:
- self.module.fail_json(msg='unable to disable zones: %s' % e.message, exception=traceback.format_exc())
- self.changed = True
-
- def _attach_subnets(self, subnets):
- self.elb_conn.attach_lb_to_subnets(self.name, subnets)
- self.changed = True
-
- def _detach_subnets(self, subnets):
- self.elb_conn.detach_lb_from_subnets(self.name, subnets)
- self.changed = True
-
- def _set_subnets(self):
- """Determine which subnets need to be attached or detached on the ELB"""
- if self.subnets:
- if self.purge_subnets:
- subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
- subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
- else:
- subnets_to_detach = None
- subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
-
- if subnets_to_attach:
- self._attach_subnets(subnets_to_attach)
- if subnets_to_detach:
- self._detach_subnets(subnets_to_detach)
-
- def _get_scheme(self):
- """Determine if the current scheme is different than the scheme of the ELB"""
- if self.scheme:
- if self.elb.scheme != self.scheme:
- if not self.wait:
- self.module.fail_json(msg="Unable to modify scheme without using the wait option")
- return True
- return False
-
- def _set_zones(self):
- """Determine which zones need to be enabled or disabled on the ELB"""
- if self.zones:
- if self.purge_zones:
- zones_to_disable = list(set(self.elb.availability_zones) -
- set(self.zones))
- zones_to_enable = list(set(self.zones) -
- set(self.elb.availability_zones))
- else:
- zones_to_disable = None
- zones_to_enable = list(set(self.zones) -
- set(self.elb.availability_zones))
- if zones_to_enable:
- self._enable_zones(zones_to_enable)
- # N.B. This must come second, in case disabling first would have removed all zones
- if zones_to_disable:
- self._disable_zones(zones_to_disable)
-
- def _set_security_groups(self):
- if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
- self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
- self.changed = True
-
- def _set_health_check(self):
- """Set health check values on ELB as needed"""
- if self.health_check:
- # This just makes it easier to compare each of the attributes
- # and look for changes. Keys are attributes of the current
- # health_check; values are desired values of new health_check
- health_check_config = {
- "target": self._get_health_check_target(),
- "timeout": self.health_check['response_timeout'],
- "interval": self.health_check['interval'],
- "unhealthy_threshold": self.health_check['unhealthy_threshold'],
- "healthy_threshold": self.health_check['healthy_threshold'],
- }
-
- update_health_check = False
-
- # The health_check attribute is *not* set on newly created
- # ELBs! So we have to create our own.
- if not self.elb.health_check:
- self.elb.health_check = HealthCheck()
-
- for attr, desired_value in health_check_config.items():
- if getattr(self.elb.health_check, attr) != desired_value:
- setattr(self.elb.health_check, attr, desired_value)
- update_health_check = True
-
- if update_health_check:
- self.elb.configure_health_check(self.elb.health_check)
- self.changed = True
-
- def _check_attribute_support(self, attr):
- return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
-
- def _set_cross_az_load_balancing(self):
- attributes = self.elb.get_attributes()
- if self.cross_az_load_balancing:
- if not attributes.cross_zone_load_balancing.enabled:
- self.changed = True
- attributes.cross_zone_load_balancing.enabled = True
- else:
- if attributes.cross_zone_load_balancing.enabled:
- self.changed = True
- attributes.cross_zone_load_balancing.enabled = False
- self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
- attributes.cross_zone_load_balancing.enabled)
-
- def _set_access_log(self):
- attributes = self.elb.get_attributes()
- if self.access_logs:
- if 's3_location' not in self.access_logs:
- self.module.fail_json(msg='s3_location information required')
-
- access_logs_config = {
- "enabled": True,
- "s3_bucket_name": self.access_logs['s3_location'],
- "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
- "emit_interval": self.access_logs.get('interval', 60),
- }
-
- update_access_logs_config = False
- for attr, desired_value in access_logs_config.items():
- if getattr(attributes.access_log, attr) != desired_value:
- setattr(attributes.access_log, attr, desired_value)
- update_access_logs_config = True
- if update_access_logs_config:
- self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
- self.changed = True
- elif attributes.access_log.enabled:
- attributes.access_log.enabled = False
- self.changed = True
- self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
-
- def _set_connection_draining_timeout(self):
- attributes = self.elb.get_attributes()
- if self.connection_draining_timeout is not None:
- if not attributes.connection_draining.enabled or \
- attributes.connection_draining.timeout != self.connection_draining_timeout:
- self.changed = True
- attributes.connection_draining.enabled = True
- attributes.connection_draining.timeout = self.connection_draining_timeout
- self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
- else:
- if attributes.connection_draining.enabled:
- self.changed = True
- attributes.connection_draining.enabled = False
- self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
-
- def _set_idle_timeout(self):
- attributes = self.elb.get_attributes()
- if self.idle_timeout is not None:
- if attributes.connecting_settings.idle_timeout != self.idle_timeout:
- self.changed = True
- attributes.connecting_settings.idle_timeout = self.idle_timeout
- self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
-
- def _policy_name(self, policy_type):
- return 'elb-classic-lb-{0}'.format(to_native(policy_type, errors='surrogate_or_strict'))
-
- def _create_policy(self, policy_param, policy_meth, policy):
- getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
-
- def _delete_policy(self, elb_name, policy):
- self.elb_conn.delete_lb_policy(elb_name, policy)
-
- def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
- self._delete_policy(self.elb.name, policy)
- self._create_policy(policy_param, policy_meth, policy)
-
- def _set_listener_policy(self, listeners_dict, policy=None):
- policy = [] if policy is None else policy
-
- for listener_port in listeners_dict:
- if listeners_dict[listener_port].startswith('HTTP'):
- self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
-
- def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
- for p in getattr(elb_info.policies, policy_attrs['attr']):
- if str(p.__dict__['policy_name']) == str(policy[0]):
- if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
- self._set_listener_policy(listeners_dict)
- self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
- self.changed = True
- break
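- # for/else: the else clause only runs when no existing policy matched the name above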
- else:
- self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
- self.changed = True
-
- self._set_listener_policy(listeners_dict, policy)
-
- def select_stickiness_policy(self):
- if self.stickiness:
-
- if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
- self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
-
- elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
- d = {}
- for listener in elb_info.listeners:
- d[listener[0]] = listener[2]
- listeners_dict = d
-
- if self.stickiness['type'] == 'loadbalancer':
- policy = []
- policy_type = 'LBCookieStickinessPolicyType'
-
- if self.module.boolean(self.stickiness['enabled']):
-
- if 'expiration' not in self.stickiness:
- self.module.fail_json(msg='expiration must be set when type is loadbalancer')
-
- try:
- expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None
- except ValueError:
- self.module.fail_json(msg='expiration must be set to an integer')
-
- policy_attrs = {
- 'type': policy_type,
- 'attr': 'lb_cookie_stickiness_policies',
- 'method': 'create_lb_cookie_stickiness_policy',
- 'dict_key': 'cookie_expiration_period',
- 'param_value': expiration
- }
- policy.append(self._policy_name(policy_attrs['type']))
-
- self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
- elif not self.module.boolean(self.stickiness['enabled']):
- if len(elb_info.policies.lb_cookie_stickiness_policies):
- if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
- self.changed = True
- else:
- self.changed = False
- self._set_listener_policy(listeners_dict)
- self._delete_policy(self.elb.name, self._policy_name(policy_type))
-
- elif self.stickiness['type'] == 'application':
- policy = []
- policy_type = 'AppCookieStickinessPolicyType'
- if self.module.boolean(self.stickiness['enabled']):
-
- if 'cookie' not in self.stickiness:
- self.module.fail_json(msg='cookie must be set when type is application')
-
- policy_attrs = {
- 'type': policy_type,
- 'attr': 'app_cookie_stickiness_policies',
- 'method': 'create_app_cookie_stickiness_policy',
- 'dict_key': 'cookie_name',
- 'param_value': self.stickiness['cookie']
- }
- policy.append(self._policy_name(policy_attrs['type']))
- self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
- elif not self.module.boolean(self.stickiness['enabled']):
- if len(elb_info.policies.app_cookie_stickiness_policies):
- if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
- self.changed = True
- self._set_listener_policy(listeners_dict)
- self._delete_policy(self.elb.name, self._policy_name(policy_type))
-
- else:
- self._set_listener_policy(listeners_dict)
-
- def _get_backend_policies(self):
- """Get a list of backend policies"""
- policies = []
- if self.elb.backends is not None:
- for backend in self.elb.backends:
- if backend.policies is not None:
- for policy in backend.policies:
- policies.append(str(backend.instance_port) + ':' + policy.policy_name)
-
- return policies
-
- def _set_backend_policies(self):
- """Sets policies for all backends"""
- ensure_proxy_protocol = False
- replace = []
- backend_policies = self._get_backend_policies()
-
- # Find out what needs to be changed
- for listener in self.listeners:
- want = False
-
- if 'proxy_protocol' in listener and listener['proxy_protocol']:
- ensure_proxy_protocol = True
- want = True
-
- if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
- if not want:
- replace.append({'port': listener['instance_port'], 'policies': []})
- elif want:
- replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
-
- # enable or disable proxy protocol
- if ensure_proxy_protocol:
- self._set_proxy_protocol_policy()
-
- # Make the backend policies so
- for item in replace:
- self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
- self.changed = True
-
- def _get_proxy_protocol_policy(self):
- """Find out if the elb has a proxy protocol enabled"""
- if self.elb.policies is not None and self.elb.policies.other_policies is not None:
- for policy in self.elb.policies.other_policies:
- if policy.policy_name == 'ProxyProtocol-policy':
- return policy.policy_name
-
- return None
-
- def _set_proxy_protocol_policy(self):
- """Install a proxy protocol policy if needed"""
- proxy_policy = self._get_proxy_protocol_policy()
-
- if proxy_policy is None:
- self.elb_conn.create_lb_policy(
- self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
- )
- self.changed = True
-
- # TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
-
- def _diff_list(self, a, b):
- """Find the entries in list a that are not in list b"""
- b = set(b)
- return [aa for aa in a if aa not in b]
-
- def _get_instance_ids(self):
- """Get the current list of instance ids installed in the elb"""
- instances = []
- if self.elb.instances is not None:
- for instance in self.elb.instances:
- instances.append(instance.id)
-
- return instances
-
- def _set_instance_ids(self):
- """Register or deregister instances from an lb instance"""
- assert_instances = self.instance_ids or []
-
- has_instances = self._get_instance_ids()
-
- add_instances = self._diff_list(assert_instances, has_instances)
- if add_instances:
- self.elb_conn.register_instances(self.elb.name, add_instances)
- self.changed = True
-
- if self.purge_instance_ids:
- remove_instances = self._diff_list(has_instances, assert_instances)
- if remove_instances:
- self.elb_conn.deregister_instances(self.elb.name, remove_instances)
- self.changed = True
-
- def _set_tags(self):
- """Add/Delete tags"""
- if self.tags is None:
- return
-
- params = {'LoadBalancerNames.member.1': self.name}
-
- tagdict = dict()
-
- # get the current list of tags from the ELB, if ELB exists
- if self.elb:
- current_tags = self.elb_conn.get_list('DescribeTags', params,
- [('member', Tag)])
- tagdict = dict((tag.Key, tag.Value) for tag in current_tags
- if hasattr(tag, 'Key'))
-
- # Add missing tags
- dictact = dict(set(self.tags.items()) - set(tagdict.items()))
- if dictact:
- for i, key in enumerate(dictact):
- params['Tags.member.%d.Key' % (i + 1)] = key
- params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
-
- self.elb_conn.make_request('AddTags', params)
- self.changed = True
-
- # Remove extra tags
- dictact = dict(set(tagdict.items()) - set(self.tags.items()))
- if dictact:
- for i, key in enumerate(dictact):
- params['Tags.member.%d.Key' % (i + 1)] = key
-
- self.elb_conn.make_request('RemoveTags', params)
- self.changed = True
-
- def _get_health_check_target(self):
- """Compose target string from healthcheck parameters"""
- protocol = self.health_check['ping_protocol'].upper()
- path = ""
-
- if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
- path = self.health_check['ping_path']
-
- return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state={'required': True, 'choices': ['present', 'absent']},
- name={'required': True},
- listeners={'default': None, 'required': False, 'type': 'list'},
- purge_listeners={'default': True, 'required': False, 'type': 'bool'},
- instance_ids={'default': None, 'required': False, 'type': 'list'},
- purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
- zones={'default': None, 'required': False, 'type': 'list'},
- purge_zones={'default': False, 'required': False, 'type': 'bool'},
- security_group_ids={'default': None, 'required': False, 'type': 'list'},
- security_group_names={'default': None, 'required': False, 'type': 'list'},
- health_check={'default': None, 'required': False, 'type': 'dict'},
- subnets={'default': None, 'required': False, 'type': 'list'},
- purge_subnets={'default': False, 'required': False, 'type': 'bool'},
- scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']},
- connection_draining_timeout={'default': None, 'required': False, 'type': 'int'},
- idle_timeout={'default': None, 'type': 'int', 'required': False},
- cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False},
- stickiness={'default': None, 'required': False, 'type': 'dict'},
- access_logs={'default': None, 'required': False, 'type': 'dict'},
- wait={'default': False, 'type': 'bool', 'required': False},
- wait_timeout={'default': 60, 'type': 'int', 'required': False},
- tags={'default': None, 'required': False, 'type': 'dict'}
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- mutually_exclusive=[['security_group_ids', 'security_group_names']]
- )
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
- if not region:
- module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
-
- name = module.params['name']
- state = module.params['state']
- listeners = module.params['listeners']
- purge_listeners = module.params['purge_listeners']
- instance_ids = module.params['instance_ids']
- purge_instance_ids = module.params['purge_instance_ids']
- zones = module.params['zones']
- purge_zones = module.params['purge_zones']
- security_group_ids = module.params['security_group_ids']
- security_group_names = module.params['security_group_names']
- health_check = module.params['health_check']
- access_logs = module.params['access_logs']
- subnets = module.params['subnets']
- purge_subnets = module.params['purge_subnets']
- scheme = module.params['scheme']
- connection_draining_timeout = module.params['connection_draining_timeout']
- idle_timeout = module.params['idle_timeout']
- cross_az_load_balancing = module.params['cross_az_load_balancing']
- stickiness = module.params['stickiness']
- wait = module.params['wait']
- wait_timeout = module.params['wait_timeout']
- tags = module.params['tags']
-
- if state == 'present' and not listeners:
- module.fail_json(msg="At least one listener is required for ELB creation")
-
- if state == 'present' and not (zones or subnets):
- module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
-
- if wait_timeout > 600:
- module.fail_json(msg='wait_timeout maximum is 600 seconds')
-
- if security_group_names:
- security_group_ids = []
- try:
- ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
- if subnets: # We have at least one subnet, ergo this is a VPC
- vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
- vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
- filters = {'vpc_id': vpc_id}
- else:
- filters = None
- grp_details = ec2.get_all_security_groups(filters=filters)
-
- for group_name in security_group_names:
- if isinstance(group_name, string_types):
- group_name = [group_name]
-
- group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name]
- security_group_ids.extend(group_id)
- except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg=str(e))
-
- elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
- purge_zones, security_group_ids, health_check,
- subnets, purge_subnets, scheme,
- connection_draining_timeout, idle_timeout,
- cross_az_load_balancing,
- access_logs, stickiness, wait, wait_timeout, tags,
- region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
- **aws_connect_params)
-
- # check for unsupported attributes for this version of boto
- if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
- module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
-
- if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
- module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
-
- if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
- module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
-
- if state == 'present':
- elb_man.ensure_ok()
- elif state == 'absent':
- elb_man.ensure_gone()
-
- ansible_facts = {'ec2_elb': 'info'}
- ec2_facts_result = dict(changed=elb_man.changed,
- elb=elb_man.get_info(),
- ansible_facts=ansible_facts)
-
- module.exit_json(**ec2_facts_result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elb_classic_lb_info.py b/lib/ansible/modules/cloud/amazon/elb_classic_lb_info.py
deleted file mode 100644
index c32bf452ca..0000000000
--- a/lib/ansible/modules/cloud/amazon/elb_classic_lb_info.py
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/usr/bin/python
-#
-# This is a free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This Ansible library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this library. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: elb_classic_lb_info
-short_description: Gather information about EC2 Elastic Load Balancers in AWS
-description:
- - Gather information about EC2 Elastic Load Balancers in AWS
- - This module was called C(elb_classic_lb_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.0"
-author:
- - "Michael Schultz (@mjschultz)"
- - "Fernando Jose Pando (@nand0p)"
-options:
- names:
- description:
- - List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned.
- type: list
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - botocore
- - boto3
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-# Output format tries to match ec2_elb_lb module input parameters
-
-# Gather information about all ELBs
-- elb_classic_lb_info:
- register: elb_info
-
-- debug:
- msg: "{{ item.dns_name }}"
- loop: "{{ elb_info.elbs }}"
-
-# Gather information about a particular ELB
-- elb_classic_lb_info:
- names: frontend-prod-elb
- register: elb_info
-
-- debug:
- msg: "{{ elb_info.elbs.0.dns_name }}"
-
-# Gather information about a set of ELBs
-- elb_classic_lb_info:
- names:
- - frontend-prod-elb
- - backend-prod-elb
- register: elb_info
-
-- debug:
- msg: "{{ item.dns_name }}"
- loop: "{{ elb_info.elbs }}"
-
-'''
-
-RETURN = '''
-elbs:
- description: a list of load balancers
- returned: always
- type: list
- sample:
- elbs:
- - attributes:
- access_log:
- enabled: false
- connection_draining:
- enabled: true
- timeout: 300
- connection_settings:
- idle_timeout: 60
- cross_zone_load_balancing:
- enabled: true
- availability_zones:
- - "us-east-1a"
- - "us-east-1b"
- - "us-east-1c"
- - "us-east-1d"
- - "us-east-1e"
- backend_server_description: []
- canonical_hosted_zone_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com
- canonical_hosted_zone_name_id: XXXXXXXXXXXXXX
- created_time: 2017-08-23T18:25:03.280000+00:00
- dns_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com
- health_check:
- healthy_threshold: 10
- interval: 30
- target: HTTP:80/index.html
- timeout: 5
- unhealthy_threshold: 2
- instances: []
- instances_inservice: []
- instances_inservice_count: 0
- instances_outofservice: []
- instances_outofservice_count: 0
- instances_unknownservice: []
- instances_unknownservice_count: 0
- listener_descriptions:
- - listener:
- instance_port: 80
- instance_protocol: HTTP
- load_balancer_port: 80
- protocol: HTTP
- policy_names: []
- load_balancer_name: test-lb
- policies:
- app_cookie_stickiness_policies: []
- lb_cookie_stickiness_policies: []
- other_policies: []
- scheme: internet-facing
- security_groups:
- - sg-29d13055
- source_security_group:
- group_name: default
- owner_alias: XXXXXXXXXXXX
- subnets:
- - subnet-XXXXXXXX
- - subnet-XXXXXXXX
- tags: {}
- vpc_id: vpc-c248fda4
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (
- AWSRetry,
- camel_dict_to_snake_dict,
- boto3_tag_list_to_ansible_dict
-)
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def list_elbs(connection, names):
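-    """Describe the requested classic ELBs, enriching each with its tags, per-state instance health and attributes."""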
- paginator = connection.get_paginator('describe_load_balancers')
- load_balancers = paginator.paginate(LoadBalancerNames=names).build_full_result().get('LoadBalancerDescriptions', [])
- results = []
-
- for lb in load_balancers:
- description = camel_dict_to_snake_dict(lb)
- name = lb['LoadBalancerName']
- instances = lb.get('Instances', [])
- description['tags'] = get_tags(connection, name)
- description['instances_inservice'], description['instances_inservice_count'] = lb_instance_health(connection, name, instances, 'InService')
- description['instances_outofservice'], description['instances_outofservice_count'] = lb_instance_health(connection, name, instances, 'OutOfService')
- description['instances_unknownservice'], description['instances_unknownservice_count'] = lb_instance_health(connection, name, instances, 'Unknown')
- description['attributes'] = get_lb_attributes(connection, name)
- results.append(description)
- return results
-
-
-def get_lb_attributes(connection, name):
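-    """Return the load balancer's attributes as a snake_cased dict."""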
- attributes = connection.describe_load_balancer_attributes(LoadBalancerName=name).get('LoadBalancerAttributes', {})
- return camel_dict_to_snake_dict(attributes)
-
-
-def get_tags(connection, load_balancer_name):
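-    """Return the load balancer's tags as an Ansible-friendly dict."""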
- tags = connection.describe_tags(LoadBalancerNames=[load_balancer_name])['TagDescriptions']
- if not tags:
- return {}
- return boto3_tag_list_to_ansible_dict(tags[0]['Tags'])
-
-
-def lb_instance_health(connection, load_balancer_name, instances, state):
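-    """Return the ids of instances in the given health state, along with their count."""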
- instance_states = connection.describe_instance_health(LoadBalancerName=load_balancer_name, Instances=instances).get('InstanceStates', [])
- instate = [instance['InstanceId'] for instance in instance_states if instance['State'] == state]
- return instate, len(instate)
-
-
-def main():
- argument_spec = dict(
- names={'default': [], 'type': 'list'}
- )
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True)
- if module._name == 'elb_classic_lb_facts':
- module.deprecate("The 'elb_classic_lb_facts' module has been renamed to 'elb_classic_lb_info'", version='2.13')
-
- connection = module.client('elb')
-
- try:
- elbs = list_elbs(connection, module.params.get('names'))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to get load balancer information.")
-
- module.exit_json(elbs=elbs)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elb_instance.py b/lib/ansible/modules/cloud/amazon/elb_instance.py
deleted file mode 100644
index bf2ed35f5b..0000000000
--- a/lib/ansible/modules/cloud/amazon/elb_instance.py
+++ /dev/null
@@ -1,376 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: elb_instance
-short_description: De-registers or registers instances from EC2 ELBs
-description:
- - This module de-registers or registers an AWS EC2 instance from the ELBs
- that it belongs to.
- - Returns fact "ec2_elbs" which is a list of elbs attached to the instance
- if state=absent is passed as an argument.
- - Will be marked changed when called only if there are ELBs found to operate on.
-version_added: "1.2"
-author: "John Jarvis (@jarv)"
-options:
- state:
- description:
- - register or deregister the instance
- required: true
- choices: ['present', 'absent']
- type: str
- instance_id:
- description:
- - EC2 Instance ID
- required: true
- type: str
- ec2_elbs:
- description:
- - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
- type: list
- enable_availability_zone:
- description:
- - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
- been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
- type: bool
- default: 'yes'
- wait:
- description:
- - Wait for instance registration or deregistration to complete successfully before returning.
- type: bool
- default: 'yes'
- validate_certs:
- description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
- type: bool
- default: 'yes'
- version_added: "1.5"
- wait_timeout:
- description:
- - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs.
- If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
- default: 0
- version_added: "1.6"
- type: int
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-EXAMPLES = """
-# basic pre_task and post_task example
-pre_tasks:
- - name: Gathering ec2 facts
- action: ec2_facts
- - name: Instance De-register
- elb_instance:
- instance_id: "{{ ansible_ec2_instance_id }}"
- state: absent
- delegate_to: localhost
-roles:
- - myrole
-post_tasks:
- - name: Instance Register
- elb_instance:
- instance_id: "{{ ansible_ec2_instance_id }}"
- ec2_elbs: "{{ item }}"
- state: present
- delegate_to: localhost
- loop: "{{ ec2_elbs }}"
-"""
-
-import time
-
-try:
- import boto
- import boto.ec2
- import boto.ec2.autoscale
- import boto.ec2.elb
- from boto.regioninfo import RegionInfo
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec,
- get_aws_connection_info)
-
-
-class ElbManager:
- """Handles EC2 instance ELB registration and de-registration"""
-
- def __init__(self, module, instance_id=None, ec2_elbs=None,
- region=None, **aws_connect_params):
- self.module = module
- self.instance_id = instance_id
- self.region = region
- self.aws_connect_params = aws_connect_params
- self.lbs = self._get_instance_lbs(ec2_elbs)
- self.changed = False
-
- def deregister(self, wait, timeout):
- """De-register the instance from all ELBs and wait for the ELB
- to report it out-of-service"""
-
- for lb in self.lbs:
- initial_state = self._get_instance_health(lb)
- if initial_state is None:
- # Instance isn't registered with this load
- # balancer. Ignore it and try the next one.
- continue
-
- lb.deregister_instances([self.instance_id])
-
- # The ELB is changing state in some way. Either an instance that's
- # InService is moving to OutOfService, or an instance that's
- # already OutOfService is being deregistered.
- self.changed = True
-
- if wait:
- self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
-
- def register(self, wait, enable_availability_zone, timeout):
- """Register the instance for all ELBs and wait for the ELB
- to report the instance in-service"""
- for lb in self.lbs:
- initial_state = self._get_instance_health(lb)
-
- if enable_availability_zone:
-                self._enable_availability_zone(lb)
-
- lb.register_instances([self.instance_id])
-
- if wait:
- self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
- else:
- # We cannot assume no change was made if we don't wait
- # to find out
- self.changed = True
-
- def exists(self, lbtest):
- """ Verify that the named ELB actually exists """
-
- found = False
- for lb in self.lbs:
- if lb.name == lbtest:
- found = True
- break
- return found
-
-    def _enable_availability_zone(self, lb):
- """Enable the current instance's availability zone in the provided lb.
- Returns True if the zone was enabled or False if no change was made.
- lb: load balancer"""
- instance = self._get_instance()
- if instance.placement in lb.availability_zones:
- return False
-
- lb.enable_zones(zones=instance.placement)
-
- # If successful, the new zone will have been added to
- # lb.availability_zones
- return instance.placement in lb.availability_zones
-
- def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
- """Wait for an ELB to change state
- lb: load balancer
- awaited_state : state to poll for (string)"""
-
- wait_timeout = time.time() + timeout
- while True:
- instance_state = self._get_instance_health(lb)
-
- if not instance_state:
- msg = ("The instance %s could not be put in service on %s."
- " Reason: Invalid Instance")
- self.module.fail_json(msg=msg % (self.instance_id, lb))
-
- if instance_state.state == awaited_state:
- # Check the current state against the initial state, and only set
- # changed if they are different.
- if (initial_state is None) or (instance_state.state != initial_state.state):
- self.changed = True
- break
- elif self._is_instance_state_pending(instance_state):
- # If it's pending, we'll skip further checks and continue waiting
- pass
- elif (awaited_state == 'InService'
- and instance_state.reason_code == "Instance"
- and time.time() >= wait_timeout):
- # If the reason_code for the instance being out of service is
- # "Instance" this indicates a failure state, e.g. the instance
- # has failed a health check or the ELB does not have the
- # instance's availability zone enabled. The exact reason why is
-                # described in InstanceState.description.
- msg = ("The instance %s could not be put in service on %s."
- " Reason: %s")
- self.module.fail_json(msg=msg % (self.instance_id,
- lb,
- instance_state.description))
- time.sleep(1)
-
- def _is_instance_state_pending(self, instance_state):
- """
- Determines whether the instance_state is "pending", meaning there is
- an operation under way to bring it in service.
- """
- # This is messy, because AWS provides no way to distinguish between
-        # an instance that is OutOfService because it's pending vs. OutOfService
- # because it's failing health checks. So we're forced to analyze the
- # description, which is likely to be brittle.
- return (instance_state and 'pending' in instance_state.description)
-
- def _get_instance_health(self, lb):
- """
- Check instance health, should return status object or None under
- certain error conditions.
- """
- try:
- status = lb.get_instance_health([self.instance_id])[0]
- except boto.exception.BotoServerError as e:
- if e.error_code == 'InvalidInstance':
- return None
- else:
- raise
- return status
-
- def _get_instance_lbs(self, ec2_elbs=None):
- """Returns a list of ELBs attached to self.instance_id
- ec2_elbs: an optional list of elb names that will be used
- for elb lookup instead of returning what elbs
- are attached to self.instance_id"""
-
- if not ec2_elbs:
- ec2_elbs = self._get_auto_scaling_group_lbs()
-
- try:
- elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- self.module.fail_json(msg=str(e))
-
- elbs = []
- marker = None
- while True:
- try:
- newelbs = elb.get_all_load_balancers(marker=marker)
- marker = newelbs.next_marker
- elbs.extend(newelbs)
- if not marker:
- break
- except TypeError:
-                # Older versions of boto do not accept these parameters
- elbs = elb.get_all_load_balancers()
- break
-
- if ec2_elbs:
- lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
- else:
- lbs = []
- for lb in elbs:
- for info in lb.instances:
- if self.instance_id == info.id:
- lbs.append(lb)
- return lbs
-
- def _get_auto_scaling_group_lbs(self):
- """Returns a list of ELBs associated with self.instance_id
- indirectly through its auto scaling group membership"""
-
- try:
- asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- self.module.fail_json(msg=str(e))
-
- asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
- if len(asg_instances) > 1:
- self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")
-
- if not asg_instances:
- asg_elbs = []
- else:
- asg_name = asg_instances[0].group_name
-
- asgs = asg.get_all_groups([asg_name])
-            if len(asgs) != 1:
- self.module.fail_json(msg="Illegal state, expected one auto scaling group.")
-
- asg_elbs = asgs[0].load_balancers
-
- return asg_elbs
-
- def _get_instance(self):
- """Returns a boto.ec2.InstanceObject for self.instance_id"""
- try:
- ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- self.module.fail_json(msg=str(e))
- return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state={'required': True, 'choices': ['present', 'absent']},
- instance_id={'required': True},
- ec2_elbs={'default': None, 'required': False, 'type': 'list'},
- enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
- wait={'required': False, 'default': True, 'type': 'bool'},
- wait_timeout={'required': False, 'default': 0, 'type': 'int'}
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-
- if not region:
- module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
-
- ec2_elbs = module.params['ec2_elbs']
- wait = module.params['wait']
- enable_availability_zone = module.params['enable_availability_zone']
- timeout = module.params['wait_timeout']
-
-    if module.params['state'] == 'present' and ec2_elbs is None:
- module.fail_json(msg="ELBs are required for registration")
-
- instance_id = module.params['instance_id']
- elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params)
-
- if ec2_elbs is not None:
- for elb in ec2_elbs:
- if not elb_man.exists(elb):
- msg = "ELB %s does not exist" % elb
- module.fail_json(msg=msg)
-
- if not module.check_mode:
- if module.params['state'] == 'present':
- elb_man.register(wait, enable_availability_zone, timeout)
- elif module.params['state'] == 'absent':
- elb_man.deregister(wait, timeout)
-
- ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
- ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
-
- module.exit_json(**ec2_facts_result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elb_network_lb.py b/lib/ansible/modules/cloud/amazon/elb_network_lb.py
deleted file mode 100644
index 141223ce56..0000000000
--- a/lib/ansible/modules/cloud/amazon/elb_network_lb.py
+++ /dev/null
@@ -1,469 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2018, Rob White (@wimnat)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: elb_network_lb
-short_description: Manage a Network Load Balancer
-description:
- - Manage an AWS Network Elastic Load Balancer. See
- U(https://aws.amazon.com/blogs/aws/new-network-load-balancer-effortless-scaling-to-millions-of-requests-per-second/) for details.
-version_added: "2.6"
-requirements: [ boto3 ]
-author: "Rob White (@wimnat)"
-options:
- cross_zone_load_balancing:
- description:
- - Indicates whether cross-zone load balancing is enabled.
- default: false
- type: bool
- deletion_protection:
- description:
- - Indicates whether deletion protection for the ELB is enabled.
- default: false
- type: bool
- listeners:
- description:
- - A list of dicts containing listeners to attach to the ELB. See examples for detail of the dict required. Note that listener keys
- are CamelCased.
- type: list
- elements: dict
- suboptions:
- Port:
- description: The port on which the load balancer is listening.
- type: int
- required: true
- Protocol:
- description: The protocol for connections from clients to the load balancer.
- type: str
- required: true
- Certificates:
- description: The SSL server certificate.
- type: list
- elements: dict
- suboptions:
- CertificateArn:
- description: The Amazon Resource Name (ARN) of the certificate.
- type: str
- SslPolicy:
- description: The security policy that defines which ciphers and protocols are supported.
- type: str
- DefaultActions:
- description: The default actions for the listener.
- required: true
- type: list
- elements: dict
- suboptions:
- Type:
- description: The type of action.
- type: str
- TargetGroupArn:
- description: The Amazon Resource Name (ARN) of the target group.
- type: str
- name:
- description:
- - The name of the load balancer. This name must be unique within your AWS account, can have a maximum of 32 characters, must contain only alphanumeric
- characters or hyphens, and must not begin or end with a hyphen.
- required: true
- type: str
- purge_listeners:
- description:
- - If I(purge_listeners=true), existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter.
- - If the I(listeners) parameter is not set then listeners will not be modified.
- default: true
- type: bool
- purge_tags:
- description:
- - If I(purge_tags=true), existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter.
- - If the I(tags) parameter is not set then tags will not be modified.
- default: true
- type: bool
- subnet_mappings:
- description:
- - A list of dicts containing the IDs of the subnets to attach to the load balancer. You can also specify the allocation ID of an Elastic IP
- to attach to the load balancer. You can specify one Elastic IP address per subnet.
- - This parameter is mutually exclusive with I(subnets).
- type: list
- elements: dict
- subnets:
- description:
- - A list of the IDs of the subnets to attach to the load balancer. You can specify only one subnet per Availability Zone. You must specify subnets from
- at least two Availability Zones.
- - Required when I(state=present).
- - This parameter is mutually exclusive with I(subnet_mappings).
- type: list
- scheme:
- description:
- - Internet-facing or internal load balancer. An ELB scheme can not be modified after creation.
- default: internet-facing
- choices: [ 'internet-facing', 'internal' ]
- type: str
- state:
- description:
- - Create or destroy the load balancer.
- - The current default is C(absent). However, this behavior is inconsistent with other modules
- and as such the default will change to C(present) in 2.14.
- To maintain the existing behavior explicitly set I(state=absent).
- choices: [ 'present', 'absent' ]
- type: str
- tags:
- description:
- - A dictionary of one or more tags to assign to the load balancer.
- type: dict
- wait:
- description:
- - Whether or not to wait for the network load balancer to reach the desired state.
- type: bool
- wait_timeout:
- description:
- - The duration in seconds to wait, used in conjunction with I(wait).
- type: int
-extends_documentation_fragment:
- - aws
- - ec2
-notes:
- - Listeners are matched based on port. If a listener's port is changed then a new listener will be created.
- - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created.
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create an ELB and attach a listener
-- elb_network_lb:
- name: myelb
- subnets:
- - subnet-012345678
- - subnet-abcdef000
- listeners:
- - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive).
- Port: 80 # Required. The port on which the load balancer is listening.
- DefaultActions:
- - Type: forward # Required. Only 'forward' is accepted at this time
- TargetGroupName: mytargetgroup # Required. The name of the target group
- state: present
-
-# Create an ELB with an attached Elastic IP address
-- elb_network_lb:
- name: myelb
- subnet_mappings:
- - SubnetId: subnet-012345678
- AllocationId: eipalloc-aabbccdd
- listeners:
- - Protocol: TCP # Required. The protocol for connections from clients to the load balancer (TCP, TLS, UDP or TCP_UDP) (case-sensitive).
- Port: 80 # Required. The port on which the load balancer is listening.
- DefaultActions:
- - Type: forward # Required. Only 'forward' is accepted at this time
- TargetGroupName: mytargetgroup # Required. The name of the target group
- state: present
-
-# Remove an ELB
-- elb_network_lb:
- name: myelb
- state: absent
-
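-# A sketch combining the documented attribute options (all values illustrative)
-- elb_network_lb:
-    name: myelb
-    subnets:
-      - subnet-012345678
-      - subnet-abcdef000
-    cross_zone_load_balancing: yes
-    deletion_protection: yes
-    tags:
-      Environment: production
-    state: present
-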
-'''
-
-RETURN = '''
-availability_zones:
- description: The Availability Zones for the load balancer.
- returned: when state is present
- type: list
- sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a', 'load_balancer_addresses': []}]"
-canonical_hosted_zone_id:
- description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
- returned: when state is present
- type: str
- sample: ABCDEF12345678
-created_time:
- description: The date and time the load balancer was created.
- returned: when state is present
- type: str
- sample: "2015-02-12T02:14:02+00:00"
-deletion_protection_enabled:
- description: Indicates whether deletion protection is enabled.
- returned: when state is present
- type: str
- sample: true
-dns_name:
- description: The public DNS name of the load balancer.
- returned: when state is present
- type: str
- sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
-idle_timeout_timeout_seconds:
- description: The idle timeout value, in seconds.
- returned: when state is present
- type: str
- sample: 60
-ip_address_type:
- description: The type of IP addresses used by the subnets for the load balancer.
- returned: when state is present
- type: str
- sample: ipv4
-listeners:
- description: Information about the listeners.
- returned: when state is present
- type: complex
- contains:
- listener_arn:
- description: The Amazon Resource Name (ARN) of the listener.
- returned: when state is present
- type: str
- sample: ""
- load_balancer_arn:
- description: The Amazon Resource Name (ARN) of the load balancer.
- returned: when state is present
- type: str
- sample: ""
- port:
- description: The port on which the load balancer is listening.
- returned: when state is present
- type: int
- sample: 80
- protocol:
- description: The protocol for connections from clients to the load balancer.
- returned: when state is present
- type: str
- sample: HTTPS
- certificates:
- description: The SSL server certificate.
- returned: when state is present
- type: complex
- contains:
- certificate_arn:
- description: The Amazon Resource Name (ARN) of the certificate.
- returned: when state is present
- type: str
- sample: ""
- ssl_policy:
- description: The security policy that defines which ciphers and protocols are supported.
- returned: when state is present
- type: str
- sample: ""
- default_actions:
- description: The default actions for the listener.
- returned: when state is present
- type: str
- contains:
- type:
- description: The type of action.
- returned: when state is present
- type: str
- sample: ""
- target_group_arn:
- description: The Amazon Resource Name (ARN) of the target group.
- returned: when state is present
- type: str
- sample: ""
-load_balancer_arn:
- description: The Amazon Resource Name (ARN) of the load balancer.
- returned: when state is present
- type: str
- sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
-load_balancer_name:
- description: The name of the load balancer.
- returned: when state is present
- type: str
- sample: my-elb
-load_balancing_cross_zone_enabled:
- description: Indicates whether cross-zone load balancing is enabled.
- returned: when state is present
- type: str
- sample: true
-scheme:
- description: Internet-facing or internal load balancer.
- returned: when state is present
- type: str
- sample: internal
-state:
- description: The state of the load balancer.
- returned: when state is present
- type: dict
- sample: "{'code': 'active'}"
-tags:
- description: The tags attached to the load balancer.
- returned: when state is present
- type: dict
- sample: "{
- 'Tag': 'Example'
- }"
-type:
- description: The type of load balancer.
- returned: when state is present
- type: str
- sample: network
-vpc_id:
- description: The ID of the VPC for the load balancer.
- returned: when state is present
- type: str
- sample: vpc-0011223344
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags
-from ansible.module_utils.aws.elbv2 import NetworkLoadBalancer, ELBListeners, ELBListener
-
-
-def create_or_update_elb(elb_obj):
- """Create ELB or modify main attributes. json_exit here"""
-
- if elb_obj.elb:
- # ELB exists so check subnets, security groups and tags match what has been passed
-
- # Subnets
- if not elb_obj.compare_subnets():
- elb_obj.modify_subnets()
-
- # Tags - only need to play with tags if tags parameter has been set to something
- if elb_obj.tags is not None:
-
- # Delete necessary tags
- tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']),
- boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags)
- if tags_to_delete:
- elb_obj.delete_tags(tags_to_delete)
-
- # Add/update tags
- if tags_need_modify:
- elb_obj.modify_tags()
-
- else:
- # Create load balancer
- elb_obj.create_elb()
-
- # ELB attributes
- elb_obj.update_elb_attributes()
- elb_obj.modify_elb_attributes()
-
- # Listeners
- listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'])
-
- listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners()
-
- # Delete listeners
- for listener_to_delete in listeners_to_delete:
- listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn'])
- listener_obj.delete()
- listeners_obj.changed = True
-
- # Add listeners
- for listener_to_add in listeners_to_add:
- listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn'])
- listener_obj.add()
- listeners_obj.changed = True
-
- # Modify listeners
- for listener_to_modify in listeners_to_modify:
- listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn'])
- listener_obj.modify()
- listeners_obj.changed = True
-
- # If listeners changed, mark ELB as changed
- if listeners_obj.changed:
- elb_obj.changed = True
-
- # Get the ELB again
- elb_obj.update()
-
- # Get the ELB listeners again
- listeners_obj.update()
-
- # Update the ELB attributes
- elb_obj.update_elb_attributes()
-
- # Convert to snake_case and merge in everything we want to return to the user
- snaked_elb = camel_dict_to_snake_dict(elb_obj.elb)
- snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes))
- snaked_elb['listeners'] = []
- for listener in listeners_obj.current_listeners:
- snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener))
-
- # Change tags to ansible friendly dict
- snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags'])
-
- elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb)
-
-
-def delete_elb(elb_obj):
-
- if elb_obj.elb:
- elb_obj.delete()
-
- elb_obj.module.exit_json(changed=elb_obj.changed)
-
-
-def main():
-
- argument_spec = (
- dict(
- cross_zone_load_balancing=dict(type='bool'),
- deletion_protection=dict(type='bool'),
- listeners=dict(type='list',
- elements='dict',
- options=dict(
- Protocol=dict(type='str', required=True),
- Port=dict(type='int', required=True),
- SslPolicy=dict(type='str'),
- Certificates=dict(type='list'),
- DefaultActions=dict(type='list', required=True)
- )
- ),
- name=dict(required=True, type='str'),
- purge_listeners=dict(default=True, type='bool'),
- purge_tags=dict(default=True, type='bool'),
- subnets=dict(type='list'),
- subnet_mappings=dict(type='list'),
- scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
- state=dict(choices=['present', 'absent'], type='str'),
- tags=dict(type='dict'),
- wait_timeout=dict(type='int'),
- wait=dict(type='bool')
- )
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- mutually_exclusive=[['subnets', 'subnet_mappings']])
-
- # Check for subnets or subnet_mappings if state is present
- state = module.params.get("state")
- if state == 'present':
- if module.params.get("subnets") is None and module.params.get("subnet_mappings") is None:
- module.fail_json(msg="'subnets' or 'subnet_mappings' is required when state=present")
-
- if state is None:
- # See below, unless state==present we delete. Ouch.
- module.deprecate('State currently defaults to absent. This is inconsistent with other modules'
- ' and the default will be changed to `present` in Ansible 2.14',
- version='2.14')
-
- # Quick check of listeners parameters
- listeners = module.params.get("listeners")
- if listeners is not None:
- for listener in listeners:
- for key in listener.keys():
- protocols_list = ['TCP', 'TLS', 'UDP', 'TCP_UDP']
- if key == 'Protocol' and listener[key] not in protocols_list:
- module.fail_json(msg="'Protocol' must be either " + ", ".join(protocols_list))
-
- connection = module.client('elbv2')
- connection_ec2 = module.client('ec2')
-
- elb = NetworkLoadBalancer(connection, connection_ec2, module)
-
- if state == 'present':
- create_or_update_elb(elb)
- else:
- delete_elb(elb)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elb_target.py b/lib/ansible/modules/cloud/amazon/elb_target.py
deleted file mode 100644
index acb7c590dd..0000000000
--- a/lib/ansible/modules/cloud/amazon/elb_target.py
+++ /dev/null
@@ -1,354 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'status': ['preview'],
- 'supported_by': 'community',
- 'metadata_version': '1.1'}
-
-DOCUMENTATION = '''
----
-module: elb_target
-short_description: Manage a target in a target group
-description:
- - Used to register or deregister a target in a target group
-version_added: "2.5"
-author: "Rob White (@wimnat)"
-options:
- deregister_unused:
- description:
-      - The default behaviour for targets that are unused is to leave them registered. If instead you would like to remove them,
-        set I(deregister_unused) to yes.
- type: bool
- target_az:
- description:
- - An Availability Zone or all. This determines whether the target receives traffic from the load balancer nodes in the specified
- Availability Zone or from all enabled Availability Zones for the load balancer. This parameter is not supported if the target
- type of the target group is instance.
- type: str
- target_group_arn:
- description:
- - The Amazon Resource Name (ARN) of the target group. Mutually exclusive of I(target_group_name).
- type: str
- target_group_name:
- description:
- - The name of the target group. Mutually exclusive of I(target_group_arn).
- type: str
- target_id:
- description:
- - The ID of the target.
- required: true
- type: str
- target_port:
- description:
- - The port on which the target is listening. You can specify a port override. If a target is already registered,
- you can register it again using a different port.
- - The default port for a target is the port for the target group.
- required: false
- type: int
- target_status:
- description:
- - Blocks and waits for the target status to equal given value. For more detail on target status see
- U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-health-checks.html#target-health-states)
- required: false
- choices: [ 'initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable' ]
- type: str
- target_status_timeout:
- description:
- - Maximum time in seconds to wait for target_status change
- required: false
- default: 60
- type: int
- state:
- description:
- - Register or deregister the target.
- required: true
- choices: [ 'present', 'absent' ]
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-notes:
- - If you specified a port override when you registered a target, you must specify both the target ID and the port when you deregister it.
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Register an IP address target to a target group
-- elb_target:
- target_group_name: myiptargetgroup
-    target_id: 10.0.0.10    # IP targets are identified by IP address (illustrative value)
- state: present
-
-# Register an instance target to a target group
-- elb_target:
- target_group_name: mytargetgroup
- target_id: i-1234567
- state: present
-
-# Deregister a target from a target group
-- elb_target:
- target_group_name: mytargetgroup
- target_id: i-1234567
- state: absent
-
-# Modify a target to use a different port
-# Register a target to a target group
-- elb_target:
- target_group_name: mytargetgroup
- target_id: i-1234567
- target_port: 8080
- state: present
-
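-# A sketch (illustrative values): register a target and block until it reports
-# healthy, using the target_status/target_status_timeout options documented above
-- elb_target:
-    target_group_name: mytargetgroup
-    target_id: i-1234567
-    target_status: healthy
-    target_status_timeout: 120
-    state: present
-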
-'''
-
-RETURN = '''
-
-'''
-
-import traceback
-from time import time, sleep
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (boto3_conn, camel_dict_to_snake_dict,
- ec2_argument_spec, get_aws_connection_info,
- AWSRetry)
-
-try:
- import boto3
- from botocore.exceptions import ClientError, BotoCoreError
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-
-@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound'])
-def describe_target_groups_with_backoff(connection, tg_name):
- return connection.describe_target_groups(Names=[tg_name])
-
-
-def convert_tg_name_to_arn(connection, module, tg_name):
-
- try:
- response = describe_target_groups_with_backoff(connection, tg_name)
- except ClientError as e:
- module.fail_json(msg="Unable to describe target group {0}: {1}".format(tg_name, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except BotoCoreError as e:
- module.fail_json(msg="Unable to describe target group {0}: {1}".format(tg_name, to_native(e)),
- exception=traceback.format_exc())
-
- tg_arn = response['TargetGroups'][0]['TargetGroupArn']
-
- return tg_arn
-
-
-@AWSRetry.jittered_backoff(retries=10, delay=10, catch_extra_error_codes=['TargetGroupNotFound'])
-def describe_targets_with_backoff(connection, tg_arn, target):
- if target is None:
- tg = []
- else:
- tg = [target]
-
- return connection.describe_target_health(TargetGroupArn=tg_arn, Targets=tg)
-
-
-def describe_targets(connection, module, tg_arn, target=None):
-
- """
- Describe targets in a target group
-
- :param module: ansible module object
- :param connection: boto3 connection
- :param tg_arn: target group arn
- :param target: dictionary containing target id and port
-    :return: the target health description dict for the target, or {} if it is not registered
- """
-
- try:
- targets = describe_targets_with_backoff(connection, tg_arn, target)['TargetHealthDescriptions']
- if not targets:
- return {}
- return targets[0]
- except ClientError as e:
- module.fail_json(msg="Unable to describe target health for target {0}: {1}".format(target, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except BotoCoreError as e:
- module.fail_json(msg="Unable to describe target health for target {0}: {1}".format(target, to_native(e)),
- exception=traceback.format_exc())
-
-
-@AWSRetry.jittered_backoff(retries=10, delay=10)
-def register_target_with_backoff(connection, target_group_arn, target):
- connection.register_targets(TargetGroupArn=target_group_arn, Targets=[target])
-
-
-def register_target(connection, module):
-
- """
- Registers a target to a target group
-
- :param module: ansible module object
- :param connection: boto3 connection
- :return:
- """
-
- target_az = module.params.get("target_az")
- target_group_arn = module.params.get("target_group_arn")
- target_id = module.params.get("target_id")
- target_port = module.params.get("target_port")
- target_status = module.params.get("target_status")
- target_status_timeout = module.params.get("target_status_timeout")
- changed = False
-
- if not target_group_arn:
- target_group_arn = convert_tg_name_to_arn(connection, module, module.params.get("target_group_name"))
-
- target = dict(Id=target_id)
- if target_az:
- target['AvailabilityZone'] = target_az
- if target_port:
- target['Port'] = target_port
-
- target_description = describe_targets(connection, module, target_group_arn, target)
-
- if 'Reason' in target_description['TargetHealth']:
- if target_description['TargetHealth']['Reason'] == "Target.NotRegistered":
- try:
- register_target_with_backoff(connection, target_group_arn, target)
- changed = True
- if target_status:
- target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout)
- except ClientError as e:
- module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except BotoCoreError as e:
- module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)),
- exception=traceback.format_exc())
-
- # Get all targets for the target group
- target_descriptions = describe_targets(connection, module, target_group_arn)
-
- module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn)
-
-
-@AWSRetry.jittered_backoff(retries=10, delay=10)
-def deregister_target_with_backoff(connection, target_group_arn, target):
- connection.deregister_targets(TargetGroupArn=target_group_arn, Targets=[target])
-
-
-def deregister_target(connection, module):
-
- """
-    Deregisters a target from a target group
-
- :param module: ansible module object
- :param connection: boto3 connection
- :return:
- """
-
- deregister_unused = module.params.get("deregister_unused")
- target_group_arn = module.params.get("target_group_arn")
- target_id = module.params.get("target_id")
- target_port = module.params.get("target_port")
- target_status = module.params.get("target_status")
- target_status_timeout = module.params.get("target_status_timeout")
- changed = False
-
- if not target_group_arn:
- target_group_arn = convert_tg_name_to_arn(connection, module, module.params.get("target_group_name"))
-
- target = dict(Id=target_id)
- if target_port:
- target['Port'] = target_port
-
- target_description = describe_targets(connection, module, target_group_arn, target)
- current_target_state = target_description['TargetHealth']['State']
- current_target_reason = target_description['TargetHealth'].get('Reason')
-
- needs_deregister = False
-
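-    # Deregister active targets outright; 'unused' targets are only removed when deregister_unused is set.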
- if deregister_unused and current_target_state == 'unused':
- if current_target_reason != 'Target.NotRegistered':
- needs_deregister = True
- elif current_target_state not in ['unused', 'draining']:
- needs_deregister = True
-
- if needs_deregister:
- try:
- deregister_target_with_backoff(connection, target_group_arn, target)
- changed = True
- except ClientError as e:
- module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except BotoCoreError as e:
- module.fail_json(msg="Unable to deregister target {0}: {1}".format(target, to_native(e)),
- exception=traceback.format_exc())
- else:
- if current_target_reason != 'Target.NotRegistered' and current_target_state != 'draining':
- module.warn(warning="Your specified target has an 'unused' state but is still registered to the target group. " +
- "To force deregistration use the 'deregister_unused' option.")
-
- if target_status:
- target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout)
-
- # Get all targets for the target group
- target_descriptions = describe_targets(connection, module, target_group_arn)
-
- module.exit_json(changed=changed, target_health_descriptions=camel_dict_to_snake_dict(target_descriptions), target_group_arn=target_group_arn)
-
-
-def target_status_check(connection, module, target_group_arn, target, target_status, target_status_timeout):
- reached_state = False
- timeout = target_status_timeout + time()
- while time() < timeout:
- health_state = describe_targets(connection, module, target_group_arn, target)['TargetHealth']['State']
- if health_state == target_status:
- reached_state = True
- break
- sleep(1)
- if not reached_state:
-        module.fail_json(msg='Status check timeout of {0} exceeded, last status was {1}'.format(target_status_timeout, health_state))
-
-
-def main():
-
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- deregister_unused=dict(type='bool', default=False),
- target_az=dict(type='str'),
- target_group_arn=dict(type='str'),
- target_group_name=dict(type='str'),
- target_id=dict(type='str', required=True),
- target_port=dict(type='int'),
- target_status=dict(choices=['initial', 'healthy', 'unhealthy', 'unused', 'draining', 'unavailable'], type='str'),
- target_status_timeout=dict(type='int', default=60),
- state=dict(required=True, choices=['present', 'absent'], type='str'),
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive=[['target_group_arn', 'target_group_name']]
- )
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
- connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
-
- state = module.params.get("state")
-
- if state == 'present':
- register_target(connection, module)
- else:
- deregister_target(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elb_target_group.py b/lib/ansible/modules/cloud/amazon/elb_target_group.py
deleted file mode 100644
index d8d85a2bf6..0000000000
--- a/lib/ansible/modules/cloud/amazon/elb_target_group.py
+++ /dev/null
@@ -1,860 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: elb_target_group
-short_description: Manage a target group for an Application or Network load balancer
-description:
- - Manage an AWS Elastic Load Balancer target group. See
- U(https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html) or
- U(https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html) for details.
-version_added: "2.4"
-requirements: [ boto3 ]
-author: "Rob White (@wimnat)"
-options:
- deregistration_delay_timeout:
- description:
- - The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
- The range is 0-3600 seconds.
- type: int
- health_check_protocol:
- description:
- - The protocol the load balancer uses when performing health checks on targets.
- required: false
- choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
- type: str
- health_check_port:
- description:
- - The port the load balancer uses when performing health checks on targets.
- Can be set to 'traffic-port' to match target port.
- - When not defined, it will default to the port on which each target receives traffic from the load balancer.
- required: false
- type: str
- health_check_path:
- description:
- - The ping path that is the destination on the targets for health checks. The path must be defined in order to set a health check.
- - Requires the I(health_check_protocol) parameter to be set.
- required: false
- type: str
- health_check_interval:
- description:
- - The approximate amount of time, in seconds, between health checks of an individual target.
- required: false
- type: int
- health_check_timeout:
- description:
- - The amount of time, in seconds, during which no response from a target means a failed health check.
- required: false
- type: int
- healthy_threshold_count:
- description:
- - The number of consecutive health check successes required before considering an unhealthy target healthy.
- required: false
- type: int
- modify_targets:
- description:
- - Whether or not to alter existing targets in the group to match what is passed with the module.
- required: false
- default: yes
- type: bool
- name:
- description:
- - The name of the target group.
- required: true
- type: str
- port:
- description:
- - The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target. Required if
- I(state) is C(present).
- required: false
- type: int
- protocol:
- description:
- - The protocol to use for routing traffic to the targets. Required when I(state) is C(present).
- required: false
- choices: [ 'http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP', 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
- type: str
- purge_tags:
- description:
- - If yes, existing tags will be purged from the resource to match exactly what is defined by the I(tags) parameter. If the I(tags) parameter is not
- set, tags will not be modified.
- required: false
- default: yes
- type: bool
- state:
- description:
- - Create or destroy the target group.
- required: true
- choices: [ 'present', 'absent' ]
- type: str
- stickiness_enabled:
- description:
- - Indicates whether sticky sessions are enabled.
- type: bool
- stickiness_lb_cookie_duration:
- description:
- - The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load
- balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds).
- type: int
- stickiness_type:
- description:
- - The type of sticky sessions. The possible value is lb_cookie.
- default: lb_cookie
- type: str
- successful_response_codes:
- description:
- - The HTTP codes to use when checking for a successful response from a target.
- - Accepts multiple values (for example, "200,202") or a range of values (for example, "200-299").
- - Requires the I(health_check_protocol) parameter to be set.
- required: false
- type: str
- tags:
- description:
- - A dictionary of one or more tags to assign to the target group.
- required: false
- type: dict
- target_type:
- description:
- - The type of target that you must specify when registering targets with this target group. The possible values are
- C(instance) (targets are specified by instance ID), C(ip) (targets are specified by IP address) or C(lambda) (target is specified by ARN).
- Note that you can't specify targets for a target group using more than one type. Target type lambda only accepts one target. When more than
- one target is specified, only the first one is used. All additional targets are ignored.
- If the target type is ip, specify IP addresses from the subnets of the virtual private cloud (VPC) for the target
- group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10).
- You can't specify publicly routable IP addresses.
- - The default behavior is C(instance).
- required: false
- choices: ['instance', 'ip', 'lambda']
- version_added: 2.5
- type: str
- targets:
- description:
- - A list of targets to assign to the target group. This parameter defaults to an empty list. Unless I(modify_targets) is set to C(no), any
- existing targets not in this list will be removed from the group. Each target in the list should be a dict with an C(Id) and an optional C(Port). See the Examples for detail.
- required: false
- type: list
- unhealthy_threshold_count:
- description:
- - The number of consecutive health check failures required before considering a target unhealthy.
- required: false
- type: int
- vpc_id:
- description:
- - The identifier of the virtual private cloud (VPC). Required when I(state) is C(present).
- required: false
- type: str
- wait:
- description:
- - Whether or not to wait for newly registered or deregistered targets to reach a final state.
- type: bool
- default: false
- version_added: "2.4"
- wait_timeout:
- description:
- - The maximum time, in seconds, to wait for the targets to reach the desired state.
- default: 200
- version_added: "2.4"
- type: int
-extends_documentation_fragment:
- - aws
- - ec2
-notes:
- - Once a target group has been created, only its health check can be modified by subsequent calls.
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create a target group with a default health check
-- elb_target_group:
- name: mytargetgroup
- protocol: http
- port: 80
- vpc_id: vpc-01234567
- state: present
-
-# Modify the target group with a custom health check
-- elb_target_group:
- name: mytargetgroup
- protocol: http
- port: 80
- vpc_id: vpc-01234567
- health_check_protocol: http
- health_check_path: /health_check
- health_check_port: 80
- successful_response_codes: 200
- health_check_interval: 15
- health_check_timeout: 3
- healthy_threshold_count: 4
- unhealthy_threshold_count: 3
- state: present
-
-# Delete a target group
-- elb_target_group:
- name: mytargetgroup
- state: absent
-
-# Create a target group with instance targets
-- elb_target_group:
- name: mytargetgroup
- protocol: http
- port: 81
- vpc_id: vpc-01234567
- health_check_protocol: http
- health_check_path: /
- successful_response_codes: "200,250-260"
- targets:
- - Id: i-01234567
- Port: 80
- - Id: i-98765432
- Port: 80
- state: present
- wait_timeout: 200
- wait: True
-
-# Create a target group with IP address targets
-- elb_target_group:
- name: mytargetgroup
- protocol: http
- port: 81
- vpc_id: vpc-01234567
- health_check_protocol: http
- health_check_path: /
- successful_response_codes: "200,250-260"
- target_type: ip
- targets:
- - Id: 10.0.0.10
- Port: 80
- AvailabilityZone: all
- - Id: 10.0.0.20
- Port: 80
- state: present
- wait_timeout: 200
- wait: True
-
-# Using a lambda function as a target requires that the target group
-# itself is allowed to invoke the lambda function.
-# Therefore you first need to create an empty target group
-# to obtain its ARN, second, allow the target group
-# to invoke the lambda function and third, add the target
-# to the target group.
-- name: first, create empty target group
- elb_target_group:
- name: my-lambda-targetgroup
- target_type: lambda
- state: present
- modify_targets: False
- register: out
-
-- name: second, allow invoke of the lambda
- lambda_policy:
- state: "{{ state | default('present') }}"
- function_name: my-lambda-function
- statement_id: someID
- action: lambda:InvokeFunction
- principal: elasticloadbalancing.amazonaws.com
- source_arn: "{{ out.target_group_arn }}"
-
-- name: third, add target
- elb_target_group:
- name: my-lambda-targetgroup
- target_type: lambda
- state: present
- targets:
- - Id: arn:aws:lambda:eu-central-1:123456789012:function:my-lambda-function
-
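-# Hypothetical example (illustrative names and values): create a target group
-# with sticky sessions and a custom deregistration delay
-- elb_target_group:
- name: mytargetgroup
- protocol: http
- port: 80
- vpc_id: vpc-01234567
- deregistration_delay_timeout: 60
- stickiness_enabled: yes
- stickiness_lb_cookie_duration: 86400
- stickiness_type: lb_cookie
- state: present
-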
-'''
-
-RETURN = '''
-deregistration_delay_timeout_seconds:
- description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
- returned: when state present
- type: int
- sample: 300
-health_check_interval_seconds:
- description: The approximate amount of time, in seconds, between health checks of an individual target.
- returned: when state present
- type: int
- sample: 30
-health_check_path:
- description: The destination for the health check request.
- returned: when state present
- type: str
- sample: /index.html
-health_check_port:
- description: The port to use to connect with the target.
- returned: when state present
- type: str
- sample: traffic-port
-health_check_protocol:
- description: The protocol to use to connect with the target.
- returned: when state present
- type: str
- sample: HTTP
-health_check_timeout_seconds:
- description: The amount of time, in seconds, during which no response means a failed health check.
- returned: when state present
- type: int
- sample: 5
-healthy_threshold_count:
- description: The number of consecutive health check successes required before considering an unhealthy target healthy.
- returned: when state present
- type: int
- sample: 5
-load_balancer_arns:
- description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
- returned: when state present
- type: list
- sample: []
-matcher:
- description: The HTTP codes to use when checking for a successful response from a target.
- returned: when state present
- type: dict
- sample: {
- "http_code": "200"
- }
-port:
- description: The port on which the targets are listening.
- returned: when state present
- type: int
- sample: 80
-protocol:
- description: The protocol to use for routing traffic to the targets.
- returned: when state present
- type: str
- sample: HTTP
-stickiness_enabled:
- description: Indicates whether sticky sessions are enabled.
- returned: when state present
- type: bool
- sample: true
-stickiness_lb_cookie_duration_seconds:
- description: The time period, in seconds, during which requests from a client should be routed to the same target.
- returned: when state present
- type: int
- sample: 86400
-stickiness_type:
- description: The type of sticky sessions.
- returned: when state present
- type: str
- sample: lb_cookie
-tags:
- description: The tags attached to the target group.
- returned: when state present
- type: dict
- sample: "{
- 'Tag': 'Example'
- }"
-target_group_arn:
- description: The Amazon Resource Name (ARN) of the target group.
- returned: when state present
- type: str
- sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211"
-target_group_name:
- description: The name of the target group.
- returned: when state present
- type: str
- sample: mytargetgroup
-unhealthy_threshold_count:
- description: The number of consecutive health check failures required before considering the target unhealthy.
- returned: when state present
- type: int
- sample: 2
-vpc_id:
- description: The ID of the VPC for the targets.
- returned: when state present
- type: str
- sample: vpc-0123456
-'''
-
-import time
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict,
- compare_aws_tags, ansible_dict_to_boto3_tag_list)
-from distutils.version import LooseVersion
-
-
-def get_tg_attributes(connection, module, tg_arn):
- try:
- tg_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=tg_arn)['Attributes'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get target group attributes")
-
- # Replace '.' with '_' in attribute key names to make it more Ansibley
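- # e.g. (hypothetical) a raw key 'deregistration_delay.timeout_seconds'
- # is returned as 'deregistration_delay_timeout_seconds'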
- return dict((k.replace('.', '_'), v) for k, v in tg_attributes.items())
-
-
-def get_target_group_tags(connection, module, target_group_arn):
- try:
- return connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get target group tags")
-
-
-def get_target_group(connection, module):
- try:
- target_group_paginator = connection.get_paginator('describe_target_groups')
- return (target_group_paginator.paginate(Names=[module.params.get("name")]).build_full_result())['TargetGroups'][0]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- if e.response['Error']['Code'] == 'TargetGroupNotFound':
- return None
- else:
- module.fail_json_aws(e, msg="Couldn't get target group")
-
-
-def wait_for_status(connection, module, target_group_arn, targets, status):
- polling_increment_secs = 5
- max_retries = (module.params.get('wait_timeout') // polling_increment_secs)
- status_achieved = False
-
- for x in range(0, max_retries):
- try:
- response = connection.describe_target_health(TargetGroupArn=target_group_arn, Targets=targets)
- if response['TargetHealthDescriptions'][0]['TargetHealth']['State'] == status:
- status_achieved = True
- break
- else:
- time.sleep(polling_increment_secs)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't describe target health")
-
- result = response
- return status_achieved, result
-
-
-def fail_if_ip_target_type_not_supported(module):
- if LooseVersion(botocore.__version__) < LooseVersion('1.7.2'):
- module.fail_json(msg="target_type ip requires botocore version 1.7.2 or later. Version %s is installed" %
- botocore.__version__)
-
-
-def create_or_update_target_group(connection, module):
-
- changed = False
- new_target_group = False
- params = dict()
- target_type = module.params.get("target_type")
- params['Name'] = module.params.get("name")
- params['TargetType'] = target_type
- if target_type != "lambda":
- params['Protocol'] = module.params.get("protocol").upper()
- params['Port'] = module.params.get("port")
- params['VpcId'] = module.params.get("vpc_id")
- tags = module.params.get("tags")
- purge_tags = module.params.get("purge_tags")
- deregistration_delay_timeout = module.params.get("deregistration_delay_timeout")
- stickiness_enabled = module.params.get("stickiness_enabled")
- stickiness_lb_cookie_duration = module.params.get("stickiness_lb_cookie_duration")
- stickiness_type = module.params.get("stickiness_type")
-
- health_option_keys = [
- "health_check_path", "health_check_protocol", "health_check_interval", "health_check_timeout",
- "healthy_threshold_count", "unhealthy_threshold_count", "successful_response_codes"
- ]
- health_options = any([module.params[health_option_key] is not None for health_option_key in health_option_keys])
-
- # Set health check if anything set
- if health_options:
-
- if module.params.get("health_check_protocol") is not None:
- params['HealthCheckProtocol'] = module.params.get("health_check_protocol").upper()
-
- if module.params.get("health_check_port") is not None:
- params['HealthCheckPort'] = module.params.get("health_check_port")
-
- if module.params.get("health_check_interval") is not None:
- params['HealthCheckIntervalSeconds'] = module.params.get("health_check_interval")
-
- if module.params.get("health_check_timeout") is not None:
- params['HealthCheckTimeoutSeconds'] = module.params.get("health_check_timeout")
-
- if module.params.get("healthy_threshold_count") is not None:
- params['HealthyThresholdCount'] = module.params.get("healthy_threshold_count")
-
- if module.params.get("unhealthy_threshold_count") is not None:
- params['UnhealthyThresholdCount'] = module.params.get("unhealthy_threshold_count")
-
- # Only need to check response code and path for http(s) health checks
- protocol = module.params.get("health_check_protocol")
- if protocol is not None and protocol.upper() in ['HTTP', 'HTTPS']:
-
- if module.params.get("health_check_path") is not None:
- params['HealthCheckPath'] = module.params.get("health_check_path")
-
- if module.params.get("successful_response_codes") is not None:
- params['Matcher'] = {}
- params['Matcher']['HttpCode'] = module.params.get("successful_response_codes")
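- # e.g. (hypothetical) successful_response_codes "200,250-260" becomes
- # Matcher={'HttpCode': '200,250-260'}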
-
- # Get target type
- if target_type == 'ip':
- fail_if_ip_target_type_not_supported(module)
-
- # Get target group
- tg = get_target_group(connection, module)
-
- if tg:
- diffs = [param for param in ('Port', 'Protocol', 'VpcId')
- if tg.get(param) != params.get(param)]
- if diffs:
- module.fail_json(msg="Cannot modify %s parameter(s) for a target group" %
- ", ".join(diffs))
- # Target group exists so check health check parameters match what has been passed
- health_check_params = dict()
-
- # Modify health check if anything set
- if health_options:
-
- # Health check protocol
- if 'HealthCheckProtocol' in params and tg['HealthCheckProtocol'] != params['HealthCheckProtocol']:
- health_check_params['HealthCheckProtocol'] = params['HealthCheckProtocol']
-
- # Health check port
- if 'HealthCheckPort' in params and tg['HealthCheckPort'] != params['HealthCheckPort']:
- health_check_params['HealthCheckPort'] = params['HealthCheckPort']
-
- # Health check interval
- if 'HealthCheckIntervalSeconds' in params and tg['HealthCheckIntervalSeconds'] != params['HealthCheckIntervalSeconds']:
- health_check_params['HealthCheckIntervalSeconds'] = params['HealthCheckIntervalSeconds']
-
- # Health check timeout
- if 'HealthCheckTimeoutSeconds' in params and tg['HealthCheckTimeoutSeconds'] != params['HealthCheckTimeoutSeconds']:
- health_check_params['HealthCheckTimeoutSeconds'] = params['HealthCheckTimeoutSeconds']
-
- # Healthy threshold
- if 'HealthyThresholdCount' in params and tg['HealthyThresholdCount'] != params['HealthyThresholdCount']:
- health_check_params['HealthyThresholdCount'] = params['HealthyThresholdCount']
-
- # Unhealthy threshold
- if 'UnhealthyThresholdCount' in params and tg['UnhealthyThresholdCount'] != params['UnhealthyThresholdCount']:
- health_check_params['UnhealthyThresholdCount'] = params['UnhealthyThresholdCount']
-
- # Only need to check response code and path for http(s) health checks
- if tg['HealthCheckProtocol'] in ['HTTP', 'HTTPS']:
- # Health check path
- if 'HealthCheckPath' in params and tg['HealthCheckPath'] != params['HealthCheckPath']:
- health_check_params['HealthCheckPath'] = params['HealthCheckPath']
-
- # Matcher (successful response codes)
- # TODO: required and here?
- if 'Matcher' in params:
- current_matcher_list = tg['Matcher']['HttpCode'].split(',')
- requested_matcher_list = params['Matcher']['HttpCode'].split(',')
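- # Compare as sets so that, e.g. (hypothetical), '200,202' and
- # '202,200' are treated as the same matcher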
- if set(current_matcher_list) != set(requested_matcher_list):
- health_check_params['Matcher'] = {}
- health_check_params['Matcher']['HttpCode'] = ','.join(requested_matcher_list)
-
- try:
- if health_check_params:
- connection.modify_target_group(TargetGroupArn=tg['TargetGroupArn'], **health_check_params)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't update target group")
-
- # Do we need to modify targets?
- if module.params.get("modify_targets"):
- # Get the list of current target instances. There is nothing like a describe_targets call in the API,
- # so describe_target_health seems to be the only way to get them.
- try:
- current_targets = connection.describe_target_health(
- TargetGroupArn=tg['TargetGroupArn'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get target group health")
-
- if module.params.get("targets"):
-
- if target_type != "lambda":
- params['Targets'] = module.params.get("targets")
-
- # Correct type of target ports
- for target in params['Targets']:
- target['Port'] = int(target.get('Port', module.params.get('port')))
-
- current_instance_ids = []
-
- for instance in current_targets['TargetHealthDescriptions']:
- current_instance_ids.append(instance['Target']['Id'])
-
- new_instance_ids = []
- for instance in params['Targets']:
- new_instance_ids.append(instance['Id'])
-
- add_instances = set(new_instance_ids) - set(current_instance_ids)
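- # e.g. (hypothetical) current={'i-a', 'i-b'}, new={'i-b', 'i-c'} gives
- # add_instances={'i-c'}; remove_instances is computed below as the
- # reverse difference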
-
- if add_instances:
- instances_to_add = []
- for target in params['Targets']:
- if target['Id'] in add_instances:
- instances_to_add.append({'Id': target['Id'], 'Port': target['Port']})
-
- changed = True
- try:
- connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_add)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't register targets")
-
- if module.params.get("wait"):
- status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], instances_to_add, 'healthy')
- if not status_achieved:
- module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console')
-
- remove_instances = set(current_instance_ids) - set(new_instance_ids)
-
- if remove_instances:
- instances_to_remove = []
- for target in current_targets['TargetHealthDescriptions']:
- if target['Target']['Id'] in remove_instances:
- instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']})
-
- changed = True
- try:
- connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't remove targets")
-
- if module.params.get("wait"):
- status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], instances_to_remove, 'unused')
- if not status_achieved:
- module.fail_json(msg='Error waiting for target deregistration - please check the AWS console')
-
- # register lambda target
- else:
- try:
- changed = False
- target = module.params.get("targets")[0]
- if len(current_targets["TargetHealthDescriptions"]) == 0:
- changed = True
- else:
- for item in current_targets["TargetHealthDescriptions"]:
- if target["Id"] != item["Target"]["Id"]:
- changed = True
- break # only one target is possible with lambda
-
- if changed:
- if target.get("Id"):
- response = connection.register_targets(
- TargetGroupArn=tg['TargetGroupArn'],
- Targets=[
- {
- "Id": target['Id']
- }
- ]
- )
-
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e, msg="Couldn't register targets")
- else:
- if target_type != "lambda":
-
- current_instances = current_targets['TargetHealthDescriptions']
-
- if current_instances:
- instances_to_remove = []
- for target in current_targets['TargetHealthDescriptions']:
- instances_to_remove.append({'Id': target['Target']['Id'], 'Port': target['Target']['Port']})
-
- changed = True
- try:
- connection.deregister_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=instances_to_remove)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't remove targets")
-
- if module.params.get("wait"):
- status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], instances_to_remove, 'unused')
- if not status_achieved:
- module.fail_json(msg='Error waiting for target deregistration - please check the AWS console')
-
- # remove lambda targets
- else:
- changed = False
- if current_targets["TargetHealthDescriptions"]:
- changed = True
- # only one target is possible with lambda
- target_to_remove = current_targets["TargetHealthDescriptions"][0]["Target"]["Id"]
- if changed:
- connection.deregister_targets(
- TargetGroupArn=tg['TargetGroupArn'], Targets=[{"Id": target_to_remove}])
- else:
- try:
- connection.create_target_group(**params)
- changed = True
- new_target_group = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't create target group")
-
- tg = get_target_group(connection, module)
-
- if module.params.get("targets"):
- if target_type != "lambda":
- params['Targets'] = module.params.get("targets")
- try:
- connection.register_targets(TargetGroupArn=tg['TargetGroupArn'], Targets=params['Targets'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't register targets")
-
- if module.params.get("wait"):
- status_achieved, registered_instances = wait_for_status(connection, module, tg['TargetGroupArn'], params['Targets'], 'healthy')
- if not status_achieved:
- module.fail_json(msg='Error waiting for target registration to be healthy - please check the AWS console')
-
- else:
- try:
- target = module.params.get("targets")[0]
- response = connection.register_targets(
- TargetGroupArn=tg['TargetGroupArn'],
- Targets=[
- {
- "Id": target["Id"]
- }
- ]
- )
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(
- e, msg="Couldn't register targets")
-
- # Now set target group attributes
- update_attributes = []
-
- # Get current attributes
- current_tg_attributes = get_tg_attributes(connection, module, tg['TargetGroupArn'])
-
- if deregistration_delay_timeout is not None:
- if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']:
- update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': str(deregistration_delay_timeout)})
- if stickiness_enabled is not None:
- if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true":
- update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'})
- if stickiness_lb_cookie_duration is not None:
- if str(stickiness_lb_cookie_duration) != current_tg_attributes['stickiness_lb_cookie_duration_seconds']:
- update_attributes.append({'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': str(stickiness_lb_cookie_duration)})
- if stickiness_type is not None and "stickiness_type" in current_tg_attributes:
- if stickiness_type != current_tg_attributes['stickiness_type']:
- update_attributes.append({'Key': 'stickiness.type', 'Value': stickiness_type})
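- # update_attributes is a boto3 Attributes list, e.g. (hypothetical)
- # [{'Key': 'stickiness.enabled', 'Value': 'true'}]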
-
- if update_attributes:
- try:
- connection.modify_target_group_attributes(TargetGroupArn=tg['TargetGroupArn'], Attributes=update_attributes)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- # Something went wrong setting attributes. If this target group was created during this task, delete it to leave a consistent state
- if new_target_group:
- connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'])
- module.fail_json_aws(e, msg="Couldn't delete target group")
-
- # Tags - only need to play with tags if tags parameter has been set to something
- if tags:
- # Get tags
- current_tags = get_target_group_tags(connection, module, tg['TargetGroupArn'])
-
- # Delete necessary tags
- tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), tags, purge_tags)
- if tags_to_delete:
- try:
- connection.remove_tags(ResourceArns=[tg['TargetGroupArn']], TagKeys=tags_to_delete)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete tags from target group")
- changed = True
-
- # Add/update tags
- if tags_need_modify:
- try:
- connection.add_tags(ResourceArns=[tg['TargetGroupArn']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't add tags to target group")
- changed = True
-
- # Get the target group again
- tg = get_target_group(connection, module)
-
- # Get the target group attributes again
- tg.update(get_tg_attributes(connection, module, tg['TargetGroupArn']))
-
- # Convert tg to snake_case
- snaked_tg = camel_dict_to_snake_dict(tg)
-
- snaked_tg['tags'] = boto3_tag_list_to_ansible_dict(get_target_group_tags(connection, module, tg['TargetGroupArn']))
-
- module.exit_json(changed=changed, **snaked_tg)
-
-
-def delete_target_group(connection, module):
- changed = False
- tg = get_target_group(connection, module)
-
- if tg:
- try:
- connection.delete_target_group(TargetGroupArn=tg['TargetGroupArn'])
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't delete target group")
-
- module.exit_json(changed=changed)
-
-
-def main():
- protocols_list = ['http', 'https', 'tcp', 'tls', 'udp', 'tcp_udp', 'HTTP',
- 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP']
- argument_spec = dict(
- deregistration_delay_timeout=dict(type='int'),
- health_check_protocol=dict(choices=protocols_list),
- health_check_port=dict(),
- health_check_path=dict(),
- health_check_interval=dict(type='int'),
- health_check_timeout=dict(type='int'),
- healthy_threshold_count=dict(type='int'),
- modify_targets=dict(default=True, type='bool'),
- name=dict(required=True),
- port=dict(type='int'),
- protocol=dict(choices=protocols_list),
- purge_tags=dict(default=True, type='bool'),
- stickiness_enabled=dict(type='bool'),
- stickiness_type=dict(default='lb_cookie'),
- stickiness_lb_cookie_duration=dict(type='int'),
- state=dict(required=True, choices=['present', 'absent']),
- successful_response_codes=dict(),
- tags=dict(default={}, type='dict'),
- target_type=dict(choices=['instance', 'ip', 'lambda']),
- targets=dict(type='list'),
- unhealthy_threshold_count=dict(type='int'),
- vpc_id=dict(),
- wait_timeout=dict(type='int', default=200),
- wait=dict(type='bool', default=False)
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[
- ['target_type', 'instance', ['protocol', 'port', 'vpc_id']],
- ['target_type', 'ip', ['protocol', 'port', 'vpc_id']],
- ]
- )
-
- if module.params.get('target_type') is None:
- module.params['target_type'] = 'instance'
-
- connection = module.client('elbv2')
-
- if module.params.get('state') == 'present':
- create_or_update_target_group(connection, module)
- else:
- delete_target_group(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elb_target_group_info.py b/lib/ansible/modules/cloud/amazon/elb_target_group_info.py
deleted file mode 100644
index 60de28e7fb..0000000000
--- a/lib/ansible/modules/cloud/amazon/elb_target_group_info.py
+++ /dev/null
@@ -1,329 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: elb_target_group_info
-short_description: Gather information about ELB target groups in AWS
-description:
- - Gather information about ELB target groups in AWS
- - This module was called C(elb_target_group_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.4"
-requirements: [ boto3 ]
-author: Rob White (@wimnat)
-options:
- load_balancer_arn:
- description:
- - The Amazon Resource Name (ARN) of the load balancer.
- required: false
- type: str
- target_group_arns:
- description:
- - The Amazon Resource Names (ARN) of the target groups.
- required: false
- type: list
- names:
- description:
- - The names of the target groups.
- required: false
- type: list
- collect_targets_health:
- description:
- - When set to "yes", output contains targets health description
- required: false
- default: no
- type: bool
- version_added: 2.8
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Gather information about all target groups
-- elb_target_group_info:
-
-# Gather information about the target group attached to a particular ELB
-- elb_target_group_info:
- load_balancer_arn: "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
-
-# Gather information about target groups named 'tg1' and 'tg2'
-- elb_target_group_info:
- names:
- - tg1
- - tg2
-
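-# Hypothetical example: gather information about one target group and also
-# collect the health of its registered targets
-- elb_target_group_info:
- names:
- - tg1
- collect_targets_health: yes
- register: tg_info
-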
-'''
-
-RETURN = '''
-target_groups:
- description: a list of target groups
- returned: always
- type: complex
- contains:
- deregistration_delay_timeout_seconds:
- description: The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused.
- returned: always
- type: int
- sample: 300
- health_check_interval_seconds:
- description: The approximate amount of time, in seconds, between health checks of an individual target.
- returned: always
- type: int
- sample: 30
- health_check_path:
- description: The destination for the health check request.
- returned: always
- type: str
- sample: /index.html
- health_check_port:
- description: The port to use to connect with the target.
- returned: always
- type: str
- sample: traffic-port
- health_check_protocol:
- description: The protocol to use to connect with the target.
- returned: always
- type: str
- sample: HTTP
- health_check_timeout_seconds:
- description: The amount of time, in seconds, during which no response means a failed health check.
- returned: always
- type: int
- sample: 5
- healthy_threshold_count:
- description: The number of consecutive health check successes required before considering an unhealthy target healthy.
- returned: always
- type: int
- sample: 5
- load_balancer_arns:
- description: The Amazon Resource Names (ARN) of the load balancers that route traffic to this target group.
- returned: always
- type: list
- sample: []
- matcher:
- description: The HTTP codes to use when checking for a successful response from a target.
- returned: always
- type: dict
- sample: {
- "http_code": "200"
- }
- port:
- description: The port on which the targets are listening.
- returned: always
- type: int
- sample: 80
- protocol:
- description: The protocol to use for routing traffic to the targets.
- returned: always
- type: str
- sample: HTTP
- stickiness_enabled:
- description: Indicates whether sticky sessions are enabled.
- returned: always
- type: bool
- sample: true
- stickiness_lb_cookie_duration_seconds:
- description: The time period, in seconds, during which requests from a client should be routed to the same target.
- returned: always
- type: int
- sample: 86400
- stickiness_type:
- description: The type of sticky sessions.
- returned: always
- type: str
- sample: lb_cookie
- tags:
- description: The tags attached to the target group.
- returned: always
- type: dict
- sample: "{
- 'Tag': 'Example'
- }"
- target_group_arn:
- description: The Amazon Resource Name (ARN) of the target group.
- returned: always
- type: str
- sample: "arn:aws:elasticloadbalancing:ap-southeast-2:01234567890:targetgroup/mytargetgroup/aabbccddee0044332211"
- targets_health_description:
- description: Targets health description.
- returned: when collect_targets_health is enabled
- type: complex
- contains:
- health_check_port:
- description: The port to check target health.
- returned: always
- type: str
- sample: '80'
- target:
- description: The target metadata.
- returned: always
- type: complex
- contains:
- id:
- description: The ID of the target.
- returned: always
- type: str
- sample: i-0123456789
- port:
- description: The port to use to connect with the target.
- returned: always
- type: int
- sample: 80
- target_health:
- description: The target health status.
- returned: always
- type: complex
- contains:
- state:
- description: The state of the target health.
- returned: always
- type: str
- sample: healthy
- target_group_name:
- description: The name of the target group.
- returned: always
- type: str
- sample: mytargetgroup
- unhealthy_threshold_count:
- description: The number of consecutive health check failures required before considering the target unhealthy.
- returned: always
- type: int
- sample: 2
- vpc_id:
- description: The ID of the VPC for the targets.
- returned: always
- type: str
- sample: vpc-0123456
-'''
-
-import traceback
-
-try:
- import boto3
- from botocore.exceptions import ClientError, NoCredentialsError
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
- ec2_argument_spec, get_aws_connection_info)
-
-
-def get_target_group_attributes(connection, module, target_group_arn):
-
- try:
- target_group_attributes = boto3_tag_list_to_ansible_dict(connection.describe_target_group_attributes(TargetGroupArn=target_group_arn)['Attributes'])
- except ClientError as e:
- module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
- # Replace '.' with '_' in attribute key names to make it more Ansibley
- return dict((k.replace('.', '_'), v)
- for (k, v) in target_group_attributes.items())
-
-
-def get_target_group_tags(connection, module, target_group_arn):
-
- try:
- return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'])
- except ClientError as e:
- module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
-
-def get_target_group_targets_health(connection, module, target_group_arn):
-
- try:
- return connection.describe_target_health(TargetGroupArn=target_group_arn)['TargetHealthDescriptions']
- except ClientError as e:
- module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
-
-def list_target_groups(connection, module):
-
- load_balancer_arn = module.params.get("load_balancer_arn")
- target_group_arns = module.params.get("target_group_arns")
- names = module.params.get("names")
- collect_targets_health = module.params.get("collect_targets_health")
-
- try:
- target_group_paginator = connection.get_paginator('describe_target_groups')
- if not load_balancer_arn and not target_group_arns and not names:
- target_groups = target_group_paginator.paginate().build_full_result()
- if load_balancer_arn:
- target_groups = target_group_paginator.paginate(LoadBalancerArn=load_balancer_arn).build_full_result()
- if target_group_arns:
- target_groups = target_group_paginator.paginate(TargetGroupArns=target_group_arns).build_full_result()
- if names:
- target_groups = target_group_paginator.paginate(Names=names).build_full_result()
- except ClientError as e:
- if e.response['Error']['Code'] == 'TargetGroupNotFound':
- module.exit_json(target_groups=[])
- else:
- module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except NoCredentialsError as e:
- module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc())
-
- # Get the attributes and tags for each target group
- for target_group in target_groups['TargetGroups']:
- target_group.update(get_target_group_attributes(connection, module, target_group['TargetGroupArn']))
-
- # Turn the boto3 result in to ansible_friendly_snaked_names
- snaked_target_groups = [camel_dict_to_snake_dict(target_group) for target_group in target_groups['TargetGroups']]
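- # camel_dict_to_snake_dict renames keys, e.g. (hypothetical)
- # 'HealthCheckIntervalSeconds' -> 'health_check_interval_seconds'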
-
- # Get tags for each target group
- for snaked_target_group in snaked_target_groups:
- snaked_target_group['tags'] = get_target_group_tags(connection, module, snaked_target_group['target_group_arn'])
- if collect_targets_health:
- snaked_target_group['targets_health_description'] = [camel_dict_to_snake_dict(
- target) for target in get_target_group_targets_health(connection, module, snaked_target_group['target_group_arn'])]
-
- module.exit_json(target_groups=snaked_target_groups)
-
-
-def main():
-
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- load_balancer_arn=dict(type='str'),
- target_group_arns=dict(type='list'),
- names=dict(type='list'),
- collect_targets_health=dict(default=False, type='bool', required=False)
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']],
- supports_check_mode=True
- )
- if module._name == 'elb_target_group_facts':
- module.deprecate("The 'elb_target_group_facts' module has been renamed to 'elb_target_group_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-
- if region:
- connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
- else:
- module.fail_json(msg="region must be specified")
-
- list_target_groups(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/elb_target_info.py b/lib/ansible/modules/cloud/amazon/elb_target_info.py
deleted file mode 100644
index 2bf1ddf73d..0000000000
--- a/lib/ansible/modules/cloud/amazon/elb_target_info.py
+++ /dev/null
@@ -1,439 +0,0 @@
-#!/usr/bin/python
-# Copyright: (c) 2018, Yaakov Kuperman <ykuperman@gmail.com>
-# GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-
-ANSIBLE_METADATA = {"metadata_version": "1.1",
- "status": ["preview"],
- "supported_by": "community"}
-
-
-DOCUMENTATION = """
----
-module: elb_target_info
-short_description: Gathers which target groups a target is associated with.
-description:
- - This module will search through every target group in a region to find
- which ones have registered a given instance ID or IP.
- - This module was called C(elb_target_facts) before Ansible 2.9. The usage did not change.
-
-version_added: "2.7"
-author: "Yaakov Kuperman (@yaakov-github)"
-options:
- instance_id:
- description:
- - What instance ID to get information for.
- type: str
- required: true
- get_unused_target_groups:
- description:
- - Whether or not to get target groups not used by any load balancers.
- type: bool
- default: true
-
-requirements:
- - boto3
- - botocore
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-EXAMPLES = """
-# practical use case - dynamically deregistering and reregistering nodes
-
- - name: Get EC2 Metadata
- action: ec2_metadata_facts
-
- - name: Get initial list of target groups
- delegate_to: localhost
- elb_target_info:
- instance_id: "{{ ansible_ec2_instance_id }}"
- region: "{{ ansible_ec2_placement_region }}"
- register: target_info
-
- - name: save fact for later
- set_fact:
- original_tgs: "{{ target_info.instance_target_groups }}"
-
- - name: Deregister instance from all target groups
- delegate_to: localhost
- elb_target:
- target_group_arn: "{{ item.0.target_group_arn }}"
- target_port: "{{ item.1.target_port }}"
- target_az: "{{ item.1.target_az }}"
- target_id: "{{ item.1.target_id }}"
- state: absent
- target_status: "draining"
- region: "{{ ansible_ec2_placement_region }}"
- with_subelements:
- - "{{ original_tgs }}"
- - "targets"
-
- # This avoids having to wait for 'elb_target' to serially deregister each
- # target group. An alternative would be to run all of the 'elb_target'
- # tasks async and wait for them to finish.
-
- - name: wait for all targets to deregister simultaneously
- delegate_to: localhost
- elb_target_info:
- get_unused_target_groups: false
- instance_id: "{{ ansible_ec2_instance_id }}"
- region: "{{ ansible_ec2_placement_region }}"
- register: target_info
- until: (target_info.instance_target_groups | length) == 0
- retries: 60
- delay: 10
-
- - name: reregister in elbv2s
- elb_target:
- region: "{{ ansible_ec2_placement_region }}"
- target_group_arn: "{{ item.0.target_group_arn }}"
- target_port: "{{ item.1.target_port }}"
- target_az: "{{ item.1.target_az }}"
- target_id: "{{ item.1.target_id }}"
- state: present
- target_status: "initial"
- with_subelements:
- - "{{ original_tgs }}"
- - "targets"
-
- # wait until all groups associated with this instance are 'healthy' or
- # 'unused'
- - name: wait for registration
- elb_target_info:
- get_unused_target_groups: false
- instance_id: "{{ ansible_ec2_instance_id }}"
- region: "{{ ansible_ec2_placement_region }}"
- register: target_info
- until: (target_info.instance_target_groups |
- map(attribute='targets') |
- flatten |
- map(attribute='target_health') |
- rejectattr('state', 'equalto', 'healthy') |
- rejectattr('state', 'equalto', 'unused') |
- list |
- length) == 0
- retries: 61
- delay: 10
-
-# using the target groups to generate AWS CLI commands to reregister the
-# instance - useful in case the playbook fails mid-run and manual
-# rollback is required
- - name: "reregistration commands: ELBv2s"
- debug:
- msg: >
- aws --region {{ansible_ec2_placement_region}} elbv2
- register-targets --target-group-arn {{item.target_group_arn}}
- --targets{%for target in item.targets%}
- Id={{target.target_id}},
- Port={{target.target_port}}{%if target.target_az%},AvailabilityZone={{target.target_az}}
- {%endif%}
- {%endfor%}
- loop: "{{target_info.instance_target_groups}}"
-
-"""
-
-RETURN = """
-instance_target_groups:
- description: a list of target groups to which the instance is registered
- returned: always
- type: complex
- contains:
- target_group_arn:
- description: The ARN of the target group
- type: str
- returned: always
- sample:
- - "arn:aws:elasticloadbalancing:eu-west-1:111111111111:targetgroup/target-group/deadbeefdeadbeef"
- target_group_type:
- description: Which target type is used for this group
- returned: always
- type: str
- sample:
- - ip
- - instance
- targets:
- description: A list of targets that point to this instance ID
- returned: always
- type: complex
- contains:
- target_id:
- description: the target ID referring to this instance
- type: str
- returned: always
- sample:
- - i-deadbeef
- - 1.2.3.4
- target_port:
- description: which port this target is listening on
- type: str
- returned: always
- sample:
- - 80
- target_az:
- description: which availability zone is explicitly
- associated with this target
- type: str
- returned: when an AZ is associated with this instance
- sample:
- - us-west-2a
- target_health:
- description:
- - The target health description.
- - See the following link for all the possible values
- U(https://boto3.readthedocs.io/en/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_health)
- returned: always
- type: complex
- contains:
- description:
- description: description of target health
- returned: if I(state!=present)
- sample:
- - "Target desregistration is in progress"
- type: str
- reason:
- description: reason code for target health
- returned: if I(state!=healthy)
- sample:
- - "Target.Deregistration in progress"
- type: str
- state:
- description: health state
- returned: always
- sample:
- - "healthy"
- - "draining"
- - "initial"
- - "unhealthy"
- - "unused"
- - "unavailable"
- type: str
-"""
-
-__metaclass__ = type
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- # we can handle the lack of boto3 based on the ec2 module
- pass
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
-
-
-class Target(object):
- """Models a target in a target group"""
- def __init__(self, target_id, port, az, raw_target_health):
- self.target_port = port
- self.target_id = target_id
- self.target_az = az
- self.target_health = self.convert_target_health(raw_target_health)
-
- def convert_target_health(self, raw_target_health):
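- # e.g. (hypothetical) {'State': 'healthy'} -> {'state': 'healthy'}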
- return camel_dict_to_snake_dict(raw_target_health)
-
-
-class TargetGroup(object):
- """Models an elbv2 target group"""
-
- def __init__(self, **kwargs):
- self.target_group_type = kwargs["target_group_type"]
- self.target_group_arn = kwargs["target_group_arn"]
- # the relevant targets associated with this group
- self.targets = []
-
- def add_target(self, target_id, target_port, target_az, raw_target_health):
- self.targets.append(Target(target_id,
- target_port,
- target_az,
- raw_target_health))
-
- def to_dict(self):
- object_dict = vars(self)
- object_dict["targets"] = [vars(each) for each in self.get_targets()]
- return object_dict
-
- def get_targets(self):
- return list(self.targets)
-
-
-class TargetInfoGatherer(object):
-
- def __init__(self, module, instance_id, get_unused_target_groups):
- self.module = module
- try:
- self.ec2 = self.module.client(
- "ec2",
- retry_decorator=AWSRetry.jittered_backoff(retries=10)
- )
- except (ClientError, BotoCoreError) as e:
- self.module.fail_json_aws(e,
- msg="Couldn't connect to ec2"
- )
-
- try:
- self.elbv2 = self.module.client(
- "elbv2",
- retry_decorator=AWSRetry.jittered_backoff(retries=10)
- )
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e,
- msg="Could not connect to elbv2"
- )
-
- self.instance_id = instance_id
- self.get_unused_target_groups = get_unused_target_groups
- self.tgs = self._get_target_groups()
-
- def _get_instance_ips(self):
- """Fetch all IPs associated with this instance so that we can determine
- whether or not an instance is in an IP-based target group"""
- try:
- # get ahold of the instance in the API
- reservations = self.ec2.describe_instances(
- InstanceIds=[self.instance_id],
- aws_retry=True
- )["Reservations"]
- except (BotoCoreError, ClientError) as e:
- # typically this will happen if the instance doesn't exist
- self.module.fail_json_aws(e,
- msg="Could not get instance info" +
- " for instance '%s'" %
- (self.instance_id)
- )
-
- if len(reservations) < 1:
- self.module.fail_json(
- msg="Instance ID %s could not be found" % self.instance_id
- )
-
- instance = reservations[0]["Instances"][0]
-
- # IPs are represented in a few places in the API, this should
- # account for all of them
- ips = set()
- ips.add(instance["PrivateIpAddress"])
- for nic in instance["NetworkInterfaces"]:
- ips.add(nic["PrivateIpAddress"])
- for ip in nic["PrivateIpAddresses"]:
- ips.add(ip["PrivateIpAddress"])
-
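- # ips now holds every private IP on the instance (primary and
- # secondary), e.g. (hypothetical) {'10.0.0.10', '10.0.1.12'}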
- return list(ips)
-
- def _get_target_group_objects(self):
- """helper function to build a list of TargetGroup objects based on
- the AWS API"""
- try:
- paginator = self.elbv2.get_paginator(
- "describe_target_groups"
- )
- tg_response = paginator.paginate().build_full_result()
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e,
- msg="Could not describe target" +
- " groups"
- )
-
- # build list of TargetGroup objects representing every target group in
- # the system
- target_groups = []
- for each_tg in tg_response["TargetGroups"]:
- if not self.get_unused_target_groups and \
- len(each_tg["LoadBalancerArns"]) < 1:
- # only collect target groups that actually are connected
- # to LBs
- continue
-
- target_groups.append(
- TargetGroup(target_group_arn=each_tg["TargetGroupArn"],
- target_group_type=each_tg["TargetType"],
- )
- )
- return target_groups
-
- def _get_target_descriptions(self, target_groups):
- """Helper function to build a list of all the target descriptions
- for this target in a target group"""
- # Build a list of all the target groups pointing to this instance
- # based on the previous list
- tgs = set()
- # Loop through all the target groups
- for tg in target_groups:
- try:
- # Get the list of targets for that target group
- response = self.elbv2.describe_target_health(
- TargetGroupArn=tg.target_group_arn,
- aws_retry=True
- )
- except (BotoCoreError, ClientError) as e:
- self.module.fail_json_aws(e,
- msg="Could not describe target " +
- "health for target group %s" %
- tg.target_group_arn
- )
-
- for t in response["TargetHealthDescriptions"]:
- # If the target group has this instance as a target, add to
- # list. This logic also accounts for the possibility of a
- # target being in the target group multiple times with
- # overridden ports
- if t["Target"]["Id"] == self.instance_id or \
- t["Target"]["Id"] in self.instance_ips:
-
- # The 'AvailabilityZone' parameter is a weird one, see the
- # API docs for more. Basically it's only supposed to be
- # there under very specific circumstances, so we need
- # to account for that
- az = t["Target"]["AvailabilityZone"] \
- if "AvailabilityZone" in t["Target"] \
- else None
-
- tg.add_target(t["Target"]["Id"],
- t["Target"]["Port"],
- az,
- t["TargetHealth"])
- # since tgs is a set, each target group will be added only
- # once, even though we call add on each successful match
- tgs.add(tg)
- return list(tgs)
-
- def _get_target_groups(self):
- # do this first since we need the IPs later on in this function
- self.instance_ips = self._get_instance_ips()
-
- # build list of target groups
- target_groups = self._get_target_group_objects()
- return self._get_target_descriptions(target_groups)
-
-
-def main():
- argument_spec = dict(
- instance_id={"required": True, "type": "str"},
- get_unused_target_groups={"required": False,
- "default": True, "type": "bool"}
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
- if module._name == 'elb_target_facts':
- module.deprecate("The 'elb_target_facts' module has been renamed to 'elb_target_info'", version='2.13')
-
- instance_id = module.params["instance_id"]
- get_unused_target_groups = module.params["get_unused_target_groups"]
-
- tg_gatherer = TargetInfoGatherer(module,
- instance_id,
- get_unused_target_groups
- )
-
- instance_target_groups = [each.to_dict() for each in tg_gatherer.tgs]
-
- module.exit_json(instance_target_groups=instance_target_groups)
-
-
-if __name__ == "__main__":
- main()
diff --git a/lib/ansible/modules/cloud/amazon/execute_lambda.py b/lib/ansible/modules/cloud/amazon/execute_lambda.py
deleted file mode 100644
index 1d4fe4dc9c..0000000000
--- a/lib/ansible/modules/cloud/amazon/execute_lambda.py
+++ /dev/null
@@ -1,286 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: execute_lambda
-short_description: Execute an AWS Lambda function
-description:
- - This module executes AWS Lambda functions, allowing synchronous and asynchronous
- invocation.
-version_added: "2.2"
-extends_documentation_fragment:
- - aws
- - ec2
-author: "Ryan Scott Brown (@ryansb) <ryansb@redhat.com>"
-requirements:
- - python >= 2.6
- - boto3
-notes:
- - Async invocation will always return an empty C(output) key.
- - Synchronous invocation may result in a function timeout, resulting in an
- empty C(output) key.
-options:
- name:
- description:
- - The name of the function to be invoked. This can only be used for
- invocations within the calling account. To invoke a function in another
- account, use I(function_arn) to specify the full ARN.
- type: str
- function_arn:
- description:
- - The full ARN of the function to be invoked.
- type: str
- tail_log:
- description:
- - If I(tail_log=yes), the result of the task will include the last 4 KB
- of the CloudWatch log for the function execution. Log tailing only
- works if you use synchronous invocation I(wait=yes). This is usually
- used for development or testing Lambdas.
- type: bool
- default: false
- wait:
- description:
- - Whether to wait for the function results or not. If I(wait=no)
- the task will not return any results. To wait for the Lambda function
- to complete, set I(wait=yes) and the result will be available in the
- I(output) key.
- type: bool
- default: true
- dry_run:
- description:
- - Do not *actually* invoke the function. A C(DryRun) call will check that
- the caller has permissions to call the function, especially for
- checking cross-account permissions.
- type: bool
- default: false
- version_qualifier:
- description:
- - Which version/alias of the function to run. This defaults to the
- C(LATEST) revision, but can be set to any existing version or alias.
- See U(https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html)
- for details.
- type: str
- payload:
- description:
- - A dictionary in any form to be provided as input to the Lambda function.
- default: {}
- type: dict
-'''
-
-EXAMPLES = '''
-- execute_lambda:
- name: test-function
- # the payload is automatically serialized and sent to the function
- payload:
- foo: bar
- value: 8
- register: response
-
-# Test that you have sufficient permissions to execute a Lambda function in
-# another account
-- execute_lambda:
- function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function
- dry_run: true
-
-- execute_lambda:
- name: test-function
- payload:
- foo: bar
- value: 8
- wait: true
- tail_log: true
- register: response
- # the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda
-
-# Pass the Lambda event payload as a json file.
-- execute_lambda:
- name: test-function
- payload: "{{ lookup('file','lambda_event.json') }}"
- register: response
-
-- execute_lambda:
- name: test-function
- version_qualifier: PRODUCTION
-'''
-
-RETURN = '''
-output:
- description: Function output if wait=true and the function returns a value
- returned: success
- type: dict
- sample: "{ 'output': 'something' }"
-logs:
- description: The last 4KB of the function logs. Only provided if I(tail_log) is true
- type: str
- returned: if I(tail_log=true)
-status:
- description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async)
- type: int
- sample: 200
- returned: always
-'''
-
-import base64
-import json
-import traceback
-
-try:
- import botocore
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
-from ansible.module_utils._text import to_native
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- name=dict(),
- function_arn=dict(),
- wait=dict(default=True, type='bool'),
- tail_log=dict(default=False, type='bool'),
- dry_run=dict(default=False, type='bool'),
- version_qualifier=dict(),
- payload=dict(default={}, type='dict'),
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[
- ['name', 'function_arn'],
- ]
- )
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- name = module.params.get('name')
- function_arn = module.params.get('function_arn')
- await_return = module.params.get('wait')
- dry_run = module.params.get('dry_run')
- tail_log = module.params.get('tail_log')
- version_qualifier = module.params.get('version_qualifier')
- payload = module.params.get('payload')
-
- if not (name or function_arn):
- module.fail_json(msg="Must provide either a function_arn or a name to invoke.")
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=HAS_BOTO3)
- if not region:
- module.fail_json(msg="The AWS region must be specified as an "
- "environment variable or in the AWS credentials "
- "profile.")
-
- try:
- client = boto3_conn(module, conn_type='client', resource='lambda',
- region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
- module.fail_json(msg="Failure connecting boto3 to AWS: %s" % to_native(e), exception=traceback.format_exc())
-
- invoke_params = {}
-
- if await_return:
- # await response
- invoke_params['InvocationType'] = 'RequestResponse'
- else:
- # fire and forget
- invoke_params['InvocationType'] = 'Event'
- if dry_run or module.check_mode:
- # dry_run overrides invocation type
- invoke_params['InvocationType'] = 'DryRun'
-
- if tail_log and await_return:
- invoke_params['LogType'] = 'Tail'
- elif tail_log and not await_return:
- module.fail_json(msg="The `tail_log` parameter is only available if "
- "the invocation waits for the function to complete. "
- "Set `wait` to true or turn off `tail_log`.")
- else:
- invoke_params['LogType'] = 'None'
-
- if version_qualifier:
- invoke_params['Qualifier'] = version_qualifier
-
- if payload:
- invoke_params['Payload'] = json.dumps(payload)
-
- if function_arn:
- invoke_params['FunctionName'] = function_arn
- elif name:
- invoke_params['FunctionName'] = name
-
- try:
- response = client.invoke(**invoke_params)
- except botocore.exceptions.ClientError as ce:
- if ce.response['Error']['Code'] == 'ResourceNotFoundException':
- module.fail_json(msg="Could not find Lambda to execute. Make sure "
- "the ARN is correct and your profile has "
- "permissions to execute this function.",
- exception=traceback.format_exc())
- module.fail_json(msg="Client-side error when invoking Lambda, check inputs and specific error",
- exception=traceback.format_exc())
- except botocore.exceptions.ParamValidationError as ve:
- module.fail_json(msg="Parameters to `invoke` failed to validate",
- exception=traceback.format_exc())
- except Exception as e:
- module.fail_json(msg="Unexpected failure while invoking Lambda function",
- exception=traceback.format_exc())
-
- results = {
- 'logs': '',
- 'status': response['StatusCode'],
- 'output': '',
- }
-
- if response.get('LogResult'):
- try:
- # logs are base64 encoded in the API response
- results['logs'] = base64.b64decode(response.get('LogResult', '')).decode('utf-8')
- except Exception as e:
- module.fail_json(msg="Failed while decoding logs", exception=traceback.format_exc())
-
- if invoke_params['InvocationType'] == 'RequestResponse':
- try:
- results['output'] = json.loads(response['Payload'].read().decode('utf8'))
- except Exception as e:
- module.fail_json(msg="Failed while decoding function return value", exception=traceback.format_exc())
-
- if isinstance(results.get('output'), dict) and any(
- [results['output'].get('stackTrace'), results['output'].get('errorMessage')]):
- # AWS sends back stack traces and error messages when a function failed
- # in a RequestResponse (synchronous) context.
- template = ("Function executed, but there was an error in the Lambda function. "
- "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
- error_data = {
- # format the stacktrace sent back as an array into a multiline string
- 'trace': '\n'.join(
- [' '.join([
- str(x) for x in line # cast line numbers to strings
- ]) for line in results.get('output', {}).get('stackTrace', [])]
- ),
- 'errmsg': results['output'].get('errorMessage'),
- 'type': results['output'].get('errorType')
- }
- module.fail_json(msg=template.format(**error_data), result=results)
-
- module.exit_json(changed=True, result=results)
-
-
-if __name__ == '__main__':
- main()
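The module above is, at its core, a thin wrapper around a single Lambda Invoke API call: wait selects between the RequestResponse and Event invocation types, dry_run (or check mode) forces DryRun, and tail_log asks for the last 4 KB of execution logs. A stripped-down boto3 sketch of the same flow, for comparison (error handling simplified; this is not the module itself):

    # Sketch: synchronous Lambda invocation with tailed logs, mirroring the
    # invoke_params logic above. Assumes boto3 credentials/region are configured.
    import base64
    import json
    import boto3

    def invoke_sync(function_name, payload):
        client = boto3.client('lambda')
        response = client.invoke(
            FunctionName=function_name,
            InvocationType='RequestResponse',  # 'Event' would fire and forget
            LogType='Tail',                    # return the last 4 KB of logs
            Payload=json.dumps(payload),
        )
        logs = base64.b64decode(response['LogResult']).decode('utf-8')
        output = json.loads(response['Payload'].read().decode('utf-8'))
        return response['StatusCode'], logs, output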
diff --git a/lib/ansible/modules/cloud/amazon/iam.py b/lib/ansible/modules/cloud/amazon/iam.py
deleted file mode 100644
index 7ff7f74be1..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam.py
+++ /dev/null
@@ -1,873 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: iam
-short_description: Manage IAM users, groups, roles and keys
-description:
- - Allows for the management of IAM users, user API keys, groups, and roles.
-version_added: "2.0"
-options:
- iam_type:
- description:
- - Type of IAM resource.
- choices: ["user", "group", "role"]
- type: str
- required: true
- name:
- description:
- - Name of IAM resource to create or identify.
- required: true
- type: str
- new_name:
- description:
- - When I(state=update), will replace I(name) with I(new_name) on the IAM resource.
- type: str
- new_path:
- description:
- - When I(state=update), will replace the path with I(new_path) on the IAM resource.
- type: str
- state:
- description:
- - Whether to create, delete or update the IAM resource. Note, roles cannot be updated.
- required: true
- choices: [ "present", "absent", "update" ]
- type: str
- path:
- description:
- - When creating or updating, specify the desired path of the resource.
- - If I(state=present), the resource's current path will be updated to match I(path) when they differ.
- default: "/"
- type: str
- trust_policy:
- description:
- - The inline (JSON or YAML) trust policy document that grants an entity permission to assume the role.
- - Mutually exclusive with I(trust_policy_filepath).
- version_added: "2.2"
- type: dict
- trust_policy_filepath:
- description:
- - The path to the trust policy document that grants an entity permission to assume the role.
- - Mutually exclusive with I(trust_policy).
- version_added: "2.2"
- type: str
- access_key_state:
- description:
- - When I(iam_type=user), it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to the keys specified.
- choices: [ "create", "remove", "active", "inactive", "Create", "Remove", "Active", "Inactive"]
- type: str
- key_count:
- description:
- - When I(access_key_state=create) it will ensure this quantity of keys are present.
- default: 1
- type: int
- access_key_ids:
- description:
- - A list of the keys that you want affected by the I(access_key_state) parameter.
- type: list
- groups:
- description:
- - A list of groups the user should belong to. When I(state=update), will gracefully remove groups not listed.
- type: list
- password:
- description:
- - When I(iam_type=user) and either I(state=present) or I(state=update), define the user's login password.
- - Note that this will always return 'changed'.
- type: str
- update_password:
- default: always
- choices: ['always', 'on_create']
- description:
- - When to update user passwords.
- - I(update_password=always) will ensure the password is set to I(password).
- - I(update_password=on_create) will only set the password for newly created users.
- type: str
-notes:
- - 'Currently boto does not support the removal of Managed Policies; the module will error out if your
-    user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.'
-author:
- - "Jonathan I. Davila (@defionscode)"
- - "Paul Seiffert (@seiffert)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Basic user creation example
-tasks:
-- name: Create two new IAM users with API keys
- iam:
- iam_type: user
- name: "{{ item }}"
- state: present
- password: "{{ temp_pass }}"
- access_key_state: create
- loop:
- - jcleese
- - mpython
-
-# Advanced example, create two new groups and add the pre-existing user
-# jdavila to both groups.
-task:
-- name: Create Two Groups, Mario and Luigi
- iam:
- iam_type: group
- name: "{{ item }}"
- state: present
- loop:
- - Mario
- - Luigi
- register: new_groups
-
-- name: Add the user jdavila to the new groups
- iam:
- iam_type: user
- name: jdavila
- state: update
- groups: "{{ item.created_group.group_name }}"
- loop: "{{ new_groups.results }}"
-
-# Example of role with custom trust policy for Lambda service
-- name: Create IAM role with custom trust relationship
- iam:
- iam_type: role
- name: AAALambdaTestRole
- state: present
- trust_policy:
- Version: '2012-10-17'
- Statement:
- - Action: sts:AssumeRole
- Effect: Allow
- Principal:
- Service: lambda.amazonaws.com
-
-'''
-RETURN = '''
-role_result:
- description: the IAM.role dict returned by Boto
- type: str
- returned: if iam_type=role and state=present
- sample: {
- "arn": "arn:aws:iam::A1B2C3D4E5F6:role/my-new-role",
- "assume_role_policy_document": "...truncated...",
- "create_date": "2017-09-02T14:32:23Z",
- "path": "/",
- "role_id": "AROAA1B2C3D4E5F6G7H8I",
- "role_name": "my-new-role"
- }
-roles:
- description: a list containing the name of the currently defined roles
- type: list
- returned: if iam_type=role and state=present
- sample: [
- "my-new-role",
- "my-existing-role-1",
- "my-existing-role-2",
- "my-existing-role-3",
- "my-existing-role-...",
- ]
-'''
-
-import json
-import traceback
-
-try:
- import boto.exception
- import boto.iam
- import boto.iam.connection
-except ImportError:
- pass # Taken care of by ec2.HAS_BOTO
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (HAS_BOTO, boto_exception, connect_to_aws, ec2_argument_spec,
- get_aws_connection_info)
-
-
-def _paginate(func, attr):
- '''
- paginates the results from func by continuously passing in
- the returned marker if the results were truncated. this returns
- an iterator over the items in the returned response. `attr` is
- the name of the attribute to iterate over in the response.
- '''
- finished, marker = False, None
- while not finished:
- res = func(marker=marker)
- for item in getattr(res, attr):
- yield item
-
- finished = res.is_truncated == 'false'
- if not finished:
- marker = res.marker
-
-
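To make the marker protocol concrete, here is a tiny self-contained demonstration of how _paginate stitches truncated responses together (FakePage and fake_list are invented stand-ins for boto 2 result objects):

    # Demonstration only: a fake paginated API that returns 2 items per call.
    class FakePage(object):
        def __init__(self, items, marker, truncated):
            self.items = items
            self.marker = marker
            self.is_truncated = 'true' if truncated else 'false'

    DATA = ['a', 'b', 'c', 'd', 'e']

    def fake_list(marker=None):
        start = 0 if marker is None else int(marker)
        end = start + 2
        return FakePage(DATA[start:end], str(end), end < len(DATA))

    print(list(_paginate(fake_list, 'items')))  # ['a', 'b', 'c', 'd', 'e']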
-def list_all_groups(iam):
- return [item['group_name'] for item in _paginate(iam.get_all_groups, 'groups')]
-
-
-def list_all_users(iam):
- return [item['user_name'] for item in _paginate(iam.get_all_users, 'users')]
-
-
-def list_all_roles(iam):
- return [item['role_name'] for item in _paginate(iam.list_roles, 'roles')]
-
-
-def list_all_instance_profiles(iam):
- return [item['instance_profile_name'] for item in _paginate(iam.list_instance_profiles, 'instance_profiles')]
-
-
-def create_user(module, iam, name, pwd, path, key_state, key_count):
- key_qty = 0
- keys = []
- try:
- user_meta = iam.create_user(
- name, path).create_user_response.create_user_result.user
- changed = True
- if pwd is not None:
- pwd = iam.create_login_profile(name, pwd)
- if key_state in ['create']:
- if key_count:
- while key_count > key_qty:
- keys.append(iam.create_access_key(
- user_name=name).create_access_key_response.
- create_access_key_result.
- access_key)
- key_qty += 1
- else:
- keys = None
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=False, msg=str(err))
- else:
- user_info = dict(created_user=user_meta, password=pwd, access_keys=keys)
- return (user_info, changed)
-
-
-def delete_dependencies_first(module, iam, name):
- changed = False
- # try to delete any keys
- try:
- current_keys = [ck['access_key_id'] for ck in
- iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
- for key in current_keys:
- iam.delete_access_key(key, name)
- changed = True
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=changed, msg="Failed to delete keys: %s" % err, exception=traceback.format_exc())
-
- # try to delete login profiles
- try:
- login_profile = iam.get_login_profiles(name).get_login_profile_response
- iam.delete_login_profile(name)
- changed = True
- except boto.exception.BotoServerError as err:
- error_msg = boto_exception(err)
- if 'Login Profile for User ' + name + ' cannot be found.' not in error_msg:
- module.fail_json(changed=changed, msg="Failed to delete login profile: %s" % err, exception=traceback.format_exc())
-
- # try to detach policies
- try:
- for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names:
- iam.delete_user_policy(name, policy)
- changed = True
- except boto.exception.BotoServerError as err:
- error_msg = boto_exception(err)
- if 'must detach all policies first' in error_msg:
- module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears"
- "that %s has Managed Polices. This is not "
- "currently supported by boto. Please detach the policies "
- "through the console and try again." % name)
- module.fail_json(changed=changed, msg="Failed to delete policies: %s" % err, exception=traceback.format_exc())
-
- # try to deactivate associated MFA devices
- try:
- mfa_devices = iam.get_all_mfa_devices(name).get('list_mfa_devices_response', {}).get('list_mfa_devices_result', {}).get('mfa_devices', [])
- for device in mfa_devices:
- iam.deactivate_mfa_device(name, device['serial_number'])
- changed = True
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=changed, msg="Failed to deactivate associated MFA devices: %s" % err, exception=traceback.format_exc())
-
- return changed
-
-
-def delete_user(module, iam, name):
- changed = delete_dependencies_first(module, iam, name)
- try:
- iam.delete_user(name)
- except boto.exception.BotoServerError as ex:
- module.fail_json(changed=changed, msg="Failed to delete user %s: %s" % (name, ex), exception=traceback.format_exc())
- else:
- changed = True
- return name, changed
-
-
-def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated):
- changed = False
- name_change = False
- if updated and new_name:
- name = new_name
- try:
- current_keys = [ck['access_key_id'] for ck in
- iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
- status = [ck['status'] for ck in
- iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
- key_qty = len(current_keys)
- except boto.exception.BotoServerError as err:
- error_msg = boto_exception(err)
- if 'cannot be found' in error_msg and updated:
- current_keys = [ck['access_key_id'] for ck in
- iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
- status = [ck['status'] for ck in
- iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
- name = new_name
- else:
- module.fail_json(changed=False, msg=str(err))
-
- updated_key_list = {}
-
- if new_name or new_path:
- c_path = iam.get_user(name).get_user_result.user['path']
- if (name != new_name) or (c_path != new_path):
- changed = True
- try:
- if not updated:
- user = iam.update_user(
- name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata
- else:
- user = iam.update_user(
- name, new_path=new_path).update_user_response.response_metadata
- user['updates'] = dict(
- old_username=name, new_username=new_name, old_path=c_path, new_path=new_path)
- except boto.exception.BotoServerError as err:
- error_msg = boto_exception(err)
- module.fail_json(changed=False, msg=str(err))
- else:
- if not updated:
- name_change = True
-
- if pwd:
- try:
- iam.update_login_profile(name, pwd)
- changed = True
- except boto.exception.BotoServerError:
- try:
- iam.create_login_profile(name, pwd)
- changed = True
- except boto.exception.BotoServerError as err:
- error_msg = boto_exception(err)
- if 'Password does not conform to the account password policy' in error_msg:
- module.fail_json(changed=False, msg="Password doesn't conform to policy")
- else:
- module.fail_json(msg=error_msg)
-
- try:
- current_keys = [ck['access_key_id'] for ck in
- iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
- status = [ck['status'] for ck in
- iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
- key_qty = len(current_keys)
- except boto.exception.BotoServerError as err:
- error_msg = boto_exception(err)
- if 'cannot be found' in error_msg and updated:
- current_keys = [ck['access_key_id'] for ck in
- iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
- status = [ck['status'] for ck in
- iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
- name = new_name
- else:
- module.fail_json(changed=False, msg=str(err))
-
- new_keys = []
- if key_state == 'create':
- try:
- while key_count > key_qty:
- new_keys.append(iam.create_access_key(
- user_name=name).create_access_key_response.create_access_key_result.access_key)
- key_qty += 1
- changed = True
-
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=False, msg=str(err))
-
- if keys and key_state:
- for access_key in keys:
- if key_state in ('active', 'inactive'):
- if access_key in current_keys:
- for current_key, current_key_state in zip(current_keys, status):
- if key_state != current_key_state.lower():
- try:
- iam.update_access_key(access_key, key_state.capitalize(), user_name=name)
- changed = True
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=False, msg=str(err))
- else:
- module.fail_json(msg="Supplied keys not found for %s. "
- "Current keys: %s. "
- "Supplied key(s): %s" %
- (name, current_keys, keys)
- )
-
- if key_state == 'remove':
- if access_key in current_keys:
- try:
- iam.delete_access_key(access_key, user_name=name)
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=False, msg=str(err))
- else:
- changed = True
-
- try:
- final_keys, final_key_status = \
- [ck['access_key_id'] for ck in
- iam.get_all_access_keys(name).
- list_access_keys_result.
- access_key_metadata],\
- [ck['status'] for ck in
- iam.get_all_access_keys(name).
- list_access_keys_result.
- access_key_metadata]
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=changed, msg=str(err))
-
- for fk, fks in zip(final_keys, final_key_status):
- updated_key_list.update({fk: fks})
-
- return name_change, updated_key_list, changed, new_keys
-
-
-def set_users_groups(module, iam, name, groups, updated=None,
- new_name=None):
- """ Sets groups for a user, will purge groups not explicitly passed, while
- retaining pre-existing groups that also are in the new list.
- """
- changed = False
-
- if updated:
- name = new_name
-
- try:
- orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user(
- name).list_groups_for_user_result.groups]
- remove_groups = [
- rg for rg in frozenset(orig_users_groups).difference(groups)]
- new_groups = [
- ng for ng in frozenset(groups).difference(orig_users_groups)]
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=changed, msg=str(err))
- else:
- if len(orig_users_groups) > 0:
- for new in new_groups:
- iam.add_user_to_group(new, name)
- for rm in remove_groups:
- iam.remove_user_from_group(rm, name)
- else:
- for group in groups:
- try:
- iam.add_user_to_group(group, name)
- except boto.exception.BotoServerError as err:
- error_msg = boto_exception(err)
- if ('The group with name %s cannot be found.' % group) in error_msg:
- module.fail_json(changed=False, msg="Group %s doesn't exist" % group)
-
- if len(remove_groups) > 0 or len(new_groups) > 0:
- changed = True
-
- return (groups, changed)
-
-
-def create_group(module=None, iam=None, name=None, path=None):
- changed = False
- try:
- iam.create_group(
- name, path).create_group_response.create_group_result.group
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=changed, msg=str(err))
- else:
- changed = True
- return name, changed
-
-
-def delete_group(module=None, iam=None, name=None):
- changed = False
- try:
- iam.delete_group(name)
- except boto.exception.BotoServerError as err:
- error_msg = boto_exception(err)
- if ('must delete policies first') in error_msg:
- for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names:
- iam.delete_group_policy(name, policy)
- try:
- iam.delete_group(name)
- except boto.exception.BotoServerError as err:
- error_msg = boto_exception(err)
- if ('must delete policies first') in error_msg:
- module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears"
- "that %s has Managed Polices. This is not "
- "currently supported by boto. Please detach the policies "
- "through the console and try again." % name)
- else:
- module.fail_json(changed=changed, msg=str(error_msg))
- else:
- changed = True
- else:
- module.fail_json(changed=changed, msg=str(error_msg))
- else:
- changed = True
- return changed, name
-
-
-def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
- changed = False
- try:
- current_group_path = iam.get_group(
- name).get_group_response.get_group_result.group['path']
- if new_path:
- if current_group_path != new_path:
- iam.update_group(name, new_path=new_path)
- changed = True
- if new_name:
- if name != new_name:
- iam.update_group(name, new_group_name=new_name, new_path=new_path)
- changed = True
- name = new_name
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=changed, msg=str(err))
-
- return changed, name, new_path, current_group_path
-
-
-def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc):
- changed = False
- iam_role_result = None
- instance_profile_result = None
- try:
- if name not in role_list:
- changed = True
- iam_role_result = iam.create_role(name,
- assume_role_policy_document=trust_policy_doc,
- path=path).create_role_response.create_role_result.role
-
- if name not in prof_list:
- instance_profile_result = iam.create_instance_profile(name, path=path) \
- .create_instance_profile_response.create_instance_profile_result.instance_profile
- iam.add_role_to_instance_profile(name, name)
- else:
- instance_profile_result = iam.get_instance_profile(name).get_instance_profile_response.get_instance_profile_result.instance_profile
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=changed, msg=str(err))
- else:
- updated_role_list = list_all_roles(iam)
- iam_role_result = iam.get_role(name).get_role_response.get_role_result.role
- return changed, updated_role_list, iam_role_result, instance_profile_result
-
-
-def delete_role(module, iam, name, role_list, prof_list):
- changed = False
- iam_role_result = None
- instance_profile_result = None
- try:
- if name in role_list:
- cur_ins_prof = [rp['instance_profile_name'] for rp in
- iam.list_instance_profiles_for_role(name).
- list_instance_profiles_for_role_result.
- instance_profiles]
- for profile in cur_ins_prof:
- iam.remove_role_from_instance_profile(profile, name)
- try:
- iam.delete_role(name)
- except boto.exception.BotoServerError as err:
- error_msg = boto_exception(err)
- if ('must detach all policies first') in error_msg:
- for policy in iam.list_role_policies(name).list_role_policies_result.policy_names:
- iam.delete_role_policy(name, policy)
- try:
- iam_role_result = iam.delete_role(name)
- except boto.exception.BotoServerError as err:
- error_msg = boto_exception(err)
- if ('must detach all policies first') in error_msg:
- module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears"
- "that %s has Managed Polices. This is not "
- "currently supported by boto. Please detach the policies "
- "through the console and try again." % name)
- else:
- module.fail_json(changed=changed, msg=str(err))
- else:
- changed = True
-
- else:
- changed = True
-
- for prof in prof_list:
- if name == prof:
- instance_profile_result = iam.delete_instance_profile(name)
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=changed, msg=str(err))
- else:
- updated_role_list = list_all_roles(iam)
- return changed, updated_role_list, iam_role_result, instance_profile_result
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- iam_type=dict(required=True, choices=['user', 'group', 'role']),
- groups=dict(type='list', default=None, required=False),
- state=dict(required=True, choices=['present', 'absent', 'update']),
- password=dict(default=None, required=False, no_log=True),
- update_password=dict(default='always', required=False, choices=['always', 'on_create']),
- access_key_state=dict(default=None, required=False, choices=[
- 'active', 'inactive', 'create', 'remove',
- 'Active', 'Inactive', 'Create', 'Remove']),
- access_key_ids=dict(type='list', default=None, required=False),
- key_count=dict(type='int', default=1, required=False),
- name=dict(required=True),
- trust_policy_filepath=dict(default=None, required=False),
- trust_policy=dict(type='dict', default=None, required=False),
- new_name=dict(default=None, required=False),
- path=dict(default='/', required=False),
- new_path=dict(default=None, required=False)
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- mutually_exclusive=[['trust_policy', 'trust_policy_filepath']],
- )
-
- if not HAS_BOTO:
- module.fail_json(msg='This module requires boto, please install it')
-
- state = module.params.get('state').lower()
- iam_type = module.params.get('iam_type').lower()
- groups = module.params.get('groups')
- name = module.params.get('name')
- new_name = module.params.get('new_name')
- password = module.params.get('password')
- update_pw = module.params.get('update_password')
- path = module.params.get('path')
- new_path = module.params.get('new_path')
- key_count = module.params.get('key_count')
- key_state = module.params.get('access_key_state')
- trust_policy = module.params.get('trust_policy')
- trust_policy_filepath = module.params.get('trust_policy_filepath')
- key_ids = module.params.get('access_key_ids')
-
- if key_state:
- key_state = key_state.lower()
- if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
- module.fail_json(changed=False, msg="At least one access key has to be defined in order"
- " to use 'active' or 'inactive'")
-
- if iam_type == 'user' and module.params.get('password') is not None:
- pwd = module.params.get('password')
- elif iam_type != 'user' and module.params.get('password') is not None:
- module.fail_json(msg="a password is being specified when the iam_type "
- "is not user. Check parameters")
- else:
- pwd = None
-
- if iam_type != 'user' and (module.params.get('access_key_state') is not None or
- module.params.get('access_key_ids') is not None):
- module.fail_json(msg="the IAM type must be user, when IAM access keys "
- "are being modified. Check parameters")
-
- if iam_type == 'role' and state == 'update':
- module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
- "please specify present or absent")
-
- # check if trust_policy is present -- it can be inline JSON or a file path to a JSON file
- if trust_policy_filepath:
- try:
- with open(trust_policy_filepath, 'r') as json_data:
- trust_policy_doc = json.dumps(json.load(json_data))
- except Exception as e:
- module.fail_json(msg=str(e) + ': ' + trust_policy_filepath)
- elif trust_policy:
- try:
- trust_policy_doc = json.dumps(trust_policy)
- except Exception as e:
- module.fail_json(msg=str(e) + ': ' + str(trust_policy))
- else:
- trust_policy_doc = None
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
-
- try:
- if region:
- iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
- else:
- iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
- except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg=str(e))
-
- result = {}
- changed = False
-
- try:
- orig_group_list = list_all_groups(iam)
-
- orig_user_list = list_all_users(iam)
-
- orig_role_list = list_all_roles(iam)
-
- orig_prof_list = list_all_instance_profiles(iam)
- except boto.exception.BotoServerError as err:
- module.fail_json(msg=err.message)
-
- if iam_type == 'user':
- been_updated = False
- user_groups = None
- user_exists = any([n in [name, new_name] for n in orig_user_list])
- if user_exists:
- current_path = iam.get_user(name).get_user_result.user['path']
- if not new_path and current_path != path:
- new_path = path
- path = current_path
-
- if state == 'present' and not user_exists and not new_name:
- (meta, changed) = create_user(
- module, iam, name, password, path, key_state, key_count)
- keys = iam.get_all_access_keys(name).list_access_keys_result.\
- access_key_metadata
- if groups:
- (user_groups, changed) = set_users_groups(
- module, iam, name, groups, been_updated, new_name)
- module.exit_json(
- user_meta=meta, groups=user_groups, keys=keys, changed=changed)
-
- elif state in ['present', 'update'] and user_exists:
- if update_pw == 'on_create':
- password = None
- if name not in orig_user_list and new_name in orig_user_list:
- been_updated = True
- name_change, key_list, user_changed, new_key = update_user(
- module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated)
- if new_key:
- user_meta = {'access_keys': list(new_key)}
- user_meta['access_keys'].extend(
- [{'access_key_id': key, 'status': value} for key, value in key_list.items() if
- key not in [it['access_key_id'] for it in new_key]])
- else:
- user_meta = {
- 'access_keys': [{'access_key_id': key, 'status': value} for key, value in key_list.items()]}
-
- if name_change and new_name:
- orig_name = name
- name = new_name
- if isinstance(groups, list):
- user_groups, groups_changed = set_users_groups(
- module, iam, name, groups, been_updated, new_name)
- if groups_changed == user_changed:
- changed = groups_changed
- else:
- changed = True
- else:
- changed = user_changed
- if new_name and new_path:
- module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name,
- new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list,
- created_keys=new_key, user_meta=user_meta)
- elif new_name and not new_path and not been_updated:
- module.exit_json(
- changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list,
- created_keys=new_key, user_meta=user_meta)
- elif new_name and not new_path and been_updated:
- module.exit_json(
- changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state,
- created_keys=new_key, user_meta=user_meta)
- elif not new_name and new_path:
- module.exit_json(
- changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path,
- keys=key_list, created_keys=new_key, user_meta=user_meta)
- else:
- module.exit_json(
- changed=changed, groups=user_groups, user_name=name, keys=key_list, created_keys=new_key,
- user_meta=user_meta)
-
- elif state == 'update' and not user_exists:
- module.fail_json(
- msg="The user %s does not exist. No update made." % name)
-
- elif state == 'absent':
- if user_exists:
- try:
- set_users_groups(module, iam, name, '')
- name, changed = delete_user(module, iam, name)
- module.exit_json(deleted_user=name, changed=changed)
-
- except Exception as ex:
- module.fail_json(changed=changed, msg=str(ex))
- else:
- module.exit_json(
- changed=False, msg="User %s is already absent from your AWS IAM users" % name)
-
- elif iam_type == 'group':
- group_exists = name in orig_group_list
-
- if state == 'present' and not group_exists:
- new_group, changed = create_group(module=module, iam=iam, name=name, path=path)
- module.exit_json(changed=changed, group_name=new_group)
- elif state in ['present', 'update'] and group_exists:
- changed, updated_name, updated_path, cur_path = update_group(
- module=module, iam=iam, name=name, new_name=new_name,
- new_path=new_path)
-
- if new_path and new_name:
- module.exit_json(changed=changed, old_group_name=name,
- new_group_name=updated_name, old_path=cur_path,
- new_group_path=updated_path)
-
- if new_path and not new_name:
- module.exit_json(changed=changed, group_name=name,
- old_path=cur_path,
- new_group_path=updated_path)
-
- if not new_path and new_name:
- module.exit_json(changed=changed, old_group_name=name,
- new_group_name=updated_name, group_path=cur_path)
-
- if not new_path and not new_name:
- module.exit_json(
- changed=changed, group_name=name, group_path=cur_path)
-
- elif state == 'update' and not group_exists:
- module.fail_json(
- changed=changed, msg="Update Failed. Group %s doesn't seem to exist!" % name)
-
- elif state == 'absent':
- if name in orig_group_list:
- removed_group, changed = delete_group(module=module, iam=iam, name=name)
- module.exit_json(changed=changed, delete_group=removed_group)
- else:
- module.exit_json(changed=changed, msg="Group already absent")
-
- elif iam_type == 'role':
- role_list = []
- if state == 'present':
- changed, role_list, role_result, instance_profile_result = create_role(
- module, iam, name, path, orig_role_list, orig_prof_list, trust_policy_doc)
- elif state == 'absent':
- changed, role_list, role_result, instance_profile_result = delete_role(
- module, iam, name, orig_role_list, orig_prof_list)
- elif state == 'update':
- module.fail_json(
- changed=False, msg='Role update not currently supported by boto.')
- module.exit_json(changed=changed, roles=role_list, role_result=role_result,
- instance_profile_result=instance_profile_result)
-
-
-if __name__ == '__main__':
- main()
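One detail worth pulling out of main() above: trust_policy and trust_policy_filepath are both normalized to a JSON document string before create_role is called. A standalone sketch of that normalization, using only the standard library (the helper name is an invention for illustration):

    # Sketch of the trust policy normalization in main(): inline dicts and
    # JSON files both end up as a JSON string for assume_role_policy_document.
    import json

    def build_trust_policy_doc(trust_policy=None, trust_policy_filepath=None):
        if trust_policy_filepath:
            with open(trust_policy_filepath, 'r') as fh:
                return json.dumps(json.load(fh))
        if trust_policy:
            return json.dumps(trust_policy)
        return None

    doc = build_trust_policy_doc(trust_policy={
        'Version': '2012-10-17',
        'Statement': [{
            'Action': 'sts:AssumeRole',
            'Effect': 'Allow',
            'Principal': {'Service': 'lambda.amazonaws.com'},
        }],
    })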
diff --git a/lib/ansible/modules/cloud/amazon/iam_cert.py b/lib/ansible/modules/cloud/amazon/iam_cert.py
deleted file mode 100644
index ecdab0ae3e..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_cert.py
+++ /dev/null
@@ -1,315 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: iam_cert
-short_description: Manage server certificates for use on ELBs and CloudFront
-description:
- - Allows for the management of server certificates.
-version_added: "2.0"
-options:
- name:
- description:
- - Name of certificate to add, update or remove.
- required: true
- type: str
- new_name:
- description:
- - When state is present, this will update the name of the cert.
- - The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined.
- type: str
- new_path:
- description:
- - When state is present, this will update the path of the cert.
- - The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined.
- type: str
- state:
- description:
- - Whether to create (or update) or delete the certificate.
- - If I(new_path) or I(new_name) is defined, specifying I(state=present) will attempt to update these.
- required: true
- choices: [ "present", "absent" ]
- type: str
- path:
- description:
- - When creating or updating, specify the desired path of the certificate.
- default: "/"
- type: str
- cert_chain:
- description:
- - The path to, or content of, the CA certificate chain in PEM encoded format.
- As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
- type: str
- cert:
- description:
- - The path to, or content of the certificate body in PEM encoded format.
- As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
- type: str
- key:
- description:
- - The path to, or content of the private key in PEM encoded format.
- As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
- type: str
- dup_ok:
- description:
- - By default the module will not upload a certificate that is already uploaded into AWS.
- - If I(dup_ok=True), it will upload the certificate as long as the name is unique.
- default: False
- type: bool
-
-requirements: [ "boto" ]
-author: Jonathan I. Davila (@defionscode)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Basic server certificate upload from local file
-- iam_cert:
- name: very_ssl
- state: present
- cert: "{{ lookup('file', 'path/to/cert') }}"
- key: "{{ lookup('file', 'path/to/key') }}"
- cert_chain: "{{ lookup('file', 'path/to/certchain') }}"
-
-# Basic server certificate upload
-- iam_cert:
- name: very_ssl
- state: present
- cert: path/to/cert
- key: path/to/key
- cert_chain: path/to/certchain
-
-# Server certificate upload using key string
-- iam_cert:
- name: very_ssl
- state: present
- path: "/a/cert/path/"
- cert: body_of_somecert
- key: vault_body_of_privcertkey
- cert_chain: body_of_myverytrustedchain
-
-# Basic rename of existing certificate
-- iam_cert:
- name: very_ssl
- new_name: new_very_ssl
- state: present
-
-'''
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, connect_to_aws
-import os
-
-try:
- import boto
- import boto.iam
- import boto.ec2
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-
-def cert_meta(iam, name):
- certificate = iam.get_server_certificate(name).get_server_certificate_result.server_certificate
- ocert = certificate.certificate_body
- opath = certificate.server_certificate_metadata.path
- ocert_id = certificate.server_certificate_metadata.server_certificate_id
- upload_date = certificate.server_certificate_metadata.upload_date
- exp = certificate.server_certificate_metadata.expiration
- arn = certificate.server_certificate_metadata.arn
- return opath, ocert, ocert_id, upload_date, exp, arn
-
-
-def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
- update = False
-
- # IAM cert names are case insensitive
- names_lower = [n.lower() for n in [name, new_name] if n is not None]
- orig_cert_names_lower = [ocn.lower() for ocn in orig_cert_names]
-
- if any(ct in orig_cert_names_lower for ct in names_lower):
- for i_name in names_lower:
- if cert is not None:
- try:
- c_index = orig_cert_names_lower.index(i_name)
- except ValueError:
- continue
- else:
- # NOTE: remove the carriage return to strictly compare the cert bodies.
- slug_cert = cert.replace('\r', '')
- slug_orig_cert_bodies = orig_cert_bodies[c_index].replace('\r', '')
- if slug_orig_cert_bodies == slug_cert:
- update = True
- break
- elif slug_cert.startswith(slug_orig_cert_bodies):
- update = True
- break
- else:
- module.fail_json(changed=False, msg='A cert with the name %s already exists and'
- ' has a different certificate body associated'
- ' with it. Certificates cannot have the same name' % orig_cert_names[c_index])
- else:
- update = True
- break
- elif cert in orig_cert_bodies and not dup_ok:
- for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
- if crt_body == cert:
- module.fail_json(changed=False, msg='This certificate already'
- ' exists under the name %s' % crt_name)
-
- return update
-
-
-def cert_action(module, iam, name, cpath, new_name, new_path, state,
- cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok):
- if state == 'present':
- update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
- orig_cert_bodies, dup_ok)
- if update:
- opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
- changed = True
- if new_name and new_path:
- iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
- module.exit_json(changed=changed, original_name=name, new_name=new_name,
- original_path=opath, new_path=new_path, cert_body=ocert,
- upload_date=upload_date, expiration_date=exp, arn=arn)
- elif new_name and not new_path:
- iam.update_server_cert(name, new_cert_name=new_name)
- module.exit_json(changed=changed, original_name=name, new_name=new_name,
- cert_path=opath, cert_body=ocert,
- upload_date=upload_date, expiration_date=exp, arn=arn)
- elif not new_name and new_path:
- iam.update_server_cert(name, new_path=new_path)
- module.exit_json(changed=changed, name=name,
- original_path=opath, new_path=new_path, cert_body=ocert,
- upload_date=upload_date, expiration_date=exp, arn=arn)
- else:
- changed = False
- module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
- upload_date=upload_date, expiration_date=exp, arn=arn,
- msg='No new path or name specified. No changes made')
- else:
- changed = True
- iam.upload_server_cert(name, cert, key, cert_chain=cert_chain, path=cpath)
- opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
- module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
- upload_date=upload_date, expiration_date=exp, arn=arn)
- elif state == 'absent':
- if name in orig_cert_names:
- changed = True
- iam.delete_server_cert(name)
- module.exit_json(changed=changed, deleted_cert=name)
- else:
- changed = False
- module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
-
-
-def load_data(cert, key, cert_chain):
- # if paths are provided rather than lookups read the files and return the contents
- if cert and os.path.isfile(cert):
- with open(cert, 'r') as cert_fh:
- cert = cert_fh.read().rstrip()
- if key and os.path.isfile(key):
- with open(key, 'r') as key_fh:
- key = key_fh.read().rstrip()
- if cert_chain and os.path.isfile(cert_chain):
- with open(cert_chain, 'r') as cert_chain_fh:
- cert_chain = cert_chain_fh.read()
- return cert, key, cert_chain
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(required=True, choices=['present', 'absent']),
- name=dict(required=True),
- cert=dict(),
- key=dict(no_log=True),
- cert_chain=dict(),
- new_name=dict(),
- path=dict(default='/'),
- new_path=dict(),
- dup_ok=dict(type='bool')
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- mutually_exclusive=[
- ['new_path', 'key'],
- ['new_path', 'cert'],
- ['new_path', 'cert_chain'],
- ['new_name', 'key'],
- ['new_name', 'cert'],
- ['new_name', 'cert_chain'],
- ],
- )
-
- if not HAS_BOTO:
- module.fail_json(msg="Boto is required for this module")
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
-
- try:
- if region:
- iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
- else:
- iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
- except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg=str(e))
-
- state = module.params.get('state')
- name = module.params.get('name')
- path = module.params.get('path')
- new_name = module.params.get('new_name')
- new_path = module.params.get('new_path')
- dup_ok = module.params.get('dup_ok')
- if state == 'present' and not new_name and not new_path:
- cert, key, cert_chain = load_data(cert=module.params.get('cert'),
- key=module.params.get('key'),
- cert_chain=module.params.get('cert_chain'))
- else:
- cert = key = cert_chain = None
-
- orig_cert_names = [ctb['server_certificate_name'] for ctb in
- iam.get_all_server_certs().list_server_certificates_result.server_certificate_metadata_list]
- orig_cert_bodies = [iam.get_server_certificate(thing).get_server_certificate_result.certificate_body
- for thing in orig_cert_names]
- if new_name == name:
- new_name = None
- if new_path == path:
- new_path = None
-
- changed = False
- try:
- cert_action(module, iam, name, path, new_name, new_path, state,
- cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok)
- except boto.exception.BotoServerError as err:
- module.fail_json(changed=changed, msg=str(err), debug=[cert, key])
-
-
-if __name__ == '__main__':
- main()
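The subtle point in dup_check above is that IAM treats certificate names as case-insensitive, and certificate bodies may differ only by carriage returns, so both are normalized before comparison. Distilled to its essence (the helper name is invented for illustration):

    # Sketch of the normalization dup_check applies before comparing certs.
    def same_cert(name_a, body_a, name_b, body_b):
        names_match = name_a.lower() == name_b.lower()
        bodies_match = body_a.replace('\r', '') == body_b.replace('\r', '')
        return names_match and bodies_match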
diff --git a/lib/ansible/modules/cloud/amazon/iam_group.py b/lib/ansible/modules/cloud/amazon/iam_group.py
deleted file mode 100644
index 68327e68fb..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_group.py
+++ /dev/null
@@ -1,439 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: iam_group
-short_description: Manage AWS IAM groups
-description:
- - Manage AWS IAM groups.
-version_added: "2.4"
-author:
-- Nick Aslanidis (@naslanidis)
-- Maksym Postument (@infectsoldier)
-options:
- name:
- description:
- - The name of the group to create.
- required: true
- type: str
- managed_policies:
- description:
- - A list of managed policy ARNs or friendly names to attach to the group.
- - To embed an inline policy, use M(iam_policy).
- required: false
- type: list
- elements: str
- aliases: ['managed_policy']
- users:
- description:
- - A list of existing users to add as members of the group.
- required: false
- type: list
- elements: str
- state:
- description:
- - Create or remove the IAM group.
- required: true
- choices: [ 'present', 'absent' ]
- type: str
- purge_policies:
- description:
- - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
- required: false
- default: false
- type: bool
- aliases: ['purge_policy', 'purge_managed_policies']
- purge_users:
- description:
- - When I(purge_users=true), users not included in I(users) will be removed from the group.
- required: false
- default: false
- type: bool
-requirements: [ botocore, boto3 ]
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create a group
-- iam_group:
- name: testgroup1
- state: present
-
-# Create a group and attach a managed policy using its ARN
-- iam_group:
- name: testgroup1
- managed_policies:
- - arn:aws:iam::aws:policy/AmazonSNSFullAccess
- state: present
-
-# Create a group with users as members and attach a managed policy using its ARN
-- iam_group:
- name: testgroup1
- managed_policies:
- - arn:aws:iam::aws:policy/AmazonSNSFullAccess
- users:
- - test_user1
- - test_user2
- state: present
-
-# Remove all managed policies from an existing group with an empty list
-- iam_group:
- name: testgroup1
- state: present
- purge_policies: true
-
-# Remove all group members from an existing group
-- iam_group:
- name: testgroup1
- managed_policies:
- - arn:aws:iam::aws:policy/AmazonSNSFullAccess
- purge_users: true
- state: present
-
-
-# Delete the group
-- iam_group:
- name: testgroup1
- state: absent
-
-'''
-RETURN = '''
-iam_group:
- description: dictionary containing all the group information including group membership
- returned: success
- type: complex
- contains:
- group:
- description: dictionary containing all the group information
- returned: success
- type: complex
- contains:
- arn:
- description: the Amazon Resource Name (ARN) specifying the group
- type: str
- sample: "arn:aws:iam::1234567890:group/testgroup1"
- create_date:
- description: the date and time, in ISO 8601 date-time format, when the group was created
- type: str
- sample: "2017-02-08T04:36:28+00:00"
- group_id:
- description: the stable and unique string identifying the group
- type: str
- sample: AGPAIDBWE12NSFINE55TM
- group_name:
- description: the friendly name that identifies the group
- type: str
- sample: testgroup1
- path:
- description: the path to the group
- type: str
- sample: /
- users:
- description: list containing all the group members
- returned: success
- type: complex
- contains:
- arn:
- description: the Amazon Resource Name (ARN) specifying the user
- type: str
- sample: "arn:aws:iam::1234567890:user/test_user1"
- create_date:
- description: the date and time, in ISO 8601 date-time format, when the user was created
- type: str
- sample: "2017-02-08T04:36:28+00:00"
- user_id:
- description: the stable and unique string identifying the user
- type: str
- sample: AIDAIZTPY123YQRS22YU2
- user_name:
- description: the friendly name that identifies the user
- type: str
- sample: testgroup1
- path:
- description: the path to the user
- type: str
- sample: /
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible.module_utils.ec2 import AWSRetry
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def compare_attached_group_policies(current_attached_policies, new_attached_policies):
-
- # If new_attached_policies is None it means we want to remove all policies
- if len(current_attached_policies) > 0 and new_attached_policies is None:
- return False
-
- current_attached_policies_arn_list = []
- for policy in current_attached_policies:
- current_attached_policies_arn_list.append(policy['PolicyArn'])
-
- if set(current_attached_policies_arn_list) == set(new_attached_policies):
- return True
- else:
- return False
-
-
-def compare_group_members(current_group_members, new_group_members):
-
- # If new_group_members is None it means we want to remove all members
- if len(current_group_members) > 0 and new_group_members is None:
- return False
- if set(current_group_members) == set(new_group_members):
- return True
- else:
- return False
-
-
-def convert_friendly_names_to_arns(connection, module, policy_names):
-
- if all(policy.startswith('arn:') for policy in policy_names if policy is not None):
- return policy_names
- allpolicies = {}
- paginator = connection.get_paginator('list_policies')
- policies = paginator.paginate().build_full_result()['Policies']
-
- for policy in policies:
- allpolicies[policy['PolicyName']] = policy['Arn']
- allpolicies[policy['Arn']] = policy['Arn']
- try:
- return [allpolicies[policy] for policy in policy_names]
- except KeyError as e:
- module.fail_json(msg="Couldn't find policy: " + str(e))
-
-
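convert_friendly_names_to_arns works by building a lookup table that maps both friendly names and ARNs to ARNs, so callers may pass either form interchangeably. A toy illustration of that two-way mapping, with invented sample data:

    # Demonstration of the name-or-ARN lookup built above (made-up data).
    policies = [
        {'PolicyName': 'AmazonSNSFullAccess',
         'Arn': 'arn:aws:iam::aws:policy/AmazonSNSFullAccess'},
    ]
    lookup = {}
    for policy in policies:
        lookup[policy['PolicyName']] = policy['Arn']
        lookup[policy['Arn']] = policy['Arn']

    # Both spellings resolve to the same ARN:
    assert lookup['AmazonSNSFullAccess'] == lookup['arn:aws:iam::aws:policy/AmazonSNSFullAccess']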
-def create_or_update_group(connection, module):
-
- params = dict()
- params['GroupName'] = module.params.get('name')
- managed_policies = module.params.get('managed_policies')
- users = module.params.get('users')
- purge_users = module.params.get('purge_users')
- purge_policies = module.params.get('purge_policies')
- changed = False
- if managed_policies:
- managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
-
- # Get group
- try:
- group = get_group(connection, module, params['GroupName'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't get group")
-
- # If group is None, create it
- if group is None:
- # Check mode means we would create the group
- if module.check_mode:
- module.exit_json(changed=True)
-
- try:
- group = connection.create_group(**params)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't create group")
-
- # Manage managed policies
- current_attached_policies = get_attached_policy_list(connection, module, params['GroupName'])
- if not compare_attached_group_policies(current_attached_policies, managed_policies):
- current_attached_policies_arn_list = []
- for policy in current_attached_policies:
- current_attached_policies_arn_list.append(policy['PolicyArn'])
-
- # When purging, detach any attached policy that is not listed in managed_policies
- if purge_policies:
- # Detach policies not present
- for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
- changed = True
- if not module.check_mode:
- try:
- connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params['GroupName'])
- # If there are policies to adjust that aren't in the current list, then things have changed
- # Otherwise the only changes were in purging above
- if set(managed_policies) - set(current_attached_policies_arn_list):
- changed = True
- # If there are policies in managed_policies attach each policy
- if managed_policies != [None] and not module.check_mode:
- for policy_arn in managed_policies:
- try:
- connection.attach_group_policy(GroupName=params['GroupName'], PolicyArn=policy_arn)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params['GroupName'])
-
- # Manage group memberships
- try:
- current_group_members = get_group(connection, module, params['GroupName'])['Users']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
-
- current_group_members_list = []
- for member in current_group_members:
- current_group_members_list.append(member['UserName'])
-
- if not compare_group_members(current_group_members_list, users):
-
- if purge_users:
- for user in list(set(current_group_members_list) - set(users)):
- # Ensure we mark things have changed if any user gets purged
- changed = True
- # Skip actions for check mode
- if not module.check_mode:
- try:
- connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't remove user %s from group %s" % (user, params['GroupName']))
- # If there are users to adjust that aren't in the current list, then things have changed
- # Otherwise the only changes were in purging above
- if set(users) - set(current_group_members_list):
- changed = True
- # Skip actions for check mode
- if users != [None] and not module.check_mode:
- for user in users:
- try:
- connection.add_user_to_group(GroupName=params['GroupName'], UserName=user)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params['GroupName']))
- if module.check_mode:
- module.exit_json(changed=changed)
-
- # Get the group again
- try:
- group = get_group(connection, module, params['GroupName'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
-
- module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group))
-
-
-def destroy_group(connection, module):
-
- params = dict()
- params['GroupName'] = module.params.get('name')
-
- try:
- group = get_group(connection, module, params['GroupName'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
- if group:
- # Check mode means we would remove this group
- if module.check_mode:
- module.exit_json(changed=True)
-
- # Remove any attached policies otherwise deletion fails
- try:
- for policy in get_attached_policy_list(connection, module, params['GroupName']):
- connection.detach_group_policy(GroupName=params['GroupName'], PolicyArn=policy['PolicyArn'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params['GroupName'])
-
- # Remove any users in the group otherwise deletion fails
- current_group_members_list = []
- try:
- current_group_members = get_group(connection, module, params['GroupName'])['Users']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, "Couldn't get group %s" % params['GroupName'])
- for member in current_group_members:
- current_group_members_list.append(member['UserName'])
- for user in current_group_members_list:
- try:
- connection.remove_user_from_group(GroupName=params['GroupName'], UserName=user)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params['GroupName']))
-
- try:
- connection.delete_group(**params)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, "Couldn't delete group %s" % params['GroupName'])
-
- else:
- module.exit_json(changed=False)
-
- module.exit_json(changed=True)
-
-
-@AWSRetry.exponential_backoff()
-def get_group(connection, module, name):
- try:
- paginator = connection.get_paginator('get_group')
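-        # build_full_result() follows every page, so large groups return their complete member list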
- return paginator.paginate(GroupName=name).build_full_result()
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchEntity':
- return None
- else:
- raise
-
-
-@AWSRetry.exponential_backoff()
-def get_attached_policy_list(connection, module, name):
-
- try:
- paginator = connection.get_paginator('list_attached_group_policies')
- return paginator.paginate(GroupName=name).build_full_result()['AttachedPolicies']
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchEntity':
- return None
- else:
- raise
-
-
-def main():
-
- argument_spec = dict(
- name=dict(required=True),
- managed_policies=dict(default=[], type='list', aliases=['managed_policy']),
- users=dict(default=[], type='list'),
- state=dict(choices=['present', 'absent'], required=True),
- purge_users=dict(default=False, type='bool'),
- purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies'])
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- connection = module.client('iam')
-
- state = module.params.get("state")
-
- if state == 'present':
- create_or_update_group(connection, module)
- else:
- destroy_group(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_managed_policy.py b/lib/ansible/modules/cloud/amazon/iam_managed_policy.py
deleted file mode 100644
index e13c2bb6e1..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_managed_policy.py
+++ /dev/null
@@ -1,384 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: iam_managed_policy
-short_description: Manage User Managed IAM policies
-description:
- - Allows creating and removing managed IAM policies
-version_added: "2.4"
-options:
- policy_name:
- description:
- - The name of the managed policy.
- required: True
- type: str
- policy_description:
- description:
-            - A helpful description of this policy. This value is immutable and is only set when creating a new policy.
- default: ''
- type: str
- policy:
- description:
-            - A properly JSON-formatted policy.
- type: json
- make_default:
- description:
- - Make this revision the default revision.
- default: True
- type: bool
- only_version:
- description:
-            - Remove all other non-default revisions. If used together with C(make_default), all other versions of this policy will be deleted.
- type: bool
- default: false
- state:
- description:
- - Should this managed policy be present or absent. Set to absent to detach all entities from this policy and remove it if found.
- default: present
- choices: [ "present", "absent" ]
- type: str
- fail_on_delete:
- description:
- - The I(fail_on_delete) option does nothing and will be removed in Ansible 2.14.
- type: bool
-
-author: "Dan Kozlowski (@dkhenry)"
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - boto3
- - botocore
-'''
-
-EXAMPLES = '''
-# Create Policy ex nihilo
-- name: Create IAM Managed Policy
- iam_managed_policy:
- policy_name: "ManagedPolicy"
- policy_description: "A Helpful managed policy"
- policy: "{{ lookup('template', 'managed_policy.json.j2') }}"
- state: present
-
-# Update a policy with a new default version
-- name: Create IAM Managed Policy
- iam_managed_policy:
- policy_name: "ManagedPolicy"
- policy: "{{ lookup('file', 'managed_policy_update.json') }}"
- state: present
-
-# Update a policy with a new non default version
-- name: Create IAM Managed Policy
- iam_managed_policy:
- policy_name: "ManagedPolicy"
- policy: "{{ lookup('file', 'managed_policy_update.json') }}"
- make_default: false
- state: present
-
-# Update a policy and make it the only version and the default version
-- name: Create IAM Managed Policy
- iam_managed_policy:
- policy_name: "ManagedPolicy"
- policy: "{ 'Version': '2012-10-17', 'Statement':[{'Effect': 'Allow','Action': '*','Resource': '*'}]}"
- only_version: true
- state: present
-
-# Remove a policy
-- name: Create IAM Managed Policy
- iam_managed_policy:
- policy_name: "ManagedPolicy"
- state: absent
-'''
-
-RETURN = '''
-policy:
-    description: Returns the policy JSON structure. When I(state=absent), this returns the value of the removed policy.
- returned: success
- type: str
- sample: '{
- "arn": "arn:aws:iam::aws:policy/AdministratorAccess "
- "attachment_count": 0,
- "create_date": "2017-03-01T15:42:55.981000+00:00",
- "default_version_id": "v1",
- "is_attachable": true,
- "path": "/",
- "policy_id": "ANPALM4KLDMTFXGOOJIHL",
- "policy_name": "AdministratorAccess",
- "update_date": "2017-03-01T15:42:55.981000+00:00"
- }'
-'''
-
-import json
-import traceback
-
-try:
- import botocore
-except ImportError:
- pass # caught by imported HAS_BOTO3
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (boto3_conn, get_aws_connection_info, ec2_argument_spec, AWSRetry,
- camel_dict_to_snake_dict, HAS_BOTO3, compare_policies)
-from ansible.module_utils._text import to_native
-
-
-@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
-def list_policies_with_backoff(iam):
- paginator = iam.get_paginator('list_policies')
- return paginator.paginate(Scope='Local').build_full_result()
-
-
-def get_policy_by_name(module, iam, name):
- try:
- response = list_policies_with_backoff(iam)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't list policies: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- for policy in response['Policies']:
- if policy['PolicyName'] == name:
- return policy
- return None
-
-
-def delete_oldest_non_default_version(module, iam, policy):
- try:
- versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
- if not v['IsDefaultVersion']]
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- versions.sort(key=lambda v: v['CreateDate'], reverse=True)
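-    # Sorted newest-first, so the trailing slice element is the oldest non-default version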
- for v in versions[-1:]:
- try:
- iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't delete policy version: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
-
-# Returns a (policy_version, changed) tuple.
-def get_or_create_policy_version(module, iam, policy, policy_document):
- try:
- versions = iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- for v in versions:
- try:
- document = iam.get_policy_version(PolicyArn=policy['Arn'],
- VersionId=v['VersionId'])['PolicyVersion']['Document']
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't get policy version %s: %s" % (v['VersionId'], str(e)),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-        # If the requested document matches this existing version, reuse it unchanged
- if not compare_policies(document, json.loads(to_native(policy_document))):
- return v, False
-
- # No existing version so create one
- # There is a service limit (typically 5) of policy versions.
- #
- # Rather than assume that it is 5, we'll try to create the policy
- # and if that doesn't work, delete the oldest non default policy version
- # and try again.
- try:
- version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
- return version, True
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'LimitExceeded':
- delete_oldest_non_default_version(module, iam, policy)
- try:
- version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
- return version, True
- except botocore.exceptions.ClientError as second_e:
- e = second_e
- # Handle both when the exception isn't LimitExceeded or
- # the second attempt still failed
- module.fail_json(msg="Couldn't create policy version: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
-
-def set_if_default(module, iam, policy, policy_version, is_default):
- if is_default and not policy_version['IsDefaultVersion']:
- try:
- iam.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId'])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't set default policy version: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- return True
- return False
-
-
-def set_if_only(module, iam, policy, policy_version, is_only):
- if is_only:
- try:
- versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])[
- 'Versions'] if not v['IsDefaultVersion']]
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- for v in versions:
- try:
- iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't delete policy version: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- return len(versions) > 0
- return False
-
-
-def detach_all_entities(module, iam, policy, **kwargs):
- try:
- entities = iam.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't detach list entities for policy %s: %s" % (policy['PolicyName'], str(e)),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- for g in entities['PolicyGroups']:
- try:
- iam.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName'])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't detach group policy %s: %s" % (g['GroupName'], str(e)),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- for u in entities['PolicyUsers']:
- try:
- iam.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName'])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't detach user policy %s: %s" % (u['UserName'], str(e)),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- for r in entities['PolicyRoles']:
- try:
- iam.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName'])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't detach role policy %s: %s" % (r['RoleName'], str(e)),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- if entities['IsTruncated']:
- detach_all_entities(module, iam, policy, marker=entities['Marker'])
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- policy_name=dict(required=True),
- policy_description=dict(default=''),
- policy=dict(type='json'),
- make_default=dict(type='bool', default=True),
- only_version=dict(type='bool', default=False),
- fail_on_delete=dict(type='bool', removed_in_version='2.14'),
- state=dict(default='present', choices=['present', 'absent']),
- ))
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_if=[['state', 'present', ['policy']]]
- )
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required for this module')
-
- name = module.params.get('policy_name')
- description = module.params.get('policy_description')
- state = module.params.get('state')
- default = module.params.get('make_default')
- only = module.params.get('only_version')
-
- policy = None
-
- if module.params.get('policy') is not None:
- policy = json.dumps(json.loads(module.params.get('policy')))
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- iam = boto3_conn(module, conn_type='client', resource='iam',
- region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
- module.fail_json(msg="Can't authorize connection. Check your credentials and profile.",
- exceptions=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
-
- p = get_policy_by_name(module, iam, name)
- if state == 'present':
- if p is None:
- # No Policy so just create one
- try:
- rvalue = iam.create_policy(PolicyName=name, Path='/',
- PolicyDocument=policy, Description=description)
- except Exception as e:
- module.fail_json(msg="Couldn't create policy %s: %s" % (name, to_native(e)),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy']))
- else:
- policy_version, changed = get_or_create_policy_version(module, iam, p, policy)
- changed = set_if_default(module, iam, p, policy_version, default) or changed
- changed = set_if_only(module, iam, p, policy_version, only) or changed
-            # If anything has changed we need to refresh the policy
- if changed:
- try:
- p = iam.get_policy(PolicyArn=p['Arn'])['Policy']
- except Exception as e:
- module.fail_json(msg="Couldn't get policy: %s" % to_native(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
- module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(p))
- else:
- # Check for existing policy
- if p:
- # Detach policy
- detach_all_entities(module, iam, p)
- # Delete Versions
- try:
- versions = iam.list_policy_versions(PolicyArn=p['Arn'])['Versions']
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't list policy versions: %s" % to_native(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- for v in versions:
- if not v['IsDefaultVersion']:
- try:
- iam.delete_policy_version(PolicyArn=p['Arn'], VersionId=v['VersionId'])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't delete policy version %s: %s" %
- (v['VersionId'], to_native(e)),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- # Delete policy
- try:
- iam.delete_policy(PolicyArn=p['Arn'])
- except Exception as e:
- module.fail_json(msg="Couldn't delete policy %s: %s" % (p['PolicyName'], to_native(e)),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- # This is the one case where we will return the old policy
- module.exit_json(changed=True, policy=camel_dict_to_snake_dict(p))
- else:
- module.exit_json(changed=False, policy=None)
-# end main
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_mfa_device_info.py b/lib/ansible/modules/cloud/amazon/iam_mfa_device_info.py
deleted file mode 100644
index b09da4da5e..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_mfa_device_info.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: iam_mfa_device_info
-short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
-description:
-    - List the MFA (Multi-Factor Authentication) devices registered for a user.
- - This module was called C(iam_mfa_device_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.2"
-author: Victor Costan (@pwnall)
-options:
- user_name:
- description:
- - The name of the user whose MFA devices will be listed
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - boto3
- - botocore
-'''
-
-RETURN = """
-mfa_devices:
- description: The MFA devices registered for the given user
- returned: always
- type: list
- sample:
- - enable_date: "2016-03-11T23:25:36+00:00"
- serial_number: arn:aws:iam::085120003701:mfa/pwnall
- user_name: pwnall
- - enable_date: "2016-03-11T23:25:37+00:00"
- serial_number: arn:aws:iam::085120003702:mfa/pwnall
- user_name: pwnall
-"""
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# List MFA devices (more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html)
-- iam_mfa_device_info:
- register: mfa_devices
-
-# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
-- sts_assume_role:
- mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
- role_arn: "arn:aws:iam::123456789012:role/someRole"
- role_session_name: "someRoleSession"
- register: assumed_role
-'''
-
-try:
-    from botocore.exceptions import ClientError
-except ImportError:
-    pass  # HAS_BOTO3, imported from module_utils.ec2 below, reports the missing dependency
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
- get_aws_connection_info)
-
-
-def list_mfa_devices(connection, module):
- user_name = module.params.get('user_name')
- changed = False
-
- args = {}
- if user_name is not None:
- args['UserName'] = user_name
- try:
- response = connection.list_mfa_devices(**args)
- except ClientError as e:
-        module.fail_json(msg=str(e), **camel_dict_to_snake_dict(e.response))
-
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- user_name=dict(required=False, default=None)
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec)
- if module._name == 'iam_mfa_device_facts':
- module.deprecate("The 'iam_mfa_device_facts' module has been renamed to 'iam_mfa_device_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- if region:
- connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- else:
- module.fail_json(msg="region must be specified")
-
- list_mfa_devices(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_password_policy.py b/lib/ansible/modules/cloud/amazon/iam_password_policy.py
deleted file mode 100644
index 08334992fc..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_password_policy.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: iam_password_policy
-short_description: Update an IAM Password Policy
-description:
-    - Updates an IAM Password Policy on a given AWS account.
-version_added: "2.8"
-requirements: [ 'botocore', 'boto3' ]
-author:
- - "Aaron Smith (@slapula)"
-options:
- state:
- description:
- - Specifies the overall state of the password policy.
- required: true
- choices: ['present', 'absent']
- type: str
- min_pw_length:
- description:
- - Minimum password length.
- default: 6
- aliases: [minimum_password_length]
- type: int
- require_symbols:
- description:
- - Require symbols in password.
- default: false
- type: bool
- require_numbers:
- description:
- - Require numbers in password.
- default: false
- type: bool
- require_uppercase:
- description:
- - Require uppercase letters in password.
- default: false
- type: bool
- require_lowercase:
- description:
- - Require lowercase letters in password.
- default: false
- type: bool
- allow_pw_change:
- description:
- - Allow users to change their password.
- default: false
- type: bool
- aliases: [allow_password_change]
- pw_max_age:
- description:
-            - Maximum age for a password in days. When this option is 0, passwords
-              do not expire automatically.
- default: 0
- aliases: [password_max_age]
- type: int
- pw_reuse_prevent:
- description:
-            - The number of previous passwords users are prevented from reusing. When 0, reuse is not restricted.
- default: 0
- aliases: [password_reuse_prevent, prevent_reuse]
- type: int
- pw_expire:
- description:
-            - Prevents users from changing an expired password.
- default: false
- type: bool
- aliases: [password_expire, expire]
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Password policy for AWS account
- iam_password_policy:
- state: present
- min_pw_length: 8
- require_symbols: false
- require_numbers: true
- require_uppercase: true
- require_lowercase: true
- allow_pw_change: true
- pw_max_age: 60
- pw_reuse_prevent: 5
- pw_expire: false
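-
-# A minimal illustrative sketch using the documented state option:
-- name: Remove the password policy for the AWS account
-  iam_password_policy:
-    state: absent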
-'''
-
-RETURN = ''' # '''
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-
-class IAMConnection(object):
- def __init__(self, module):
- try:
- self.connection = module.resource('iam')
- self.module = module
- except Exception as e:
- module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
-
- def policy_to_dict(self, policy):
- policy_attributes = [
- 'allow_users_to_change_password', 'expire_passwords', 'hard_expiry',
- 'max_password_age', 'minimum_password_length', 'password_reuse_prevention',
- 'require_lowercase_characters', 'require_numbers', 'require_symbols', 'require_uppercase_characters'
- ]
- ret = {}
- for attr in policy_attributes:
- ret[attr] = getattr(policy, attr)
- return ret
-
- def update_password_policy(self, module, policy):
- min_pw_length = module.params.get('min_pw_length')
- require_symbols = module.params.get('require_symbols')
- require_numbers = module.params.get('require_numbers')
- require_uppercase = module.params.get('require_uppercase')
- require_lowercase = module.params.get('require_lowercase')
- allow_pw_change = module.params.get('allow_pw_change')
- pw_max_age = module.params.get('pw_max_age')
- pw_reuse_prevent = module.params.get('pw_reuse_prevent')
- pw_expire = module.params.get('pw_expire')
-
- update_parameters = dict(
- MinimumPasswordLength=min_pw_length,
- RequireSymbols=require_symbols,
- RequireNumbers=require_numbers,
- RequireUppercaseCharacters=require_uppercase,
- RequireLowercaseCharacters=require_lowercase,
- AllowUsersToChangePassword=allow_pw_change,
- HardExpiry=pw_expire
- )
- if pw_reuse_prevent:
- update_parameters.update(PasswordReusePrevention=pw_reuse_prevent)
- if pw_max_age:
- update_parameters.update(MaxPasswordAge=pw_max_age)
-
- try:
- original_policy = self.policy_to_dict(policy)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- original_policy = {}
-
- try:
- results = policy.update(**update_parameters)
- policy.reload()
- updated_policy = self.policy_to_dict(policy)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy")
-
- changed = (original_policy != updated_policy)
- return (changed, updated_policy, camel_dict_to_snake_dict(results))
-
- def delete_password_policy(self, policy):
- try:
- results = policy.delete()
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- if e.response['Error']['Code'] == 'NoSuchEntity':
- self.module.exit_json(changed=False, task_status={'IAM': "Couldn't find IAM Password Policy"})
- else:
- self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy")
- return camel_dict_to_snake_dict(results)
-
-
-def main():
- module = AnsibleAWSModule(
- argument_spec={
- 'state': dict(choices=['present', 'absent'], required=True),
- 'min_pw_length': dict(type='int', aliases=['minimum_password_length'], default=6),
- 'require_symbols': dict(type='bool', default=False),
- 'require_numbers': dict(type='bool', default=False),
- 'require_uppercase': dict(type='bool', default=False),
- 'require_lowercase': dict(type='bool', default=False),
- 'allow_pw_change': dict(type='bool', aliases=['allow_password_change'], default=False),
- 'pw_max_age': dict(type='int', aliases=['password_max_age'], default=0),
- 'pw_reuse_prevent': dict(type='int', aliases=['password_reuse_prevent', 'prevent_reuse'], default=0),
- 'pw_expire': dict(type='bool', aliases=['password_expire', 'expire'], default=False),
- },
- supports_check_mode=True,
- )
-
- resource = IAMConnection(module)
- policy = resource.connection.AccountPasswordPolicy()
-
- state = module.params.get('state')
-
- if state == 'present':
- (changed, new_policy, update_result) = resource.update_password_policy(module, policy)
- module.exit_json(changed=changed, task_status={'IAM': update_result}, policy=new_policy)
-
- if state == 'absent':
- delete_result = resource.delete_password_policy(policy)
- module.exit_json(changed=True, task_status={'IAM': delete_result})
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_policy.py b/lib/ansible/modules/cloud/amazon/iam_policy.py
deleted file mode 100644
index 8db58e083a..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_policy.py
+++ /dev/null
@@ -1,346 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: iam_policy
-short_description: Manage inline IAM policies for users, groups, and roles
-description:
- - Allows uploading or removing inline IAM policies for IAM users, groups or roles.
- - To administer managed policies please see M(iam_user), M(iam_role),
- M(iam_group) and M(iam_managed_policy)
-version_added: "2.0"
-options:
- iam_type:
- description:
- - Type of IAM resource.
- required: true
- choices: [ "user", "group", "role"]
- type: str
- iam_name:
- description:
- - Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
- required: true
- type: str
- policy_name:
- description:
- - The name label for the policy to create or remove.
- required: true
- type: str
- policy_document:
- description:
-            - The path to a properly JSON-formatted policy file.
- - Mutually exclusive with I(policy_json).
- - This option has been deprecated and will be removed in 2.14. The existing behavior can be
- reproduced by using the I(policy_json) option and reading the file using the lookup plugin.
- type: str
- policy_json:
- description:
-            - A properly JSON-formatted policy as a string.
- - Mutually exclusive with I(policy_document).
- - See U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813) on how to use it properly.
- type: json
- state:
- description:
- - Whether to create or delete the IAM policy.
- choices: [ "present", "absent"]
- default: present
- type: str
- skip_duplicates:
- description:
- - When I(skip_duplicates=true) the module looks for any policies that match the document you pass in. If there is a match it will not make
- a new policy object with the same rules.
- - The current default is C(true). However, this behavior can be confusing and as such the default will change to C(false) in 2.14. To maintain
- the existing behavior explicitly set I(skip_duplicates=true).
- type: bool
-
-author:
- - "Jonathan I. Davila (@defionscode)"
- - "Dennis Podkovyrin (@sbj-ss)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Create a policy with the name of 'Admin' to the group 'administrators'
-- name: Assign a policy called Admin to the administrators group
- iam_policy:
- iam_type: group
- iam_name: administrators
- policy_name: Admin
- state: present
- policy_document: admin_policy.json
-
-# Advanced example, create two new groups and add a READ-ONLY policy to both
-# groups.
-- name: Create Two Groups, Mario and Luigi
- iam:
- iam_type: group
- name: "{{ item }}"
- state: present
- loop:
- - Mario
- - Luigi
- register: new_groups
-
-- name: Apply READ-ONLY policy to new groups that have been recently created
- iam_policy:
- iam_type: group
- iam_name: "{{ item.created_group.group_name }}"
- policy_name: "READ-ONLY"
- policy_document: readonlypolicy.json
- state: present
- loop: "{{ new_groups.results }}"
-
-# Create a new S3 policy with prefix per user
-- name: Create S3 policy from template
- iam_policy:
- iam_type: user
- iam_name: "{{ item.user }}"
- policy_name: "s3_limited_access_{{ item.prefix }}"
- state: present
- policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} "
- loop:
- - user: s3_user
- prefix: s3_user_prefix
-
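-# An illustrative sketch of the absent state, using the options documented above,
-# to remove the inline policy created in the first example.
-- name: Remove the Admin policy from the administrators group
-  iam_policy:
-    iam_type: group
-    iam_name: administrators
-    policy_name: Admin
-    state: absent
-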
-'''
-import json
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import compare_policies
-from ansible.module_utils.six import string_types
-
-
-class PolicyError(Exception):
- pass
-
-
-class Policy:
-
- def __init__(self, client, name, policy_name, policy_document, policy_json, skip_duplicates, state, check_mode):
- self.client = client
- self.name = name
- self.policy_name = policy_name
- self.policy_document = policy_document
- self.policy_json = policy_json
- self.skip_duplicates = skip_duplicates
- self.state = state
- self.check_mode = check_mode
- self.changed = False
-
- @staticmethod
- def _iam_type():
- return ''
-
- def _list(self, name):
- return {}
-
- def list(self):
- return self._list(self.name).get('PolicyNames', [])
-
- def _get(self, name, policy_name):
- return '{}'
-
- def get(self, policy_name):
- return self._get(self.name, policy_name)['PolicyDocument']
-
- def _put(self, name, policy_name, policy_doc):
- pass
-
- def put(self, policy_doc):
- if not self.check_mode:
- self._put(self.name, self.policy_name, json.dumps(policy_doc, sort_keys=True))
- self.changed = True
-
- def _delete(self, name, policy_name):
- pass
-
- def delete(self):
- if self.policy_name not in self.list():
- self.changed = False
- return
-
- self.changed = True
- if not self.check_mode:
- self._delete(self.name, self.policy_name)
-
- def get_policy_text(self):
- try:
- if self.policy_document is not None:
- return self.get_policy_from_document()
- if self.policy_json is not None:
- return self.get_policy_from_json()
- except json.JSONDecodeError as e:
- raise PolicyError('Failed to decode the policy as valid JSON: %s' % str(e))
- return None
-
- def get_policy_from_document(self):
- try:
-            with open(self.policy_document, 'r') as json_data:
-                pdoc = json.load(json_data)
- except IOError as e:
- if e.errno == 2:
-                raise PolicyError('policy_document {0!r} does not exist'.format(self.policy_document))
- raise
- return pdoc
-
- def get_policy_from_json(self):
- if isinstance(self.policy_json, string_types):
- pdoc = json.loads(self.policy_json)
- else:
- pdoc = self.policy_json
- return pdoc
-
- def create(self):
- matching_policies = []
- policy_doc = self.get_policy_text()
- policy_match = False
- for pol in self.list():
- if not compare_policies(self.get(pol), policy_doc):
- matching_policies.append(pol)
- policy_match = True
-
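-        # Write the document unless it is already stored under this name, or an
-        # identically-ruled policy exists and skip_duplicates is set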
- if (self.policy_name not in matching_policies) and not (self.skip_duplicates and policy_match):
- self.put(policy_doc)
-
- def run(self):
- if self.state == 'present':
- self.create()
- elif self.state == 'absent':
- self.delete()
- return {
- 'changed': self.changed,
- self._iam_type() + '_name': self.name,
- 'policies': self.list()
- }
-
-
-class UserPolicy(Policy):
-
- @staticmethod
- def _iam_type():
- return 'user'
-
- def _list(self, name):
- return self.client.list_user_policies(UserName=name)
-
- def _get(self, name, policy_name):
- return self.client.get_user_policy(UserName=name, PolicyName=policy_name)
-
- def _put(self, name, policy_name, policy_doc):
- return self.client.put_user_policy(UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc)
-
- def _delete(self, name, policy_name):
- return self.client.delete_user_policy(UserName=name, PolicyName=policy_name)
-
-
-class RolePolicy(Policy):
-
- @staticmethod
- def _iam_type():
- return 'role'
-
- def _list(self, name):
- return self.client.list_role_policies(RoleName=name)
-
- def _get(self, name, policy_name):
- return self.client.get_role_policy(RoleName=name, PolicyName=policy_name)
-
- def _put(self, name, policy_name, policy_doc):
- return self.client.put_role_policy(RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc)
-
- def _delete(self, name, policy_name):
- return self.client.delete_role_policy(RoleName=name, PolicyName=policy_name)
-
-
-class GroupPolicy(Policy):
-
- @staticmethod
- def _iam_type():
- return 'group'
-
- def _list(self, name):
- return self.client.list_group_policies(GroupName=name)
-
- def _get(self, name, policy_name):
- return self.client.get_group_policy(GroupName=name, PolicyName=policy_name)
-
- def _put(self, name, policy_name, policy_doc):
- return self.client.put_group_policy(GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc)
-
- def _delete(self, name, policy_name):
- return self.client.delete_group_policy(GroupName=name, PolicyName=policy_name)
-
-
-def main():
- argument_spec = dict(
- iam_type=dict(required=True, choices=['user', 'group', 'role']),
- state=dict(default='present', choices=['present', 'absent']),
- iam_name=dict(required=True),
- policy_name=dict(required=True),
- policy_document=dict(default=None, required=False),
- policy_json=dict(type='json', default=None, required=False),
- skip_duplicates=dict(type='bool', default=None, required=False)
- )
- mutually_exclusive = [['policy_document', 'policy_json']]
-
- module = AnsibleAWSModule(argument_spec=argument_spec, mutually_exclusive=mutually_exclusive, supports_check_mode=True)
-
- skip_duplicates = module.params.get('skip_duplicates')
-
-    if skip_duplicates is None:
- module.deprecate('The skip_duplicates behaviour has caused confusion and'
- ' will be disabled by default in Ansible 2.14',
- version='2.14')
- skip_duplicates = True
-
- if module.params.get('policy_document'):
- module.deprecate('The policy_document option has been deprecated and'
- ' will be removed in Ansible 2.14',
- version='2.14')
-
- args = dict(
- client=module.client('iam'),
- name=module.params.get('iam_name'),
- policy_name=module.params.get('policy_name'),
- policy_document=module.params.get('policy_document'),
- policy_json=module.params.get('policy_json'),
- skip_duplicates=skip_duplicates,
- state=module.params.get('state'),
- check_mode=module.check_mode,
- )
- iam_type = module.params.get('iam_type')
-
- try:
- if iam_type == 'user':
- policy = UserPolicy(**args)
- elif iam_type == 'role':
- policy = RolePolicy(**args)
- elif iam_type == 'group':
- policy = GroupPolicy(**args)
-
- module.exit_json(**(policy.run()))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
- except PolicyError as e:
- module.fail_json(msg=str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_policy_info.py b/lib/ansible/modules/cloud/amazon/iam_policy_info.py
deleted file mode 100644
index ba2fc69400..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_policy_info.py
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: iam_policy_info
-short_description: Retrieve inline IAM policies for users, groups, and roles
-description:
- - Supports fetching of inline IAM policies for IAM users, groups and roles.
-version_added: "2.10"
-options:
- iam_type:
- description:
- - Type of IAM resource you wish to retrieve inline policies for.
- required: yes
- choices: [ "user", "group", "role"]
- type: str
- iam_name:
- description:
- - Name of IAM resource you wish to retrieve inline policies for. In other words, the user name, group name or role name.
- required: yes
- type: str
- policy_name:
- description:
-            - Name of a specific IAM inline policy you wish to retrieve.
- required: no
- type: str
-
-author:
- - Mark Chappell (@tremble)
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Describe all inline IAM policies on an IAM User
-- iam_policy_info:
- iam_type: user
- iam_name: example_user
-
-# Describe a specific inline policy on an IAM Role
-- iam_policy_info:
- iam_type: role
- iam_name: example_role
- policy_name: example_policy
-
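-# An illustrative follow-up: capture the result and show the returned policy names
-# (all_policy_names is part of the documented return values).
-- iam_policy_info:
-    iam_type: user
-    iam_name: example_user
-  register: user_policies
-
-- debug:
-    var: user_policies.all_policy_names
-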
-'''
-RETURN = '''
-policies:
- description: A list containing the matching IAM inline policy names and their data
- returned: success
- type: complex
- contains:
- policy_name:
- description: The Name of the inline policy
- returned: success
- type: str
- policy_document:
- description: The JSON document representing the inline IAM policy
- returned: success
- type: list
-policy_names:
- description: A list of matching names of the IAM inline policies on the queried object
- returned: success
- type: list
-all_policy_names:
- description: A list of names of all of the IAM inline policies on the queried object
- returned: success
- type: list
-'''
-
-import json
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.six import string_types
-
-
-class PolicyError(Exception):
- pass
-
-
-class Policy:
-
- def __init__(self, client, name, policy_name):
- self.client = client
- self.name = name
- self.policy_name = policy_name
- self.changed = False
-
- @staticmethod
- def _iam_type():
- return ''
-
- def _list(self, name):
- return {}
-
- def list(self):
- return self._list(self.name).get('PolicyNames', [])
-
- def _get(self, name, policy_name):
- return '{}'
-
- def get(self, policy_name):
- return self._get(self.name, policy_name)['PolicyDocument']
-
- def get_all(self):
- policies = list()
- for policy in self.list():
- policies.append({"policy_name": policy, "policy_document": self.get(policy)})
- return policies
-
- def run(self):
- policy_list = self.list()
- ret_val = {
- 'changed': False,
- self._iam_type() + '_name': self.name,
- 'all_policy_names': policy_list
- }
- if self.policy_name is None:
- ret_val.update(policies=self.get_all())
- ret_val.update(policy_names=policy_list)
- elif self.policy_name in policy_list:
- ret_val.update(policies=[{
- "policy_name": self.policy_name,
- "policy_document": self.get(self.policy_name)}])
- ret_val.update(policy_names=[self.policy_name])
- return ret_val
-
-
-class UserPolicy(Policy):
-
- @staticmethod
- def _iam_type():
- return 'user'
-
- def _list(self, name):
- return self.client.list_user_policies(UserName=name)
-
- def _get(self, name, policy_name):
- return self.client.get_user_policy(UserName=name, PolicyName=policy_name)
-
-
-class RolePolicy(Policy):
-
- @staticmethod
- def _iam_type():
- return 'role'
-
- def _list(self, name):
- return self.client.list_role_policies(RoleName=name)
-
- def _get(self, name, policy_name):
- return self.client.get_role_policy(RoleName=name, PolicyName=policy_name)
-
-
-class GroupPolicy(Policy):
-
- @staticmethod
- def _iam_type():
- return 'group'
-
- def _list(self, name):
- return self.client.list_group_policies(GroupName=name)
-
- def _get(self, name, policy_name):
- return self.client.get_group_policy(GroupName=name, PolicyName=policy_name)
-
-
-def main():
- argument_spec = dict(
- iam_type=dict(required=True, choices=['user', 'group', 'role']),
- iam_name=dict(required=True),
- policy_name=dict(default=None, required=False),
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
- args = dict(
- client=module.client('iam'),
- name=module.params.get('iam_name'),
- policy_name=module.params.get('policy_name'),
- )
- iam_type = module.params.get('iam_type')
-
- try:
- if iam_type == 'user':
- policy = UserPolicy(**args)
- elif iam_type == 'role':
- policy = RolePolicy(**args)
- elif iam_type == 'group':
- policy = GroupPolicy(**args)
-
- module.exit_json(**(policy.run()))
- except (BotoCoreError, ClientError) as e:
-        # Only ClientError carries a response dict; BotoCoreError does not
-        if isinstance(e, ClientError) and e.response['Error']['Code'] == 'NoSuchEntity':
- module.exit_json(changed=False, msg=e.response['Error']['Message'])
- module.fail_json_aws(e)
- except PolicyError as e:
- module.fail_json(msg=str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_role.py b/lib/ansible/modules/cloud/amazon/iam_role.py
deleted file mode 100644
index 71a5b0377e..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_role.py
+++ /dev/null
@@ -1,673 +0,0 @@
-#!/usr/bin/python
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: iam_role
-short_description: Manage AWS IAM roles
-description:
- - Manage AWS IAM roles.
-version_added: "2.3"
-author: "Rob White (@wimnat)"
-options:
- path:
- description:
- - The path to the role. For more information about paths, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
- default: "/"
- type: str
- name:
- description:
- - The name of the role to create.
- required: true
- type: str
- description:
- description:
- - Provides a description of the role.
- version_added: "2.5"
- type: str
- boundary:
- description:
- - The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates.
- - Boundaries cannot be set on Instance Profiles, as such if this option is specified then I(create_instance_profile) must be C(false).
- - This is intended for roles/users that have permissions to create new IAM objects.
- - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
- - Requires botocore 1.10.57 or above.
- aliases: [boundary_policy_arn]
- version_added: "2.7"
- type: str
- assume_role_policy_document:
- description:
- - The trust relationship policy document that grants an entity permission to assume the role.
- - This parameter is required when I(state=present).
- type: json
- managed_policies:
- description:
- - A list of managed policy ARNs or, since Ansible 2.4, a list of either managed policy ARNs or friendly names.
-            - To remove all policies, set I(purge_policies=true) and I(managed_policies=[None]).
- - To embed an inline policy, use M(iam_policy).
- aliases: ['managed_policy']
- type: list
- max_session_duration:
- description:
- - The maximum duration (in seconds) of a session when assuming the role.
- - Valid values are between 1 and 12 hours (3600 and 43200 seconds).
- version_added: "2.10"
- type: int
- purge_policies:
- description:
-            - When I(purge_policies=true), any managed policies not listed in I(managed_policies) will be detached.
- - By default I(purge_policies=true). In Ansible 2.14 this will be changed to I(purge_policies=false).
- version_added: "2.5"
- type: bool
- aliases: ['purge_policy', 'purge_managed_policies']
- state:
- description:
- - Create or remove the IAM role.
- default: present
- choices: [ present, absent ]
- type: str
- create_instance_profile:
- description:
- - Creates an IAM instance profile along with the role.
- default: true
- version_added: "2.5"
- type: bool
- delete_instance_profile:
- description:
- - When I(delete_instance_profile=true) and I(state=absent) deleting a role will also delete the instance
- profile created with the same I(name) as the role.
- - Only applies when I(state=absent).
- default: false
- version_added: "2.10"
- type: bool
- tags:
- description:
- - Tag dict to apply to the queue.
- - Requires botocore 1.12.46 or above.
- version_added: "2.10"
- type: dict
- purge_tags:
- description:
- - Remove tags not listed in I(tags) when tags is specified.
- default: true
- version_added: "2.10"
- type: bool
-requirements: [ botocore, boto3 ]
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Create a role with description and tags
- iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file','policy.json') }}"
- description: This is My New Role
- tags:
- env: dev
-
-- name: "Create a role and attach a managed policy called 'PowerUserAccess'"
- iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file','policy.json') }}"
- managed_policies:
- - arn:aws:iam::aws:policy/PowerUserAccess
-
-- name: Keep the role created above but remove all managed policies
- iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file','policy.json') }}"
- managed_policies: []
-
-- name: Delete the role
- iam_role:
- name: mynewrole
- assume_role_policy_document: "{{ lookup('file', 'policy.json') }}"
- state: absent
-
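-# An illustrative sketch combining documented options; the boundary ARN is a placeholder.
-# Boundaries cannot be used together with instance profiles (see the boundary option above).
-- name: Create a role with a permissions boundary
-  iam_role:
-    name: mynewrole
-    assume_role_policy_document: "{{ lookup('file','policy.json') }}"
-    boundary: arn:aws:iam::123456789012:policy/boundarypolicy
-    create_instance_profile: false
-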
-'''
-RETURN = '''
-iam_role:
- description: dictionary containing the IAM Role data
- returned: success
- type: complex
- contains:
- path:
- description: the path to the role
- type: str
- returned: always
- sample: /
- role_name:
- description: the friendly name that identifies the role
- type: str
- returned: always
- sample: myrole
- role_id:
- description: the stable and unique string identifying the role
- type: str
- returned: always
- sample: ABCDEFF4EZ4ABCDEFV4ZC
- arn:
- description: the Amazon Resource Name (ARN) specifying the role
- type: str
- returned: always
- sample: "arn:aws:iam::1234567890:role/mynewrole"
- create_date:
- description: the date and time, in ISO 8601 date-time format, when the role was created
- type: str
- returned: always
- sample: "2016-08-14T04:36:28+00:00"
- assume_role_policy_document:
- description: the policy that grants an entity permission to assume the role
- type: str
- returned: always
- sample: {
- 'statement': [
- {
- 'action': 'sts:AssumeRole',
- 'effect': 'Allow',
- 'principal': {
- 'service': 'ec2.amazonaws.com'
- },
- 'sid': ''
- }
- ],
- 'version': '2012-10-17'
- }
- attached_policies:
- description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
- type: list
- returned: always
- sample: [
- {
- 'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess',
- 'policy_name': 'PowerUserAccess'
- }
- ]
- tags:
- description: role tags
- type: dict
- returned: always
- sample: '{"Env": "Prod"}'
-'''
-
-import json
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, compare_policies
-from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, compare_aws_tags
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def compare_assume_role_policy_doc(current_policy_doc, new_policy_doc):
-    # compare_policies returns False when the two documents are equivalent
-    return not compare_policies(current_policy_doc, json.loads(new_policy_doc))
-
-
-@AWSRetry.jittered_backoff()
-def _list_policies(connection):
- paginator = connection.get_paginator('list_policies')
- return paginator.paginate().build_full_result()['Policies']
-
-
-def convert_friendly_names_to_arns(connection, module, policy_names):
-    if all(policy.startswith('arn:') for policy in policy_names):
- return policy_names
- allpolicies = {}
- policies = _list_policies(connection)
-
- for policy in policies:
- allpolicies[policy['PolicyName']] = policy['Arn']
- allpolicies[policy['Arn']] = policy['Arn']
- try:
- return [allpolicies[policy] for policy in policy_names]
- except KeyError as e:
- module.fail_json_aws(e, msg="Couldn't find policy")
-
-
-def attach_policies(connection, module, policies_to_attach, params):
- changed = False
- for policy_arn in policies_to_attach:
- try:
- if not module.check_mode:
- connection.attach_role_policy(RoleName=params['RoleName'], PolicyArn=policy_arn, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, params['RoleName']))
- changed = True
- return changed
-
-
-def remove_policies(connection, module, policies_to_remove, params):
- changed = False
- for policy in policies_to_remove:
- try:
- if not module.check_mode:
- connection.detach_role_policy(RoleName=params['RoleName'], PolicyArn=policy, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, params['RoleName']))
- changed = True
- return changed
-
-
-def generate_create_params(module):
- params = dict()
- params['Path'] = module.params.get('path')
- params['RoleName'] = module.params.get('name')
- params['AssumeRolePolicyDocument'] = module.params.get('assume_role_policy_document')
- if module.params.get('description') is not None:
- params['Description'] = module.params.get('description')
- if module.params.get('max_session_duration') is not None:
- params['MaxSessionDuration'] = module.params.get('max_session_duration')
- if module.params.get('boundary') is not None:
- params['PermissionsBoundary'] = module.params.get('boundary')
- if module.params.get('tags') is not None:
- params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags'))
-
- return params
-
-
-def create_basic_role(connection, module, params):
- """
- Perform the Role creation.
- Assumes tests for the role existing have already been performed.
- """
-
- try:
- if not module.check_mode:
- role = connection.create_role(aws_retry=True, **params)
-            # 'Description' is documented as a key of the role returned by create_role,
-            # but the value is not actually returned (the AWS CLI omits it too), which
-            # appears to be an AWS bug. Fetch the role again after creating it.
- role = get_role_with_backoff(connection, module, params['RoleName'])
- else:
- role = {'MadeInCheckMode': True}
- role['AssumeRolePolicyDocument'] = json.loads(params['AssumeRolePolicyDocument'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to create role")
-
- return role
-
-
-def update_role_assumed_policy(connection, module, params, role):
- # Check Assumed Policy document
- if compare_assume_role_policy_doc(role['AssumeRolePolicyDocument'], params['AssumeRolePolicyDocument']):
- return False
-
- if module.check_mode:
- return True
-
- try:
- connection.update_assume_role_policy(
- RoleName=params['RoleName'],
- PolicyDocument=json.dumps(json.loads(params['AssumeRolePolicyDocument'])),
- aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(params['RoleName']))
- return True
-
-
-def update_role_description(connection, module, params, role):
- # Check Description update
- if params.get('Description') is None:
- return False
- if role.get('Description') == params['Description']:
- return False
-
- if module.check_mode:
- return True
-
- try:
- connection.update_role_description(RoleName=params['RoleName'], Description=params['Description'], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update description for role {0}".format(params['RoleName']))
- return True
-
-
-def update_role_max_session_duration(connection, module, params, role):
- # Check MaxSessionDuration update
- if params.get('MaxSessionDuration') is None:
- return False
- if role.get('MaxSessionDuration') == params['MaxSessionDuration']:
- return False
-
- if module.check_mode:
- return True
-
- try:
- connection.update_role(RoleName=params['RoleName'], MaxSessionDuration=params['MaxSessionDuration'], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(params['RoleName']))
- return True
-
-
-def update_role_permissions_boundary(connection, module, params, role):
- # Check PermissionsBoundary
- if params.get('PermissionsBoundary') is None:
- return False
- if params.get('PermissionsBoundary') == role.get('PermissionsBoundary', {}).get('PermissionsBoundaryArn', ''):
- return False
-
- if module.check_mode:
- return True
-
- if params.get('PermissionsBoundary') == '':
- try:
- connection.delete_role_permissions_boundary(RoleName=params['RoleName'], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(params['RoleName']))
- else:
- try:
- connection.put_role_permissions_boundary(RoleName=params['RoleName'], PermissionsBoundary=params['PermissionsBoundary'], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(params['RoleName']))
- return True
-
-
-def update_managed_policies(connection, module, params, role, managed_policies, purge_policies):
- # Check Managed Policies
- if managed_policies is None:
- return False
-
- # If we're manipulating a fake role
- if role.get('MadeInCheckMode', False):
- role['AttachedPolicies'] = list(map(lambda x: {'PolicyArn': x, 'PolicyName': x.split(':')[5]}, managed_policies))
- return True
-
- # Get list of current attached managed policies
- current_attached_policies = get_attached_policy_list(connection, module, params['RoleName'])
- current_attached_policies_arn_list = [policy['PolicyArn'] for policy in current_attached_policies]
-
- if len(managed_policies) == 1 and managed_policies[0] is None:
- managed_policies = []
-
- policies_to_remove = set(current_attached_policies_arn_list) - set(managed_policies)
- policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list)
-
- changed = False
-
- if purge_policies:
- changed |= remove_policies(connection, module, policies_to_remove, params)
-
- changed |= attach_policies(connection, module, policies_to_attach, params)
-
- return changed
-
-
-def create_or_update_role(connection, module):
-
- params = generate_create_params(module)
- role_name = params['RoleName']
- create_instance_profile = module.params.get('create_instance_profile')
- purge_policies = module.params.get('purge_policies')
- if purge_policies is None:
- purge_policies = True
- managed_policies = module.params.get('managed_policies')
- if managed_policies:
- # Attempt to list the policies early so we don't leave things behind if we can't find them.
- managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
-
- changed = False
-
- # Get role
- role = get_role(connection, module, role_name)
-
- # If role is None, create it
- if role is None:
- role = create_basic_role(connection, module, params)
- changed = True
- else:
- changed |= update_role_tags(connection, module, params, role)
- changed |= update_role_assumed_policy(connection, module, params, role)
- changed |= update_role_description(connection, module, params, role)
- changed |= update_role_max_session_duration(connection, module, params, role)
- changed |= update_role_permissions_boundary(connection, module, params, role)
-
- if create_instance_profile:
- changed |= create_instance_profiles(connection, module, params, role)
-
- changed |= update_managed_policies(connection, module, params, role, managed_policies, purge_policies)
-
- # Get the role again
- if not role.get('MadeInCheckMode', False):
- role = get_role(connection, module, params['RoleName'])
- role['AttachedPolicies'] = get_attached_policy_list(connection, module, params['RoleName'])
- role['tags'] = get_role_tags(connection, module)
-
- module.exit_json(
- changed=changed, iam_role=camel_dict_to_snake_dict(role, ignore_list=['tags']),
- **camel_dict_to_snake_dict(role, ignore_list=['tags']))
-
-
-def create_instance_profiles(connection, module, params, role):
-
- if role.get('MadeInCheckMode', False):
- return False
-
- # Fetch existing Profiles
- try:
- instance_profiles = connection.list_instance_profiles_for_role(RoleName=params['RoleName'], aws_retry=True)['InstanceProfiles']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(params['RoleName']))
-
- # Profile already exists
- if any(p['InstanceProfileName'] == params['RoleName'] for p in instance_profiles):
- return False
-
- if module.check_mode:
- return True
-
- # Make sure an instance profile is created
- try:
- connection.create_instance_profile(InstanceProfileName=params['RoleName'], Path=params['Path'], aws_retry=True)
- except ClientError as e:
- # If the profile already exists, no problem, move on.
- # Implies someone's changing things at the same time...
- if e.response['Error']['Code'] == 'EntityAlreadyExists':
- return False
- else:
- module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName']))
- except BotoCoreError as e:
- module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(params['RoleName']))
-
- # And attach the role to the profile
- try:
- connection.add_role_to_instance_profile(InstanceProfileName=params['RoleName'], RoleName=params['RoleName'], aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(params['RoleName']))
-
- return True
-
-
-def remove_instance_profiles(connection, module, role_params, role):
- role_name = module.params.get('name')
- delete_profiles = module.params.get("delete_instance_profile")
-
- try:
- instance_profiles = connection.list_instance_profiles_for_role(aws_retry=True, **role_params)['InstanceProfiles']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name))
-
- # Remove the role from the instance profile(s)
- for profile in instance_profiles:
- profile_name = profile['InstanceProfileName']
- try:
- if not module.check_mode:
- connection.remove_role_from_instance_profile(aws_retry=True, InstanceProfileName=profile_name, **role_params)
- if profile_name == role_name:
- if delete_profiles:
- try:
- connection.delete_instance_profile(InstanceProfileName=profile_name, aws_retry=True)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name))
-
-
-def destroy_role(connection, module):
-
- role_name = module.params.get('name')
- role = get_role(connection, module, role_name)
- role_params = dict()
- role_params['RoleName'] = role_name
- boundary_params = dict(role_params)
- boundary_params['PermissionsBoundary'] = ''
-
- if role is None:
- module.exit_json(changed=False)
-
- # Before we try to delete the role we need to remove any
- # - attached instance profiles
- # - attached managed policies
- # - permissions boundary
- remove_instance_profiles(connection, module, role_params, role)
- update_managed_policies(connection, module, role_params, role, [], True)
- update_role_permissions_boundary(connection, module, boundary_params, role)
-
- try:
- if not module.check_mode:
- connection.delete_role(aws_retry=True, **role_params)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to delete role")
-
- module.exit_json(changed=True)
-
-
-def get_role_with_backoff(connection, module, name):
- try:
- return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(connection.get_role)(RoleName=name)['Role']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
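-
-# Note (explanatory, not in the original): NoSuchEntity is retried above because
-# IAM is eventually consistent; immediately after create_role, a get_role call
-# can still fail until the new role has propagated.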
-
-
-def get_role(connection, module, name):
- try:
- return connection.get_role(RoleName=name, aws_retry=True)['Role']
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchEntity':
- return None
- else:
- module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
- except BotoCoreError as e:
- module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
-
-
-def get_attached_policy_list(connection, module, name):
- try:
- return connection.list_attached_role_policies(RoleName=name, aws_retry=True)['AttachedPolicies']
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name))
-
-
-def get_role_tags(connection, module):
- role_name = module.params.get('name')
- if not hasattr(connection, 'list_role_tags'):
- return {}
- try:
- return boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name))
-
-
-def update_role_tags(connection, module, params, role):
- new_tags = params.get('Tags')
- if new_tags is None:
- return False
- new_tags = boto3_tag_list_to_ansible_dict(new_tags)
-
- role_name = module.params.get('name')
- purge_tags = module.params.get('purge_tags')
-
- try:
- existing_tags = boto3_tag_list_to_ansible_dict(connection.list_role_tags(RoleName=role_name, aws_retry=True)['Tags'])
- except (ClientError, KeyError):
- existing_tags = {}
-
- tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
-
- if not module.check_mode:
- try:
- if tags_to_remove:
- connection.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True)
- if tags_to_add:
- connection.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags for role %s' % role_name)
-
- changed = bool(tags_to_add) or bool(tags_to_remove)
- return changed
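-
-# Illustration (explanatory, not in the original): compare_aws_tags returns the
-# tags to add or change plus the tag keys to remove, e.g.
-#     compare_aws_tags({'Env': 'dev', 'Team': 'a'}, {'Env': 'prod'}, purge_tags=True)
-#     returns ({'Env': 'prod'}, ['Team'])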
-
-
-def main():
-
- argument_spec = dict(
- name=dict(type='str', required=True),
- path=dict(type='str', default="/"),
- assume_role_policy_document=dict(type='json'),
- managed_policies=dict(type='list', aliases=['managed_policy']),
- max_session_duration=dict(type='int'),
- state=dict(type='str', choices=['present', 'absent'], default='present'),
- description=dict(type='str'),
- boundary=dict(type='str', aliases=['boundary_policy_arn']),
- create_instance_profile=dict(type='bool', default=True),
- delete_instance_profile=dict(type='bool', default=False),
- purge_policies=dict(type='bool', aliases=['purge_policy', 'purge_managed_policies']),
- tags=dict(type='dict'),
- purge_tags=dict(type='bool', default=True),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[('state', 'present', ['assume_role_policy_document'])],
- supports_check_mode=True)
-
- if module.params.get('purge_policies') is None:
- module.deprecate('In Ansible 2.14 the default value of purge_policies will change from true to false.'
-                         ' To maintain the existing behaviour explicitly set purge_policies=true', version='2.14')
-
- if module.params.get('boundary'):
- if module.params.get('create_instance_profile'):
- module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
- if not module.params.get('boundary').startswith('arn:aws:iam'):
- module.fail_json(msg="Boundary policy must be an ARN")
- if module.params.get('tags') is not None and not module.botocore_at_least('1.12.46'):
- module.fail_json(msg="When managing tags botocore must be at least v1.12.46. "
- "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
- if module.params.get('boundary') is not None and not module.botocore_at_least('1.10.57'):
- module.fail_json(msg="When using a boundary policy, botocore must be at least v1.10.57. "
- "Current versions: boto3-{boto3_version} botocore-{botocore_version}".format(**module._gather_versions()))
- if module.params.get('max_session_duration'):
- max_session_duration = module.params.get('max_session_duration')
- if max_session_duration < 3600 or max_session_duration > 43200:
- module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)")
- if module.params.get('path'):
- path = module.params.get('path')
- if not path.endswith('/') or not path.startswith('/'):
- module.fail_json(msg="path must begin and end with /")
-
- connection = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
-
- state = module.params.get("state")
-
- if state == 'present':
- create_or_update_role(connection, module)
- else:
- destroy_role(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_role_info.py b/lib/ansible/modules/cloud/amazon/iam_role_info.py
deleted file mode 100644
index 802870d756..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_role_info.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: iam_role_info
-short_description: Gather information on IAM roles
-description:
- - Gathers information about IAM roles.
- - This module was called C(iam_role_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.5"
-requirements: [ boto3 ]
-author:
- - "Will Thames (@willthames)"
-options:
- name:
- description:
- - Name of a role to search for.
- - Mutually exclusive with I(path_prefix).
- aliases:
- - role_name
- type: str
- path_prefix:
- description:
-      - Prefix of role path to restrict the IAM role search to.
- - Mutually exclusive with I(name).
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# find all existing IAM roles
-- iam_role_info:
- register: result
-
-# describe a single role
-- iam_role_info:
- name: MyIAMRole
-
-# describe all roles matching a path prefix
-- iam_role_info:
- path_prefix: /application/path
-'''
-
-RETURN = '''
-iam_roles:
- description: List of IAM roles
- returned: always
- type: complex
- contains:
- arn:
- description: Amazon Resource Name for IAM role.
- returned: always
- type: str
- sample: arn:aws:iam::123456789012:role/AnsibleTestRole
- assume_role_policy_document:
-      description: Policy document describing which entities can assume the role.
- returned: always
- type: str
- create_date:
- description: Date IAM role was created.
- returned: always
- type: str
- sample: '2017-10-23T00:05:08+00:00'
- inline_policies:
- description: List of names of inline policies.
- returned: always
- type: list
- sample: []
- managed_policies:
- description: List of attached managed policies.
- returned: always
- type: complex
- contains:
- policy_arn:
- description: Amazon Resource Name for the policy.
- returned: always
- type: str
- sample: arn:aws:iam::123456789012:policy/AnsibleTestEC2Policy
- policy_name:
- description: Name of managed policy.
- returned: always
- type: str
- sample: AnsibleTestEC2Policy
- instance_profiles:
- description: List of attached instance profiles.
- returned: always
- type: complex
- contains:
- arn:
- description: Amazon Resource Name for the instance profile.
- returned: always
- type: str
- sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestEC2Policy
- create_date:
- description: Date instance profile was created.
- returned: always
- type: str
- sample: '2017-10-23T00:05:08+00:00'
- instance_profile_id:
- description: Amazon Identifier for the instance profile.
- returned: always
- type: str
- sample: AROAII7ABCD123456EFGH
- instance_profile_name:
- description: Name of instance profile.
- returned: always
- type: str
- sample: AnsibleTestEC2Policy
- path:
- description: Path of instance profile.
- returned: always
- type: str
- sample: /
- roles:
- description: List of roles associated with this instance profile.
- returned: always
- type: list
- sample: []
- path:
- description: Path of role.
- returned: always
- type: str
- sample: /
- role_id:
- description: Amazon Identifier for the role.
- returned: always
- type: str
- sample: AROAII7ABCD123456EFGH
- role_name:
- description: Name of the role.
- returned: always
- type: str
- sample: AnsibleTestRole
- tags:
- description: Role tags.
- type: dict
- returned: always
- sample: '{"Env": "Prod"}'
-'''
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, AWSRetry
-
-
-@AWSRetry.exponential_backoff()
-def list_iam_roles_with_backoff(client, **kwargs):
- paginator = client.get_paginator('list_roles')
- return paginator.paginate(**kwargs).build_full_result()
-
-
-@AWSRetry.exponential_backoff()
-def list_iam_role_policies_with_backoff(client, role_name):
- paginator = client.get_paginator('list_role_policies')
- return paginator.paginate(RoleName=role_name).build_full_result()['PolicyNames']
-
-
-@AWSRetry.exponential_backoff()
-def list_iam_attached_role_policies_with_backoff(client, role_name):
- paginator = client.get_paginator('list_attached_role_policies')
- return paginator.paginate(RoleName=role_name).build_full_result()['AttachedPolicies']
-
-
-@AWSRetry.exponential_backoff()
-def list_iam_instance_profiles_for_role_with_backoff(client, role_name):
- paginator = client.get_paginator('list_instance_profiles_for_role')
- return paginator.paginate(RoleName=role_name).build_full_result()['InstanceProfiles']
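-
-# Usage sketch (explanatory, not in the original): each helper above wraps a boto3
-# paginator so throttled calls are retried with exponential backoff, e.g.
-#     profiles = list_iam_instance_profiles_for_role_with_backoff(client, 'MyRole')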
-
-
-def describe_iam_role(module, client, role):
- name = role['RoleName']
- try:
- role['InlinePolicies'] = list_iam_role_policies_with_backoff(client, name)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get inline policies for role %s" % name)
- try:
- role['ManagedPolicies'] = list_iam_attached_role_policies_with_backoff(client, name)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get managed policies for role %s" % name)
- try:
- role['InstanceProfiles'] = list_iam_instance_profiles_for_role_with_backoff(client, name)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get instance profiles for role %s" % name)
- try:
- role['tags'] = boto3_tag_list_to_ansible_dict(role['Tags'])
- del role['Tags']
- except KeyError:
- role['tags'] = {}
- return role
-
-
-def describe_iam_roles(module, client):
- name = module.params['name']
- path_prefix = module.params['path_prefix']
- if name:
- try:
- roles = [client.get_role(RoleName=name)['Role']]
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchEntity':
- return []
- else:
- module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name)
- else:
- params = dict()
- if path_prefix:
- if not path_prefix.startswith('/'):
- path_prefix = '/' + path_prefix
- if not path_prefix.endswith('/'):
- path_prefix = path_prefix + '/'
- params['PathPrefix'] = path_prefix
- try:
- roles = list_iam_roles_with_backoff(client, **params)['Roles']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't list IAM roles")
- return [camel_dict_to_snake_dict(describe_iam_role(module, client, role), ignore_list=['tags']) for role in roles]
-
-
-def main():
- """
- Module action handler
- """
- argument_spec = dict(
- name=dict(aliases=['role_name']),
- path_prefix=dict(),
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[['name', 'path_prefix']])
- if module._name == 'iam_role_facts':
- module.deprecate("The 'iam_role_facts' module has been renamed to 'iam_role_info'", version='2.13')
-
- client = module.client('iam')
-
- module.exit_json(changed=False, iam_roles=describe_iam_roles(module, client))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_saml_federation.py b/lib/ansible/modules/cloud/amazon/iam_saml_federation.py
deleted file mode 100644
index ee3c720afb..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_saml_federation.py
+++ /dev/null
@@ -1,249 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: iam_saml_federation
-version_added: "2.10"
-short_description: Maintain IAM SAML federation configuration.
-requirements:
- - boto3
-description:
- - Provides a mechanism to manage AWS IAM SAML Identity Federation providers (create/update/delete metadata).
-options:
- name:
- description:
- - The name of the provider to create.
- required: true
- type: str
- saml_metadata_document:
- description:
- - The XML document generated by an identity provider (IdP) that supports SAML 2.0.
- type: str
- state:
- description:
-      - Whether to create or delete the identity provider. If C(present) is specified, an existing identity provider matching I(name) will be updated.
- default: present
- choices: [ "present", "absent" ]
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-author:
- - Tony (@axc450)
- - Aidan Rowe (@aidan-)
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-# It is assumed that their matching environment variables are set.
-# Creates a new iam saml identity provider if not present
-- name: saml provider
- iam_saml_federation:
- name: example1
-    # the > below opens a YAML folded block, so no escaping/quoting is needed for lines indented under this key
- saml_metadata_document: >
- <?xml version="1.0"?>...
- <md:EntityDescriptor
-# Creates a new iam saml identity provider if not present
-- name: saml provider
- iam_saml_federation:
- name: example2
- saml_metadata_document: "{{ item }}"
-  with_file: /path/to/idp/metadata.xml
-# Removes iam saml identity provider
-- name: remove saml provider
- iam_saml_federation:
- name: example3
- state: absent
-'''
-
-RETURN = '''
-saml_provider:
- description: Details of the SAML Identity Provider that was created/modified.
- type: complex
- returned: present
- contains:
- arn:
- description: The ARN of the identity provider.
- type: str
- returned: present
- sample: "arn:aws:iam::123456789012:saml-provider/my_saml_provider"
- metadata_document:
- description: The XML metadata document that includes information about an identity provider.
- type: str
- returned: present
- create_date:
- description: The date and time when the SAML provider was created in ISO 8601 date-time format.
- type: str
- returned: present
- sample: "2017-02-08T04:36:28+00:00"
- expire_date:
- description: The expiration date and time for the SAML provider in ISO 8601 date-time format.
- type: str
- returned: present
- sample: "2017-02-08T04:36:28+00:00"
-'''
-
-try:
- import botocore.exceptions
-except ImportError:
- pass
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import AWSRetry
-
-
-class SAMLProviderManager:
- """Handles SAML Identity Provider configuration"""
-
- def __init__(self, module):
- self.module = module
-
- try:
- self.conn = module.client('iam')
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Unknown boto error")
-
- # use retry decorator for boto3 calls
- @AWSRetry.backoff(tries=3, delay=5)
- def _list_saml_providers(self):
- return self.conn.list_saml_providers()
-
- @AWSRetry.backoff(tries=3, delay=5)
- def _get_saml_provider(self, arn):
- return self.conn.get_saml_provider(SAMLProviderArn=arn)
-
- @AWSRetry.backoff(tries=3, delay=5)
- def _update_saml_provider(self, arn, metadata):
- return self.conn.update_saml_provider(SAMLProviderArn=arn, SAMLMetadataDocument=metadata)
-
- @AWSRetry.backoff(tries=3, delay=5)
- def _create_saml_provider(self, metadata, name):
- return self.conn.create_saml_provider(SAMLMetadataDocument=metadata, Name=name)
-
- @AWSRetry.backoff(tries=3, delay=5)
- def _delete_saml_provider(self, arn):
- return self.conn.delete_saml_provider(SAMLProviderArn=arn)
-
- def _get_provider_arn(self, name):
- providers = self._list_saml_providers()
- for p in providers['SAMLProviderList']:
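-            # e.g. (illustrative) 'arn:aws:iam::123456789012:saml-provider/prov1'
-            # .split('/', 1)[1] gives 'prov1'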
- provider_name = p['Arn'].split('/', 1)[1]
- if name == provider_name:
- return p['Arn']
-
- return None
-
- def create_or_update_saml_provider(self, name, metadata):
- if not metadata:
- self.module.fail_json(msg="saml_metadata_document must be defined for present state")
-
- res = {'changed': False}
- try:
- arn = self._get_provider_arn(name)
- except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name))
-
- if arn: # see if metadata needs updating
- try:
- resp = self._get_saml_provider(arn)
- except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not retrieve the identity provider '{0}'".format(name))
-
- if metadata.strip() != resp['SAMLMetadataDocument'].strip():
- # provider needs updating
- res['changed'] = True
- if not self.module.check_mode:
- try:
- resp = self._update_saml_provider(arn, metadata)
- res['saml_provider'] = self._build_res(resp['SAMLProviderArn'])
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Could not update the identity provider '{0}'".format(name))
-
- else: # create
- res['changed'] = True
- if not self.module.check_mode:
- try:
- resp = self._create_saml_provider(metadata, name)
- res['saml_provider'] = self._build_res(resp['SAMLProviderArn'])
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Could not create the identity provider '{0}'".format(name))
-
- self.module.exit_json(**res)
-
- def delete_saml_provider(self, name):
- res = {'changed': False}
- try:
- arn = self._get_provider_arn(name)
- except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
- self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name))
-
- if arn: # delete
- res['changed'] = True
- if not self.module.check_mode:
- try:
- self._delete_saml_provider(arn)
- except botocore.exceptions.ClientError as e:
- self.module.fail_json_aws(e, msg="Could not delete the identity provider '{0}'".format(name))
-
- self.module.exit_json(**res)
-
- def _build_res(self, arn):
- saml_provider = self._get_saml_provider(arn)
- return {
- "arn": arn,
- "metadata_document": saml_provider["SAMLMetadataDocument"],
- "create_date": saml_provider["CreateDate"].isoformat(),
- "expire_date": saml_provider["ValidUntil"].isoformat()
- }
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- saml_metadata_document=dict(default=None, required=False),
- state=dict(default='present', required=False, choices=['present', 'absent']),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- required_if=[('state', 'present', ['saml_metadata_document'])]
- )
-
- name = module.params['name']
- state = module.params.get('state')
- saml_metadata_document = module.params.get('saml_metadata_document')
-
- sp_man = SAMLProviderManager(module)
-
- if state == 'present':
- sp_man.create_or_update_saml_provider(name, saml_metadata_document)
- elif state == 'absent':
- sp_man.delete_saml_provider(name)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_server_certificate_info.py b/lib/ansible/modules/cloud/amazon/iam_server_certificate_info.py
deleted file mode 100644
index a7970371c5..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_server_certificate_info.py
+++ /dev/null
@@ -1,172 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: iam_server_certificate_info
-short_description: Retrieve the information of a server certificate
-description:
- - Retrieve the attributes of a server certificate.
- - This module was called C(iam_server_certificate_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.2"
-author: "Allen Sanabria (@linuxdynasty)"
-requirements: [boto3, botocore]
-options:
- name:
- description:
- - The name of the server certificate you are retrieving attributes for.
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Retrieve server certificate
-- iam_server_certificate_info:
- name: production-cert
- register: server_cert
-
-# Fail if the server certificate name was not found
-- iam_server_certificate_info:
- name: production-cert
- register: server_cert
-  failed_when: server_cert.results | length == 0
-'''
-
-RETURN = '''
-server_certificate_id:
-  description: The 21-character certificate ID.
- returned: success
- type: str
- sample: "ADWAJXWTZAXIPIMQHMJPO"
-certificate_body:
-  description: The PEM-encoded certificate body.
- returned: success
- type: str
- sample: "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----"
-server_certificate_name:
- description: The name of the server certificate
- returned: success
- type: str
- sample: "server-cert-name"
-arn:
- description: The Amazon resource name of the server certificate
- returned: success
- type: str
- sample: "arn:aws:iam::911277865346:server-certificate/server-cert-name"
-path:
- description: The path of the server certificate
- returned: success
- type: str
- sample: "/"
-expiration:
- description: The date and time this server certificate will expire, in ISO 8601 format.
- returned: success
- type: str
- sample: "2017-06-15T12:00:00+00:00"
-upload_date:
- description: The date and time this server certificate was uploaded, in ISO 8601 format.
- returned: success
- type: str
- sample: "2015-04-25T00:36:40+00:00"
-'''
-
-
-try:
- import boto3
- import botocore.exceptions
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
-
-
-def get_server_certs(iam, name=None):
-    """Retrieve the attributes of a named server certificate, or of all certificates.
- Args:
- iam (botocore.client.IAM): The boto3 iam instance.
-
- Kwargs:
- name (str): The name of the server certificate.
-
- Basic Usage:
- >>> import boto3
- >>> iam = boto3.client('iam')
- >>> name = "server-cert-name"
- >>> results = get_server_certs(iam, name)
- {
- "upload_date": "2015-04-25T00:36:40+00:00",
- "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO",
- "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----",
- "server_certificate_name": "server-cert-name",
- "expiration": "2017-06-15T12:00:00+00:00",
- "path": "/",
- "arn": "arn:aws:iam::911277865346:server-certificate/server-cert-name"
- }
- """
- results = dict()
- try:
- if name:
- server_certs = [iam.get_server_certificate(ServerCertificateName=name)['ServerCertificate']]
- else:
- server_certs = iam.list_server_certificates()['ServerCertificateMetadataList']
-
- for server_cert in server_certs:
- if not name:
- server_cert = iam.get_server_certificate(ServerCertificateName=server_cert['ServerCertificateName'])['ServerCertificate']
- cert_md = server_cert['ServerCertificateMetadata']
- results[cert_md['ServerCertificateName']] = {
- 'certificate_body': server_cert['CertificateBody'],
- 'server_certificate_id': cert_md['ServerCertificateId'],
- 'server_certificate_name': cert_md['ServerCertificateName'],
- 'arn': cert_md['Arn'],
- 'path': cert_md['Path'],
- 'expiration': cert_md['Expiration'].isoformat(),
- 'upload_date': cert_md['UploadDate'].isoformat(),
- }
-
- except botocore.exceptions.ClientError:
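-        # A missing certificate (or any other client error) surfaces as an
-        # empty result rather than a module failure.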
- pass
-
- return results
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- name=dict(type='str'),
- ))
-
- module = AnsibleModule(argument_spec=argument_spec,)
- if module._name == 'iam_server_certificate_facts':
- module.deprecate("The 'iam_server_certificate_facts' module has been renamed to 'iam_server_certificate_info'", version='2.13')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- try:
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- iam = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Boto3 Client Error - " + str(e.msg))
-
- cert_name = module.params.get('name')
- results = get_server_certs(iam, cert_name)
- module.exit_json(results=results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_user.py b/lib/ansible/modules/cloud/amazon/iam_user.py
deleted file mode 100644
index a85cd94c98..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_user.py
+++ /dev/null
@@ -1,370 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: iam_user
-short_description: Manage AWS IAM users
-description:
- - Manage AWS IAM users.
-version_added: "2.5"
-author: Josh Souza (@joshsouza)
-options:
- name:
- description:
- - The name of the user to create.
- required: true
- type: str
- managed_policies:
- description:
- - A list of managed policy ARNs or friendly names to attach to the user.
- - To embed an inline policy, use M(iam_policy).
- required: false
- type: list
- aliases: ['managed_policy']
- state:
- description:
- - Create or remove the IAM user.
- required: true
- choices: [ 'present', 'absent' ]
- type: str
- purge_policies:
- description:
-      - When I(purge_policies=true), any managed policies not listed in I(managed_policies) will be detached.
- required: false
- default: false
- type: bool
- aliases: ['purge_policy', 'purge_managed_policies']
-requirements: [ botocore, boto3 ]
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-# Note: This module does not allow management of groups that users belong to.
-# Groups should manage their membership directly using `iam_group`,
-# as users belong to them.
-
-# Create a user
-- iam_user:
- name: testuser1
- state: present
-
-# Create a user and attach a managed policy using its ARN
-- iam_user:
- name: testuser1
- managed_policies:
- - arn:aws:iam::aws:policy/AmazonSNSFullAccess
- state: present
-
-# Remove all managed policies from an existing user with an empty list
-- iam_user:
- name: testuser1
- state: present
- purge_policies: true
-
-# Delete the user
-- iam_user:
- name: testuser1
- state: absent
-
-'''
-RETURN = '''
-user:
- description: dictionary containing all the user information
- returned: success
- type: complex
- contains:
- arn:
- description: the Amazon Resource Name (ARN) specifying the user
- type: str
- sample: "arn:aws:iam::1234567890:user/testuser1"
- create_date:
- description: the date and time, in ISO 8601 date-time format, when the user was created
- type: str
- sample: "2017-02-08T04:36:28+00:00"
- user_id:
- description: the stable and unique string identifying the user
- type: str
- sample: AGPAIDBWE12NSFINE55TM
- user_name:
- description: the friendly name that identifies the user
- type: str
- sample: testuser1
- path:
- description: the path to the user
- type: str
- sample: /
-'''
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-import traceback
-
-try:
- from botocore.exceptions import ClientError, ParamValidationError, BotoCoreError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def compare_attached_policies(current_attached_policies, new_attached_policies):
-
- # If new_attached_policies is None it means we want to remove all policies
- if len(current_attached_policies) > 0 and new_attached_policies is None:
- return False
-
- current_attached_policies_arn_list = []
- for policy in current_attached_policies:
- current_attached_policies_arn_list.append(policy['PolicyArn'])
-
- if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)):
- return True
- else:
- return False
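-
-# Note (explanatory, not in the original): the symmetric difference above is empty
-# exactly when both sides hold the same set of policy ARNs, e.g.
-#     {'arn:a', 'arn:b'} ^ {'arn:b', 'arn:a'} == set(), so the policies match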
-
-
-def convert_friendly_names_to_arns(connection, module, policy_names):
-
- # List comprehension that looks for any policy in the 'policy_names' list
- # that does not begin with 'arn'. If there aren't any, short circuit.
- # If there are, translate friendly name to the full arn
- if not any([not policy.startswith('arn:') for policy in policy_names if policy is not None]):
- return policy_names
- allpolicies = {}
- paginator = connection.get_paginator('list_policies')
- policies = paginator.paginate().build_full_result()['Policies']
-
- for policy in policies:
- allpolicies[policy['PolicyName']] = policy['Arn']
- allpolicies[policy['Arn']] = policy['Arn']
- try:
- return [allpolicies[policy] for policy in policy_names]
- except KeyError as e:
- module.fail_json(msg="Couldn't find policy: " + str(e))
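-
-# Illustration (explanatory, not in the original): friendly names are resolved via
-# a name -> ARN map built from list_policies, e.g.
-#     convert_friendly_names_to_arns(conn, module, ['AmazonSNSFullAccess'])
-#     returns ['arn:aws:iam::aws:policy/AmazonSNSFullAccess']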
-
-
-def create_or_update_user(connection, module):
-
- params = dict()
- params['UserName'] = module.params.get('name')
- managed_policies = module.params.get('managed_policies')
- purge_policies = module.params.get('purge_policies')
- changed = False
- if managed_policies:
- managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
-
- # Get user
- user = get_user(connection, module, params['UserName'])
-
- # If user is None, create it
- if user is None:
- # Check mode means we would create the user
- if module.check_mode:
- module.exit_json(changed=True)
-
- try:
- connection.create_user(**params)
- changed = True
- except ClientError as e:
- module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- except ParamValidationError as e:
- module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc())
-
- # Manage managed policies
- current_attached_policies = get_attached_policy_list(connection, module, params['UserName'])
- if not compare_attached_policies(current_attached_policies, managed_policies):
- current_attached_policies_arn_list = []
- for policy in current_attached_policies:
- current_attached_policies_arn_list.append(policy['PolicyArn'])
-
-        # A single None element in managed_policies means remove all attached policies
-        if purge_policies:
-            # Detach policies that are attached but not in the requested list
- for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
- changed = True
- if not module.check_mode:
- try:
- connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
- except ClientError as e:
- module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
- policy_arn, params['UserName'], to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except ParamValidationError as e:
- module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
- policy_arn, params['UserName'], to_native(e)),
- exception=traceback.format_exc())
-
- # If there are policies to adjust that aren't in the current list, then things have changed
- # Otherwise the only changes were in purging above
- if set(managed_policies).difference(set(current_attached_policies_arn_list)):
- changed = True
- # If there are policies in managed_policies attach each policy
- if managed_policies != [None] and not module.check_mode:
- for policy_arn in managed_policies:
- try:
- connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
- except ClientError as e:
- module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
- policy_arn, params['UserName'], to_native(e)),
- exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
- except ParamValidationError as e:
- module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
- policy_arn, params['UserName'], to_native(e)),
- exception=traceback.format_exc())
- if module.check_mode:
- module.exit_json(changed=changed)
-
- # Get the user again
- user = get_user(connection, module, params['UserName'])
-
- module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user))
-
-
-def destroy_user(connection, module):
-
- user_name = module.params.get('name')
-
- user = get_user(connection, module, user_name)
- # User is not present
- if not user:
- module.exit_json(changed=False)
-
- # Check mode means we would remove this user
- if module.check_mode:
- module.exit_json(changed=True)
-
- # Remove any attached policies otherwise deletion fails
- try:
- for policy in get_attached_policy_list(connection, module, user_name):
- connection.detach_user_policy(UserName=user_name, PolicyArn=policy['PolicyArn'])
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name))
-
- try:
- # Remove user's access keys
- access_keys = connection.list_access_keys(UserName=user_name)["AccessKeyMetadata"]
- for access_key in access_keys:
- connection.delete_access_key(UserName=user_name, AccessKeyId=access_key["AccessKeyId"])
-
- # Remove user's login profile (console password)
- delete_user_login_profile(connection, module, user_name)
-
- # Remove user's ssh public keys
- ssh_public_keys = connection.list_ssh_public_keys(UserName=user_name)["SSHPublicKeys"]
- for ssh_public_key in ssh_public_keys:
- connection.delete_ssh_public_key(UserName=user_name, SSHPublicKeyId=ssh_public_key["SSHPublicKeyId"])
-
- # Remove user's service specific credentials
- service_credentials = connection.list_service_specific_credentials(UserName=user_name)["ServiceSpecificCredentials"]
- for service_specific_credential in service_credentials:
- connection.delete_service_specific_credential(
- UserName=user_name,
- ServiceSpecificCredentialId=service_specific_credential["ServiceSpecificCredentialId"]
- )
-
- # Remove user's signing certificates
- signing_certificates = connection.list_signing_certificates(UserName=user_name)["Certificates"]
- for signing_certificate in signing_certificates:
- connection.delete_signing_certificate(
- UserName=user_name,
- CertificateId=signing_certificate["CertificateId"]
- )
-
- # Remove user's MFA devices
- mfa_devices = connection.list_mfa_devices(UserName=user_name)["MFADevices"]
- for mfa_device in mfa_devices:
- connection.deactivate_mfa_device(UserName=user_name, SerialNumber=mfa_device["SerialNumber"])
-
- # Remove user's inline policies
- inline_policies = connection.list_user_policies(UserName=user_name)["PolicyNames"]
- for policy_name in inline_policies:
- connection.delete_user_policy(UserName=user_name, PolicyName=policy_name)
-
- # Remove user's group membership
- user_groups = connection.list_groups_for_user(UserName=user_name)["Groups"]
- for group in user_groups:
- connection.remove_user_from_group(UserName=user_name, GroupName=group["GroupName"])
-
- connection.delete_user(UserName=user_name)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name))
-
- module.exit_json(changed=True)
-
-
-def get_user(connection, module, name):
-
- params = dict()
- params['UserName'] = name
-
- try:
- return connection.get_user(**params)
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchEntity':
- return None
- else:
- module.fail_json(msg="Unable to get user {0}: {1}".format(name, to_native(e)),
- **camel_dict_to_snake_dict(e.response))
-
-
-def get_attached_policy_list(connection, module, name):
-
- try:
- return connection.list_attached_user_policies(UserName=name)['AttachedPolicies']
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchEntity':
- return None
- else:
- module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name))
-
-
-def delete_user_login_profile(connection, module, user_name):
-
- try:
- return connection.delete_login_profile(UserName=user_name)
- except ClientError as e:
- if e.response["Error"]["Code"] == "NoSuchEntity":
- return None
- else:
- module.fail_json_aws(e, msg="Unable to delete login profile for user {0}".format(user_name))
-
-
-def main():
-
- argument_spec = dict(
- name=dict(required=True, type='str'),
- managed_policies=dict(default=[], type='list', aliases=['managed_policy']),
- state=dict(choices=['present', 'absent'], required=True),
- purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies'])
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- connection = module.client('iam')
-
- state = module.params.get("state")
-
- if state == 'present':
- create_or_update_user(connection, module)
- else:
- destroy_user(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_user_info.py b/lib/ansible/modules/cloud/amazon/iam_user_info.py
deleted file mode 100644
index c7a25409b7..0000000000
--- a/lib/ansible/modules/cloud/amazon/iam_user_info.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/python
-
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-
-DOCUMENTATION = '''
----
-module: iam_user_info
-short_description: Gather IAM user(s) facts in AWS
-description:
- - This module can be used to gather IAM user(s) facts in AWS.
-version_added: "2.10"
-author:
- - Constantin Bugneac (@Constantin07)
- - Abhijeet Kasurde (@Akasurde)
-options:
- name:
- description:
- - The name of the IAM user to look for.
- required: false
- type: str
- group:
- description:
-      - The group name of the IAM user to look for. Mutually exclusive with C(path).
- required: false
- type: str
- path:
- description:
- - The path to the IAM user. Mutually exclusive with C(group).
-      - If specified, all users whose path starts with the provided value are returned.
- required: false
- default: '/'
- type: str
-requirements:
- - botocore
- - boto3
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = r'''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-# Gather facts about "test" user.
-- name: Get IAM user facts
- iam_user_info:
- name: "test"
-
-# Gather facts about all users in the "dev" group.
-- name: Get IAM user facts
- iam_user_info:
- group: "dev"
-
-# Gather facts about all users with "/division_abc/subdivision_xyz/" path.
-- name: Get IAM user facts
- iam_user_info:
- path: "/division_abc/subdivision_xyz/"
-'''
-
-RETURN = r'''
-iam_users:
-  description: list of matching IAM users
- returned: success
- type: complex
- contains:
- arn:
- description: the ARN of the user
- returned: if user exists
- type: str
- sample: "arn:aws:iam::156360693172:user/dev/test_user"
- create_date:
- description: the datetime user was created
- returned: if user exists
- type: str
- sample: "2016-05-24T12:24:59+00:00"
- password_last_used:
- description: the last datetime the password was used by user
- returned: if password was used at least once
- type: str
- sample: "2016-05-25T13:39:11+00:00"
- path:
- description: the path to user
- returned: if user exists
- type: str
- sample: "/dev/"
- user_id:
- description: the unique user id
- returned: if user exists
- type: str
- sample: "AIDUIOOCQKTUGI6QJLGH2"
- user_name:
- description: the user name
- returned: if user exists
- type: str
- sample: "test_user"
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
-
-try:
- import botocore
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-@AWSRetry.exponential_backoff()
-def list_iam_users_with_backoff(client, operation, **kwargs):
- paginator = client.get_paginator(operation)
- return paginator.paginate(**kwargs).build_full_result()
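-
-# Usage sketch (explanatory, not in the original): the operation name selects the
-# paginator, e.g.
-#     users = list_iam_users_with_backoff(client, 'list_users', PathPrefix='/dev/')['Users']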
-
-
-def list_iam_users(connection, module):
-
- name = module.params.get('name')
- group = module.params.get('group')
- path = module.params.get('path')
-
- params = dict()
- iam_users = []
-
- if not group and not path:
- if name:
- params['UserName'] = name
- try:
- iam_users.append(connection.get_user(**params)['User'])
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name)
-
- if group:
- params['GroupName'] = group
- try:
- iam_users = list_iam_users_with_backoff(connection, 'get_group', **params)['Users']
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get IAM user info for group %s" % group)
- if name:
- iam_users = [user for user in iam_users if user['UserName'] == name]
-
- if path and not group:
- params['PathPrefix'] = path
- try:
- iam_users = list_iam_users_with_backoff(connection, 'list_users', **params)['Users']
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Couldn't get IAM user info for path %s" % path)
- if name:
- iam_users = [user for user in iam_users if user['UserName'] == name]
-
- module.exit_json(iam_users=[camel_dict_to_snake_dict(user) for user in iam_users])
-
-
-def main():
- argument_spec = dict(
- name=dict(),
- group=dict(),
- path=dict(default='/')
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- mutually_exclusive=[
- ['group', 'path']
- ],
- supports_check_mode=True
- )
-
- connection = module.client('iam')
-
- list_iam_users(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/kinesis_stream.py b/lib/ansible/modules/cloud/amazon/kinesis_stream.py
deleted file mode 100644
index c924616aad..0000000000
--- a/lib/ansible/modules/cloud/amazon/kinesis_stream.py
+++ /dev/null
@@ -1,1428 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: kinesis_stream
-short_description: Manage a Kinesis Stream.
-description:
- - Create or Delete a Kinesis Stream.
- - Update the retention period of a Kinesis Stream.
- - Update Tags on a Kinesis Stream.
- - Enable/disable server side encryption on a Kinesis Stream.
-version_added: "2.2"
-requirements: [ boto3 ]
-author: Allen Sanabria (@linuxdynasty)
-options:
- name:
- description:
- - The name of the Kinesis Stream you are managing.
- required: true
- type: str
- shards:
- description:
- - The number of shards you want to have with this stream.
-      - This is required when I(state=present).
- type: int
- retention_period:
- description:
- - The length of time (in hours) data records are accessible after they are added to
- the stream.
- - The default retention period is 24 hours and can not be less than 24 hours.
- - The maximum retention period is 168 hours.
- - The retention period can be modified during any point in time.
- type: int
- state:
- description:
- - Create or Delete the Kinesis Stream.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- wait:
- description:
- - Wait for operation to complete before returning.
- default: true
- type: bool
- wait_timeout:
- description:
- - How many seconds to wait for an operation to complete before timing out.
- default: 300
- type: int
- tags:
- description:
- - "A dictionary of resource tags of the form: C({ tag1: value1, tag2: value2 })."
- aliases: [ "resource_tags" ]
- type: dict
- encryption_state:
- description:
- - Enable or Disable encryption on the Kinesis Stream.
- choices: [ 'enabled', 'disabled' ]
- version_added: "2.5"
- type: str
- encryption_type:
- description:
- - The type of encryption.
- - Defaults to C(KMS)
- choices: ['KMS', 'NONE']
- version_added: "2.5"
- type: str
- key_id:
- description:
- - The GUID or alias for the KMS key.
- version_added: "2.5"
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Basic creation example:
-- name: Set up Kinesis Stream with 10 shards and wait for the stream to become ACTIVE
- kinesis_stream:
- name: test-stream
- shards: 10
- wait: yes
- wait_timeout: 600
- register: test_stream
-
-# Basic creation example with tags:
-- name: Set up Kinesis Stream with 10 shards, tag the environment, and wait for the stream to become ACTIVE
- kinesis_stream:
- name: test-stream
- shards: 10
- tags:
- Env: development
- wait: yes
- wait_timeout: 600
- register: test_stream
-
-# Basic creation example with tags and increase the retention period from the default 24 hours to 48 hours:
-- name: Set up Kinesis Stream with 10 shards, tag the environment, increase the retention period and wait for the stream to become ACTIVE
- kinesis_stream:
- name: test-stream
- retention_period: 48
- shards: 10
- tags:
- Env: development
- wait: yes
- wait_timeout: 600
- register: test_stream
-
-# Basic delete example:
-- name: Delete Kinesis Stream test-stream and wait for it to finish deleting.
- kinesis_stream:
- name: test-stream
- state: absent
- wait: yes
- wait_timeout: 600
- register: test_stream
-
-# Basic enable encryption example:
-- name: Encrypt Kinesis Stream test-stream.
- kinesis_stream:
- name: test-stream
- state: present
- encryption_state: enabled
- encryption_type: KMS
- key_id: alias/aws/kinesis
- wait: yes
- wait_timeout: 600
- register: test_stream
-
-# Basic disable encryption example:
-- name: Disable encryption on Kinesis Stream test-stream.
- kinesis_stream:
- name: test-stream
- state: present
- encryption_state: disabled
- encryption_type: KMS
- key_id: alias/aws/kinesis
- wait: yes
- wait_timeout: 600
- register: test_stream
-'''
-
-RETURN = '''
-stream_name:
- description: The name of the Kinesis Stream.
- returned: when state == present.
- type: str
- sample: "test-stream"
-stream_arn:
-  description: The Amazon Resource Name (ARN) of the stream.
- returned: when state == present.
- type: str
- sample: "arn:aws:kinesis:east-side:123456789:stream/test-stream"
-stream_status:
- description: The current state of the Kinesis Stream.
- returned: when state == present.
- type: str
- sample: "ACTIVE"
-retention_period_hours:
- description: Number of hours messages will be kept for a Kinesis Stream.
- returned: when state == present.
- type: int
- sample: 24
-tags:
- description: Dictionary containing all the tags associated with the Kinesis stream.
- returned: when state == present.
- type: dict
- sample: {
- "Name": "Splunk",
- "Env": "development"
- }
-'''
-
-import re
-import datetime
-import time
-from functools import reduce
-
-try:
- import botocore.exceptions
-except ImportError:
- pass # Taken care of by ec2.HAS_BOTO3
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import HAS_BOTO3, boto3_conn, ec2_argument_spec, get_aws_connection_info
-from ansible.module_utils._text import to_native
-
-
-def convert_to_lower(data):
-    """Recursively convert all CamelCase keys in a dict to snake_case.
-    Args:
-        data (dict): Dictionary with CamelCase keys,
-            e.g. FooBar becomes foo_bar.
-            If a value is of type datetime.datetime, it is converted to
-            an ISO 8601 string.
-
- Basic Usage:
- >>> test = {'FooBar': []}
- >>> test = convert_to_lower(test)
- {
- 'foo_bar': []
- }
-
- Returns:
- Dictionary
- """
- results = dict()
- if isinstance(data, dict):
- for key, val in data.items():
- key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower()
- if key[0] == '_':
- key = key[1:]
- if isinstance(val, datetime.datetime):
- results[key] = val.isoformat()
- elif isinstance(val, dict):
- results[key] = convert_to_lower(val)
- elif isinstance(val, list):
- converted = list()
- for item in val:
- converted.append(convert_to_lower(item))
- results[key] = converted
- else:
- results[key] = val
- return results
-
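The acronym handling in convert_to_lower is easy to misread, so here is an illustrative doctest of what the key conversion produces (placeholder values, not part of the original module):

    >>> convert_to_lower({'StreamARN': 'arn:...', 'RetentionPeriodHours': 24})
    {'stream_arn': 'arn:...', 'retention_period_hours': 24}

Runs of up to three capitals such as 'ARN' are prefixed with a single underscore and lowercased as one token, which is how describe_stream keys map onto the documented snake_case return values.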
-
-def make_tags_in_proper_format(tags):
- """Take a dictionary of tags and convert them into the AWS Tags format.
- Args:
- tags (list): The tags you want applied.
-
- Basic Usage:
- >>> tags = [{'Key': 'env', 'Value': 'development'}]
- >>> make_tags_in_proper_format(tags)
- {
- "env": "development",
- }
-
- Returns:
- Dict
- """
- formatted_tags = dict()
- for tag in tags:
- formatted_tags[tag.get('Key')] = tag.get('Value')
-
- return formatted_tags
-
-
-def make_tags_in_aws_format(tags):
- """Take a dictionary of tags and convert them into the AWS Tags format.
- Args:
- tags (dict): The tags you want applied.
-
- Basic Usage:
- >>> tags = {'env': 'development', 'service': 'web'}
- >>> make_tags_in_aws_format(tags)
- [
- {
- "Value": "web",
- "Key": "service"
- },
- {
- "Value": "development",
- "key": "env"
- }
- ]
-
- Returns:
- List
- """
- formatted_tags = list()
- for key, val in tags.items():
- formatted_tags.append({
- 'Key': key,
- 'Value': val
- })
-
- return formatted_tags
-
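The two tag helpers above are inverses of each other; an illustrative round trip with placeholder values:

    >>> make_tags_in_proper_format(make_tags_in_aws_format({'env': 'dev'}))
    {'env': 'dev'}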
-
-def get_tags(client, stream_name, check_mode=False):
- """Retrieve the tags for a Kinesis Stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): Name of the Kinesis stream.
-
- Kwargs:
- check_mode (bool): If True, do not call the AWS API; return stub data instead.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
- >>> get_tags(client, stream_name)
-
- Returns:
- Tuple (bool, str, dict)
- """
- err_msg = ''
- success = False
- params = {
- 'StreamName': stream_name,
- }
- results = dict()
- try:
- if not check_mode:
- results = (
- client.list_tags_for_stream(**params)['Tags']
- )
- else:
- results = [
- {
- 'Key': 'DryRunMode',
- 'Value': 'true'
- },
- ]
- success = True
- except botocore.exceptions.ClientError as e:
- err_msg = to_native(e)
-
- return success, err_msg, results
-
-
-def find_stream(client, stream_name, check_mode=False):
- """Retrieve a Kinesis Stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): Name of the Kinesis stream.
-
- Kwargs:
- check_mode (bool): If True, do not call the AWS API; return stub data instead.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
-
- Returns:
- Tuple (bool, str, dict)
- """
- err_msg = ''
- success = False
- params = {
- 'StreamName': stream_name,
- }
- results = dict()
- has_more_shards = True
- shards = list()
- try:
- if not check_mode:
- while has_more_shards:
- results = (
- client.describe_stream(**params)['StreamDescription']
- )
- shards.extend(results.pop('Shards'))
- has_more_shards = results['HasMoreShards']
- results['Shards'] = shards
- num_closed_shards = len([s for s in shards if 'EndingSequenceNumber' in s['SequenceNumberRange']])
- results['OpenShardsCount'] = len(shards) - num_closed_shards
- results['ClosedShardsCount'] = num_closed_shards
- results['ShardsCount'] = len(shards)
- else:
- results = {
- 'OpenShardsCount': 5,
- 'ClosedShardsCount': 0,
- 'ShardsCount': 5,
- 'HasMoreShards': True,
- 'RetentionPeriodHours': 24,
- 'StreamName': stream_name,
- 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/{0}'.format(stream_name),
- 'StreamStatus': 'ACTIVE',
- 'EncryptionType': 'NONE'
- }
- success = True
- except botocore.exceptions.ClientError as e:
- err_msg = to_native(e)
-
- return success, err_msg, results
-
-
-def wait_for_status(client, stream_name, status, wait_timeout=300,
- check_mode=False):
- """Wait for the status to change for a Kinesis Stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): The name of the kinesis stream.
- status (str): The status to wait for.
- examples: status='ACTIVE', status='DELETING'
-
- Kwargs:
- wait_timeout (int): Number of seconds to wait until this timeout is reached.
- check_mode (bool): If True, do not call the AWS API.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
- >>> wait_for_status(client, stream_name, 'ACTIVE', 300)
-
- Returns:
- Tuple (bool, str, dict)
- """
- polling_increment_secs = 5
- wait_timeout = time.time() + wait_timeout
- status_achieved = False
- stream = dict()
- err_msg = ""
-
- while wait_timeout > time.time():
- try:
- find_success, find_msg, stream = (
- find_stream(client, stream_name, check_mode=check_mode)
- )
- if check_mode:
- status_achieved = True
- break
-
- elif status != 'DELETING':
- if find_success and stream:
- if stream.get('StreamStatus') == status:
- status_achieved = True
- break
-
- else:
- if not find_success:
- status_achieved = True
- break
-
- except botocore.exceptions.ClientError as e:
- err_msg = to_native(e)
-
- time.sleep(polling_increment_secs)
-
- if not status_achieved:
- err_msg = "Wait time out reached, while waiting for results"
- else:
- err_msg = "Status {0} achieved successfully".format(status)
-
- return status_achieved, err_msg, stream
-
-
-def tags_action(client, stream_name, tags, action='create', check_mode=False):
- """Create or delete multiple tags from a Kinesis Stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): The name of the Kinesis stream.
- tags (dict): Dictionary of tags, e.g. {'env': 'development'}.
-
- Kwargs:
- action (str): The action to perform.
- valid actions == create and delete
- default=create
- check_mode (bool): If True, do not call the AWS API.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
- >>> tags = {'env': 'development'}
- >>> tags_action(client, stream_name, tags, action='create')
- (True, '')
-
- Returns:
- Tuple (bool, str)
- """
- success = False
- err_msg = ""
- params = {'StreamName': stream_name}
- try:
- if not check_mode:
- if action == 'create':
- params['Tags'] = tags
- client.add_tags_to_stream(**params)
- success = True
- elif action == 'delete':
- params['TagKeys'] = list(tags)
- client.remove_tags_from_stream(**params)
- success = True
- else:
- err_msg = 'Invalid action {0}'.format(action)
- else:
- if action == 'create':
- success = True
- elif action == 'delete':
- success = True
- else:
- err_msg = 'Invalid action {0}'.format(action)
-
- except botocore.exceptions.ClientError as e:
- err_msg = to_native(e)
-
- return success, err_msg
-
-
-def recreate_tags_from_list(list_of_tags):
- """Recreate tags from a list of tuples into the Amazon Tag format.
- Args:
- list_of_tags (list): List of tuples.
-
- Basic Usage:
- >>> list_of_tags = [('Env', 'Development')]
- >>> recreate_tags_from_list(list_of_tags)
- [
- {
- "Value": "Development",
- "Key": "Env"
- }
- ]
-
- Returns:
- List
- """
- tags = list()
- for key_name, key_val in list_of_tags:
- tags.append(
- {
- 'Key': key_name,
- 'Value': key_val
- }
- )
- return tags
-
-
-def update_tags(client, stream_name, tags, check_mode=False):
- """Update tags for an amazon resource.
- Args:
- resource_id (str): The Amazon resource id.
- tags (dict): Dictionary of tags you want applied to the Kinesis stream.
-
- Kwargs:
- check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('ec2')
- >>> stream_name = 'test-stream'
- >>> tags = {'env': 'development'}
- >>> update_tags(client, stream_name, tags)
- [True, '']
-
- Return:
- Tuple (bool, str)
- """
- success = False
- changed = False
- err_msg = ''
- tag_success, tag_msg, current_tags = (
- get_tags(client, stream_name, check_mode=check_mode)
- )
- if current_tags:
- tags = make_tags_in_aws_format(tags)
- current_tags_set = (
- set(
- reduce(
- lambda x, y: x + y,
- [make_tags_in_proper_format(current_tags).items()]
- )
- )
- )
-
- new_tags_set = (
- set(
- reduce(
- lambda x, y: x + y,
- [make_tags_in_proper_format(tags).items()]
- )
- )
- )
- tags_to_delete = list(current_tags_set.difference(new_tags_set))
- tags_to_update = list(new_tags_set.difference(current_tags_set))
- if tags_to_delete:
- tags_to_delete = make_tags_in_proper_format(
- recreate_tags_from_list(tags_to_delete)
- )
- delete_success, delete_msg = (
- tags_action(
- client, stream_name, tags_to_delete, action='delete',
- check_mode=check_mode
- )
- )
- if not delete_success:
- return delete_success, changed, delete_msg
- if tags_to_update:
- tags = make_tags_in_proper_format(
- recreate_tags_from_list(tags_to_update)
- )
- else:
- return True, changed, 'Tags do not need to be updated'
-
- if tags:
- create_success, create_msg = (
- tags_action(
- client, stream_name, tags, action='create',
- check_mode=check_mode
- )
- )
- if create_success:
- changed = True
- return create_success, changed, create_msg
-
- return success, changed, err_msg
-
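The reduce/set plumbing in update_tags computes a symmetric difference over (key, value) pairs. A minimal sketch of the equivalent computation on plain dicts, using a hypothetical helper name for clarity only:

    def diff_tags(current, desired):
        # pairs present on the stream but not desired exactly as-is -> delete
        to_delete = {k: v for k, v in current.items() if desired.get(k) != v}
        # pairs desired but not already present -> create / overwrite
        to_update = {k: v for k, v in desired.items() if current.get(k) != v}
        return to_delete, to_update

A changed value lands in both dicts, which matches the delete-then-re-add behaviour of update_tags.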
-
-def stream_action(client, stream_name, shard_count=1, action='create',
- timeout=300, check_mode=False):
- """Create or Delete an Amazon Kinesis Stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): The name of the kinesis stream.
-
- Kwargs:
- shard_count (int): Number of shards this stream will use.
- action (str): The action to perform.
- valid actions == create and delete
- default=create
- check_mode (bool): If True, do not call the AWS API.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
- >>> shard_count = 20
- >>> stream_action(client, stream_name, shard_count, action='create')
-
- Returns:
- Tuple (bool, str)
- """
- success = False
- err_msg = ''
- params = {
- 'StreamName': stream_name
- }
- try:
- if not check_mode:
- if action == 'create':
- params['ShardCount'] = shard_count
- client.create_stream(**params)
- success = True
- elif action == 'delete':
- client.delete_stream(**params)
- success = True
- else:
- err_msg = 'Invalid action {0}'.format(action)
- else:
- if action == 'create':
- success = True
- elif action == 'delete':
- success = True
- else:
- err_msg = 'Invalid action {0}'.format(action)
-
- except botocore.exceptions.ClientError as e:
- err_msg = to_native(e)
-
- return success, err_msg
-
-
-def stream_encryption_action(client, stream_name, action='start_encryption', encryption_type='', key_id='',
- timeout=300, check_mode=False):
- """Create, Encrypt or Delete an Amazon Kinesis Stream.
- Args:
- client (botocore.client.EC2): Boto3 client.
- stream_name (str): The name of the kinesis stream.
-
- Kwargs:
- shard_count (int): Number of shards this stream will use.
- action (str): The action to perform.
- valid actions == create and delete
- default=create
- encryption_type (str): NONE or KMS
- key_id (str): The GUID or alias for the KMS key
- check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
- >>> shard_count = 20
- >>> stream_action(client, stream_name, shard_count, action='create', encryption_type='KMS',key_id='alias/aws')
-
- Returns:
- List (bool, str)
- """
- success = False
- err_msg = ''
- params = {
- 'StreamName': stream_name
- }
- try:
- if not check_mode:
- if action == 'start_encryption':
- params['EncryptionType'] = encryption_type
- params['KeyId'] = key_id
- client.start_stream_encryption(**params)
- success = True
- elif action == 'stop_encryption':
- params['EncryptionType'] = encryption_type
- params['KeyId'] = key_id
- client.stop_stream_encryption(**params)
- success = True
- else:
- err_msg = 'Invalid encryption action {0}'.format(action)
- else:
- if action == 'start_encryption':
- success = True
- elif action == 'stop_encryption':
- success = True
- else:
- err_msg = 'Invalid encryption action {0}'.format(action)
-
- except botocore.exceptions.ClientError as e:
- err_msg = to_native(e)
-
- return success, err_msg
-
-
-def retention_action(client, stream_name, retention_period=24,
- action='increase', check_mode=False):
- """Increase or Decrease the retention of messages in the Kinesis stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): The name of the kinesis stream.
-
- Kwargs:
- retention_period (int): This is how long messages will be kept before
- they are discarded. This can not be less than 24 hours.
- action (str): The action to perform.
- valid actions == increase and decrease
- default=increase
- check_mode (bool): If True, do not call the AWS API.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
- >>> retention_period = 48
- >>> retention_action(client, stream_name, retention_period, action='increase')
-
- Returns:
- Tuple (bool, str)
- """
- success = False
- err_msg = ''
- params = {
- 'StreamName': stream_name
- }
- try:
- if not check_mode:
- if action == 'increase':
- params['RetentionPeriodHours'] = retention_period
- client.increase_stream_retention_period(**params)
- success = True
- err_msg = (
- 'Retention Period increased successfully to {0}'.format(retention_period)
- )
- elif action == 'decrease':
- params['RetentionPeriodHours'] = retention_period
- client.decrease_stream_retention_period(**params)
- success = True
- err_msg = (
- 'Retention Period decreased successfully to {0}'.format(retention_period)
- )
- else:
- err_msg = 'Invalid action {0}'.format(action)
- else:
- if action == 'increase':
- success = True
- elif action == 'decrease':
- success = True
- else:
- err_msg = 'Invalid action {0}'.format(action)
-
- except botocore.exceptions.ClientError as e:
- err_msg = to_native(e)
-
- return success, err_msg
-
-
-def update_shard_count(client, stream_name, number_of_shards=1, check_mode=False):
- """Increase or Decrease the number of shards in the Kinesis stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): The name of the kinesis stream.
-
- Kwargs:
- number_of_shards (int): Number of shards this stream will use.
- default=1
- check_mode (bool): If True, do not call the AWS API.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
- >>> number_of_shards = 3
- >>> update_shard_count(client, stream_name, number_of_shards)
-
- Returns:
- Tuple (bool, str)
- """
- success = True
- err_msg = ''
- params = {
- 'StreamName': stream_name,
- 'ScalingType': 'UNIFORM_SCALING'
- }
- if not check_mode:
- params['TargetShardCount'] = number_of_shards
- try:
- client.update_shard_count(**params)
- except botocore.exceptions.ClientError as e:
- return False, str(e)
-
- return success, err_msg
-
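Note that update_shard_count issues a single UpdateShardCount call. AWS enforces service-side resharding limits (at the time this module was written, roughly: no more than doubling or halving the open shard count per call, and a capped number of reshards per rolling 24-hour window), so large jumps can fail. A hedged sketch of stepping toward a distant target, assuming the doubling/halving limit applies (hypothetical helper, not part of the module):

    def step_toward_shard_count(client, stream_name, current, target):
        # illustrative only: clamp each call to the assumed 2x-up / 0.5x-down limit
        while current != target:
            if target > current:
                nxt = min(target, current * 2)
            else:
                nxt = max(target, (current + 1) // 2)  # ceil(current / 2)
            client.update_shard_count(StreamName=stream_name,
                                      TargetShardCount=nxt,
                                      ScalingType='UNIFORM_SCALING')
            current = nxt  # real code would wait for the stream to return to ACTIVE here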
-
-def update(client, current_stream, stream_name, number_of_shards=1, retention_period=None,
- tags=None, wait=False, wait_timeout=300, check_mode=False):
- """Update an Amazon Kinesis Stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): The name of the kinesis stream.
-
- Kwargs:
- number_of_shards (int): Number of shards this stream will use.
- default=1
- retention_period (int): This is how long messages will be kept before
- they are discarded. This can not be less than 24 hours.
- tags (dict): The tags you want applied.
- wait (bool): Wait until Stream is ACTIVE.
- default=False
- wait_timeout (int): How long to wait until this operation is considered failed.
- default=300
- check_mode (bool): If True, do not call the AWS API.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> current_stream = {
- 'ShardCount': 3,
- 'HasMoreShards': True,
- 'RetentionPeriodHours': 24,
- 'StreamName': 'test-stream',
- 'StreamARN': 'arn:aws:kinesis:us-west-2:123456789:stream/test-stream',
- 'StreamStatus': "ACTIVE'
- }
- >>> stream_name = 'test-stream'
- >>> retention_period = 48
- >>> number_of_shards = 10
- >>> update(client, current_stream, stream_name,
- number_of_shards, retention_period )
-
- Returns:
- Tuple (bool, bool, str)
- """
- success = True
- changed = False
- err_msg = ''
- if retention_period:
- if wait:
- wait_success, wait_msg, current_stream = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
- )
- if not wait_success:
- return wait_success, False, wait_msg
-
- if current_stream.get('StreamStatus') == 'ACTIVE':
- retention_changed = False
- if retention_period > current_stream['RetentionPeriodHours']:
- retention_changed, retention_msg = (
- retention_action(
- client, stream_name, retention_period, action='increase',
- check_mode=check_mode
- )
- )
-
- elif retention_period < current_stream['RetentionPeriodHours']:
- retention_changed, retention_msg = (
- retention_action(
- client, stream_name, retention_period, action='decrease',
- check_mode=check_mode
- )
- )
-
- elif retention_period == current_stream['RetentionPeriodHours']:
- retention_msg = (
- 'Retention {0} is the same as {1}'
- .format(
- retention_period,
- current_stream['RetentionPeriodHours']
- )
- )
- success = True
-
- if retention_changed:
- success = True
- changed = True
-
- err_msg = retention_msg
- if changed and wait:
- wait_success, wait_msg, current_stream = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
- )
- if not wait_success:
- return wait_success, False, wait_msg
- elif changed and not wait:
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name, check_mode=check_mode)
- )
- if stream_found:
- if current_stream['StreamStatus'] != 'ACTIVE':
- err_msg = (
- 'Retention Period for {0} is in the process of updating'
- .format(stream_name)
- )
- return success, changed, err_msg
- else:
- err_msg = (
- 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
- .format(current_stream.get('StreamStatus', 'UNKNOWN'))
- )
- return success, changed, err_msg
-
- if current_stream['OpenShardsCount'] != number_of_shards:
- success, err_msg = (
- update_shard_count(client, stream_name, number_of_shards, check_mode=check_mode)
- )
-
- if not success:
- return success, changed, err_msg
-
- changed = True
-
- if wait:
- wait_success, wait_msg, current_stream = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
- )
- if not wait_success:
- return wait_success, changed, wait_msg
- else:
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name, check_mode=check_mode)
- )
- if stream_found and current_stream['StreamStatus'] != 'ACTIVE':
- err_msg = (
- 'Number of shards for {0} is in the process of updating'
- .format(stream_name)
- )
- return success, changed, err_msg
-
- if tags:
- tag_success, tag_changed, err_msg = (
- update_tags(client, stream_name, tags, check_mode=check_mode)
- )
- if wait:
- success, err_msg, status_stream = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
- )
- if success and changed:
- err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name)
- elif success and not changed:
- err_msg = 'Kinesis Stream {0} did not change.'.format(stream_name)
-
- return success, changed, err_msg
-
-
-def create_stream(client, stream_name, number_of_shards=1, retention_period=None,
- tags=None, wait=False, wait_timeout=300, check_mode=False):
- """Create an Amazon Kinesis Stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): The name of the kinesis stream.
-
- Kwargs:
- number_of_shards (int): Number of shards this stream will use.
- default=1
- retention_period (int): Can not be less than 24 hours
- default=None
- tags (dict): The tags you want applied.
- default=None
- wait (bool): Wait until Stream is ACTIVE.
- default=False
- wait_timeout (int): How long to wait until this operation is considered failed.
- default=300
- check_mode (bool): If True, do not call the AWS API.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
- >>> number_of_shards = 10
- >>> tags = {'env': 'test'}
- >>> create_stream(client, stream_name, number_of_shards, tags=tags)
-
- Returns:
- Tuple (bool, bool, str, dict)
- """
- success = False
- changed = False
- err_msg = ''
- results = dict()
-
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name, check_mode=check_mode)
- )
-
- if stream_found and current_stream.get('StreamStatus') == 'DELETING' and wait:
- wait_success, wait_msg, current_stream = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
- )
-
- if stream_found and current_stream.get('StreamStatus') != 'DELETING':
- success, changed, err_msg = update(
- client, current_stream, stream_name, number_of_shards,
- retention_period, tags, wait, wait_timeout, check_mode=check_mode
- )
- else:
- create_success, create_msg = (
- stream_action(
- client, stream_name, number_of_shards, action='create',
- check_mode=check_mode
- )
- )
- if not create_success:
- changed = True
- err_msg = 'Failed to create Kinesis stream: {0}'.format(create_msg)
- return False, True, err_msg, {}
- else:
- changed = True
- if wait:
- wait_success, wait_msg, results = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
- )
- err_msg = (
- 'Kinesis Stream {0} is in the process of being created'
- .format(stream_name)
- )
- if not wait_success:
- return wait_success, True, wait_msg, results
- else:
- err_msg = (
- 'Kinesis Stream {0} created successfully'
- .format(stream_name)
- )
-
- if tags:
- changed, err_msg = (
- tags_action(
- client, stream_name, tags, action='create',
- check_mode=check_mode
- )
- )
- if changed:
- success = True
- if not success:
- return success, changed, err_msg, results
-
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name, check_mode=check_mode)
- )
- if retention_period and current_stream.get('StreamStatus') == 'ACTIVE':
- changed, err_msg = (
- retention_action(
- client, stream_name, retention_period, action='increase',
- check_mode=check_mode
- )
- )
- if changed:
- success = True
- if not success:
- return success, changed, err_msg, results
- else:
- err_msg = (
- 'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
- .format(current_stream.get('StreamStatus', 'UNKNOWN'))
- )
- success = create_success
- changed = True
-
- if success:
- stream_found, stream_msg, results = (
- find_stream(client, stream_name, check_mode=check_mode)
- )
- tag_success, tag_msg, current_tags = (
- get_tags(client, stream_name, check_mode=check_mode)
- )
- if current_tags and not check_mode:
- current_tags = make_tags_in_proper_format(current_tags)
- results['Tags'] = current_tags
- elif check_mode and tags:
- results['Tags'] = tags
- else:
- results['Tags'] = dict()
- results = convert_to_lower(results)
-
- return success, changed, err_msg, results
-
-
-def delete_stream(client, stream_name, wait=False, wait_timeout=300,
- check_mode=False):
- """Delete an Amazon Kinesis Stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): The name of the kinesis stream.
-
- Kwargs:
- wait (bool): Wait until the Stream has finished deleting.
- default=False
- wait_timeout (int): How long to wait until this operation is considered failed.
- default=300
- check_mode (bool): If True, do not call the AWS API.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
- >>> delete_stream(client, stream_name)
-
- Returns:
- Tuple (bool, bool, str, dict)
- """
- success = False
- changed = False
- err_msg = ''
- results = dict()
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name, check_mode=check_mode)
- )
- if stream_found:
- success, err_msg = (
- stream_action(
- client, stream_name, action='delete', check_mode=check_mode
- )
- )
- if success:
- changed = True
- if wait:
- success, err_msg, results = (
- wait_for_status(
- client, stream_name, 'DELETING', wait_timeout,
- check_mode=check_mode
- )
- )
- err_msg = 'Stream {0} deleted successfully'.format(stream_name)
- if not success:
- return success, True, err_msg, results
- else:
- err_msg = (
- 'Stream {0} is in the process of being deleted'
- .format(stream_name)
- )
- else:
- success = True
- changed = False
- err_msg = 'Stream {0} does not exist'.format(stream_name)
-
- return success, changed, err_msg, results
-
-
-def start_stream_encryption(client, stream_name, encryption_type='', key_id='',
- wait=False, wait_timeout=300, check_mode=False):
- """Start encryption on an Amazon Kinesis Stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): The name of the kinesis stream.
-
- Kwargs:
- encryption_type (str): KMS or NONE
- key_id (str): KMS key GUID or alias
- wait (bool): Wait until Stream is ACTIVE.
- default=False
- wait_timeout (int): How long to wait until this operation is considered failed.
- default=300
- check_mode (bool): If True, do not call the AWS API.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
- >>> key_id = 'alias/aws'
- >>> encryption_type = 'KMS'
- >>> start_stream_encryption(client, stream_name, encryption_type, key_id)
-
- Returns:
- Tuple (bool, bool, str, dict)
- """
- success = False
- changed = False
- err_msg = ''
- params = {
- 'StreamName': stream_name
- }
-
- results = dict()
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name, check_mode=check_mode)
- )
- if stream_found:
- success, err_msg = (
- stream_encryption_action(
- client, stream_name, action='start_encryption', encryption_type=encryption_type, key_id=key_id, check_mode=check_mode
- )
- )
- if success:
- changed = True
- if wait:
- success, err_msg, results = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
- )
- err_msg = 'Kinesis Stream {0} encryption started successfully.'.format(stream_name)
- if not success:
- return success, True, err_msg, results
- else:
- err_msg = (
- 'Kinesis Stream {0} is in the process of starting encryption.'.format(stream_name)
- )
- else:
- success = True
- changed = False
- err_msg = 'Kinesis Stream {0} does not exist'.format(stream_name)
-
- return success, changed, err_msg, results
-
-
-def stop_stream_encryption(client, stream_name, encryption_type='', key_id='',
- wait=True, wait_timeout=300, check_mode=False):
- """Stop encryption on an Amazon Kinesis Stream.
- Args:
- client (botocore.client.Kinesis): Boto3 Kinesis client.
- stream_name (str): The name of the kinesis stream.
-
- Kwargs:
- encryption_type (str): KMS or NONE
- key_id (str): KMS key GUID or alias
- wait (bool): Wait until Stream is ACTIVE.
- default=True
- wait_timeout (int): How long to wait until this operation is considered failed.
- default=300
- check_mode (bool): If True, do not call the AWS API.
- default=False
-
- Basic Usage:
- >>> client = boto3.client('kinesis')
- >>> stream_name = 'test-stream'
- >>> stop_stream_encryption(client, stream_name, encryption_type, key_id)
-
- Returns:
- Tuple (bool, bool, str, dict)
- """
- success = False
- changed = False
- err_msg = ''
- params = {
- 'StreamName': stream_name
- }
-
- results = dict()
- stream_found, stream_msg, current_stream = (
- find_stream(client, stream_name, check_mode=check_mode)
- )
- if stream_found:
- if current_stream.get('EncryptionType') == 'KMS':
- success, err_msg = (
- stream_encryption_action(
- client, stream_name, action='stop_encryption', key_id=key_id, encryption_type=encryption_type, check_mode=check_mode
- )
- )
- elif current_stream.get('EncryptionType') == 'NONE':
- success = True
-
- if success:
- changed = True
- if wait:
- success, err_msg, results = (
- wait_for_status(
- client, stream_name, 'ACTIVE', wait_timeout,
- check_mode=check_mode
- )
- )
- err_msg = 'Kinesis Stream {0} encryption stopped successfully.'.format(stream_name)
- if not success:
- return success, True, err_msg, results
- else:
- err_msg = (
- 'Stream {0} is in the process of stopping encryption.'.format(stream_name)
- )
- else:
- success = True
- changed = False
- err_msg = 'Stream {0} does not exist.'.format(stream_name)
-
- return success, changed, err_msg, results
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- name=dict(required=True),
- shards=dict(default=None, required=False, type='int'),
- retention_period=dict(default=None, required=False, type='int'),
- tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
- wait=dict(default=True, required=False, type='bool'),
- wait_timeout=dict(default=300, required=False, type='int'),
- state=dict(default='present', choices=['present', 'absent']),
- encryption_type=dict(required=False, choices=['NONE', 'KMS']),
- key_id=dict(required=False, type='str'),
- encryption_state=dict(required=False, choices=['enabled', 'disabled']),
- )
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- retention_period = module.params.get('retention_period')
- stream_name = module.params.get('name')
- shards = module.params.get('shards')
- state = module.params.get('state')
- tags = module.params.get('tags')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- encryption_type = module.params.get('encryption_type')
- key_id = module.params.get('key_id')
- encryption_state = module.params.get('encryption_state')
-
- if state == 'present' and not shards:
- module.fail_json(msg='Shards is required when state == present.')
-
- if retention_period:
- if retention_period < 24:
- module.fail_json(msg='Retention period can not be less than 24 hours.')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required.')
-
- check_mode = module.check_mode
- try:
- region, ec2_url, aws_connect_kwargs = (
- get_aws_connection_info(module, boto3=True)
- )
- client = (
- boto3_conn(
- module, conn_type='client', resource='kinesis',
- region=region, endpoint=ec2_url, **aws_connect_kwargs
- )
- )
- except botocore.exceptions.ClientError as e:
- err_msg = 'Boto3 Client Error - {0}'.format(to_native(e.msg))
- module.fail_json(
- success=False, changed=False, result={}, msg=err_msg
- )
-
- if state == 'present':
- success, changed, err_msg, results = (
- create_stream(
- client, stream_name, shards, retention_period, tags,
- wait, wait_timeout, check_mode
- )
- )
- if encryption_state == 'enabled':
- success, changed, err_msg, results = (
- start_stream_encryption(
- client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
- )
- )
- elif encryption_state == 'disabled':
- success, changed, err_msg, results = (
- stop_stream_encryption(
- client, stream_name, encryption_type, key_id, wait, wait_timeout, check_mode
- )
- )
- elif state == 'absent':
- success, changed, err_msg, results = (
- delete_stream(client, stream_name, wait, wait_timeout, check_mode)
- )
-
- if success:
- module.exit_json(
- success=success, changed=changed, msg=err_msg, **results
- )
- else:
- module.fail_json(
- success=success, changed=changed, msg=err_msg, result=results
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/lambda.py b/lib/ansible/modules/cloud/amazon/lambda.py
deleted file mode 100644
index 800d81cc15..0000000000
--- a/lib/ansible/modules/cloud/amazon/lambda.py
+++ /dev/null
@@ -1,628 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: lambda
-short_description: Manage AWS Lambda functions
-description:
- - Allows for the management of Lambda functions.
-version_added: '2.2'
-requirements: [ boto3 ]
-options:
- name:
- description:
- - The name you want to assign to the function you are uploading. Cannot be changed.
- required: true
- type: str
- state:
- description:
- - Create or delete Lambda function.
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- runtime:
- description:
- - The runtime environment for the Lambda function you are uploading.
- - Required when creating a function. Uses parameters as described in boto3 docs.
- - Required when I(state=present).
- - For supported list of runtimes, see U(https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html).
- type: str
- role:
- description:
- - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
- resources. You may use the bare ARN if the role belongs to the same AWS account.
- - Required when I(state=present).
- type: str
- handler:
- description:
- - The function within your code that Lambda calls to begin execution.
- type: str
- zip_file:
- description:
- - A .zip file containing your deployment package
- - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present.
- aliases: [ 'src' ]
- type: str
- s3_bucket:
- description:
- - Amazon S3 bucket name where the .zip file containing your deployment package is stored.
- - If I(state=present) then either I(zip_file) or I(s3_bucket) must be present.
- - I(s3_bucket) and I(s3_key) are required together.
- type: str
- s3_key:
- description:
- - The Amazon S3 object (the deployment package) key name you want to upload.
- - I(s3_bucket) and I(s3_key) are required together.
- type: str
- s3_object_version:
- description:
- - The Amazon S3 object (the deployment package) version you want to upload.
- type: str
- description:
- description:
- - A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
- type: str
- timeout:
- description:
- - The function maximum execution time in seconds after which Lambda should terminate the function.
- default: 3
- type: int
- memory_size:
- description:
- - The amount of memory, in MB, your Lambda function is given.
- default: 128
- type: int
- vpc_subnet_ids:
- description:
- - List of subnet IDs to run Lambda function in.
- - Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC.
- - If set, I(vpc_security_group_ids) must also be set.
- type: list
- elements: str
- vpc_security_group_ids:
- description:
- - List of VPC security group IDs to associate with the Lambda function.
- - Required when I(vpc_subnet_ids) is used.
- type: list
- elements: str
- environment_variables:
- description:
- - A dictionary of environment variables the Lambda function is given.
- version_added: "2.3"
- type: dict
- dead_letter_arn:
- description:
- - The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
- version_added: "2.3"
- type: str
- tracing_mode:
- description:
- - Set mode to 'Active' to sample and trace incoming requests with AWS X-Ray. Turned off (set to 'PassThrough') by default.
- choices: ['Active', 'PassThrough']
- version_added: "2.10"
- type: str
- tags:
- description:
- - Tag dict to apply to the function (requires botocore 1.5.40 or above).
- version_added: "2.5"
- type: dict
-author:
- - 'Steyn Huizinga (@steynovich)'
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Create Lambda functions
-- name: looped creation
- lambda:
- name: '{{ item.name }}'
- state: present
- zip_file: '{{ item.zip_file }}'
- runtime: 'python2.7'
- role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
- handler: 'hello_python.my_handler'
- vpc_subnet_ids:
- - subnet-123abcde
- - subnet-edcba321
- vpc_security_group_ids:
- - sg-123abcde
- - sg-edcba321
- environment_variables: '{{ item.env_vars }}'
- tags:
- key1: 'value1'
- loop:
- - name: HelloWorld
- zip_file: hello-code.zip
- env_vars:
- key1: "first"
- key2: "second"
- - name: ByeBye
- zip_file: bye-code.zip
- env_vars:
- key1: "1"
- key2: "2"
-
-# To remove previously added tags pass an empty dict
-- name: remove tags
- lambda:
- name: 'Lambda function'
- state: present
- zip_file: 'code.zip'
- runtime: 'python2.7'
- role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
- handler: 'hello_python.my_handler'
- tags: {}
-
-# Basic Lambda function deletion
-- name: Delete Lambda functions HelloWorld and ByeBye
- lambda:
- name: '{{ item }}'
- state: absent
- loop:
- - HelloWorld
- - ByeBye
-'''
-
-RETURN = '''
-code:
- description: the lambda function location returned by get_function in boto3
- returned: success
- type: dict
- sample:
- {
- 'location': 'a presigned S3 URL',
- 'repository_type': 'S3',
- }
-configuration:
- description: the lambda function metadata returned by get_function in boto3
- returned: success
- type: dict
- sample:
- {
- 'code_sha256': 'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU=',
- 'code_size': 123,
- 'description': 'My function',
- 'environment': {
- 'variables': {
- 'key': 'value'
- }
- },
- 'function_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1',
- 'function_name': 'myFunction',
- 'handler': 'index.handler',
- 'last_modified': '2017-08-01T00:00:00.000+0000',
- 'memory_size': 128,
- 'revision_id': 'a2x9886d-d48a-4a0c-ab64-82abc005x80c',
- 'role': 'arn:aws:iam::123456789012:role/lambda_basic_execution',
- 'runtime': 'nodejs6.10',
- 'tracing_config': { 'mode': 'Active' },
- 'timeout': 3,
- 'version': '1',
- 'vpc_config': {
- 'security_group_ids': [],
- 'subnet_ids': [],
- 'vpc_id': '123'
- }
- }
-'''
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
-from ansible.module_utils.ec2 import compare_aws_tags
-import base64
-import hashlib
-import traceback
-import re
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError, ValidationError, ParamValidationError
-except ImportError:
- pass # protected by AnsibleAWSModule
-
-
-def get_account_info(module, region=None, endpoint=None, **aws_connect_kwargs):
- """return the account information (account id and partition) we are currently working on
-
- get_account_info tries to find out the account that we are working
- on. It's not guaranteed that this will be easy, so we try several
- different ways. Giving either IAM or STS privileges to the account
- should be enough to permit this.
- """
- account_id = None
- partition = None
- try:
- sts_client = boto3_conn(module, conn_type='client', resource='sts',
- region=region, endpoint=endpoint, **aws_connect_kwargs)
- caller_id = sts_client.get_caller_identity()
- account_id = caller_id.get('Account')
- partition = caller_id.get('Arn').split(':')[1]
- except ClientError:
- try:
- iam_client = boto3_conn(module, conn_type='client', resource='iam',
- region=region, endpoint=endpoint, **aws_connect_kwargs)
- arn, partition, service, reg, account_id, resource = iam_client.get_user()['User']['Arn'].split(':')
- except ClientError as e:
- if (e.response['Error']['Code'] == 'AccessDenied'):
- # str has no .search method and Python 3 exceptions have no .message;
- # use re.search on the stringified exception instead.
- except_msg = to_native(e)
- m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg)
- if m:
- account_id = m.group(4)
- partition = m.group(1)
- if account_id is None:
- module.fail_json_aws(e, msg="getting account information")
- if partition is None:
- module.fail_json_aws(e, msg="getting account information: partition")
- except Exception as e:
- module.fail_json_aws(e, msg="getting account information")
-
- return account_id, partition
-
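Each lookup above relies on the standard ARN layout, arn:partition:service:region:account-id:resource; an illustrative split with placeholder values:

    >>> caller_arn = 'arn:aws:sts::123456789012:assumed-role/deploy/ansible'
    >>> caller_arn.split(':')[1], caller_arn.split(':')[4]
    ('aws', '123456789012')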
-
-def get_current_function(connection, function_name, qualifier=None):
- try:
- if qualifier is not None:
- return connection.get_function(FunctionName=function_name, Qualifier=qualifier)
- return connection.get_function(FunctionName=function_name)
- except ClientError as e:
- try:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- return None
- except (KeyError, AttributeError):
- pass
- raise e
-
-
-def sha256sum(filename):
- hasher = hashlib.sha256()
- with open(filename, 'rb') as f:
- hasher.update(f.read())
-
- code_hash = hasher.digest()
- code_b64 = base64.b64encode(code_hash)
- hex_digest = code_b64.decode('utf-8')
-
- return hex_digest
-
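Despite the hex_digest variable name, sha256sum returns the base64-encoded SHA-256 digest; that is the same format Lambda reports in CodeSha256, which is what makes the later local/remote comparison work. Illustrative output for a hypothetical package (value taken from the sample in RETURN):

    >>> sha256sum('lambda.zip')
    'zOAGfF5JLFuzZoSNirUtOrQp+S341IOA3BcoXXoaIaU='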
-
-def set_tag(client, module, tags, function):
- if not hasattr(client, "list_tags"):
- module.fail_json(msg="Using tags requires botocore 1.5.40 or above")
-
- changed = False
- arn = function['Configuration']['FunctionArn']
-
- try:
- current_tags = client.list_tags(Resource=arn).get('Tags', {})
- except ClientError as e:
- module.fail_json(msg="Unable to list tags: {0}".format(to_native(e)),
- exception=traceback.format_exc())
-
- tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=True)
-
- try:
- if tags_to_remove:
- client.untag_resource(
- Resource=arn,
- TagKeys=tags_to_remove
- )
- changed = True
-
- if tags_to_add:
- client.tag_resource(
- Resource=arn,
- Tags=tags_to_add
- )
- changed = True
-
- except ClientError as e:
- module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
- to_native(e)), exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- except BotoCoreError as e:
- module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
- to_native(e)), exception=traceback.format_exc())
-
- return changed
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- runtime=dict(),
- role=dict(),
- handler=dict(),
- zip_file=dict(aliases=['src']),
- s3_bucket=dict(),
- s3_key=dict(),
- s3_object_version=dict(),
- description=dict(default=''),
- timeout=dict(type='int', default=3),
- memory_size=dict(type='int', default=128),
- vpc_subnet_ids=dict(type='list'),
- vpc_security_group_ids=dict(type='list'),
- environment_variables=dict(type='dict'),
- dead_letter_arn=dict(),
- tracing_mode=dict(choices=['Active', 'PassThrough']),
- tags=dict(type='dict'),
- )
-
- mutually_exclusive = [['zip_file', 's3_key'],
- ['zip_file', 's3_bucket'],
- ['zip_file', 's3_object_version']]
-
- required_together = [['s3_key', 's3_bucket'],
- ['vpc_subnet_ids', 'vpc_security_group_ids']]
-
- required_if = [['state', 'present', ['runtime', 'handler', 'role']]]
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=mutually_exclusive,
- required_together=required_together,
- required_if=required_if)
-
- name = module.params.get('name')
- state = module.params.get('state').lower()
- runtime = module.params.get('runtime')
- role = module.params.get('role')
- handler = module.params.get('handler')
- s3_bucket = module.params.get('s3_bucket')
- s3_key = module.params.get('s3_key')
- s3_object_version = module.params.get('s3_object_version')
- zip_file = module.params.get('zip_file')
- description = module.params.get('description')
- timeout = module.params.get('timeout')
- memory_size = module.params.get('memory_size')
- vpc_subnet_ids = module.params.get('vpc_subnet_ids')
- vpc_security_group_ids = module.params.get('vpc_security_group_ids')
- environment_variables = module.params.get('environment_variables')
- dead_letter_arn = module.params.get('dead_letter_arn')
- tracing_mode = module.params.get('tracing_mode')
- tags = module.params.get('tags')
-
- check_mode = module.check_mode
- changed = False
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- if not region:
- module.fail_json(msg='region must be specified')
-
- try:
- client = boto3_conn(module, conn_type='client', resource='lambda',
- region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except (ClientError, ValidationError) as e:
- module.fail_json_aws(e, msg="Trying to connect to AWS")
-
- if state == 'present':
- if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role):
- role_arn = role
- else:
- # get account ID and assemble ARN
- account_id, partition = get_account_info(module, region=region, endpoint=ec2_url, **aws_connect_kwargs)
- role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role)
-
- # Get function configuration if present, None otherwise
- current_function = get_current_function(client, name)
-
- # Update existing Lambda function
- if state == 'present' and current_function:
-
- # Get current state
- current_config = current_function['Configuration']
- current_version = None
-
- # Update function configuration
- func_kwargs = {'FunctionName': name}
-
- # Update configuration if needed
- if role_arn and current_config['Role'] != role_arn:
- func_kwargs.update({'Role': role_arn})
- if handler and current_config['Handler'] != handler:
- func_kwargs.update({'Handler': handler})
- if description and current_config['Description'] != description:
- func_kwargs.update({'Description': description})
- if timeout and current_config['Timeout'] != timeout:
- func_kwargs.update({'Timeout': timeout})
- if memory_size and current_config['MemorySize'] != memory_size:
- func_kwargs.update({'MemorySize': memory_size})
- if runtime and current_config['Runtime'] != runtime:
- func_kwargs.update({'Runtime': runtime})
- if (environment_variables is not None) and (current_config.get(
- 'Environment', {}).get('Variables', {}) != environment_variables):
- func_kwargs.update({'Environment': {'Variables': environment_variables}})
- if dead_letter_arn is not None:
- if current_config.get('DeadLetterConfig'):
- if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn:
- func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
- else:
- if dead_letter_arn != "":
- func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
- if tracing_mode and (current_config.get('TracingConfig', {}).get('Mode', 'PassThrough') != tracing_mode):
- func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})
-
- # If VPC configuration is desired
- if vpc_subnet_ids or vpc_security_group_ids:
- if not vpc_subnet_ids or not vpc_security_group_ids:
- module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
-
- if 'VpcConfig' in current_config:
- # Compare VPC config with current config
- current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
- current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']
-
- subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
- vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)
-
- if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
- new_vpc_config = {'SubnetIds': vpc_subnet_ids,
- 'SecurityGroupIds': vpc_security_group_ids}
- func_kwargs.update({'VpcConfig': new_vpc_config})
- else:
- # No VPC configuration is desired, assure VPC config is empty when present in current config
- if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
- func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})
-
- # Upload new configuration if configuration has changed
- if len(func_kwargs) > 1:
- try:
- if not check_mode:
- response = client.update_function_configuration(**func_kwargs)
- current_version = response['Version']
- changed = True
- except (ParamValidationError, ClientError) as e:
- module.fail_json_aws(e, msg="Trying to update lambda configuration")
-
- # Update code configuration
- code_kwargs = {'FunctionName': name, 'Publish': True}
-
- # Update S3 location
- if s3_bucket and s3_key:
- # If function is stored on S3 always update
- code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})
-
- # If S3 Object Version is given
- if s3_object_version:
- code_kwargs.update({'S3ObjectVersion': s3_object_version})
-
- # Compare local checksum, update remote code when different
- elif zip_file:
- local_checksum = sha256sum(zip_file)
- remote_checksum = current_config['CodeSha256']
-
- # Only upload new code when local code is different compared to the remote code
- if local_checksum != remote_checksum:
- try:
- with open(zip_file, 'rb') as f:
- encoded_zip = f.read()
- code_kwargs.update({'ZipFile': encoded_zip})
- except IOError as e:
- module.fail_json(msg=str(e), exception=traceback.format_exc())
-
- # Tag Function
- if tags is not None:
- if set_tag(client, module, tags, current_function):
- changed = True
-
- # Upload new code if needed (e.g. code checksum has changed)
- if len(code_kwargs) > 2:
- try:
- if not check_mode:
- response = client.update_function_code(**code_kwargs)
- current_version = response['Version']
- changed = True
- except (ParamValidationError, ClientError) as e:
- module.fail_json_aws(e, msg="Trying to upload new code")
-
- # Describe function code and configuration
- response = get_current_function(client, name, qualifier=current_version)
- if not response:
- module.fail_json(msg='Unable to get function information after updating')
-
- # We're done
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
-
- # Function does not exist; create a new Lambda function
- elif state == 'present':
- if s3_bucket and s3_key:
- # If function is stored on S3
- code = {'S3Bucket': s3_bucket,
- 'S3Key': s3_key}
- if s3_object_version:
- code.update({'S3ObjectVersion': s3_object_version})
- elif zip_file:
- # If function is stored in local zipfile
- try:
- with open(zip_file, 'rb') as f:
- zip_content = f.read()
-
- code = {'ZipFile': zip_content}
- except IOError as e:
- module.fail_json(msg=str(e), exception=traceback.format_exc())
-
- else:
- module.fail_json(msg='Either S3 object or path to zipfile required')
-
- func_kwargs = {'FunctionName': name,
- 'Publish': True,
- 'Runtime': runtime,
- 'Role': role_arn,
- 'Code': code,
- 'Timeout': timeout,
- 'MemorySize': memory_size,
- }
-
- if description is not None:
- func_kwargs.update({'Description': description})
-
- if handler is not None:
- func_kwargs.update({'Handler': handler})
-
- if environment_variables:
- func_kwargs.update({'Environment': {'Variables': environment_variables}})
-
- if dead_letter_arn:
- func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
-
- if tracing_mode:
- func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})
-
- # If VPC configuration is given
- if vpc_subnet_ids or vpc_security_group_ids:
- if not vpc_subnet_ids or not vpc_security_group_ids:
- module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
-
- func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
- 'SecurityGroupIds': vpc_security_group_ids}})
-
- # Finally try to create function
- current_version = None
- try:
- if not check_mode:
- response = client.create_function(**func_kwargs)
- current_version = response['Version']
- changed = True
- except (ParamValidationError, ClientError) as e:
- module.fail_json_aws(e, msg="Trying to create function")
-
- # Tag Function
- if tags is not None:
- if set_tag(client, module, tags, get_current_function(client, name)):
- changed = True
-
- response = get_current_function(client, name, qualifier=current_version)
- if not response:
- module.fail_json(msg='Unable to get function information after creating')
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
-
- # Delete existing Lambda function
- if state == 'absent' and current_function:
- try:
- if not check_mode:
- client.delete_function(FunctionName=name)
- changed = True
- except (ParamValidationError, ClientError) as e:
- module.fail_json_aws(e, msg="Trying to delete Lambda function")
-
- module.exit_json(changed=changed)
-
- # Function already absent, do nothing
- elif state == 'absent':
- module.exit_json(changed=changed)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/lambda_alias.py b/lib/ansible/modules/cloud/amazon/lambda_alias.py
deleted file mode 100644
index 54bd3b6e79..0000000000
--- a/lib/ansible/modules/cloud/amazon/lambda_alias.py
+++ /dev/null
@@ -1,389 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: lambda_alias
-short_description: Creates, updates or deletes AWS Lambda function aliases
-description:
- - This module allows the management of AWS Lambda functions aliases via the Ansible
- framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function
- itself and M(lambda_event) to manage event source mappings.
-
-version_added: "2.2"
-
-author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
-options:
- function_name:
- description:
- - The name of the function alias.
- required: true
- type: str
- state:
- description:
- - Describes the desired state.
- default: "present"
- choices: ["present", "absent"]
- type: str
- name:
- description:
- - Name of the function alias.
- required: true
- aliases: ['alias_name']
- type: str
- description:
- description:
- - A short, user-defined function alias description.
- type: str
- function_version:
- description:
- - Version associated with the Lambda function alias.
- A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
- aliases: ['version']
- type: int
-requirements:
- - boto3
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
----
-# Simple example to create a lambda function and publish a version
-- hosts: localhost
- gather_facts: no
- vars:
- state: present
- project_folder: /path/to/deployment/package
- deployment_package: lambda.zip
- account: 123456789012
- production_version: 5
- tasks:
- - name: AWS Lambda Function
- lambda:
- state: "{{ state | default('present') }}"
- name: myLambdaFunction
- publish: True
- description: lambda function description
- code_s3_bucket: package-bucket
- code_s3_key: "lambda/{{ deployment_package }}"
- local_path: "{{ project_folder }}/{{ deployment_package }}"
- runtime: python2.7
- timeout: 5
- handler: lambda.handler
- memory_size: 128
- role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
-
- - name: Get information
- lambda_info:
- name: myLambdaFunction
- register: lambda_info
- - name: show results
- debug:
- msg: "{{ lambda_info['lambda_facts'] }}"
-
-# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
- - name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} "
- lambda_alias:
- state: "{{ state | default('present') }}"
- function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
- name: Dev
- description: Development is $LATEST version
-
-# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
- - name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} "
- lambda_alias:
- state: "{{ state | default('present') }}"
- function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
- name: QA
- version: "{{ lambda_info.lambda_facts.Version }}"
- description: "QA is version {{ lambda_info.lambda_facts.Version }}"
- when: lambda_info.lambda_facts.Version != "$LATEST"
-
-# The Prod alias will have a fixed version based on a variable
- - name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} "
- lambda_alias:
- state: "{{ state | default('present') }}"
- function_name: "{{ lambda_info.lambda_facts.FunctionName }}"
- name: Prod
- version: "{{ production_version }}"
- description: "Production is version {{ production_version }}"
-'''
-
-RETURN = '''
----
-alias_arn:
- description: Full ARN of the function, including the alias
- returned: success
- type: str
- sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
-description:
- description: A short description of the alias
- returned: success
- type: str
- sample: The development stage for my hot new app
-function_version:
- description: The qualifier that the alias refers to
- returned: success
- type: str
- sample: $LATEST
-name:
- description: The name of the alias assigned
- returned: success
- type: str
- sample: dev
-'''
-
-import re
-
-try:
- import boto3
- from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
-except ImportError:
- pass # the HAS_BOTO3 flag imported from module_utils.ec2 below reports the missing dependency
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
- get_aws_connection_info)
-
-
-class AWSConnection:
- """
- Create the connection object and client objects as required.
- """
-
- def __init__(self, ansible_obj, resources, use_boto3=True):
-
- try:
- self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
-
- self.resource_client = dict()
- if not resources:
- resources = ['lambda']
-
- resources.append('iam')
-
- for resource in resources:
- aws_connect_kwargs.update(dict(region=self.region,
- endpoint=self.endpoint,
- conn_type='client',
- resource=resource
- ))
- self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
-
- # if region is not provided, then get default profile/session region
- if not self.region:
- self.region = self.resource_client['lambda'].meta.region_name
-
- except (ClientError, ParamValidationError, MissingParametersError) as e:
- ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
-
- try:
- self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
- except (ClientError, ValueError, KeyError, IndexError):
- self.account_id = ''
-
- def client(self, resource='lambda'):
- return self.resource_client[resource]
-
-
-def pc(key):
- """
- Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
-
- :param key:
- :return:
- """
-
- return "".join([token.capitalize() for token in key.split('_')])
-
-
-def set_api_params(module, module_params):
- """
- Sets module parameters to those expected by the boto3 API.
-
- :param module:
- :param module_params:
- :return:
- """
-
- api_params = dict()
-
- for param in module_params:
- module_param = module.params.get(param, None)
- if module_param:
- api_params[pc(param)] = module_param
-
- return api_params
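-
-# Illustrative sketch (assumed module params, not from the original source):
-#   with module.params = {'function_name': 'myFunction', 'name': 'Dev', 'description': None}
-#   set_api_params(module, ('function_name', 'name', 'description'))
-#   returns {'FunctionName': 'myFunction', 'Name': 'Dev'}   # falsy values are skipped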
-
-
-def validate_params(module, aws):
- """
- Performs basic parameter validation.
-
- :param module: Ansible module reference
- :param aws: AWS client connection
- :return:
- """
-
- function_name = module.params['function_name']
-
- # validate function name
- if not re.search(r'^[\w\-:]+$', function_name):
- module.fail_json(
- msg='Function name {0} is invalid. Names must contain only alphanumeric characters, hyphens, underscores and colons.'.format(function_name)
- )
- if len(function_name) > 64:
- module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
-
- # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
- if module.params['function_version'] == 0:
- module.params['function_version'] = '$LATEST'
- else:
- module.params['function_version'] = str(module.params['function_version'])
-
- return
-
-
-def get_lambda_alias(module, aws):
- """
- Returns the lambda function alias if it exists.
-
- :param module: Ansible module reference
- :param aws: AWS client connection
- :return:
- """
-
- client = aws.client('lambda')
-
- # set API parameters
- api_params = set_api_params(module, ('function_name', 'name'))
-
- # check if alias exists and get facts
- try:
- results = client.get_alias(**api_params)
-
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- results = None
- else:
- module.fail_json(msg='Error retrieving function alias: {0}'.format(e))
- except (ParamValidationError, MissingParametersError) as e:
- # these exceptions carry no 'response' attribute, so fail without inspecting an error code
- module.fail_json(msg='Error retrieving function alias: {0}'.format(e))
-
- return results
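-
-# For reference, a successful get_alias call returns a shape like the following
-# (sample values assumed; keys match the RETURN block above once snake_cased):
-#   {'AliasArn': 'arn:aws:lambda:us-west-2:123456789012:function:myFunction:Dev',
-#    'Name': 'Dev', 'FunctionVersion': '$LATEST', 'Description': '...'}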
-
-
-def lambda_alias(module, aws):
- """
- Adds, updates or deletes lambda function aliases.
-
- :param module: Ansible module reference
- :param aws: AWS client connection
- :return dict:
- """
- client = aws.client('lambda')
- results = dict()
- changed = False
- current_state = 'absent'
- state = module.params['state']
-
- facts = get_lambda_alias(module, aws)
- if facts:
- current_state = 'present'
-
- if state == 'present':
- if current_state == 'present':
-
- # check if alias has changed -- only version and description can change
- alias_params = ('function_version', 'description')
- for param in alias_params:
- if module.params.get(param) != facts.get(pc(param)):
- changed = True
- break
-
- if changed:
- api_params = set_api_params(module, ('function_name', 'name'))
- api_params.update(set_api_params(module, alias_params))
-
- if not module.check_mode:
- try:
- results = client.update_alias(**api_params)
- except (ClientError, ParamValidationError, MissingParametersError) as e:
- module.fail_json(msg='Error updating function alias: {0}'.format(e))
-
- else:
- # create new function alias
- api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))
-
- try:
- if not module.check_mode:
- results = client.create_alias(**api_params)
- changed = True
- except (ClientError, ParamValidationError, MissingParametersError) as e:
- module.fail_json(msg='Error creating function alias: {0}'.format(e))
-
- else: # state = 'absent'
- if current_state == 'present':
- # delete the function
- api_params = set_api_params(module, ('function_name', 'name'))
-
- try:
- if not module.check_mode:
- results = client.delete_alias(**api_params)
- changed = True
- except (ClientError, ParamValidationError, MissingParametersError) as e:
- module.fail_json(msg='Error deleting function alias: {0}'.format(e))
-
- return dict(changed=changed, **(results or facts or {}))
-
-
-def main():
- """
- Main entry point.
-
- :return dict: ansible facts
- """
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- state=dict(required=False, default='present', choices=['present', 'absent']),
- function_name=dict(required=True),
- name=dict(required=True, aliases=['alias_name']),
- function_version=dict(type='int', required=False, default=0, aliases=['version']),
- description=dict(required=False, default=None),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[],
- required_together=[]
- )
-
- # validate dependencies
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required for this module.')
-
- aws = AWSConnection(module, ['lambda'])
-
- validate_params(module, aws)
-
- results = lambda_alias(module, aws)
-
- module.exit_json(**camel_dict_to_snake_dict(results))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/lambda_event.py b/lib/ansible/modules/cloud/amazon/lambda_event.py
deleted file mode 100644
index fcd3960f62..0000000000
--- a/lib/ansible/modules/cloud/amazon/lambda_event.py
+++ /dev/null
@@ -1,448 +0,0 @@
-#!/usr/bin/python
-# (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: lambda_event
-short_description: Creates, updates or deletes AWS Lambda function event mappings
-description:
- - This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream
- events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where
- AWS Lambda invokes the function.
- It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
- function itself and M(lambda_alias) to manage function aliases.
-
-version_added: "2.2"
-
-author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb)
-options:
- lambda_function_arn:
- description:
- - The name or ARN of the lambda function.
- required: true
- aliases: ['function_name', 'function_arn']
- type: str
- state:
- description:
- - Describes the desired state.
- default: "present"
- choices: ["present", "absent"]
- type: str
- alias:
- description:
- - Name of the function alias.
- - Mutually exclusive with I(version).
- type: str
- version:
- description:
- - Version of the Lambda function.
- - Mutually exclusive with I(alias).
- type: int
- event_source:
- description:
- - Source of the event that triggers the lambda function.
- - For DynamoDB and Kinesis events, select C(stream)
- - For SQS queues, select C(sqs)
- default: stream
- choices: ['stream', 'sqs']
- type: str
- source_params:
- description:
- - Sub-parameters required for event source.
- suboptions:
- source_arn:
- description:
- - The Amazon Resource Name (ARN) of the SQS queue, Kinesis stream or DynamoDB stream that is the event source.
- type: str
- required: true
- enabled:
- description:
- - Indicates whether AWS Lambda should begin polling or reading from the event source.
- default: true
- type: bool
- batch_size:
- description:
- - The largest number of records that AWS Lambda will retrieve from your event source at the time of invoking your function.
- default: 100
- type: int
- starting_position:
- description:
- - The position in the stream where AWS Lambda should start reading.
- - Required when I(event_source=stream).
- choices: [TRIM_HORIZON, LATEST]
- type: str
- type: dict
-requirements:
- - boto3
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
----
-# Example that creates a lambda event notification for a DynamoDB stream
-- hosts: localhost
- gather_facts: no
- vars:
- state: present
- tasks:
- - name: DynamoDB stream event mapping
- lambda_event:
- state: "{{ state | default('present') }}"
- event_source: stream
- function_name: "{{ function_name }}"
- alias: Dev
- source_params:
- source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
- enabled: True
- batch_size: 100
- starting_position: TRIM_HORIZON
-
- - name: Show source event
- debug:
- var: lambda_stream_events
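-
-# Hypothetical SQS queue mapping (illustrative; SQS sources do not need starting_position)
- - name: SQS queue event mapping
- lambda_event:
- state: "{{ state | default('present') }}"
- event_source: sqs
- function_name: "{{ function_name }}"
- source_params:
- source_arn: arn:aws:sqs:us-east-1:123456789012:myQueue
- enabled: True
- batch_size: 10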
-'''
-
-RETURN = '''
----
-lambda_stream_events:
- description: list of dictionaries returned by the API describing stream event mappings
- returned: success
- type: list
-'''
-
-import re
-
-try:
- import boto3
- from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
-except ImportError:
- pass # the HAS_BOTO3 flag imported from module_utils.ec2 below reports the missing dependency
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
- get_aws_connection_info)
-
-
-# ---------------------------------------------------------------------------------------------------
-#
-# Helper Functions & classes
-#
-# ---------------------------------------------------------------------------------------------------
-
-
-class AWSConnection:
- """
- Create the connection object and client objects as required.
- """
-
- def __init__(self, ansible_obj, resources, use_boto3=True):
-
- try:
- self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
-
- self.resource_client = dict()
- if not resources:
- resources = ['lambda']
-
- resources.append('iam')
-
- for resource in resources:
- aws_connect_kwargs.update(dict(region=self.region,
- endpoint=self.endpoint,
- conn_type='client',
- resource=resource
- ))
- self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
-
- # if region is not provided, then get default profile/session region
- if not self.region:
- self.region = self.resource_client['lambda'].meta.region_name
-
- except (ClientError, ParamValidationError, MissingParametersError) as e:
- ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
-
- # set account ID
- try:
- self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
- except (ClientError, ValueError, KeyError, IndexError):
- self.account_id = ''
-
- def client(self, resource='lambda'):
- return self.resource_client[resource]
-
-
-def pc(key):
- """
- Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
-
- :param key:
- :return:
- """
-
- return "".join([token.capitalize() for token in key.split('_')])
-
-
-def ordered_obj(obj):
- """
- Order object for comparison purposes
-
- :param obj:
- :return:
- """
-
- if isinstance(obj, dict):
- return sorted((k, ordered_obj(v)) for k, v in obj.items())
- if isinstance(obj, list):
- return sorted(ordered_obj(x) for x in obj)
- else:
- return obj
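-
-# Illustrative behaviour (assumed input, not from the original source):
-#   ordered_obj({'b': [2, 1], 'a': 0})  ->  [('a', 0), ('b', [1, 2])]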
-
-
-def set_api_sub_params(params):
- """
- Sets module sub-parameters to those expected by the boto3 API.
-
- :param params:
- :return:
- """
-
- api_params = dict()
-
- for param in params.keys():
- param_value = params.get(param, None)
- if param_value:
- api_params[pc(param)] = param_value
-
- return api_params
-
-
-def validate_params(module, aws):
- """
- Performs basic parameter validation.
-
- :param module:
- :param aws:
- :return:
- """
-
- function_name = module.params['lambda_function_arn']
-
- # validate function name
- if not re.search(r'^[\w\-:]+$', function_name):
- module.fail_json(
- msg='Function name {0} is invalid. Names must contain only alphanumeric characters, hyphens, underscores and colons.'.format(function_name)
- )
- if len(function_name) > 64 and not function_name.startswith('arn:aws:lambda:'):
- module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
-
- elif len(function_name) > 140 and function_name.startswith('arn:aws:lambda:'):
- module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name))
-
- # check if 'function_name' needs to be expanded in full ARN format
- if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
- function_name = module.params['lambda_function_arn']
- module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)
-
- qualifier = get_qualifier(module)
- if qualifier:
- function_arn = module.params['lambda_function_arn']
- module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
-
- return
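-
-# Illustrative expansion (assumed region/account/alias values):
-#   'myFunction' with region='us-east-1', account_id='123456789012' and alias 'Dev' becomes
-#   'arn:aws:lambda:us-east-1:123456789012:function:myFunction:Dev'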
-
-
-def get_qualifier(module):
- """
- Returns the function qualifier as a version or alias or None.
-
- :param module:
- :return:
- """
-
- qualifier = None
- if module.params['version'] > 0:
- qualifier = str(module.params['version'])
- elif module.params['alias']:
- qualifier = str(module.params['alias'])
-
- return qualifier
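-
-# Illustrative behaviour (assumed params):
-#   version=3, alias=None  -> '3'
-#   version=0, alias='Dev' -> 'Dev'
-#   version=0, alias=None  -> None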
-
-
-# ---------------------------------------------------------------------------------------------------
-#
-# Lambda Event Handlers
-#
-# This section defines a lambda_event_X function where X is an AWS service capable of initiating
-# the execution of a Lambda function (pull only).
-#
-# ---------------------------------------------------------------------------------------------------
-
-def lambda_event_stream(module, aws):
- """
- Adds, updates or deletes lambda stream (DynamoDB, Kinesis) event notifications.
- :param module:
- :param aws:
- :return:
- """
-
- client = aws.client('lambda')
- facts = dict()
- changed = False
- current_state = 'absent'
- state = module.params['state']
-
- api_params = dict(FunctionName=module.params['lambda_function_arn'])
-
- # check if required sub-parameters are present and valid
- source_params = module.params['source_params']
-
- source_arn = source_params.get('source_arn')
- if source_arn:
- api_params.update(EventSourceArn=source_arn)
- else:
- module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")
-
- # check if optional sub-parameters are valid, if present
- batch_size = source_params.get('batch_size')
- if batch_size:
- try:
- source_params['batch_size'] = int(batch_size)
- except ValueError:
- module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))
-
- # optional boolean value needs special treatment as not present does not imply False
- source_param_enabled = module.boolean(source_params.get('enabled', 'True'))
-
- # check if event mapping exist
- try:
- facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
- if facts:
- current_state = 'present'
- except ClientError as e:
- module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))
-
- if state == 'present':
- if current_state == 'absent':
-
- starting_position = source_params.get('starting_position')
- if starting_position:
- api_params.update(StartingPosition=starting_position)
- elif module.params.get('event_source') == 'sqs':
- # starting position is not required for SQS
- pass
- else:
- module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")
-
- if source_arn:
- api_params.update(Enabled=source_param_enabled)
- if source_params.get('batch_size'):
- api_params.update(BatchSize=source_params.get('batch_size'))
-
- try:
- if not module.check_mode:
- facts = client.create_event_source_mapping(**api_params)
- changed = True
- except (ClientError, ParamValidationError, MissingParametersError) as e:
- module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))
-
- else:
- # current_state is 'present'
- api_params = dict(FunctionName=module.params['lambda_function_arn'])
- current_mapping = facts[0]
- api_params.update(UUID=current_mapping['UUID'])
- mapping_changed = False
-
- # check if anything changed
- if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
- api_params.update(BatchSize=source_params['batch_size'])
- mapping_changed = True
-
- if source_param_enabled is not None:
- if source_param_enabled:
- if current_mapping['State'] not in ('Enabled', 'Enabling'):
- api_params.update(Enabled=True)
- mapping_changed = True
- else:
- if current_mapping['State'] not in ('Disabled', 'Disabling'):
- api_params.update(Enabled=False)
- mapping_changed = True
-
- if mapping_changed:
- try:
- if not module.check_mode:
- facts = client.update_event_source_mapping(**api_params)
- changed = True
- except (ClientError, ParamValidationError, MissingParametersError) as e:
- module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))
-
- else:
- if current_state == 'present':
- # remove the stream event mapping
- api_params = dict(UUID=facts[0]['UUID'])
-
- try:
- if not module.check_mode:
- facts = client.delete_event_source_mapping(**api_params)
- changed = True
- except (ClientError, ParamValidationError, MissingParametersError) as e:
- module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))
-
- return camel_dict_to_snake_dict(dict(changed=changed, events=facts))
-
-
-def main():
- """Produce a list of function suffixes which handle lambda events."""
- source_choices = ["stream", "sqs"]
-
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- state=dict(required=False, default='present', choices=['present', 'absent']),
- lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']),
- event_source=dict(required=False, default="stream", choices=source_choices),
- source_params=dict(type='dict', required=True),
- alias=dict(required=False, default=None),
- version=dict(type='int', required=False, default=0),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[['alias', 'version']],
- required_together=[]
- )
-
- # validate dependencies
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 is required for this module.')
-
- aws = AWSConnection(module, ['lambda'])
-
- validate_params(module, aws)
-
- if module.params['event_source'].lower() in ('stream', 'sqs'):
- results = lambda_event_stream(module, aws)
- else:
- module.fail_json(msg='Please select `stream` or `sqs` as the event type')
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/lambda_info.py b/lib/ansible/modules/cloud/amazon/lambda_info.py
deleted file mode 100644
index d7203c2f95..0000000000
--- a/lib/ansible/modules/cloud/amazon/lambda_info.py
+++ /dev/null
@@ -1,380 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: lambda_info
-short_description: Gathers AWS Lambda function details
-description:
- - Gathers various details related to Lambda functions, including aliases, versions and event source mappings.
- - Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases and
- M(lambda_event) to manage lambda event source mappings.
-
-version_added: "2.9"
-
-options:
- query:
- description:
- - Specifies the resource type for which to gather information. Leave blank to retrieve all information.
- choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ]
- default: "all"
- type: str
- function_name:
- description:
- - The name of the lambda function for which information is requested.
- aliases: [ "function", "name"]
- type: str
- event_source_arn:
- description:
- - When I(query=mappings), this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream.
- type: str
-author: Pierre Jodouin (@pjodouin)
-requirements:
- - boto3
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
----
-# Simple example of listing all info for a function
-- name: List all for a specific function
- lambda_info:
- query: all
- function_name: myFunction
- register: my_function_details
-# List all versions of a function
-- name: List function versions
- lambda_info:
- query: versions
- function_name: myFunction
- register: my_function_versions
-# List all lambda functions
-- name: List all functions
- lambda_info:
- query: all
- register: output
-- name: show Lambda information
- debug:
- msg: "{{ output['function'] }}"
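-# List event source mappings for a stream (illustrative; the ARN value is assumed)
-- name: List stream event mappings
- lambda_info:
- query: mappings
- event_source_arn: arn:aws:kinesis:us-east-1:123456789012:stream/myStream
- register: my_mappings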
-'''
-
-RETURN = '''
----
-function:
- description: lambda function list
- returned: success
- type: dict
-function.TheName:
- description: lambda function information, including event, mapping, and version information
- returned: success
- type: dict
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-import json
-import datetime
-import re
-
-
-try:
- from botocore.exceptions import ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def fix_return(node):
- """
- fixup returned dictionary
-
- :param node:
- :return:
- """
-
- if isinstance(node, datetime.datetime):
- node_value = str(node)
-
- elif isinstance(node, list):
- node_value = [fix_return(item) for item in node]
-
- elif isinstance(node, dict):
- node_value = dict([(item, fix_return(node[item])) for item in node.keys()])
-
- else:
- node_value = node
-
- return node_value
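-
-# Illustrative behaviour (assumed input): datetime objects are stringified recursively, e.g.
-#   fix_return({'when': datetime.datetime(2020, 1, 1), 'n': 1})
-#   -> {'when': '2020-01-01 00:00:00', 'n': 1}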
-
-
-def alias_details(client, module):
- """
- Returns list of aliases for a specified function.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- lambda_info = dict()
-
- function_name = module.params.get('function_name')
- if function_name:
- params = dict()
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('next_marker'):
- params['Marker'] = module.params.get('next_marker')
- try:
- lambda_info.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases'])
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_info.update(aliases=[])
- else:
- module.fail_json_aws(e, msg="Trying to get aliases")
- else:
- module.fail_json(msg='Parameter function_name required for query=aliases.')
-
- return {function_name: camel_dict_to_snake_dict(lambda_info)}
-
-
-def all_details(client, module):
- """
- Returns all lambda related facts.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- if module.params.get('max_items') or module.params.get('next_marker'):
- module.fail_json(msg='Cannot specify max_items or next_marker when query=all.')
-
- lambda_info = dict()
-
- function_name = module.params.get('function_name')
- if function_name:
- lambda_info[function_name] = {}
- lambda_info[function_name].update(config_details(client, module)[function_name])
- lambda_info[function_name].update(alias_details(client, module)[function_name])
- lambda_info[function_name].update(policy_details(client, module)[function_name])
- lambda_info[function_name].update(version_details(client, module)[function_name])
- lambda_info[function_name].update(mapping_details(client, module)[function_name])
- else:
- lambda_info.update(config_details(client, module))
-
- return lambda_info
-
-
-def config_details(client, module):
- """
- Returns configuration details for one or all lambda functions.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- lambda_info = dict()
-
- function_name = module.params.get('function_name')
- if function_name:
- try:
- lambda_info.update(client.get_function_configuration(FunctionName=function_name))
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_info.update(function={})
- else:
- module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name))
- else:
- params = dict()
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('next_marker'):
- params['Marker'] = module.params.get('next_marker')
-
- try:
- lambda_info.update(function_list=client.list_functions(**params)['Functions'])
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_info.update(function_list=[])
- else:
- module.fail_json_aws(e, msg="Trying to get function list")
-
- functions = dict()
- for func in lambda_info.pop('function_list', []):
- functions[func['FunctionName']] = camel_dict_to_snake_dict(func)
- return functions
-
- return {function_name: camel_dict_to_snake_dict(lambda_info)}
-
-
-def mapping_details(client, module):
- """
- Returns all lambda event source mappings.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- lambda_info = dict()
- params = dict()
- function_name = module.params.get('function_name')
-
- if function_name:
- params['FunctionName'] = module.params.get('function_name')
-
- if module.params.get('event_source_arn'):
- params['EventSourceArn'] = module.params.get('event_source_arn')
-
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('next_marker'):
- params['Marker'] = module.params.get('next_marker')
-
- try:
- lambda_info.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings'])
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_info.update(mappings=[])
- else:
- module.fail_json_aws(e, msg="Trying to get source event mappings")
-
- if function_name:
- return {function_name: camel_dict_to_snake_dict(lambda_info)}
-
- return camel_dict_to_snake_dict(lambda_info)
-
-
-def policy_details(client, module):
- """
- Returns policy attached to a lambda function.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- if module.params.get('max_items') or module.params.get('next_marker'):
- module.fail_json(msg='Cannot specify max_items or next_marker when query=policy.')
-
- lambda_info = dict()
-
- function_name = module.params.get('function_name')
- if function_name:
- try:
- # get_policy returns a JSON string so must convert to dict before reassigning to its key
- lambda_info.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy']))
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_info.update(policy={})
- else:
- module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name))
- else:
- module.fail_json(msg='Parameter function_name required for query=policy.')
-
- return {function_name: camel_dict_to_snake_dict(lambda_info)}
-
-
-def version_details(client, module):
- """
- Returns all lambda function versions.
-
- :param client: AWS API client reference (boto3)
- :param module: Ansible module reference
- :return dict:
- """
-
- lambda_info = dict()
-
- function_name = module.params.get('function_name')
- if function_name:
- params = dict()
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('next_marker'):
- params['Marker'] = module.params.get('next_marker')
-
- try:
- lambda_info.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions'])
- except ClientError as e:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- lambda_info.update(versions=[])
- else:
- module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name))
- else:
- module.fail_json(msg='Parameter function_name required for query=versions.')
-
- return {function_name: camel_dict_to_snake_dict(lambda_info)}
-
-
-def main():
- """
- Main entry point.
-
- :return dict: ansible facts
- """
- argument_spec = dict(
- function_name=dict(required=False, default=None, aliases=['function', 'name']),
- query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'),
- event_source_arn=dict(required=False, default=None)
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[],
- required_together=[]
- )
-
- # validate function_name if present
- function_name = module.params['function_name']
- if function_name:
- if not re.search(r"^[\w\-:]+$", function_name):
- module.fail_json(
- msg='Function name {0} is invalid. Names must contain only alphanumeric characters, hyphens, underscores and colons.'.format(function_name)
- )
- if len(function_name) > 64:
- module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
-
- client = module.client('lambda')
-
- invocations = dict(
- aliases='alias_details',
- all='all_details',
- config='config_details',
- mappings='mapping_details',
- policy='policy_details',
- versions='version_details',
- )
-
- this_module_function = globals()[invocations[module.params['query']]]
- all_facts = fix_return(this_module_function(client, module))
-
- results = dict(function=all_facts, changed=False)
-
- if module.check_mode:
- results['msg'] = 'Check mode is set but ignored; this module only gathers information.'
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/lambda_policy.py b/lib/ansible/modules/cloud/amazon/lambda_policy.py
deleted file mode 100644
index 2c7342411b..0000000000
--- a/lib/ansible/modules/cloud/amazon/lambda_policy.py
+++ /dev/null
@@ -1,439 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'status': ['preview'],
- 'supported_by': 'community',
- 'metadata_version': '1.1'}
-
-DOCUMENTATION = '''
----
-module: lambda_policy
-short_description: Creates, updates or deletes AWS Lambda policy statements
-description:
- - This module allows the management of AWS Lambda policy statements.
- - It is idempotent and supports "Check" mode.
- - Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases,
- M(lambda_event) to manage event source mappings such as Kinesis streams, M(execute_lambda) to execute a
- lambda function and M(lambda_info) to gather information relating to one or more lambda functions.
-
-version_added: "2.4"
-
-author:
- - Pierre Jodouin (@pjodouin)
- - Michael De La Rue (@mikedlr)
-options:
- function_name:
- description:
- - "Name of the Lambda function whose resource policy you are updating by adding a new permission."
- - "You can specify a function name (for example, Thumbnail ) or you can specify Amazon Resource Name (ARN) of the"
- - "function (for example, C(arn:aws:lambda:us-west-2:account-id:function:ThumbNail) ). AWS Lambda also allows you to"
- - "specify partial ARN (for example, C(account-id:Thumbnail) ). Note that the length constraint applies only to the"
- - "ARN. If you specify only the function name, it is limited to 64 character in length."
- required: true
- aliases: ['lambda_function_arn', 'function_arn']
- type: str
-
- state:
- description:
- - Describes the desired state.
- default: "present"
- choices: ["present", "absent"]
- type: str
-
- alias:
- description:
- - Name of the function alias. Mutually exclusive with I(version).
- type: str
-
- version:
- description:
- - Version of the Lambda function. Mutually exclusive with I(alias).
- type: int
-
- statement_id:
- description:
- - A unique statement identifier.
- required: true
- aliases: ['sid']
- type: str
-
- action:
- description:
- - "The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with
- lambda: followed by the API name (see Operations ). For example, C(lambda:CreateFunction) . You can use wildcard
- (C(lambda:*)) to grant permission for all AWS Lambda actions."
- required: true
- type: str
-
- principal:
- description:
- - "The principal who is getting this permission. It can be Amazon S3 service Principal (s3.amazonaws.com ) if
- you want Amazon S3 to invoke the function, an AWS account ID if you are granting cross-account permission, or
- any valid AWS service principal such as sns.amazonaws.com . For example, you might want to allow a custom
- application in another AWS account to push events to AWS Lambda by invoking your function."
- required: true
- type: str
-
- source_arn:
- description:
- - This is optional; however, when granting Amazon S3 permission to invoke your function, you should specify this
- field with the bucket Amazon Resource Name (ARN) as its value. This ensures that only events generated from
- the specified bucket can invoke the function.
- type: str
-
- source_account:
- description:
- - The AWS account ID (without a hyphen) of the source owner. For example, if I(source_arn) identifies a bucket,
- then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you
- specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS
- account created the bucket). You can also use this condition to specify all sources (that is, you don't
- specify the I(source_arn)) owned by a specific account.
- type: str
-
- event_source_token:
- description:
- - Token string representing source ARN or account. Mutually exclusive with I(source_arn) or I(source_account).
- type: str
-
-requirements:
- - boto3
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
----
-- hosts: localhost
- gather_facts: no
- vars:
- state: present
- tasks:
- - name: Lambda S3 event notification
- lambda_policy:
- state: "{{ state | default('present') }}"
- function_name: functionName
- alias: Dev
- statement_id: lambda-s3-myBucket-create-data-log
- action: lambda:InvokeFunction
- principal: s3.amazonaws.com
- source_arn: arn:aws:s3:eu-central-1:123456789012:bucketName
- source_account: 123456789012
- register: lambda_policy_action
-
- - name: show results
- debug:
- var: lambda_policy_action
-
-'''
-
-RETURN = '''
----
-lambda_policy_action:
- description: describes what action was taken
- returned: success
- type: str
-'''
-
-import json
-import re
-from ansible.module_utils._text import to_native
-from ansible.module_utils.aws.core import AnsibleAWSModule
-
-try:
- from botocore.exceptions import ClientError
-except Exception:
- pass # caught by AnsibleAWSModule
-
-
-def pc(key):
- """
- Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
-
- :param key:
- :return:
- """
-
- return "".join([token.capitalize() for token in key.split('_')])
-
-
-def policy_equal(module, current_statement):
- for param in ('action', 'principal', 'source_arn', 'source_account', 'event_source_token'):
- if module.params.get(param) != current_statement.get(param):
- return False
-
- return True
-
-
-def set_api_params(module, module_params):
- """
- Sets module parameters to those expected by the boto3 API.
-
- :param module:
- :param module_params:
- :return:
- """
-
- api_params = dict()
-
- for param in module_params:
- module_param = module.params.get(param)
- if module_param is not None:
- api_params[pc(param)] = module_param
-
- return api_params
-
-
-def validate_params(module):
- """
- Performs parameter validation beyond the module framework's validation.
-
- :param module:
- :return:
- """
-
- function_name = module.params['function_name']
-
- # validate function name
- if function_name.startswith('arn:'):
- if not re.search(r'^[\w\-:]+$', function_name):
- module.fail_json(
- msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name)
- )
- if len(function_name) > 140:
- module.fail_json(msg='ARN name "{0}" exceeds 140 character limit'.format(function_name))
- else:
- if not re.search(r'^[\w\-]+$', function_name):
- module.fail_json(
- msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(
- function_name)
- )
- if len(function_name) > 64:
- module.fail_json(
- msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
-
-
-def get_qualifier(module):
- """
- Returns the function qualifier as a version or alias or None.
-
- :param module:
- :return:
- """
-
- if module.params.get('version') is not None:
- return to_native(module.params['version'])
- elif module.params['alias']:
- return to_native(module.params['alias'])
-
- return None
-
-
-def extract_statement(policy, sid):
- """return flattened single policy statement from a policy
-
- If a policy statement is present in the policy extract it and
- return it in a flattened form. Otherwise return an empty
- dictionary.
- """
- if 'Statement' not in policy:
- return {}
- policy_statement = {}
- # Now that we have the policy, check if required permission statement is present and flatten to
- # simple dictionary if found.
- for statement in policy['Statement']:
- if statement['Sid'] == sid:
- policy_statement['action'] = statement['Action']
- try:
- policy_statement['principal'] = statement['Principal']['Service']
- except KeyError:
- pass
- try:
- policy_statement['principal'] = statement['Principal']['AWS']
- except KeyError:
- pass
- try:
- policy_statement['source_arn'] = statement['Condition']['ArnLike']['AWS:SourceArn']
- except KeyError:
- pass
- try:
- policy_statement['source_account'] = statement['Condition']['StringEquals']['AWS:SourceAccount']
- except KeyError:
- pass
- try:
- policy_statement['event_source_token'] = statement['Condition']['StringEquals']['lambda:EventSourceToken']
- except KeyError:
- pass
- break
-
- return policy_statement
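-
-# Illustrative flattening (sample policy assumed, shaped like an AWS resource policy):
-#   policy = {'Statement': [{'Sid': 'sid-1', 'Action': 'lambda:InvokeFunction',
-#                            'Principal': {'Service': 's3.amazonaws.com'},
-#                            'Condition': {'ArnLike': {'AWS:SourceArn': 'arn:aws:s3:::my-bucket'}}}]}
-#   extract_statement(policy, 'sid-1')
-#   -> {'action': 'lambda:InvokeFunction', 'principal': 's3.amazonaws.com',
-#       'source_arn': 'arn:aws:s3:::my-bucket'}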
-
-
-def get_policy_statement(module, client):
- """Checks that policy exists and if so, that statement ID is present or absent.
-
- :param module:
- :param client:
- :return:
- """
- sid = module.params['statement_id']
-
- # set API parameters
- api_params = set_api_params(module, ('function_name', ))
- qualifier = get_qualifier(module)
- if qualifier:
- api_params.update(Qualifier=qualifier)
-
- policy_results = None
- # check if function policy exists
- try:
- policy_results = client.get_policy(**api_params)
- except ClientError as e:
- try:
- if e.response['Error']['Code'] == 'ResourceNotFoundException':
- return {}
- except AttributeError: # catches ClientErrors without response, e.g. fail before connect
- pass
- module.fail_json_aws(e, msg="retrieving function policy")
- except Exception as e:
- module.fail_json_aws(e, msg="retrieving function policy")
-
- # get_policy returns a JSON string so must convert to dict before reassigning to its key
- policy = json.loads(policy_results.get('Policy', '{}'))
- return extract_statement(policy, sid)
-
-
-def add_policy_permission(module, client):
- """
- Adds a permission statement to the policy.
-
- :param module:
- :param client:
- :return:
- """
-
- changed = False
-
- # set API parameters
- params = (
- 'function_name',
- 'statement_id',
- 'action',
- 'principal',
- 'source_arn',
- 'source_account',
- 'event_source_token')
- api_params = set_api_params(module, params)
- qualifier = get_qualifier(module)
- if qualifier:
- api_params.update(Qualifier=qualifier)
-
- if not module.check_mode:
- try:
- client.add_permission(**api_params)
- except Exception as e:
- module.fail_json_aws(e, msg="adding permission to policy")
- changed = True
-
- return changed
-
-
-def remove_policy_permission(module, client):
- """
- Removes a permission statement from the policy.
-
- :param module:
- :param client:
- :return:
- """
-
- changed = False
-
- # set API parameters
- api_params = set_api_params(module, ('function_name', 'statement_id'))
- qualifier = get_qualifier(module)
- if qualifier:
- api_params.update(Qualifier=qualifier)
-
- try:
- if not module.check_mode:
- client.remove_permission(**api_params)
- changed = True
- except Exception as e:
- module.fail_json_aws(e, msg="removing permission from policy")
-
- return changed
-
-
-def manage_state(module, lambda_client):
- changed = False
- current_state = 'absent'
- state = module.params['state']
- action_taken = 'none'
-
- # check if the policy exists
- current_policy_statement = get_policy_statement(module, lambda_client)
- if current_policy_statement:
- current_state = 'present'
-
- if state == 'present':
- if current_state == 'present' and not policy_equal(module, current_policy_statement):
- remove_policy_permission(module, lambda_client)
- changed = add_policy_permission(module, lambda_client)
- action_taken = 'updated'
- if current_state != 'present':
- changed = add_policy_permission(module, lambda_client)
- action_taken = 'added'
- elif current_state == 'present':
- # remove the policy statement
- changed = remove_policy_permission(module, lambda_client)
- action_taken = 'deleted'
-
- return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken))
-
-
-def setup_module_object():
- argument_spec = dict(
- state=dict(default='present', choices=['present', 'absent']),
- function_name=dict(required=True, aliases=['lambda_function_arn', 'function_arn']),
- statement_id=dict(required=True, aliases=['sid']),
- alias=dict(),
- version=dict(type='int'),
- action=dict(required=True),
- principal=dict(required=True),
- source_arn=dict(),
- source_account=dict(),
- event_source_token=dict(),
- )
-
- return AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[['alias', 'version'],
- ['event_source_token', 'source_arn'],
- ['event_source_token', 'source_account']],
- )
-
-
-def main():
- """
- Main entry point.
-
- :return dict: ansible facts
- """
-
- module = setup_module_object()
- client = module.client('lambda')
- validate_params(module)
- results = manage_state(module, client)
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/lightsail.py b/lib/ansible/modules/cloud/amazon/lightsail.py
deleted file mode 100644
index 162fca7f4b..0000000000
--- a/lib/ansible/modules/cloud/amazon/lightsail.py
+++ /dev/null
@@ -1,340 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: lightsail
-short_description: Manage instances in AWS Lightsail
-description:
- - Manage instances in AWS Lightsail.
- - Instance tagging is not yet supported in this module.
-version_added: "2.4"
-author:
- - "Nick Ball (@nickball)"
- - "Prasad Katti (@prasadkatti)"
-options:
- state:
- description:
- - Indicate desired state of the target.
- - I(rebooted) and I(restarted) are aliases.
- default: present
- choices: ['present', 'absent', 'running', 'restarted', 'rebooted', 'stopped']
- type: str
- name:
- description: Name of the instance.
- required: true
- type: str
- zone:
- description:
- - AWS availability zone in which to launch the instance.
- - Required when I(state=present).
- type: str
- blueprint_id:
- description:
- - ID of the instance blueprint image.
- - Required when I(state=present).
- type: str
- bundle_id:
- description:
- - Bundle of specification info for the instance.
- - Required when I(state=present).
- type: str
- user_data:
- description:
- - Launch script that can configure the instance with additional data.
- type: str
- key_pair_name:
- description:
- - Name of the key pair to use with the instance.
- - If I(state=present) and a key_pair_name is not provided, the default keypair from the region will be used.
- type: str
- wait:
- description:
- - Wait for the instance to be in state 'running' before returning.
- - If I(wait=false) an ip_address may not be returned.
- - Has no effect when I(state=rebooted) or I(state=absent).
- type: bool
- default: true
- wait_timeout:
- description:
- - How long before I(wait) gives up, in seconds.
- default: 300
- type: int
-
-requirements:
- - boto3
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-
-EXAMPLES = '''
-# Create a new Lightsail instance
-- lightsail:
- state: present
- name: my_instance
- region: us-east-1
- zone: us-east-1a
- blueprint_id: ubuntu_16_04
- bundle_id: nano_1_0
- key_pair_name: id_rsa
- user_data: " echo 'hello world' > /home/ubuntu/test.txt"
- register: my_instance
-
-# Delete an instance
-- lightsail:
- state: absent
- region: us-east-1
- name: my_instance
-
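-# Stop an existing instance (illustrative; uses the module's 'stopped' state)
-- lightsail:
- state: stopped
- region: us-east-1
- name: my_instance
-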
-'''
-
-RETURN = '''
-changed:
- description: if an instance has been modified/created
- returned: always
- type: bool
- sample:
- changed: true
-instance:
- description: instance data
- returned: always
- type: dict
- sample:
- arn: "arn:aws:lightsail:us-east-1:448830907657:Instance/1fef0175-d6c8-480e-84fa-214f969cda87"
- blueprint_id: "ubuntu_16_04"
- blueprint_name: "Ubuntu"
- bundle_id: "nano_1_0"
- created_at: "2017-03-27T08:38:59.714000-04:00"
- hardware:
- cpu_count: 1
- ram_size_in_gb: 0.5
- is_static_ip: false
- location:
- availability_zone: "us-east-1a"
- region_name: "us-east-1"
- name: "my_instance"
- networking:
- monthly_transfer:
- gb_per_month_allocated: 1024
- ports:
- - access_direction: "inbound"
- access_from: "Anywhere (0.0.0.0/0)"
- access_type: "public"
- common_name: ""
- from_port: 80
- protocol: tcp
- to_port: 80
- - access_direction: "inbound"
- access_from: "Anywhere (0.0.0.0/0)"
- access_type: "public"
- common_name: ""
- from_port: 22
- protocol: tcp
- to_port: 22
- private_ip_address: "172.26.8.14"
- public_ip_address: "34.207.152.202"
- resource_type: "Instance"
- ssh_key_name: "keypair"
- state:
- code: 16
- name: running
- support_code: "588307843083/i-0997c97831ee21e33"
- username: "ubuntu"
-'''
-
-import time
-
-try:
- import botocore
-except ImportError:
- # will be caught by AnsibleAWSModule
- pass
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-
-def find_instance_info(module, client, instance_name, fail_if_not_found=False):
-
- try:
- res = client.get_instance(instanceName=instance_name)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'NotFoundException' and not fail_if_not_found:
- return None
- module.fail_json_aws(e)
- return res['instance']
-
-
-def wait_for_instance_state(module, client, instance_name, states):
- """
- `states` is a list of instance states that we are waiting for.
- """
-
- wait_timeout = module.params.get('wait_timeout')
- wait_max = time.time() + wait_timeout
- while wait_max > time.time():
- try:
- instance = find_instance_info(module, client, instance_name)
- if instance['state']['name'] in states:
- break
- time.sleep(5)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
- else:
- module.fail_json(msg='Timed out waiting for instance "{0}" to get to one of the following states -'
- ' {1}'.format(instance_name, states))
-
-
-def create_instance(module, client, instance_name):
-
- inst = find_instance_info(module, client, instance_name)
- if inst:
- module.exit_json(changed=False, instance=camel_dict_to_snake_dict(inst))
- else:
- create_params = {'instanceNames': [instance_name],
- 'availabilityZone': module.params.get('zone'),
- 'blueprintId': module.params.get('blueprint_id'),
- 'bundleId': module.params.get('bundle_id'),
- 'userData': module.params.get('user_data')}
-
- key_pair_name = module.params.get('key_pair_name')
- if key_pair_name:
- create_params['keyPairName'] = key_pair_name
-
- try:
- client.create_instances(**create_params)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
- wait = module.params.get('wait')
- if wait:
- desired_states = ['running']
- wait_for_instance_state(module, client, instance_name, desired_states)
- inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
-
- module.exit_json(changed=True, instance=camel_dict_to_snake_dict(inst))
-
-
-def delete_instance(module, client, instance_name):
-
- changed = False
-
- inst = find_instance_info(module, client, instance_name)
- if inst is None:
- module.exit_json(changed=changed, instance={})
-
- # Wait for instance to exit transition state before deleting
- desired_states = ['running', 'stopped']
- wait_for_instance_state(module, client, instance_name, desired_states)
-
- try:
- client.delete_instance(instanceName=instance_name)
- changed = True
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
- module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst))
-
-
-def restart_instance(module, client, instance_name):
- """
- Reboot an existing instance
- Wait will not apply here as this is an OS-level operation
- """
-
- changed = False
-
- inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
-
- try:
- client.reboot_instance(instanceName=instance_name)
- changed = True
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
-
- module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst))
-
-
-def start_or_stop_instance(module, client, instance_name, state):
- """
- Start or stop an existing instance
- """
-
- changed = False
-
- inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
-
- # Wait for instance to exit transition state before state change
- desired_states = ['running', 'stopped']
- wait_for_instance_state(module, client, instance_name, desired_states)
-
- # Try state change
- if inst and inst['state']['name'] != state:
- try:
- if state == 'running':
- client.start_instance(instanceName=instance_name)
- else:
- client.stop_instance(instanceName=instance_name)
- except botocore.exceptions.ClientError as e:
- module.fail_json_aws(e)
- changed = True
- # Grab current instance info
- inst = find_instance_info(module, client, instance_name)
-
- wait = module.params.get('wait')
- if wait:
- desired_states = [state]
- wait_for_instance_state(module, client, instance_name, desired_states)
- inst = find_instance_info(module, client, instance_name, fail_if_not_found=True)
-
- module.exit_json(changed=changed, instance=camel_dict_to_snake_dict(inst))
-
-
-def main():
-
- argument_spec = dict(
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent', 'stopped', 'running', 'restarted',
- 'rebooted']),
- zone=dict(type='str'),
- blueprint_id=dict(type='str'),
- bundle_id=dict(type='str'),
- key_pair_name=dict(type='str'),
- user_data=dict(type='str', default=''),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(default=300, type='int'),
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- required_if=[['state', 'present', ('zone', 'blueprint_id', 'bundle_id')]])
-
- client = module.client('lightsail')
-
- name = module.params.get('name')
- state = module.params.get('state')
-
- if state == 'present':
- create_instance(module, client, name)
- elif state == 'absent':
- delete_instance(module, client, name)
- elif state in ('running', 'stopped'):
- start_or_stop_instance(module, client, name, state)
- elif state in ('restarted', 'rebooted'):
- restart_instance(module, client, name)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/rds.py b/lib/ansible/modules/cloud/amazon/rds.py
deleted file mode 100644
index 04660cb8ff..0000000000
--- a/lib/ansible/modules/cloud/amazon/rds.py
+++ /dev/null
@@ -1,1405 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: rds
-version_added: "1.3"
-short_description: create, delete, or modify Amazon RDS instances, RDS snapshots, and related facts
-description:
- - Creates, deletes, or modifies rds resources.
- - When creating an instance it can be either a new instance or a read-only replica of an existing instance.
- - This module has a dependency on python-boto >= 2.5 and will soon be deprecated.
- - The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0).
- - Please use boto3 based M(rds_instance) instead.
-options:
- command:
- description:
- - Specifies the action to take. The 'reboot' option is available starting at version 2.0.
- required: true
- choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
- type: str
- instance_name:
- description:
- - Database instance identifier.
- - Required except when using I(command=facts) or I(command=delete) on just a snapshot.
- type: str
- source_instance:
- description:
- - Name of the database to replicate.
- - Used only when I(command=replicate).
- type: str
- db_engine:
- description:
- - The type of database.
- - Used only when I(command=create).
- - mariadb was added in version 2.2.
- choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee',
- 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
- type: str
- size:
- description:
- - Size in gigabytes of the initial storage for the DB instance.
- - Used only when I(command=create) or I(command=modify).
- type: str
- instance_type:
- description:
- - The instance type of the database.
- - If not specified then the replica inherits the same instance type as the source instance.
- - Required when I(command=create).
- - Optional when I(command=replicate), I(command=modify) or I(command=restore).
- aliases: ['type']
- type: str
- username:
- description:
- - Master database username.
- - Used only when I(command=create).
- type: str
- password:
- description:
- - Password for the master database username.
- - Used only when I(command=create) or I(command=modify).
- type: str
- db_name:
- description:
- - Name of a database to create within the instance.
- - If not specified then no database is created.
- - Used only when I(command=create).
- type: str
- engine_version:
- description:
- - Version number of the database engine to use.
- - If not specified then the current Amazon RDS default engine version is used.
- - Used only when I(command=create).
- type: str
- parameter_group:
- description:
- - Name of the DB parameter group to associate with this instance.
- - If omitted then the RDS default DBParameterGroup will be used.
- - Used only when I(command=create) or I(command=modify).
- type: str
- license_model:
- description:
- - The license model for this DB instance.
- - Used only when I(command=create) or I(command=restore).
- choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
- type: str
- multi_zone:
- description:
- - Specifies if this is a Multi-availability-zone deployment.
- - Cannot be used in conjunction with the I(zone) parameter.
- - Used only when I(command=create) or I(command=modify).
- type: bool
- iops:
- description:
- - Specifies the number of IOPS for the instance.
- - Used only when I(command=create) or I(command=modify).
- - Must be an integer greater than 1000.
- type: str
- security_groups:
- description:
- - Comma separated list of one or more security groups.
- - Used only when I(command=create) or I(command=modify).
- type: str
- vpc_security_groups:
- description:
- - Comma separated list of one or more VPC security group IDs.
- - Also requires I(subnet) to be specified.
- - Used only when I(command=create) or I(command=modify).
- type: list
- elements: str
- port:
- description:
- - Port number that the DB instance uses for connections.
- - Used only when I(command=create) or I(command=replicate).
- - 'Defaults to the standard ports for each I(db_engine): C(3306) for MySQL and MariaDB, C(1521) for Oracle,
- C(1433) for SQL Server, C(5432) for PostgreSQL.'
- type: int
- upgrade:
- description:
- - Indicates that minor version upgrades should be applied automatically.
- - Used only when I(command=create) or I(command=modify) or I(command=restore) or I(command=replicate).
- type: bool
- default: false
- option_group:
- description:
- - The name of the option group to use.
- - If not specified then the default option group is used.
- - Used only when I(command=create).
- type: str
- maint_window:
- description:
- - 'Maintenance window in format of C(ddd:hh24:mi-ddd:hh24:mi). (Example: C(Mon:22:00-Mon:23:15))'
- - Times are specified in UTC.
- - If not specified then a random maintenance window is assigned.
- - Used only when I(command=create) or I(command=modify).
- type: str
- backup_window:
- description:
- - 'Backup window in format of C(hh24:mi-hh24:mi). (Example: C(18:00-20:30))'
- - Times are specified in UTC.
- - If not specified then a random backup window is assigned.
- - Used only when I(command=create) or I(command=modify).
- type: str
- backup_retention:
- description:
- - Number of days backups are retained.
- - Set to 0 to disable backups.
- - Default is 1 day.
- - 'Valid range: 0-35.'
- - Used only when I(command=create) or I(command=modify).
- type: str
- zone:
- description:
- - Availability zone in which to launch the instance.
- - Used only when I(command=create), I(command=replicate) or I(command=restore).
- - Cannot be used in conjunction with the I(multi_zone) parameter.
- aliases: ['aws_zone', 'ec2_zone']
- type: str
- subnet:
- description:
- - VPC subnet group.
- - If specified then a VPC instance is created.
- - Used only when I(command=create).
- type: str
- snapshot:
- description:
- - Name of snapshot to take.
- - When I(command=delete), if no I(snapshot) name is provided then no snapshot is taken.
- - When I(command=delete), if no I(instance_name) is provided the snapshot is deleted.
- - Used with I(command=facts), I(command=delete) or I(command=snapshot).
- type: str
- wait:
- description:
- - When I(command=create), I(command=replicate), I(command=modify) or I(command=restore), wait for the database to enter the 'available' state.
- - When I(command=delete), wait for the database to be terminated.
- type: bool
- default: false
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- - Used when I(wait=true).
- default: 300
- type: int
- apply_immediately:
- description:
- - When I(apply_immediately=true), the modifications will be applied as soon as possible rather than waiting for the
- next preferred maintenance window.
- - Used only when I(command=modify).
- type: bool
- default: false
- force_failover:
- description:
- - If enabled, the reboot is done using a MultiAZ failover.
- - Used only when I(command=reboot).
- type: bool
- default: false
- version_added: "2.0"
- new_instance_name:
- description:
- - Name to rename an instance to.
- - Used only when I(command=modify).
- type: str
- version_added: "1.5"
- character_set_name:
- description:
- - Associate the DB instance with a specified character set.
- - Used with I(command=create).
- version_added: "1.9"
- type: str
- publicly_accessible:
- description:
- - Explicitly set whether the resource should be publicly accessible or not.
- - Used with I(command=create), I(command=replicate).
- - Requires boto >= 2.26.0
- type: str
- version_added: "1.9"
- tags:
- description:
- - Tags dict to apply to a resource.
- - Used with I(command=create), I(command=replicate), I(command=restore).
- - Requires boto >= 2.26.0
- type: dict
- version_added: "1.9"
-requirements:
- - "python >= 2.6"
- - "boto"
-author:
- - "Bruce Pennypacker (@bpennypacker)"
- - "Will Thames (@willthames)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
-
-EXAMPLES = '''
-# Basic mysql provisioning example
-- rds:
- command: create
- instance_name: new-database
- db_engine: MySQL
- size: 10
- instance_type: db.m1.small
- username: mysql_admin
- password: 1nsecure
- tags:
- Environment: testing
- Application: cms
-
-# Create a read-only replica and wait for it to become available
-- rds:
- command: replicate
- instance_name: new-database-replica
- source_instance: new_database
- wait: yes
- wait_timeout: 600
-
-# Delete an instance, but create a snapshot before doing so
-- rds:
- command: delete
- instance_name: new-database
- snapshot: new_database_snapshot
-
-# Get facts about an instance
-- rds:
- command: facts
- instance_name: new-database
- register: new_database_facts
-
-# Rename an instance and wait for the change to take effect
-- rds:
- command: modify
- instance_name: new-database
- new_instance_name: renamed-database
- wait: yes
-
-# Reboot an instance and wait for it to become available again
-- rds:
- command: reboot
- instance_name: database
- wait: yes
-
-# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
-# then modify it to add your security group. Also, display the new endpoint.
-# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
-- local_action:
- module: rds
- command: restore
- snapshot: mypostgres-snapshot
- instance_name: MyNewInstanceName
- region: us-west-2
- zone: us-west-2b
- subnet: default-vpc-xx441xxx
- publicly_accessible: yes
- wait: yes
- wait_timeout: 600
- tags:
- Name: pg1_test_name_tag
- register: rds
-
-- local_action:
- module: rds
- command: modify
- instance_name: MyNewInstanceName
- region: us-west-2
- vpc_security_groups: sg-xxx945xx
-
-- debug:
- msg: "The new db endpoint is {{ rds.instance.endpoint }}"
-'''
-
-RETURN = '''
-instance:
- description: the rds instance
- returned: always
- type: complex
- contains:
- engine:
- description: the name of the database engine
- returned: when RDS instance exists
- type: str
- sample: "oracle-se"
- engine_version:
- description: the version of the database engine
- returned: when RDS instance exists
- type: str
- sample: "11.2.0.4.v6"
- license_model:
- description: the license model information
- returned: when RDS instance exists
- type: str
- sample: "bring-your-own-license"
- character_set_name:
- description: the name of the character set that this instance is associated with
- returned: when RDS instance exists
- type: str
- sample: "AL32UTF8"
- allocated_storage:
- description: the allocated storage size in gigabytes (GB)
- returned: when RDS instance exists
- type: str
- sample: "100"
- publicly_accessible:
- description: the accessibility options for the DB instance
- returned: when RDS instance exists
- type: bool
- sample: "true"
- latest_restorable_time:
- description: the latest time to which a database can be restored with point-in-time restore
- returned: when RDS instance exists
- type: str
- sample: "1489707802.0"
- secondary_availability_zone:
- description: the name of the secondary AZ for a DB instance with multi-AZ support
- returned: when RDS instance exists and is multi-AZ
- type: str
- sample: "eu-west-1b"
- backup_window:
- description: the daily time range during which automated backups are created if automated backups are enabled
- returned: when RDS instance exists and automated backups are enabled
- type: str
- sample: "03:00-03:30"
- auto_minor_version_upgrade:
- description: indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window
- returned: when RDS instance exists
- type: bool
- sample: "true"
- read_replica_source_dbinstance_identifier:
- description: the identifier of the source DB instance if this RDS instance is a read replica
- returned: when read replica RDS instance exists
- type: str
- sample: "null"
- db_name:
- description: the name of the database to create when the DB instance is created
- returned: when RDS instance exists
- type: str
- sample: "ASERTG"
- endpoint:
- description: the endpoint uri of the database instance
- returned: when RDS instance exists
- type: str
- sample: "my-ansible-database.asdfaosdgih.us-east-1.rds.amazonaws.com"
- port:
- description: the listening port of the database instance
- returned: when RDS instance exists
- type: int
- sample: 3306
- parameter_groups:
- description: the list of DB parameter groups applied to this RDS instance
- returned: when RDS instance exists and parameter groups are defined
- type: complex
- contains:
- parameter_apply_status:
- description: the status of parameter updates
- returned: when RDS instance exists
- type: str
- sample: "in-sync"
- parameter_group_name:
- description: the name of the DB parameter group
- returned: when RDS instance exists
- type: str
- sample: "testawsrpprodb01spfile-1ujg7nrs7sgyz"
- option_groups:
- description: the list of option group memberships for this RDS instance
- returned: when RDS instance exists
- type: complex
- contains:
- option_group_name:
- description: the option group name for this RDS instance
- returned: when RDS instance exists
- type: str
- sample: "default:oracle-se-11-2"
- status:
- description: the status of the RDS instance's option group membership
- returned: when RDS instance exists
- type: str
- sample: "in-sync"
- pending_modified_values:
- description: a dictionary of changes to the RDS instance that are pending
- returned: when RDS instance exists
- type: complex
- contains:
- db_instance_class:
- description: the new DB instance class for this RDS instance that will be applied or is in progress
- returned: when RDS instance exists
- type: str
- sample: "null"
- db_instance_identifier:
- description: the new DB instance identifier for this RDS instance that will be applied or is in progress
- returned: when RDS instance exists
- type: str
- sample: "null"
- allocated_storage:
- description: the new allocated storage size for this RDS instance that will be applied or is in progress
- returned: when RDS instance exists
- type: str
- sample: "null"
- backup_retention_period:
- description: the pending number of days for which automated backups are retained
- returned: when RDS instance exists
- type: str
- sample: "null"
- engine_version:
- description: indicates the database engine version
- returned: when RDS instance exists
- type: str
- sample: "null"
- iops:
- description: the new provisioned IOPS value for this RDS instance that will be applied or is being applied
- returned: when RDS instance exists
- type: str
- sample: "null"
- master_user_password:
- description: the pending or in-progress change of the master credentials for this RDS instance
- returned: when RDS instance exists
- type: str
- sample: "null"
- multi_az:
- description: indicates that the single-AZ RDS instance is to change to a multi-AZ deployment
- returned: when RDS instance exists
- type: str
- sample: "null"
- port:
- description: specifies the pending port for this RDS instance
- returned: when RDS instance exists
- type: str
- sample: "null"
- db_subnet_groups:
- description: information on the subnet group associated with this RDS instance
- returned: when RDS instance exists
- type: complex
- contains:
- description:
- description: the subnet group associated with the DB instance
- returned: when RDS instance exists
- type: str
- sample: "Subnets for the UAT RDS SQL DB Instance"
- name:
- description: the name of the DB subnet group
- returned: when RDS instance exists
- type: str
- sample: "samplesubnetgrouprds-j6paiqkxqp4z"
- status:
- description: the status of the DB subnet group
- returned: when RDS instance exists
- type: str
- sample: "complete"
- subnets:
- description: the list of subnets in the DB subnet group
- returned: when RDS instance exists
- type: complex
- contains:
- availability_zone:
- description: subnet availability zone information
- returned: when RDS instance exists
- type: complex
- contains:
- name:
- description: availability zone
- returned: when RDS instance exists
- type: str
- sample: "eu-west-1b"
- provisioned_iops_capable:
- description: whether provisioned iops are available in AZ subnet
- returned: when RDS instance exists
- type: bool
- sample: "false"
- identifier:
- description: the identifier of the subnet
- returned: when RDS instance exists
- type: str
- sample: "subnet-3fdba63e"
- status:
- description: the status of the subnet
- returned: when RDS instance exists
- type: str
- sample: "active"
-'''
-
-import time
-
-try:
- import boto.rds
- import boto.exception
-except ImportError:
- pass # Taken care of by ec2.HAS_BOTO
-
-try:
- import boto.rds2
- import boto.rds2.exceptions
- HAS_RDS2 = True
-except ImportError:
- HAS_RDS2 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import AWSRetry
-from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
-
-
-DEFAULT_PORTS = {
- 'aurora': 3306,
- 'mariadb': 3306,
- 'mysql': 3306,
- 'oracle': 1521,
- 'sqlserver': 1433,
- 'postgres': 5432,
-}
-
-
-class RDSException(Exception):
- def __init__(self, exc):
- if hasattr(exc, 'error_message') and exc.error_message:
- self.message = exc.error_message
- self.code = exc.error_code
- elif hasattr(exc, 'body') and 'Error' in exc.body:
- self.message = exc.body['Error']['Message']
- self.code = exc.body['Error']['Code']
- else:
- self.message = str(exc)
- self.code = 'Unknown Error'
-
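-# A minimal usage sketch: RDSException normalizes boto.rds and boto.rds2
-# errors behind a common .message/.code pair, so the command handlers below
-# can report failures uniformly, e.g.
-#   try:
-#       conn.create_db_instance(...)
-#   except RDSException as e:
-#       module.fail_json(msg="Failed to create instance: %s" % e.message)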
-
-class RDSConnection:
- def __init__(self, module, region, **aws_connect_params):
- try:
- self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
- except boto.exception.BotoServerError as e:
- module.fail_json(msg=e.error_message)
-
- def get_db_instance(self, instancename):
- try:
- return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
- except boto.exception.BotoServerError:
- return None
-
- def get_db_snapshot(self, snapshotid):
- try:
- return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
- except boto.exception.BotoServerError:
- return None
-
- def create_db_instance(self, instance_name, size, instance_class, db_engine,
- username, password, **params):
- params['engine'] = db_engine
- try:
- result = self.connection.create_dbinstance(instance_name, size, instance_class,
- username, password, **params)
- return RDSDBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def create_db_instance_read_replica(self, instance_name, source_instance, **params):
- try:
- result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
- return RDSDBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def delete_db_instance(self, instance_name, **params):
- try:
- result = self.connection.delete_dbinstance(instance_name, **params)
- return RDSDBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def delete_db_snapshot(self, snapshot):
- try:
- result = self.connection.delete_dbsnapshot(snapshot)
- return RDSSnapshot(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def modify_db_instance(self, instance_name, **params):
- try:
- result = self.connection.modify_dbinstance(instance_name, **params)
- return RDSDBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def reboot_db_instance(self, instance_name, **params):
- try:
- result = self.connection.reboot_dbinstance(instance_name)
- return RDSDBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
- try:
- result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
- return RDSDBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def create_db_snapshot(self, snapshot, instance_name, **params):
- try:
- result = self.connection.create_dbsnapshot(snapshot, instance_name)
- return RDSSnapshot(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def promote_read_replica(self, instance_name, **params):
- try:
- result = self.connection.promote_read_replica(instance_name, **params)
- return RDSDBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
-
-class RDS2Connection:
- def __init__(self, module, region, **aws_connect_params):
- try:
- self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
- except boto.exception.BotoServerError as e:
- module.fail_json(msg=e.error_message)
-
- def get_db_instance(self, instancename):
- try:
- dbinstances = self.connection.describe_db_instances(
- db_instance_identifier=instancename
- )['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
- result = RDS2DBInstance(dbinstances[0])
- return result
- except boto.rds2.exceptions.DBInstanceNotFound:
- return None
-
- def get_db_snapshot(self, snapshotid):
- try:
- snapshots = self.connection.describe_db_snapshots(
- db_snapshot_identifier=snapshotid,
- snapshot_type='manual'
- )['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
- result = RDS2Snapshot(snapshots[0])
- return result
- except boto.rds2.exceptions.DBSnapshotNotFound:
- return None
-
- def create_db_instance(self, instance_name, size, instance_class, db_engine,
- username, password, **params):
- try:
- result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password,
- **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
- return RDS2DBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def create_db_instance_read_replica(self, instance_name, source_instance, **params):
- try:
- result = self.connection.create_db_instance_read_replica(
- instance_name,
- source_instance,
- **params
- )['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
- return RDS2DBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def delete_db_instance(self, instance_name, **params):
- try:
- result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
- return RDS2DBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def delete_db_snapshot(self, snapshot):
- try:
- result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
- return RDS2Snapshot(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def modify_db_instance(self, instance_name, **params):
- try:
- result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
- return RDS2DBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def reboot_db_instance(self, instance_name, **params):
- try:
- result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
- return RDS2DBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
- try:
- result = self.connection.restore_db_instance_from_db_snapshot(
- instance_name,
- snapshot,
- **params
- )['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
- return RDS2DBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def create_db_snapshot(self, snapshot, instance_name, **params):
- try:
- result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
- return RDS2Snapshot(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
- def promote_read_replica(self, instance_name, **params):
- try:
- result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
- return RDS2DBInstance(result)
- except boto.exception.BotoServerError as e:
- raise RDSException(e)
-
-
-class RDSDBInstance:
- def __init__(self, dbinstance):
- self.instance = dbinstance
- self.name = dbinstance.id
- self.status = dbinstance.status
-
- def get_data(self):
- d = {
- 'id': self.name,
- 'create_time': self.instance.create_time,
- 'status': self.status,
- 'availability_zone': self.instance.availability_zone,
- 'backup_retention': self.instance.backup_retention_period,
- 'backup_window': self.instance.preferred_backup_window,
- 'maintenance_window': self.instance.preferred_maintenance_window,
- 'multi_zone': self.instance.multi_az,
- 'instance_type': self.instance.instance_class,
- 'username': self.instance.master_username,
- 'iops': self.instance.iops
- }
-
- # Only assign an Endpoint if one is available
- if hasattr(self.instance, 'endpoint'):
- d["endpoint"] = self.instance.endpoint[0]
- d["port"] = self.instance.endpoint[1]
- if self.instance.vpc_security_groups is not None:
- d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
- else:
- d["vpc_security_groups"] = None
- else:
- d["endpoint"] = None
- d["port"] = None
- d["vpc_security_groups"] = None
- d['DBName'] = self.instance.DBName if hasattr(self.instance, 'DBName') else None
- # ReadReplicaSourceDBInstanceIdentifier may or may not exist
- try:
- d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
- except Exception:
- d["replication_source"] = None
- return d
-
-
-class RDS2DBInstance:
- def __init__(self, dbinstance):
- self.instance = dbinstance
- if 'DBInstanceIdentifier' not in dbinstance:
- self.name = None
- else:
- self.name = self.instance.get('DBInstanceIdentifier')
- self.status = self.instance.get('DBInstanceStatus')
-
- def get_data(self):
- d = {
- 'id': self.name,
- 'create_time': self.instance['InstanceCreateTime'],
- 'engine': self.instance['Engine'],
- 'engine_version': self.instance['EngineVersion'],
- 'license_model': self.instance['LicenseModel'],
- 'character_set_name': self.instance['CharacterSetName'],
- 'allocated_storage': self.instance['AllocatedStorage'],
- 'publicly_accessible': self.instance['PubliclyAccessible'],
- 'latest_restorable_time': self.instance['LatestRestorableTime'],
- 'status': self.status,
- 'availability_zone': self.instance['AvailabilityZone'],
- 'secondary_availability_zone': self.instance['SecondaryAvailabilityZone'],
- 'backup_retention': self.instance['BackupRetentionPeriod'],
- 'backup_window': self.instance['PreferredBackupWindow'],
- 'maintenance_window': self.instance['PreferredMaintenanceWindow'],
- 'auto_minor_version_upgrade': self.instance['AutoMinorVersionUpgrade'],
- 'read_replica_source_dbinstance_identifier': self.instance['ReadReplicaSourceDBInstanceIdentifier'],
- 'multi_zone': self.instance['MultiAZ'],
- 'instance_type': self.instance['DBInstanceClass'],
- 'username': self.instance['MasterUsername'],
- 'db_name': self.instance['DBName'],
- 'iops': self.instance['Iops'],
- 'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
- }
- if self.instance['DBParameterGroups'] is not None:
- parameter_groups = []
- for x in self.instance['DBParameterGroups']:
- parameter_groups.append({'parameter_group_name': x['DBParameterGroupName'], 'parameter_apply_status': x['ParameterApplyStatus']})
- d['parameter_groups'] = parameter_groups
- if self.instance['OptionGroupMemberships'] is not None:
- option_groups = []
- for x in self.instance['OptionGroupMemberships']:
- option_groups.append({'status': x['Status'], 'option_group_name': x['OptionGroupName']})
- d['option_groups'] = option_groups
- if self.instance['PendingModifiedValues'] is not None:
- pdv = self.instance['PendingModifiedValues']
- d['pending_modified_values'] = {
- 'multi_az': pdv['MultiAZ'],
- 'master_user_password': pdv['MasterUserPassword'],
- 'port': pdv['Port'],
- 'iops': pdv['Iops'],
- 'allocated_storage': pdv['AllocatedStorage'],
- 'engine_version': pdv['EngineVersion'],
- 'backup_retention_period': pdv['BackupRetentionPeriod'],
- 'db_instance_class': pdv['DBInstanceClass'],
- 'db_instance_identifier': pdv['DBInstanceIdentifier']
- }
- if self.instance["DBSubnetGroup"] is not None:
- dsg = self.instance["DBSubnetGroup"]
- db_subnet_groups = {}
- db_subnet_groups['vpc_id'] = dsg['VpcId']
- db_subnet_groups['name'] = dsg['DBSubnetGroupName']
- db_subnet_groups['status'] = dsg['SubnetGroupStatus'].lower()
- db_subnet_groups['description'] = dsg['DBSubnetGroupDescription']
- db_subnet_groups['subnets'] = []
- for x in dsg["Subnets"]:
- db_subnet_groups['subnets'].append({
- 'status': x['SubnetStatus'].lower(),
- 'identifier': x['SubnetIdentifier'],
- 'availability_zone': {
- 'name': x['SubnetAvailabilityZone']['Name'],
- 'provisioned_iops_capable': x['SubnetAvailabilityZone']['ProvisionedIopsCapable']
- }
- })
- d['db_subnet_groups'] = db_subnet_groups
- if self.instance["VpcSecurityGroups"] is not None:
- d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
- if "Endpoint" in self.instance and self.instance["Endpoint"] is not None:
- d['endpoint'] = self.instance["Endpoint"].get('Address', None)
- d['port'] = self.instance["Endpoint"].get('Port', None)
- else:
- d['endpoint'] = None
- d['port'] = None
- d['DBName'] = self.instance['DBName'] if 'DBName' in self.instance else None
- return d
-
-
-class RDSSnapshot:
- def __init__(self, snapshot):
- self.snapshot = snapshot
- self.name = snapshot.id
- self.status = snapshot.status
-
- def get_data(self):
- d = {
- 'id': self.name,
- 'create_time': self.snapshot.snapshot_create_time,
- 'status': self.status,
- 'availability_zone': self.snapshot.availability_zone,
- 'instance_id': self.snapshot.instance_id,
- 'instance_created': self.snapshot.instance_create_time,
- }
- # needs boto >= 2.21.0
- if hasattr(self.snapshot, 'snapshot_type'):
- d["snapshot_type"] = self.snapshot.snapshot_type
- if hasattr(self.snapshot, 'iops'):
- d["iops"] = self.snapshot.iops
- return d
-
-
-class RDS2Snapshot:
- def __init__(self, snapshot):
- if 'DeleteDBSnapshotResponse' in snapshot:
- self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
- else:
- self.snapshot = snapshot
- self.name = self.snapshot.get('DBSnapshotIdentifier')
- self.status = self.snapshot.get('Status')
-
- def get_data(self):
- d = {
- 'id': self.name,
- 'create_time': self.snapshot['SnapshotCreateTime'],
- 'status': self.status,
- 'availability_zone': self.snapshot['AvailabilityZone'],
- 'instance_id': self.snapshot['DBInstanceIdentifier'],
- 'instance_created': self.snapshot['InstanceCreateTime'],
- 'snapshot_type': self.snapshot['SnapshotType'],
- 'iops': self.snapshot['Iops'],
- }
- return d
-
-
-def await_resource(conn, resource, status, module):
- start_time = time.time()
- wait_timeout = module.params.get('wait_timeout') + start_time
- check_interval = 5
- while wait_timeout > time.time() and resource.status != status:
- time.sleep(check_interval)
- if wait_timeout <= time.time():
- module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
- if module.params.get('command') == 'snapshot':
- # Temporary until all the rds2 commands have their responses parsed
- if resource.name is None:
- module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
- # Back off if we're getting throttled, since we're just waiting anyway
- resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name)
- else:
- # Temporary until all the rds2 commands have their responses parsed
- if resource.name is None:
- module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
- # Back off if we're getting throttled, since we're just waiting anyway
- resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name)
- if resource is None:
- break
- # Some RDS resources take much longer than others to be ready. Check
- # less aggressively for slow ones to avoid throttling.
- if time.time() > start_time + 90:
- check_interval = 20
- return resource
-
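-# Typical call from the command handlers below: block until the resource
-# reaches the desired state (or wait_timeout expires), e.g.
-#   resource = await_resource(conn, result, 'available', module)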
-
-def create_db_instance(module, conn):
- required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
- valid_vars = ['backup_retention', 'backup_window',
- 'character_set_name', 'db_name', 'engine_version',
- 'instance_type', 'iops', 'license_model', 'maint_window',
- 'multi_zone', 'option_group', 'parameter_group', 'port',
- 'subnet', 'upgrade', 'zone']
- if module.params.get('subnet'):
- valid_vars.append('vpc_security_groups')
- else:
- valid_vars.append('security_groups')
- if HAS_RDS2:
- valid_vars.extend(['publicly_accessible', 'tags'])
- params = validate_parameters(required_vars, valid_vars, module)
- instance_name = module.params.get('instance_name')
-
- result = conn.get_db_instance(instance_name)
- if result:
- changed = False
- else:
- try:
- result = conn.create_db_instance(instance_name, module.params.get('size'),
- module.params.get('instance_type'), module.params.get('db_engine'),
- module.params.get('username'), module.params.get('password'), **params)
- changed = True
- except RDSException as e:
- module.fail_json(msg="Failed to create instance: %s" % e.message)
-
- if module.params.get('wait'):
- resource = await_resource(conn, result, 'available', module)
- else:
- resource = conn.get_db_instance(instance_name)
-
- module.exit_json(changed=changed, instance=resource.get_data())
-
-
-def replicate_db_instance(module, conn):
- required_vars = ['instance_name', 'source_instance']
- valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
- if HAS_RDS2:
- valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
- params = validate_parameters(required_vars, valid_vars, module)
- instance_name = module.params.get('instance_name')
- source_instance = module.params.get('source_instance')
-
- result = conn.get_db_instance(instance_name)
- if result:
- changed = False
- else:
- try:
- result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
- changed = True
- except RDSException as e:
- module.fail_json(msg="Failed to create replica instance: %s " % e.message)
-
- if module.params.get('wait'):
- resource = await_resource(conn, result, 'available', module)
- else:
- resource = conn.get_db_instance(instance_name)
-
- module.exit_json(changed=changed, instance=resource.get_data())
-
-
-def delete_db_instance_or_snapshot(module, conn):
- required_vars = []
- valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
- params = validate_parameters(required_vars, valid_vars, module)
- instance_name = module.params.get('instance_name')
- snapshot = module.params.get('snapshot')
-
- if not instance_name:
- result = conn.get_db_snapshot(snapshot)
- else:
- result = conn.get_db_instance(instance_name)
- if not result:
- module.exit_json(changed=False)
- if result.status == 'deleting':
- module.exit_json(changed=False)
- try:
- if instance_name:
- if snapshot:
- params["skip_final_snapshot"] = False
- if HAS_RDS2:
- params["final_db_snapshot_identifier"] = snapshot
- else:
- params["final_snapshot_id"] = snapshot
- else:
- params["skip_final_snapshot"] = True
- result = conn.delete_db_instance(instance_name, **params)
- else:
- result = conn.delete_db_snapshot(snapshot)
- except RDSException as e:
- module.fail_json(msg="Failed to delete instance: %s" % e.message)
-
- # If we're not waiting for a delete to complete then we're all done
- # so just return
- if not module.params.get('wait'):
- module.exit_json(changed=True)
- try:
- await_resource(conn, result, 'deleted', module)
- module.exit_json(changed=True)
- except RDSException as e:
- if e.code == 'DBInstanceNotFound':
- module.exit_json(changed=True)
- else:
- module.fail_json(msg=e.message)
- except Exception as e:
- module.fail_json(msg=str(e))
-
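-# Sketch of the delete call this builds (assuming boto.rds2 / HAS_RDS2): with
-# both instance_name and snapshot set, the instance is deleted with roughly
-#   conn.delete_db_instance(instance_name, skip_final_snapshot=False,
-#                           final_db_snapshot_identifier=snapshot)
-# while a snapshot alone is removed via conn.delete_db_snapshot(snapshot).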
-
-def facts_db_instance_or_snapshot(module, conn):
- instance_name = module.params.get('instance_name')
- snapshot = module.params.get('snapshot')
-
- if instance_name and snapshot:
- module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
- if instance_name:
- resource = conn.get_db_instance(instance_name)
- if not resource:
- module.fail_json(msg="DB instance %s does not exist" % instance_name)
- if snapshot:
- resource = conn.get_db_snapshot(snapshot)
- if not resource:
- module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
-
- module.exit_json(changed=False, instance=resource.get_data())
-
-
-def modify_db_instance(module, conn):
- required_vars = ['instance_name']
- valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
- 'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
- 'maint_window', 'multi_zone', 'new_instance_name',
- 'option_group', 'parameter_group', 'password', 'size', 'upgrade']
-
- params = validate_parameters(required_vars, valid_vars, module)
- instance_name = module.params.get('instance_name')
- new_instance_name = module.params.get('new_instance_name')
-
- try:
- result = conn.modify_db_instance(instance_name, **params)
- except RDSException as e:
- module.fail_json(msg=e.message)
- if params.get('apply_immediately'):
- if new_instance_name:
- # Wait until the new instance name is valid
- new_instance = None
- while not new_instance:
- new_instance = conn.get_db_instance(new_instance_name)
- time.sleep(5)
-
- # Found instance but it briefly flicks to available
- # before rebooting so let's wait until we see it rebooting
- # before we check whether to 'wait'
- result = await_resource(conn, new_instance, 'rebooting', module)
-
- if module.params.get('wait'):
- resource = await_resource(conn, result, 'available', module)
- else:
- resource = conn.get_db_instance(instance_name)
-
- # guess that this changed the DB, need a way to check
- module.exit_json(changed=True, instance=resource.get_data())
-
-
-def promote_db_instance(module, conn):
- required_vars = ['instance_name']
- valid_vars = ['backup_retention', 'backup_window']
- params = validate_parameters(required_vars, valid_vars, module)
- instance_name = module.params.get('instance_name')
-
- result = conn.get_db_instance(instance_name)
- if not result:
- module.fail_json(msg="DB Instance %s does not exist" % instance_name)
-
- if result.get_data().get('replication_source'):
- try:
- result = conn.promote_read_replica(instance_name, **params)
- changed = True
- except RDSException as e:
- module.fail_json(msg=e.message)
- else:
- changed = False
-
- if module.params.get('wait'):
- resource = await_resource(conn, result, 'available', module)
- else:
- resource = conn.get_db_instance(instance_name)
-
- module.exit_json(changed=changed, instance=resource.get_data())
-
-
-def snapshot_db_instance(module, conn):
- required_vars = ['instance_name', 'snapshot']
- valid_vars = ['tags']
- params = validate_parameters(required_vars, valid_vars, module)
- instance_name = module.params.get('instance_name')
- snapshot = module.params.get('snapshot')
- changed = False
- result = conn.get_db_snapshot(snapshot)
- if not result:
- try:
- result = conn.create_db_snapshot(snapshot, instance_name, **params)
- changed = True
- except RDSException as e:
- module.fail_json(msg=e.message)
-
- if module.params.get('wait'):
- resource = await_resource(conn, result, 'available', module)
- else:
- resource = conn.get_db_snapshot(snapshot)
-
- module.exit_json(changed=changed, snapshot=resource.get_data())
-
-
-def reboot_db_instance(module, conn):
- required_vars = ['instance_name']
- valid_vars = []
-
- if HAS_RDS2:
- valid_vars.append('force_failover')
-
- params = validate_parameters(required_vars, valid_vars, module)
- instance_name = module.params.get('instance_name')
- result = conn.get_db_instance(instance_name)
- changed = False
- try:
- result = conn.reboot_db_instance(instance_name, **params)
- changed = True
- except RDSException as e:
- module.fail_json(msg=e.message)
-
- if module.params.get('wait'):
- resource = await_resource(conn, result, 'available', module)
- else:
- resource = conn.get_db_instance(instance_name)
-
- module.exit_json(changed=changed, instance=resource.get_data())
-
-
-def restore_db_instance(module, conn):
- required_vars = ['instance_name', 'snapshot']
- valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
- 'option_group', 'port', 'publicly_accessible',
- 'subnet', 'tags', 'upgrade', 'zone']
- if HAS_RDS2:
- valid_vars.append('instance_type')
- else:
- required_vars.append('instance_type')
- params = validate_parameters(required_vars, valid_vars, module)
- instance_name = module.params.get('instance_name')
- instance_type = module.params.get('instance_type')
- snapshot = module.params.get('snapshot')
-
- changed = False
- result = conn.get_db_instance(instance_name)
- if not result:
- try:
- result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
- changed = True
- except RDSException as e:
- module.fail_json(msg=e.message)
-
- if module.params.get('wait'):
- resource = await_resource(conn, result, 'available', module)
- else:
- resource = conn.get_db_instance(instance_name)
-
- module.exit_json(changed=changed, instance=resource.get_data())
-
-
-def validate_parameters(required_vars, valid_vars, module):
- command = module.params.get('command')
- for v in required_vars:
- if not module.params.get(v):
- module.fail_json(msg="Parameter %s required for %s command" % (v, command))
-
- # map to convert rds module options to boto rds and rds2 options
- optional_params = {
- 'port': 'port',
- 'db_name': 'db_name',
- 'zone': 'availability_zone',
- 'maint_window': 'preferred_maintenance_window',
- 'backup_window': 'preferred_backup_window',
- 'backup_retention': 'backup_retention_period',
- 'multi_zone': 'multi_az',
- 'engine_version': 'engine_version',
- 'upgrade': 'auto_minor_version_upgrade',
- 'subnet': 'db_subnet_group_name',
- 'license_model': 'license_model',
- 'option_group': 'option_group_name',
- 'size': 'allocated_storage',
- 'iops': 'iops',
- 'new_instance_name': 'new_instance_id',
- 'apply_immediately': 'apply_immediately',
- }
- # map to convert rds module options to boto rds options
- optional_params_rds = {
- 'db_engine': 'engine',
- 'password': 'master_password',
- 'parameter_group': 'param_group',
- 'instance_type': 'instance_class',
- }
- # map to convert rds module options to boto rds2 options
- optional_params_rds2 = {
- 'tags': 'tags',
- 'publicly_accessible': 'publicly_accessible',
- 'parameter_group': 'db_parameter_group_name',
- 'character_set_name': 'character_set_name',
- 'instance_type': 'db_instance_class',
- 'password': 'master_user_password',
- 'new_instance_name': 'new_db_instance_identifier',
- 'force_failover': 'force_failover',
- }
- if HAS_RDS2:
- optional_params.update(optional_params_rds2)
- sec_group = 'db_security_groups'
- else:
- optional_params.update(optional_params_rds)
- sec_group = 'security_groups'
- # Check for options only supported with rds2
- for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
- if module.params.get(k):
- module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k)
-
- params = {}
- for (k, v) in optional_params.items():
- if module.params.get(k) is not None and k not in required_vars:
- if k in valid_vars:
- params[v] = module.params[k]
- else:
- if module.params.get(k) is False:
- pass
- else:
- module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
-
- if module.params.get('security_groups'):
- params[sec_group] = module.params.get('security_groups').split(',')
-
- vpc_groups = module.params.get('vpc_security_groups')
- if vpc_groups:
- if HAS_RDS2:
- params['vpc_security_group_ids'] = vpc_groups
- else:
- groups_list = []
- for x in vpc_groups:
- groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
- params['vpc_security_groups'] = groups_list
-
- # Convert tags dict to list of tuples that rds2 expects
- if 'tags' in params:
- params['tags'] = module.params['tags'].items()
- return params
-
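-# Worked example (assuming boto.rds2 / HAS_RDS2): for command=modify with
-# new_instance_name=renamed-db and apply_immediately=true,
-# validate_parameters() returns roughly
-#   {'new_db_instance_identifier': 'renamed-db', 'apply_immediately': True}
-# i.e. module option names are mapped onto boto keyword arguments.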
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- command=dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
- instance_name=dict(required=False),
- source_instance=dict(required=False),
- db_engine=dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',
- 'sqlserver-web', 'postgres', 'aurora'], required=False),
- size=dict(required=False),
- instance_type=dict(aliases=['type'], required=False),
- username=dict(required=False),
- password=dict(no_log=True, required=False),
- db_name=dict(required=False),
- engine_version=dict(required=False),
- parameter_group=dict(required=False),
- license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
- multi_zone=dict(type='bool', required=False),
- iops=dict(required=False),
- security_groups=dict(required=False),
- vpc_security_groups=dict(type='list', required=False),
- port=dict(required=False, type='int'),
- upgrade=dict(type='bool', default=False),
- option_group=dict(required=False),
- maint_window=dict(required=False),
- backup_window=dict(required=False),
- backup_retention=dict(required=False),
- zone=dict(aliases=['aws_zone', 'ec2_zone'], required=False),
- subnet=dict(required=False),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- snapshot=dict(required=False),
- apply_immediately=dict(type='bool', default=False),
- new_instance_name=dict(required=False),
- tags=dict(type='dict', required=False),
- publicly_accessible=dict(required=False),
- character_set_name=dict(required=False),
- force_failover=dict(type='bool', required=False, default=False)
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- )
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- invocations = {
- 'create': create_db_instance,
- 'replicate': replicate_db_instance,
- 'delete': delete_db_instance_or_snapshot,
- 'facts': facts_db_instance_or_snapshot,
- 'modify': modify_db_instance,
- 'promote': promote_db_instance,
- 'snapshot': snapshot_db_instance,
- 'reboot': reboot_db_instance,
- 'restore': restore_db_instance,
- }
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
- if not region:
- module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
-
- # set port to per db defaults if not specified
- if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create':
- if '-' in module.params['db_engine']:
- engine = module.params['db_engine'].split('-')[0]
- else:
- engine = module.params['db_engine']
- module.params['port'] = DEFAULT_PORTS[engine.lower()]
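- # e.g. db_engine=sqlserver-web -> engine 'sqlserver' -> port 1433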
-
- # connect to the rds endpoint
- if HAS_RDS2:
- conn = RDS2Connection(module, region, **aws_connect_params)
- else:
- conn = RDSConnection(module, region, **aws_connect_params)
-
- invocations[module.params.get('command')](module, conn)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/rds_instance.py b/lib/ansible/modules/cloud/amazon/rds_instance.py
deleted file mode 100644
index 8515aa0735..0000000000
--- a/lib/ansible/modules/cloud/amazon/rds_instance.py
+++ /dev/null
@@ -1,1226 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = '''
----
-module: rds_instance
-version_added: "2.7"
-short_description: Manage RDS instances
-description:
- - Create, modify, and delete RDS instances.
-
-requirements:
- - botocore
- - boto3 >= 1.5.0
-extends_documentation_fragment:
- - aws
- - ec2
-author:
- - Sloane Hertel (@s-hertel)
-
-options:
- # General module options
- state:
- description:
- - Whether the DB instance should exist or not. I(rebooted) is not idempotent and will leave the DB instance in a running state,
- starting it prior to rebooting if it was stopped. I(present) will leave the DB instance in its current running/stopped state
- (running if creating the DB instance).
- - I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted). Note that rebooting the instance
- is not idempotent.
- choices: ['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted']
- default: 'present'
- type: str
- creation_source:
- description: Which source to use if restoring from a template (an existing instance, S3 bucket, or snapshot).
- choices: ['snapshot', 's3', 'instance']
- type: str
- force_update_password:
- description:
- - Set to True to update your instance password with I(master_user_password). Since it is not possible to compare
- passwords to determine whether an update is needed, this defaults to False to allow idempotence.
- type: bool
- default: False
- purge_cloudwatch_logs_exports:
- description: Set to False to retain any enabled CloudWatch logs exports that aren't specified in the task and are associated with the instance.
- type: bool
- default: True
- purge_tags:
- description: Set to False to retain any tags that aren't specified in the task and are associated with the instance.
- type: bool
- default: True
- read_replica:
- description:
- - Set to False to promote a read replica or True to create one. When creating a read replica I(creation_source) should
- be set to 'instance' or not provided. I(source_db_instance_identifier) must be provided with this option.
- type: bool
- wait:
- description:
- - Whether to wait for the instance to be available, stopped, or deleted. At a later time a wait_timeout option may be added.
- Following each API call to create/modify/delete the instance, a waiter is used with a 60 second delay 30 times until the instance reaches
- the expected state (available/stopped/deleted). The total task time may also be influenced by AWSRetry, which helps stabilize the task if the
- instance is in an invalid state to operate on to begin with (such as if you try to stop it while it is in the process of rebooting).
- If you set this to False, task retries and delays may make your playbook execution better handle timeouts for major modifications.
- type: bool
- default: True
-
- # Options that have a corresponding boto3 parameter
- allocated_storage:
- description:
- - The amount of storage (in gibibytes) to allocate for the DB instance.
- type: int
- allow_major_version_upgrade:
- description:
- - Whether to allow major version upgrades.
- type: bool
- apply_immediately:
- description:
- - A value that specifies whether modifying an instance with I(new_db_instance_identifier) and I(master_user_password)
- should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If false, changes
- are applied during the next maintenance window.
- type: bool
- default: False
- auto_minor_version_upgrade:
- description:
- - Whether minor version upgrades are applied automatically to the DB instance during the maintenance window.
- type: bool
- availability_zone:
- description:
- - The EC2 Availability Zone that the DB instance is created in.
- May be used when creating an instance or when restoring from S3 or a snapshot. Mutually exclusive with I(multi_az).
- aliases:
- - az
- - zone
- type: str
- backup_retention_period:
- description:
- - The number of days for which automated backups are retained (must be greater than or equal to 1).
- May be used when creating a new instance, when restoring from S3, or when modifying an instance.
- type: int
- ca_certificate_identifier:
- description:
- - The identifier of the CA certificate for the DB instance.
- type: str
- character_set_name:
- description:
- - The character set to associate with the DB instance.
- type: str
- copy_tags_to_snapshot:
- description:
- - Whether or not to copy all tags from the DB instance to snapshots of the instance. When initially creating
- a DB instance the RDS API defaults this to false if unspecified.
- type: bool
- db_cluster_identifier:
- description:
- - The DB cluster (lowercase) identifier to add the aurora DB instance to. The identifier must contain from 1 to
- 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or
- contain consecutive hyphens.
- aliases:
- - cluster_id
- type: str
- db_instance_class:
- description:
- - The compute and memory capacity of the DB instance, for example db.t2.micro.
- aliases:
- - class
- - instance_type
- type: str
- db_instance_identifier:
- description:
- - The DB instance (lowercase) identifier. The identifier must contain from 1 to 63 letters, numbers, or
- hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens.
- aliases:
- - instance_id
- - id
- required: True
- type: str
- db_name:
- description:
- - The name for your database. If a name is not provided Amazon RDS will not create a database.
- type: str
- db_parameter_group_name:
- description:
- - The name of the DB parameter group to associate with this DB instance. If this argument is omitted when creating
- the DB instance, the default DBParameterGroup for the specified engine is used.
- type: str
- db_security_groups:
- description:
- - (EC2-Classic platform) A list of DB security groups to associate with this DB instance.
- type: list
- db_snapshot_identifier:
- description:
- - The identifier for the DB snapshot to restore from if using I(creation_source=snapshot).
- type: str
- db_subnet_group_name:
- description:
- - The DB subnet group name to use for the DB instance.
- aliases:
- - subnet_group
- type: str
- domain:
- description:
- - The Active Directory Domain to restore the instance in.
- type: str
- domain_iam_role_name:
- description:
- - The name of the IAM role to be used when making API calls to the Directory Service.
- type: str
- enable_cloudwatch_logs_exports:
- description:
- - A list of log types that need to be enabled for exporting to CloudWatch Logs.
- aliases:
- - cloudwatch_log_exports
- type: list
- enable_iam_database_authentication:
- description:
- - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts.
- If this option is omitted when creating the instance, Amazon RDS sets this to False.
- type: bool
- enable_performance_insights:
- description:
- - Whether to enable Performance Insights for the DB instance.
- type: bool
- engine:
- description:
- - The name of the database engine to be used for this DB instance. This is required to create an instance.
- Valid choices are aurora | aurora-mysql | aurora-postgresql | mariadb | mysql | oracle-ee | oracle-se |
- oracle-se1 | oracle-se2 | postgres | sqlserver-ee | sqlserver-ex | sqlserver-se | sqlserver-web
- type: str
- engine_version:
- description:
- - The version number of the database engine to use. For Aurora MySQL that could be 5.6.10a or 5.7.12;
- for Aurora PostgreSQL, for example, 9.6.3.
- type: str
- final_db_snapshot_identifier:
- description:
- - The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is false.
- aliases:
- - final_snapshot_identifier
- type: str
- force_failover:
- description:
- - Set to true to conduct the reboot through a MultiAZ failover.
- type: bool
- iops:
- description:
- - The Provisioned IOPS (I/O operations per second) value. Only set when I(storage_type) is set to io1.
- type: int
- kms_key_id:
- description:
- - The ARN of the AWS KMS key identifier for an encrypted DB instance. If you are creating a DB instance with the
- same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key
- alias instead of the ARN for the KMS encryption key.
- - If I(storage_encrypted) is true and this option is not provided, the default encryption key is used.
- type: str
- license_model:
- description:
- - The license model for the DB instance.
- - Valid values include license-included, bring-your-own-license, and general-public-license.
- - This option can also be omitted to default to an accepted value.
- type: str
- master_user_password:
- description:
- - An 8-41 character password for the master database user. The password can contain any printable ASCII character
- except "/", '"', or "@". To modify the password use I(force_update_password). Use I(apply_immediately) to change
- the password immediately, otherwise it is updated during the next maintenance window.
- aliases:
- - password
- type: str
- master_username:
- description:
- - The name of the master user for the DB instance. Must be 1-16 letters or numbers and begin with a letter.
- aliases:
- - username
- type: str
- max_allocated_storage:
- description:
- - The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.
- type: int
- version_added: "2.9"
- monitoring_interval:
- description:
- - The interval, in seconds, when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting
- metrics, specify 0. Amazon RDS defaults this to 0 if omitted when initially creating a DB instance.
- type: int
- monitoring_role_arn:
- description:
- - The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs.
- type: str
- multi_az:
- description:
- - Specifies if the DB instance is a Multi-AZ deployment. Mutually exclusive with I(availability_zone).
- type: bool
- new_db_instance_identifier:
- description:
- - The new DB instance (lowercase) identifier for the DB instance when renaming a DB instance. The identifier must contain
- from 1 to 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or
- contain consecutive hyphens. Use I(apply_immediately) to rename immediately, otherwise it is updated during the
- next maintenance window.
- aliases:
- - new_instance_id
- - new_id
- type: str
- option_group_name:
- description:
- - The option group to associate with the DB instance.
- type: str
- performance_insights_kms_key_id:
- description:
- - The AWS KMS key identifier (ARN, name, or alias) for encryption of Performance Insights data.
- type: str
- performance_insights_retention_period:
- description:
- - The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731.
- type: int
- port:
- description:
- - The port number on which the instances accept connections.
- type: int
- preferred_backup_window:
- description:
- - The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are
- enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with
- I(preferred_maintenance_window).
- aliases:
- - backup_window
- type: str
- preferred_maintenance_window:
- description:
- - The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must
- be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun.
- aliases:
- - maintenance_window
- type: str
- processor_features:
- description:
- - A dictionary of Name, Value pairs to indicate the number of CPU cores and the number of threads per core for the
- DB instance class of the DB instance. Names are threadsPerCore and coreCount.
- Set this option to an empty dictionary to use the default processor features.
- suboptions:
- threadsPerCore:
- description: The number of threads per core.
- coreCount:
- description: The number of CPU cores.
- type: dict
- promotion_tier:
- description:
- - An integer that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of
- the existing primary instance.
- type: str
- publicly_accessible:
- description:
- - Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with
- a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal
- instance with a DNS name that resolves to a private IP address.
- type: bool
- restore_time:
- description:
- - If using I(creation_source=instance) this indicates the UTC date and time to restore from the source instance.
- For example, "2009-09-07T23:45:00Z".
- - May alternatively set I(use_latest_restorable_time=True).
- - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided.
- type: str
- s3_bucket_name:
- description:
- - The name of the Amazon S3 bucket that contains the data used to create the Amazon DB instance.
- type: str
- s3_ingestion_role_arn:
- description:
- - The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access
- the Amazon S3 bucket on your behalf.
- type: str
- s3_prefix:
- description:
- - The prefix for all of the file names that contain the data used to create the Amazon DB instance. If you do not
- specify I(s3_prefix), the Amazon DB instance is created by using all of the files in the Amazon S3 bucket.
- type: str
- skip_final_snapshot:
- description:
- - Whether a final DB snapshot is created before the DB instance is deleted. If this is false, I(final_db_snapshot_identifier)
- must be provided.
- type: bool
- default: false
- snapshot_identifier:
- description:
- - The ARN of the DB snapshot to restore from when using I(creation_source=snapshot).
- type: str
- source_db_instance_identifier:
- description:
- - The identifier or ARN of the source DB instance from which to restore when creating a read replica or spinning up a point-in-time
- DB instance using I(creation_source=instance). If the source DB is not in the same region this should be an ARN.
- type: str
- source_engine:
- description:
- - The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket.
- choices:
- - mysql
- type: str
- source_engine_version:
- description:
- - The version of the database that the backup files were created from.
- type: str
- source_region:
- description:
- - The region of the DB instance from which the replica is created.
- type: str
- storage_encrypted:
- description:
- - Whether the DB instance is encrypted.
- type: bool
- storage_type:
- description:
- - The storage type to be associated with the DB instance. I(storage_type) does not apply to Aurora DB instances.
- choices:
- - standard
- - gp2
- - io1
- type: str
- tags:
- description:
- - A dictionary of key value pairs to assign to the DB instance.
- type: dict
- tde_credential_arn:
- description:
- - The ARN from the key store with which to associate the instance for Transparent Data Encryption. This is
- supported by Oracle or SQL Server DB instances and may be used in conjunction with I(storage_encrypted)
- though it might slightly affect the performance of your database.
- aliases:
- - transparent_data_encryption_arn
- type: str
- tde_credential_password:
- description:
- - The password for the given ARN from the key store in order to access the device.
- aliases:
- - transparent_data_encryption_password
- type: str
- timezone:
- description:
- - The time zone of the DB instance.
- type: str
- use_latest_restorable_time:
- description:
- - Whether to restore the DB instance to the latest restorable backup time.
- - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided.
- type: bool
- aliases:
- - restore_from_latest
- vpc_security_group_ids:
- description:
- - A list of EC2 VPC security groups to associate with the DB instance.
- type: list
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-- name: create minimal aurora instance in default VPC and default subnet group
- rds_instance:
- engine: aurora
- db_instance_identifier: ansible-test-aurora-db-instance
- instance_type: db.t2.small
- password: "{{ password }}"
- username: "{{ username }}"
- cluster_id: ansible-test-cluster # This cluster must exist - see rds_cluster to manage it
-
-- name: Create a DB instance using the default AWS KMS encryption key
- rds_instance:
- id: test-encrypted-db
- state: present
- engine: mariadb
- storage_encrypted: True
- db_instance_class: db.t2.medium
- username: "{{ username }}"
- password: "{{ password }}"
- allocated_storage: "{{ allocated_storage }}"
-
-- name: remove the DB instance without a final snapshot
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
-
-- name: remove the DB instance with a final snapshot
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- final_snapshot_identifier: "{{ snapshot_id }}"
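-
-# A sketch of a point-in-time restore; the source instance identifier below is a
-# placeholder and must refer to an existing instance
-- name: restore a DB instance to the latest restorable time of a source instance
- rds_instance:
- id: ansible-test-restored-db
- creation_source: instance
- source_db_instance_identifier: ansible-test-db
- use_latest_restorable_time: True
-
-# A sketch of renaming an instance without waiting for the maintenance window;
-# both identifiers are placeholders
-- name: rename a DB instance immediately
- rds_instance:
- id: ansible-test-db
- new_id: ansible-test-db-renamed
- apply_immediately: True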
-'''
-
-RETURN = '''
-allocated_storage:
- description: The allocated storage size in gibibytes. This is always 1 for Aurora database engines.
- returned: always
- type: int
- sample: 20
-auto_minor_version_upgrade:
- description: Whether minor engine upgrades are applied automatically to the DB instance during the maintenance window.
- returned: always
- type: bool
- sample: true
-availability_zone:
- description: The availability zone for the DB instance.
- returned: always
- type: str
- sample: us-east-1f
-backup_retention_period:
- description: The number of days for which automated backups are retained.
- returned: always
- type: int
- sample: 1
-ca_certificate_identifier:
- description: The identifier of the CA certificate for the DB instance.
- returned: always
- type: str
- sample: rds-ca-2015
-copy_tags_to_snapshot:
- description: Whether tags are copied from the DB instance to snapshots of the DB instance.
- returned: always
- type: bool
- sample: false
-db_instance_arn:
- description: The Amazon Resource Name (ARN) for the DB instance.
- returned: always
- type: str
- sample: arn:aws:rds:us-east-1:123456789012:db:ansible-test
-db_instance_class:
- description: The name of the compute and memory capacity class of the DB instance.
- returned: always
- type: str
- sample: db.m4.large
-db_instance_identifier:
- description: The identifier of the DB instance.
- returned: always
- type: str
- sample: ansible-test
-db_instance_port:
- description: The port that the DB instance listens on.
- returned: always
- type: int
- sample: 0
-db_instance_status:
- description: The current state of this database.
- returned: always
- type: str
- sample: stopped
-db_parameter_groups:
- description: The list of DB parameter groups applied to this DB instance.
- returned: always
- type: complex
- contains:
- db_parameter_group_name:
- description: The name of the DB parameter group.
- returned: always
- type: str
- sample: default.mariadb10.0
- parameter_apply_status:
- description: The status of parameter updates.
- returned: always
- type: str
- sample: in-sync
-db_security_groups:
- description: A list of DB security groups associated with this DB instance.
- returned: always
- type: list
- sample: []
-db_subnet_group:
- description: The subnet group associated with the DB instance.
- returned: always
- type: complex
- contains:
- db_subnet_group_description:
- description: The description of the DB subnet group.
- returned: always
- type: str
- sample: default
- db_subnet_group_name:
- description: The name of the DB subnet group.
- returned: always
- type: str
- sample: default
- subnet_group_status:
- description: The status of the DB subnet group.
- returned: always
- type: str
- sample: Complete
- subnets:
- description: A list of Subnet elements.
- returned: always
- type: complex
- contains:
- subnet_availability_zone:
- description: The availability zone of the subnet.
- returned: always
- type: complex
- contains:
- name:
- description: The name of the Availability Zone.
- returned: always
- type: str
- sample: us-east-1c
- subnet_identifier:
- description: The ID of the subnet.
- returned: always
- type: str
- sample: subnet-12345678
- subnet_status:
- description: The status of the subnet.
- returned: always
- type: str
- sample: Active
- vpc_id:
- description: The VpcId of the DB subnet group.
- returned: always
- type: str
- sample: vpc-12345678
-dbi_resource_id:
- description: The AWS Region-unique, immutable identifier for the DB instance.
- returned: always
- type: str
- sample: db-UHV3QRNWX4KB6GALCIGRML6QFA
-domain_memberships:
- description: The Active Directory Domain membership records associated with the DB instance.
- returned: always
- type: list
- sample: []
-endpoint:
- description: The connection endpoint.
- returned: always
- type: complex
- contains:
- address:
- description: The DNS address of the DB instance.
- returned: always
- type: str
- sample: ansible-test.cvlrtwiennww.us-east-1.rds.amazonaws.com
- hosted_zone_id:
- description: The ID that Amazon Route 53 assigns when you create a hosted zone.
- returned: always
- type: str
- sample: ZTR2ITUGPA61AM
- port:
- description: The port that the database engine is listening on.
- returned: always
- type: int
- sample: 3306
-engine:
- description: The database engine.
- returned: always
- type: str
- sample: mariadb
-engine_version:
- description: The database engine version.
- returned: always
- type: str
- sample: 10.0.35
-iam_database_authentication_enabled:
- description: Whether mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled.
- returned: always
- type: bool
- sample: false
-instance_create_time:
- description: The date and time the DB instance was created.
- returned: always
- type: str
- sample: '2018-07-04T16:48:35.332000+00:00'
-kms_key_id:
- description: The AWS KMS key identifier for the encrypted DB instance when storage_encrypted is true.
- returned: When storage_encrypted is true
- type: str
- sample: arn:aws:kms:us-east-1:123456789012:key/70c45553-ad2e-4a85-9f14-cfeb47555c33
-latest_restorable_time:
- description: The latest time to which a database can be restored with point-in-time restore.
- returned: always
- type: str
- sample: '2018-07-04T16:50:50.642000+00:00'
-license_model:
- description: The License model information for this DB instance.
- returned: always
- type: str
- sample: general-public-license
-master_username:
- description: The master username for the DB instance.
- returned: always
- type: str
- sample: test
-max_allocated_storage:
- description: The upper limit to which Amazon RDS can automatically scale the storage of the DB instance.
- returned: When max allocated storage is present.
- type: int
- sample: 100
-monitoring_interval:
- description:
- - The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.
- 0 means collecting Enhanced Monitoring metrics is disabled.
- returned: always
- type: int
- sample: 0
-multi_az:
- description: Whether the DB instance is a Multi-AZ deployment.
- returned: always
- type: bool
- sample: false
-option_group_memberships:
- description: The list of option group memberships for this DB instance.
- returned: always
- type: complex
- contains:
- option_group_name:
- description: The name of the option group that the instance belongs to.
- returned: always
- type: str
- sample: default:mariadb-10-0
- status:
- description: The status of the DB instance's option group membership.
- returned: always
- type: str
- sample: in-sync
-pending_modified_values:
- description: The changes to the DB instance that are pending.
- returned: always
- type: complex
- contains: {}
-performance_insights_enabled:
- description: Whether Performance Insights is enabled for the DB instance.
- returned: always
- type: bool
- sample: false
-preferred_backup_window:
- description: The daily time range during which automated backups are created if automated backups are enabled.
- returned: always
- type: str
- sample: 07:01-07:31
-preferred_maintenance_window:
- description: The weekly time range (in UTC) during which system maintenance can occur.
- returned: always
- type: str
- sample: sun:09:31-sun:10:01
-publicly_accessible:
- description:
- - True for an Internet-facing instance with a publicly resolvable DNS name, False to indicate an
- internal instance with a DNS name that resolves to a private IP address.
- returned: always
- type: bool
- sample: true
-read_replica_db_instance_identifiers:
- description: Identifiers of the Read Replicas associated with this DB instance.
- returned: always
- type: list
- sample: []
-storage_encrypted:
- description: Whether the DB instance is encrypted.
- returned: always
- type: bool
- sample: false
-storage_type:
- description: The storage type to be associated with the DB instance.
- returned: always
- type: str
- sample: standard
-tags:
- description: A dictionary of tags associated with the DB instance.
- returned: always
- type: complex
- contains: {}
-vpc_security_groups:
- description: A list of VPC security group elements that the DB instance belongs to.
- returned: always
- type: complex
- contains:
- status:
- description: The status of the VPC security group.
- returned: always
- type: str
- sample: active
- vpc_security_group_id:
- description: The ID of the VPC security group.
- returned: always
- type: str
- sample: sg-12345678
-'''
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters
-from ansible.module_utils.aws.rds import ensure_tags, arg_spec_to_rds_params, call_method, get_rds_method_attribute, get_tags, get_final_identifier
-from ansible.module_utils.aws.waiters import get_waiter
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry
-from ansible.module_utils.six import string_types
-
-from time import sleep
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError, WaiterError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def get_rds_method_attribute_name(instance, state, creation_source, read_replica):
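- """Return the name of the boto3 RDS client method to call for the desired state, creation source, and read replica flag, or None if no call is needed."""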
- method_name = None
- if state == 'absent' or state == 'terminated':
- if instance and instance['DBInstanceStatus'] not in ['deleting', 'deleted']:
- method_name = 'delete_db_instance'
- else:
- if instance:
- method_name = 'modify_db_instance'
- elif read_replica is True:
- method_name = 'create_db_instance_read_replica'
- elif creation_source == 'snapshot':
- method_name = 'restore_db_instance_from_db_snapshot'
- elif creation_source == 's3':
- method_name = 'restore_db_instance_from_s3'
- elif creation_source == 'instance':
- method_name = 'restore_db_instance_to_point_in_time'
- else:
- method_name = 'create_db_instance'
- return method_name
-
-
-def get_instance(client, module, db_instance_id):
- try:
- for i in range(3):
- try:
- instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)['DBInstances'][0]
- instance['Tags'] = get_tags(client, module, instance['DBInstanceArn'])
- if instance.get('ProcessorFeatures'):
- instance['ProcessorFeatures'] = dict((feature['Name'], feature['Value']) for feature in instance['ProcessorFeatures'])
- if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
- instance['PendingModifiedValues']['ProcessorFeatures'] = dict(
- (feature['Name'], feature['Value'])
- for feature in instance['PendingModifiedValues']['ProcessorFeatures']
- )
- break
- except is_boto3_error_code('DBInstanceNotFound'):
- sleep(3)
- else:
- instance = {}
- except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Failed to describe DB instances')
- return instance
-
-
-def get_final_snapshot(client, module, snapshot_identifier):
- try:
- snapshots = AWSRetry.jittered_backoff()(client.describe_db_snapshots)(DBSnapshotIdentifier=snapshot_identifier)
- if len(snapshots.get('DBSnapshots', [])) == 1:
- return snapshots['DBSnapshots'][0]
- return {}
- except is_boto3_error_code('DBSnapshotNotFound') as e: # May not be using wait: True
- return {}
- except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot')
-
-
-def get_parameters(client, module, parameters, method_name):
- if method_name == 'restore_db_instance_to_point_in_time':
- parameters['TargetDBInstanceIdentifier'] = module.params['db_instance_identifier']
-
- required_options = get_boto3_client_method_parameters(client, method_name, required=True)
- if any([parameters.get(k) is None for k in required_options]):
- module.fail_json(msg='To {0} the following parameters are required: {1}'.format(
- get_rds_method_attribute(method_name, module).operation_description, required_options))
- options = get_boto3_client_method_parameters(client, method_name)
- parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None)
-
- if parameters.get('ProcessorFeatures') is not None:
- parameters['ProcessorFeatures'] = [{'Name': k, 'Value': to_text(v)} for k, v in parameters['ProcessorFeatures'].items()]
-
- # If this parameter is an empty list it can only be used with modify_db_instance (as the parameter UseDefaultProcessorFeatures)
- if parameters.get('ProcessorFeatures') == [] and method_name != 'modify_db_instance':
- parameters.pop('ProcessorFeatures')
-
- if method_name == 'create_db_instance' and parameters.get('Tags'):
- parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags'])
- if method_name == 'modify_db_instance':
- parameters = get_options_with_changing_values(client, module, parameters)
-
- return parameters
-
-
-def get_options_with_changing_values(client, module, parameters):
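- """Filter the modify_db_instance parameters down to the values that actually change the instance, honouring force_update_password, purge_cloudwatch_logs_exports, and apply_immediately."""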
- instance_id = module.params['db_instance_identifier']
- purge_cloudwatch_logs = module.params['purge_cloudwatch_logs_exports']
- force_update_password = module.params['force_update_password']
- port = module.params['port']
- apply_immediately = parameters.pop('ApplyImmediately', None)
- cloudwatch_logs_enabled = module.params['enable_cloudwatch_logs_exports']
-
- if port:
- parameters['DBPortNumber'] = port
- if not force_update_password:
- parameters.pop('MasterUserPassword', None)
- if cloudwatch_logs_enabled:
- parameters['CloudwatchLogsExportConfiguration'] = cloudwatch_logs_enabled
- if not module.params['storage_type']:
- parameters.pop('Iops', None)
-
- instance = get_instance(client, module, instance_id)
- updated_parameters = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs)
- updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance))
- parameters = updated_parameters
-
- if parameters.get('NewDBInstanceIdentifier') and instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier'):
- if parameters['NewDBInstanceIdentifier'] == instance['PendingModifiedValues']['DBInstanceIdentifier'] and not apply_immediately:
- parameters.pop('NewDBInstanceIdentifier')
-
- if parameters:
- parameters['DBInstanceIdentifier'] = instance_id
- if apply_immediately is not None:
- parameters['ApplyImmediately'] = apply_immediately
-
- return parameters
-
-
-def get_current_attributes_with_inconsistent_keys(instance):
- options = {}
- if instance.get('PendingModifiedValues', {}).get('PendingCloudwatchLogsExports', {}).get('LogTypesToEnable', []):
- current_enabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToEnable']
- current_disabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToDisable']
- options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': current_enabled, 'LogTypesToDisable': current_disabled}
- else:
- options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': instance.get('EnabledCloudwatchLogsExports', []), 'LogTypesToDisable': []}
- if instance.get('PendingModifiedValues', {}).get('Port'):
- options['DBPortNumber'] = instance['PendingModifiedValues']['Port']
- else:
- options['DBPortNumber'] = instance['Endpoint']['Port']
- if instance.get('PendingModifiedValues', {}).get('DBSubnetGroupName'):
- options['DBSubnetGroupName'] = instance['PendingModifiedValues']['DBSubnetGroupName']
- else:
- options['DBSubnetGroupName'] = instance['DBSubnetGroup']['DBSubnetGroupName']
- if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
- options['ProcessorFeatures'] = instance['PendingModifiedValues']['ProcessorFeatures']
- else:
- options['ProcessorFeatures'] = instance.get('ProcessorFeatures', {})
- options['OptionGroupName'] = [g['OptionGroupName'] for g in instance['OptionGroupMemberships']]
- options['DBSecurityGroups'] = [sg['DBSecurityGroupName'] for sg in instance['DBSecurityGroups'] if sg['Status'] in ['adding', 'active']]
- options['VpcSecurityGroupIds'] = [sg['VpcSecurityGroupId'] for sg in instance['VpcSecurityGroups'] if sg['Status'] in ['adding', 'active']]
- options['DBParameterGroupName'] = [parameter_group['DBParameterGroupName'] for parameter_group in instance['DBParameterGroups']]
- options['AllowMajorVersionUpgrade'] = None
- options['EnableIAMDatabaseAuthentication'] = instance['IAMDatabaseAuthenticationEnabled']
- # PerformanceInsightsEnabled is not returned on older RDS instances it seems
- options['EnablePerformanceInsights'] = instance.get('PerformanceInsightsEnabled', False)
- options['MasterUserPassword'] = None
- options['NewDBInstanceIdentifier'] = instance['DBInstanceIdentifier']
-
- return options
-
-
-def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs):
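- """Compare desired options whose module keys differ from the describe_db_instances output, returning only the values that actually change."""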
- changing_params = {}
- current_options = get_current_attributes_with_inconsistent_keys(instance)
-
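- # describe_db_instances omits MaxAllocatedStorage when storage autoscaling is
- # disabled; make sure the key exists so the comparison loop below still evaluates it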
- if current_options.get("MaxAllocatedStorage") is None:
- current_options["MaxAllocatedStorage"] = None
-
- for option in current_options:
- current_option = current_options[option]
- desired_option = modify_params.pop(option, None)
- if desired_option is None:
- continue
-
- # TODO: allow other purge_option module parameters rather than just checking for things to add
- if isinstance(current_option, list):
- if isinstance(desired_option, list):
- if set(desired_option) <= set(current_option):
- continue
- elif isinstance(desired_option, string_types):
- if desired_option in current_option:
- continue
-
- if current_option == desired_option:
- continue
-
- if option == 'ProcessorFeatures' and desired_option == []:
- changing_params['UseDefaultProcessorFeatures'] = True
- elif option == 'CloudwatchLogsExportConfiguration':
- current_option = set(current_option.get('LogTypesToEnable', []))
- desired_option = set(desired_option)
- format_option = {'EnableLogTypes': [], 'DisableLogTypes': []}
- format_option['EnableLogTypes'] = list(desired_option.difference(current_option))
- if purge_cloudwatch_logs:
- format_option['DisableLogTypes'] = list(current_option.difference(desired_option))
- if format_option['EnableLogTypes'] or format_option['DisableLogTypes']:
- changing_params[option] = format_option
- else:
- changing_params[option] = desired_option
-
- return changing_params
-
-
-def get_changing_options_with_consistent_keys(modify_params, instance):
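- """Return the modify parameters (whose keys match the describe_db_instances output) that differ from the current or pending values."""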
- changing_params = {}
-
- for param in modify_params:
- current_option = instance.get('PendingModifiedValues', {}).get(param)
- if current_option is None:
- current_option = instance[param]
- if modify_params[param] != current_option:
- changing_params[param] = modify_params[param]
-
- return changing_params
-
-
-def validate_options(client, module, instance):
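- """Fail early on option combinations that the RDS API calls would reject."""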
- state = module.params['state']
- skip_final_snapshot = module.params['skip_final_snapshot']
- snapshot_id = module.params['final_db_snapshot_identifier']
- modified_id = module.params['new_db_instance_identifier']
- engine = module.params['engine']
- tde_options = bool(module.params['tde_credential_password'] or module.params['tde_credential_arn'])
- read_replica = module.params['read_replica']
- creation_source = module.params['creation_source']
- source_instance = module.params['source_db_instance_identifier']
- if module.params['source_region'] is not None:
- same_region = bool(module.params['source_region'] == module.params['region'])
- else:
- same_region = True
-
- if modified_id:
- modified_instance = get_instance(client, module, modified_id)
- else:
- modified_instance = {}
-
- if modified_id and instance and modified_instance:
- module.fail_json(msg='A new instance ID {0} was provided but it already exists'.format(modified_id))
- if modified_id and not instance and modified_instance:
- module.fail_json(msg='A new instance ID {0} was provided but the instance to be renamed does not exist'.format(modified_id))
- if state in ('absent', 'terminated') and instance and not skip_final_snapshot and snapshot_id is None:
- module.fail_json(msg='skip_final_snapshot is false but all of the following are missing: final_db_snapshot_identifier')
- if engine is not None and not (engine.startswith('sqlserver') or engine.startswith('oracle')) and tde_options:
- module.fail_json(msg='TDE is available for Oracle and SQL Server DB instances')
- if read_replica is True and not instance and creation_source not in [None, 'instance']:
- module.fail_json(msg='Cannot create a read replica from {0}. You must use a source DB instance'.format(creation_source))
- if read_replica is True and not instance and not source_instance:
- module.fail_json(msg='read_replica is true and the instance does not exist yet but all of the following are missing: source_db_instance_identifier')
-
-
-def update_instance(client, module, instance, instance_id):
- changed = False
-
- # Get newly created DB instance
- if not instance:
- instance = get_instance(client, module, instance_id)
-
- # Check tagging/promoting/rebooting/starting/stopping instance
- changed |= ensure_tags(
- client, module, instance['DBInstanceArn'], instance['Tags'], module.params['tags'], module.params['purge_tags']
- )
- changed |= promote_replication_instance(client, module, instance, module.params['read_replica'])
- changed |= update_instance_state(client, module, instance, module.params['state'])
-
- return changed
-
-
-def promote_replication_instance(client, module, instance, read_replica):
- changed = False
- if read_replica is False:
- changed = bool(instance.get('ReadReplicaSourceDBInstanceIdentifier') or instance.get('StatusInfos'))
- if changed:
- try:
- call_method(client, module, method_name='promote_read_replica', parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']})
- changed = True
- except is_boto3_error_code('InvalidDBInstanceState') as e:
- if 'DB Instance is not a read replica' in e.response['Error']['Message']:
- pass
- else:
- raise e
- return changed
-
-
-def update_instance_state(client, module, instance, state):
- changed = False
- if state in ['rebooted', 'restarted']:
- changed |= reboot_running_db_instance(client, module, instance)
- if state in ['started', 'running', 'stopped']:
- changed |= start_or_stop_instance(client, module, instance, state)
- return changed
-
-
-def reboot_running_db_instance(client, module, instance):
- parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']}
- if instance['DBInstanceStatus'] in ['stopped', 'stopping']:
- call_method(client, module, 'start_db_instance', parameters)
- if module.params.get('force_failover') is not None:
- parameters['ForceFailover'] = module.params['force_failover']
- results, changed = call_method(client, module, 'reboot_db_instance', parameters)
- return changed
-
-
-def start_or_stop_instance(client, module, instance, state):
- changed = False
- parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']}
- if state == 'stopped' and instance['DBInstanceStatus'] not in ['stopping', 'stopped']:
- if module.params['db_snapshot_identifier']:
- parameters['DBSnapshotIdentifier'] = module.params['db_snapshot_identifier']
- result, changed = call_method(client, module, 'stop_db_instance', parameters)
- elif state == 'started' and instance['DBInstanceStatus'] not in ['available', 'starting', 'restarting']:
- result, changed = call_method(client, module, 'start_db_instance', parameters)
- return changed
-
-
-def main():
- arg_spec = dict(
- state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'),
- creation_source=dict(choices=['snapshot', 's3', 'instance']),
- force_update_password=dict(type='bool', default=False),
- purge_cloudwatch_logs_exports=dict(type='bool', default=True),
- purge_tags=dict(type='bool', default=True),
- read_replica=dict(type='bool'),
- wait=dict(type='bool', default=True),
- )
-
- parameter_options = dict(
- allocated_storage=dict(type='int'),
- allow_major_version_upgrade=dict(type='bool'),
- apply_immediately=dict(type='bool', default=False),
- auto_minor_version_upgrade=dict(type='bool'),
- availability_zone=dict(aliases=['az', 'zone']),
- backup_retention_period=dict(type='int'),
- ca_certificate_identifier=dict(),
- character_set_name=dict(),
- copy_tags_to_snapshot=dict(type='bool'),
- db_cluster_identifier=dict(aliases=['cluster_id']),
- db_instance_class=dict(aliases=['class', 'instance_type']),
- db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']),
- db_name=dict(),
- db_parameter_group_name=dict(),
- db_security_groups=dict(type='list'),
- db_snapshot_identifier=dict(),
- db_subnet_group_name=dict(aliases=['subnet_group']),
- domain=dict(),
- domain_iam_role_name=dict(),
- enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports']),
- enable_iam_database_authentication=dict(type='bool'),
- enable_performance_insights=dict(type='bool'),
- engine=dict(),
- engine_version=dict(),
- final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']),
- force_failover=dict(type='bool'),
- iops=dict(type='int'),
- kms_key_id=dict(),
- license_model=dict(),
- master_user_password=dict(aliases=['password'], no_log=True),
- master_username=dict(aliases=['username']),
- max_allocated_storage=dict(type='int'),
- monitoring_interval=dict(type='int'),
- monitoring_role_arn=dict(),
- multi_az=dict(type='bool'),
- new_db_instance_identifier=dict(aliases=['new_instance_id', 'new_id']),
- option_group_name=dict(),
- performance_insights_kms_key_id=dict(),
- performance_insights_retention_period=dict(type='int'),
- port=dict(type='int'),
- preferred_backup_window=dict(aliases=['backup_window']),
- preferred_maintenance_window=dict(aliases=['maintenance_window']),
- processor_features=dict(type='dict'),
- promotion_tier=dict(),
- publicly_accessible=dict(type='bool'),
- restore_time=dict(),
- s3_bucket_name=dict(),
- s3_ingestion_role_arn=dict(),
- s3_prefix=dict(),
- skip_final_snapshot=dict(type='bool', default=False),
- snapshot_identifier=dict(),
- source_db_instance_identifier=dict(),
- source_engine=dict(choices=['mysql']),
- source_engine_version=dict(),
- source_region=dict(),
- storage_encrypted=dict(type='bool'),
- storage_type=dict(choices=['standard', 'gp2', 'io1']),
- tags=dict(type='dict'),
- tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']),
- tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']),
- timezone=dict(),
- use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']),
- vpc_security_group_ids=dict(type='list')
- )
- arg_spec.update(parameter_options)
-
- required_if = [
- ('engine', 'aurora', ('db_cluster_identifier',)),
- ('engine', 'aurora-mysql', ('db_cluster_identifier',)),
- ('engine', 'aurora-postgresql', ('db_cluster_identifier',)),
- ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')),
- ('creation_source', 's3', (
- 's3_bucket_name', 'engine', 'master_username', 'master_user_password',
- 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')),
- ]
- mutually_exclusive = [
- ('s3_bucket_name', 'source_db_instance_identifier', 'snapshot_identifier'),
- ('use_latest_restorable_time', 'restore_time'),
- ('availability_zone', 'multi_az'),
- ]
-
- module = AnsibleAWSModule(
- argument_spec=arg_spec,
- required_if=required_if,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=True
- )
-
- if not module.boto3_at_least('1.5.0'):
- module.fail_json(msg="rds_instance requires boto3 > 1.5.0")
-
- # Sanitize instance identifiers
- module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower()
- if module.params['new_db_instance_identifier']:
- module.params['new_db_instance_identifier'] = module.params['new_db_instance_identifier'].lower()
-
- # Sanitize processor features
- if module.params['processor_features'] is not None:
- module.params['processor_features'] = dict((k, to_text(v)) for k, v in module.params['processor_features'].items())
-
- client = module.client('rds')
- changed = False
- state = module.params['state']
- instance_id = module.params['db_instance_identifier']
- instance = get_instance(client, module, instance_id)
- validate_options(client, module, instance)
- method_name = get_rds_method_attribute_name(instance, state, module.params['creation_source'], module.params['read_replica'])
-
- if method_name:
- raw_parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options))
- parameters = get_parameters(client, module, raw_parameters, method_name)
-
- if parameters:
- result, changed = call_method(client, module, method_name, parameters)
-
- instance_id = get_final_identifier(method_name, module)
-
- # Check tagging/promoting/rebooting/starting/stopping instance
- if state != 'absent' and (not module.check_mode or instance):
- changed |= update_instance(client, module, instance, instance_id)
-
- if changed:
- instance = get_instance(client, module, instance_id)
- if state != 'absent' and (instance or not module.check_mode):
- for attempt_to_wait in range(0, 10):
- instance = get_instance(client, module, instance_id)
- if instance:
- break
- else:
- sleep(5)
-
- if state == 'absent' and changed and not module.params['skip_final_snapshot']:
- instance.update(FinalSnapshot=get_final_snapshot(client, module, module.params['final_db_snapshot_identifier']))
-
- pending_processor_features = None
- if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
- pending_processor_features = instance['PendingModifiedValues'].pop('ProcessorFeatures')
- instance = camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])
- if pending_processor_features is not None:
- instance['pending_modified_values']['processor_features'] = pending_processor_features
-
- module.exit_json(changed=changed, **instance)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/rds_instance_info.py b/lib/ansible/modules/cloud/amazon/rds_instance_info.py
deleted file mode 100644
index 082fc84d35..0000000000
--- a/lib/ansible/modules/cloud/amazon/rds_instance_info.py
+++ /dev/null
@@ -1,407 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2017, 2018 Michael De La Rue
-# Copyright (c) 2017, 2018 Will Thames
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'status': ['preview'],
- 'supported_by': 'community',
- 'metadata_version': '1.1'}
-
-DOCUMENTATION = '''
----
-module: rds_instance_info
-version_added: "2.6"
-short_description: obtain information about one or more RDS instances
-description:
- - Obtain information about one or more RDS instances.
- - This module was called C(rds_instance_facts) before Ansible 2.9. The usage did not change.
-options:
- db_instance_identifier:
- description:
- - The RDS instance's unique identifier.
- required: false
- aliases:
- - id
- type: str
- filters:
- description:
- - A filter that specifies one or more DB instances to describe.
- See U(https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html)
- type: dict
-requirements:
- - "python >= 2.7"
- - "boto3"
-author:
- - "Will Thames (@willthames)"
- - "Michael De La Rue (@mikedlr)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Get information about an instance
-- rds_instance_info:
- db_instance_identifier: new-database
- register: new_database_info
-
-# Get all RDS instances
-- rds_instance_info:
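-
-# A sketch: list several instances by identifier using the db-instance-id
-# filter from the DescribeDBInstances API (identifiers are placeholders)
-- rds_instance_info:
- filters:
- db-instance-id:
- - first-database
- - second-database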
-'''
-
-RETURN = '''
-instances:
- description: List of RDS instances
- returned: always
- type: complex
- contains:
- allocated_storage:
- description: Gigabytes of storage allocated to the database
- returned: always
- type: int
- sample: 10
- auto_minor_version_upgrade:
- description: Whether minor version upgrades happen automatically
- returned: always
- type: bool
- sample: true
- availability_zone:
- description: Availability Zone in which the database resides
- returned: always
- type: str
- sample: us-west-2b
- backup_retention_period:
- description: Days for which backups are retained
- returned: always
- type: int
- sample: 7
- ca_certificate_identifier:
- description: ID for the CA certificate
- returned: always
- type: str
- sample: rds-ca-2015
- copy_tags_to_snapshot:
- description: Whether DB tags should be copied to the snapshot
- returned: always
- type: bool
- sample: false
- db_instance_arn:
- description: ARN of the database instance
- returned: always
- type: str
- sample: arn:aws:rds:us-west-2:111111111111:db:helloworld-rds
- db_instance_class:
- description: Instance class of the database instance
- returned: always
- type: str
- sample: db.t2.small
- db_instance_identifier:
- description: Database instance identifier
- returned: always
- type: str
- sample: helloworld-rds
- db_instance_port:
- description: Port used by the database instance
- returned: always
- type: int
- sample: 0
- db_instance_status:
- description: Status of the database instance
- returned: always
- type: str
- sample: available
- db_name:
- description: Name of the database
- returned: always
- type: str
- sample: management
- db_parameter_groups:
- description: List of database parameter groups
- returned: always
- type: complex
- contains:
- db_parameter_group_name:
- description: Name of the database parameter group
- returned: always
- type: str
- sample: psql-pg-helloworld
- parameter_apply_status:
- description: Whether the parameter group has been applied
- returned: always
- type: str
- sample: in-sync
- db_security_groups:
- description: List of security groups used by the database instance
- returned: always
- type: list
- sample: []
- db_subnet_group:
- description: list of subnet groups
- returned: always
- type: complex
- contains:
- db_subnet_group_description:
- description: Description of the DB subnet group
- returned: always
- type: str
- sample: My database subnet group
- db_subnet_group_name:
- description: Name of the database subnet group
- returned: always
- type: str
- sample: my-subnet-group
- subnet_group_status:
- description: Subnet group status
- returned: always
- type: str
- sample: Complete
- subnets:
- description: List of subnets in the subnet group
- returned: always
- type: complex
- contains:
- subnet_availability_zone:
- description: Availability zone of the subnet
- returned: always
- type: complex
- contains:
- name:
- description: Name of the availability zone
- returned: always
- type: str
- sample: us-west-2c
- subnet_identifier:
- description: Subnet ID
- returned: always
- type: str
- sample: subnet-abcd1234
- subnet_status:
- description: Subnet status
- returned: always
- type: str
- sample: Active
- vpc_id:
- description: VPC id of the subnet group
- returned: always
- type: str
- sample: vpc-abcd1234
- dbi_resource_id:
- description: AWS Region-unique, immutable identifier for the DB instance
- returned: always
- type: str
- sample: db-AAAAAAAAAAAAAAAAAAAAAAAAAA
- domain_memberships:
- description: List of domain memberships
- returned: always
- type: list
- sample: []
- endpoint:
- description: Database endpoint
- returned: always
- type: complex
- contains:
- address:
- description: Database endpoint address
- returned: always
- type: str
- sample: helloworld-rds.ctrqpe3so1sf.us-west-2.rds.amazonaws.com
- hosted_zone_id:
- description: Route53 hosted zone ID
- returned: always
- type: str
- sample: Z1PABCD0000000
- port:
- description: Database endpoint port
- returned: always
- type: int
- sample: 5432
- engine:
- description: Database engine
- returned: always
- type: str
- sample: postgres
- engine_version:
- description: Database engine version
- returned: always
- type: str
- sample: 9.5.10
- iam_database_authentication_enabled:
- description: Whether database authentication through IAM is enabled
- returned: always
- type: bool
- sample: false
- instance_create_time:
- description: Date and time the instance was created
- returned: always
- type: str
- sample: '2017-10-10T04:00:07.434000+00:00'
- kms_key_id:
- description: KMS Key ID
- returned: always
- type: str
- sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-0000-abcd-1111-0123456789ab
- latest_restorable_time:
- description: Latest time to which a database can be restored with point-in-time restore
- returned: always
- type: str
- sample: '2018-05-17T00:03:56+00:00'
- license_model:
- description: License model
- returned: always
- type: str
- sample: postgresql-license
- master_username:
- description: Database master username
- returned: always
- type: str
- sample: dbadmin
- monitoring_interval:
- description: Interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance
- returned: always
- type: int
- sample: 0
- multi_az:
- description: Whether Multi-AZ is on
- returned: always
- type: bool
- sample: false
- option_group_memberships:
- description: List of option groups
- returned: always
- type: complex
- contains:
- option_group_name:
- description: Option group name
- returned: always
- type: str
- sample: default:postgres-9-5
- status:
- description: Status of option group
- returned: always
- type: str
- sample: in-sync
- pending_modified_values:
- description: Modified values pending application
- returned: always
- type: complex
- contains: {}
- performance_insights_enabled:
- description: Whether performance insights are enabled
- returned: always
- type: bool
- sample: false
- preferred_backup_window:
- description: Preferred backup window
- returned: always
- type: str
- sample: 04:00-05:00
- preferred_maintenance_window:
- description: Preferred maintenance window
- returned: always
- type: str
- sample: mon:05:00-mon:05:30
- publicly_accessible:
- description: Whether the DB is publicly accessible
- returned: always
- type: bool
- sample: false
- read_replica_db_instance_identifiers:
- description: List of database instance read replicas
- returned: always
- type: list
- sample: []
- storage_encrypted:
- description: Whether the storage is encrypted
- returned: always
- type: bool
- sample: true
- storage_type:
- description: Storage type of the Database instance
- returned: always
- type: str
- sample: gp2
- tags:
- description: Tags used by the database instance
- returned: always
- type: complex
- contains: {}
- vpc_security_groups:
- description: List of VPC security groups
- returned: always
- type: complex
- contains:
- status:
- description: Status of the VPC security group
- returned: always
- type: str
- sample: active
- vpc_security_group_id:
- description: VPC Security Group ID
- returned: always
- type: str
- sample: sg-abcd1234
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, AWSRetry, camel_dict_to_snake_dict
-
-
-try:
- import botocore
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-
-def instance_info(module, conn):
- instance_name = module.params.get('db_instance_identifier')
- filters = module.params.get('filters')
-
- params = dict()
- if instance_name:
- params['DBInstanceIdentifier'] = instance_name
- if filters:
- params['Filters'] = ansible_dict_to_boto3_filter_list(filters)
-
- paginator = conn.get_paginator('describe_db_instances')
- try:
- results = paginator.paginate(**params).build_full_result()['DBInstances']
- except is_boto3_error_code('DBInstanceNotFound'):
- results = []
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, "Couldn't get instance information")
-
- for instance in results:
- try:
- instance['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'],
- aws_retry=True)['TagList'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier'])
-
- return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results])
-
-
-def main():
- argument_spec = dict(
- db_instance_identifier=dict(aliases=['id']),
- filters=dict(type='dict')
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
- if module._name == 'rds_instance_facts':
- module.deprecate("The 'rds_instance_facts' module has been renamed to 'rds_instance_info'", version='2.13')
-
- conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
-
- module.exit_json(**instance_info(module, conn))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/rds_param_group.py b/lib/ansible/modules/cloud/amazon/rds_param_group.py
deleted file mode 100644
index 973fe20f91..0000000000
--- a/lib/ansible/modules/cloud/amazon/rds_param_group.py
+++ /dev/null
@@ -1,356 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: rds_param_group
-version_added: "1.5"
-short_description: manage RDS parameter groups
-description:
- - Creates, modifies, and deletes RDS parameter groups.
-requirements: [ boto3 ]
-options:
- state:
- description:
- - Specifies whether the group should be present or absent.
- required: true
- choices: [ 'present' , 'absent' ]
- type: str
- name:
- description:
- - Database parameter group identifier.
- required: true
- type: str
- description:
- description:
- - Database parameter group description. Only set when a new group is added.
- type: str
- engine:
- description:
- - The type of database for this group.
- - Please use following command to get list of all supported db engines and their respective versions.
- - '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"'
- - Required for I(state=present).
- type: str
- immediate:
- description:
- - Whether to apply the changes immediately, or after the next reboot of any associated instances.
- aliases:
- - apply_immediately
- type: bool
- params:
- description:
- - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3),
- or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
- aliases: [parameters]
- type: dict
- tags:
- description:
- - Dictionary of tags to attach to the parameter group.
- version_added: "2.4"
- type: dict
- purge_tags:
- description:
- - Whether or not to remove tags that do not appear in the I(tags) dictionary.
- version_added: "2.4"
- type: bool
- default: False
-author:
- - "Scott Anderson (@tastychutney)"
- - "Will Thames (@willthames)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
-- rds_param_group:
- state: present
- name: norwegian-blue
- description: 'My Fancy Ex Parrot Group'
- engine: 'mysql5.6'
- params:
- auto_increment_increment: "42K"
- tags:
- Environment: production
- Application: parrot
-
-# Remove a parameter group
-- rds_param_group:
- state: absent
- name: norwegian-blue
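-
-# A sketch of applying parameter changes immediately and pruning unlisted tags;
-# the group, engine, and tag names are placeholders
-- rds_param_group:
- state: present
- name: norwegian-blue
- description: 'My Fancy Ex Parrot Group'
- engine: 'mysql5.6'
- params:
- auto_increment_increment: "42K"
- immediate: yes
- purge_tags: yes
- tags:
- Environment: production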
-'''
-
-RETURN = '''
-db_parameter_group_name:
- description: Name of DB parameter group
- type: str
- returned: when state is present
-db_parameter_group_family:
- description: DB parameter group family that this DB parameter group is compatible with.
- type: str
- returned: when state is present
-db_parameter_group_arn:
- description: ARN of the DB parameter group
- type: str
- returned: when state is present
-description:
- description: description of the DB parameter group
- type: str
- returned: when state is present
-errors:
- description: list of errors from attempting to modify parameters that are not modifiable
- type: list
- returned: when state is present
-tags:
- description: dictionary of tags
- type: dict
- returned: when state is present
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, HAS_BOTO3, compare_aws_tags
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
-from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
-from ansible.module_utils.six import string_types
-from ansible.module_utils._text import to_native
-
-import traceback
-
-try:
- import botocore
-except ImportError:
- pass # caught by imported HAS_BOTO3
-
-INT_MODIFIERS = {
- 'K': 1024,
- 'M': pow(1024, 2),
- 'G': pow(1024, 3),
- 'T': pow(1024, 4),
-}
-
-
-def convert_parameter(param, value):
- """
- Allows setting parameters with 10M = 10 * 1024 * 1024 and so on.
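- For example, '10M' is converted to 10 * 1024 * 1024 = '10485760'.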
- """
- converted_value = value
-
- if param['DataType'] == 'integer':
- if isinstance(value, string_types):
- try:
- for modifier in INT_MODIFIERS.keys():
- if value.endswith(modifier):
- converted_value = int(value[:-1]) * INT_MODIFIERS[modifier]
- except ValueError:
- # may be based on a variable (ie. {foo*3/4}) so
- # just pass it on through to boto
- pass
- elif isinstance(value, bool):
- converted_value = 1 if value else 0
-
- elif param['DataType'] == 'boolean':
- if isinstance(value, string_types):
- converted_value = to_native(value) in BOOLEANS_TRUE
- # convert True/False to 1/0
- converted_value = 1 if converted_value else 0
- return str(converted_value)
-
-
-def update_parameters(module, connection):
- groupname = module.params['name']
- desired = module.params['params']
- apply_method = 'immediate' if module.params['immediate'] else 'pending-reboot'
- errors = []
- modify_list = []
- parameters_paginator = connection.get_paginator('describe_db_parameters')
- existing = parameters_paginator.paginate(DBParameterGroupName=groupname).build_full_result()['Parameters']
- lookup = dict((param['ParameterName'], param) for param in existing)
- for param_key, param_value in desired.items():
- if param_key not in lookup:
- errors.append("Parameter %s is not an available parameter for the %s engine" %
- (param_key, module.params.get('engine')))
- else:
- converted_value = convert_parameter(lookup[param_key], param_value)
- # engine-default parameters do not have a ParameterValue, so we'll always override those.
- if converted_value != lookup[param_key].get('ParameterValue'):
- if lookup[param_key]['IsModifiable']:
- modify_list.append(dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method))
- else:
- errors.append("Parameter %s is not modifiable" % param_key)
-
- # modify_db_parameters takes at most 20 parameters
- if modify_list:
- try:
- from itertools import izip_longest as zip_longest # python 2
- except ImportError:
- from itertools import zip_longest # python 3
- for modify_slice in zip_longest(*[iter(modify_list)] * 20, fillvalue=None):
- non_empty_slice = [item for item in modify_slice if item]
- try:
- connection.modify_db_parameter_group(DBParameterGroupName=groupname, Parameters=non_empty_slice)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't update parameters: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- return True, errors
- return False, errors
-
-
-def update_tags(module, connection, group, tags):
- changed = False
- existing_tags = connection.list_tags_for_resource(ResourceName=group['DBParameterGroupArn'])['TagList']
- to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags),
- tags, module.params['purge_tags'])
- if to_update:
- try:
- connection.add_tags_to_resource(ResourceName=group['DBParameterGroupArn'],
- Tags=ansible_dict_to_boto3_tag_list(to_update))
- changed = True
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't add tags to parameter group: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- except botocore.exceptions.ParamValidationError as e:
- # Usually a tag value has been passed as an int or bool, needs to be a string
- # The AWS exception message is reasonably ok for this purpose
- module.fail_json(msg="Couldn't add tags to parameter group: %s." % str(e),
- exception=traceback.format_exc())
- if to_delete:
- try:
- connection.remove_tags_from_resource(ResourceName=group['DBParameterGroupArn'],
- TagKeys=to_delete)
- changed = True
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't remove tags from parameter group: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- return changed
-
-
-def ensure_present(module, connection):
- groupname = module.params['name']
- tags = module.params.get('tags')
- changed = False
- errors = []
- try:
- response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'DBParameterGroupNotFound':
- response = None
- else:
- module.fail_json(msg="Couldn't access parameter group information: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- if not response:
- params = dict(DBParameterGroupName=groupname,
- DBParameterGroupFamily=module.params['engine'],
- Description=module.params['description'])
- if tags:
- params['Tags'] = ansible_dict_to_boto3_tag_list(tags)
- try:
- response = connection.create_db_parameter_group(**params)
- changed = True
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't create parameter group: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- else:
- group = response['DBParameterGroups'][0]
- if tags:
- changed = update_tags(module, connection, group, tags)
-
- if module.params.get('params'):
- params_changed, errors = update_parameters(module, connection)
- changed = changed or params_changed
-
- try:
- response = connection.describe_db_parameter_groups(DBParameterGroupName=groupname)
- group = camel_dict_to_snake_dict(response['DBParameterGroups'][0])
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't obtain parameter group information: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- try:
- tags = connection.list_tags_for_resource(ResourceName=group['db_parameter_group_arn'])['TagList']
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't obtain parameter group tags: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- group['tags'] = boto3_tag_list_to_ansible_dict(tags)
-
- module.exit_json(changed=changed, errors=errors, **group)
-
-
-def ensure_absent(module, connection):
- group = module.params['name']
- try:
- response = connection.describe_db_parameter_groups(DBParameterGroupName=group)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'DBParameterGroupNotFound':
- module.exit_json(changed=False)
- else:
- module.fail_json(msg="Couldn't access parameter group information: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
- try:
- response = connection.delete_db_parameter_group(DBParameterGroupName=group)
- module.exit_json(changed=True)
- except botocore.exceptions.ClientError as e:
- module.fail_json(msg="Couldn't delete parameter group: %s" % str(e),
- exception=traceback.format_exc(),
- **camel_dict_to_snake_dict(e.response))
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- state=dict(required=True, choices=['present', 'absent']),
- name=dict(required=True),
- engine=dict(),
- description=dict(),
- params=dict(aliases=['parameters'], type='dict'),
- immediate=dict(type='bool', aliases=['apply_immediately']),
- tags=dict(type='dict', default={}),
- purge_tags=dict(type='bool', default=False)
- )
- )
- module = AnsibleModule(argument_spec=argument_spec,
- required_if=[['state', 'present', ['description', 'engine']]])
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 and botocore are required for this module')
-
- # Retrieve any AWS settings from the environment.
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
-
- if not region:
- module.fail_json(msg="Region must be present")
-
- try:
- conn = boto3_conn(module, conn_type='client', resource='rds', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- except botocore.exceptions.NoCredentialsError as e:
- module.fail_json(msg="Couldn't connect to AWS: %s" % str(e))
-
- state = module.params.get('state')
- if state == 'present':
- ensure_present(module, conn)
- if state == 'absent':
- ensure_absent(module, conn)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/rds_snapshot.py b/lib/ansible/modules/cloud/amazon/rds_snapshot.py
deleted file mode 100644
index 5df2808fee..0000000000
--- a/lib/ansible/modules/cloud/amazon/rds_snapshot.py
+++ /dev/null
@@ -1,352 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2014 Ansible Project
-# Copyright (c) 2017, 2018, 2019 Will Thames
-# Copyright (c) 2017, 2018 Michael De La Rue
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'status': ['preview'],
- 'supported_by': 'community',
- 'metadata_version': '1.1'}
-
-DOCUMENTATION = '''
----
-module: rds_snapshot
-version_added: "2.9"
-short_description: manage Amazon RDS snapshots
-description:
- - Creates or deletes RDS snapshots.
-options:
- state:
- description:
- - Specify the desired state of the snapshot.
- default: present
- choices: [ 'present', 'absent']
- type: str
- db_snapshot_identifier:
- description:
- - The snapshot to manage.
- required: true
- aliases:
- - id
- - snapshot_id
- type: str
- db_instance_identifier:
- description:
- - Database instance identifier. Required when I(state=present).
- aliases:
- - instance_id
- type: str
- wait:
- description:
- - Whether or not to wait for snapshot creation or deletion.
- type: bool
- default: 'no'
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- default: 300
- type: int
- tags:
- description:
- - Tags dict to apply to the snapshot.
- type: dict
- purge_tags:
- description:
- - Whether to remove tags not present in the C(tags) parameter.
- default: True
- type: bool
-requirements:
- - "python >= 2.6"
- - "boto3"
-author:
- - "Will Thames (@willthames)"
- - "Michael De La Rue (@mikedlr)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Create snapshot
-- rds_snapshot:
- db_instance_identifier: new-database
- db_snapshot_identifier: new-database-snapshot
-
-# Delete snapshot
-- rds_snapshot:
- db_snapshot_identifier: new-database-snapshot
- state: absent
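-
-# Create a snapshot, wait for it to become available and tag it
-# (an illustrative sketch based on the options documented above, not from the
-# original examples; the tag names are hypothetical)
-- rds_snapshot:
- db_instance_identifier: new-database
- db_snapshot_identifier: new-database-snapshot
- wait: yes
- wait_timeout: 600
- tags:
- Environment: staging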
-'''
-
-RETURN = '''
-allocated_storage:
- description: How much storage is allocated in GB.
- returned: always
- type: int
- sample: 20
-availability_zone:
- description: Availability zone of the database from which the snapshot was created.
- returned: always
- type: str
- sample: us-west-2a
-db_instance_identifier:
- description: Database from which the snapshot was created.
- returned: always
- type: str
- sample: ansible-test-16638696
-db_snapshot_arn:
- description: Amazon Resource Name for the snapshot.
- returned: always
- type: str
- sample: arn:aws:rds:us-west-2:123456789012:snapshot:ansible-test-16638696-test-snapshot
-db_snapshot_identifier:
- description: Name of the snapshot.
- returned: always
- type: str
- sample: ansible-test-16638696-test-snapshot
-dbi_resource_id:
- description: The identifier for the source DB instance, which can't be changed and which is unique to an AWS Region.
- returned: always
- type: str
- sample: db-MM4P2U35RQRAMWD3QDOXWPZP4U
-encrypted:
- description: Whether the snapshot is encrypted.
- returned: always
- type: bool
- sample: false
-engine:
- description: Engine of the database from which the snapshot was created.
- returned: always
- type: str
- sample: mariadb
-engine_version:
- description: Version of the database from which the snapshot was created.
- returned: always
- type: str
- sample: 10.2.21
-iam_database_authentication_enabled:
- description: Whether IAM database authentication is enabled.
- returned: always
- type: bool
- sample: false
-instance_create_time:
- description: Creation time of the instance from which the snapshot was created.
- returned: always
- type: str
- sample: '2019-06-15T10:15:56.221000+00:00'
-license_model:
- description: License model of the database.
- returned: always
- type: str
- sample: general-public-license
-master_username:
- description: Master username of the database.
- returned: always
- type: str
- sample: test
-option_group_name:
- description: Option group of the database.
- returned: always
- type: str
- sample: default:mariadb-10-2
-percent_progress:
- description: How much progress has been made taking the snapshot. Will be 100 for an available snapshot.
- returned: always
- type: int
- sample: 100
-port:
- description: Port on which the database is listening.
- returned: always
- type: int
- sample: 3306
-processor_features:
- description: List of processor features of the database.
- returned: always
- type: list
- sample: []
-snapshot_create_time:
- description: Creation time of the snapshot.
- returned: always
- type: str
- sample: '2019-06-15T10:46:23.776000+00:00'
-snapshot_type:
- description: How the snapshot was created (always manual for this module!).
- returned: always
- type: str
- sample: manual
-status:
- description: Status of the snapshot.
- returned: always
- type: str
- sample: available
-storage_type:
- description: Storage type of the database.
- returned: always
- type: str
- sample: gp2
-tags:
- description: Tags applied to the snapshot.
- returned: always
- type: complex
- contains: {}
-vpc_id:
- description: ID of the VPC in which the DB lives.
- returned: always
- type: str
- sample: vpc-09ff232e222710ae0
-'''
-
-try:
- import botocore
-except ImportError:
- pass # protected by AnsibleAWSModule
-
-# import module snippets
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, compare_aws_tags
-from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
-
-
-def get_snapshot(client, module, snapshot_id):
- try:
- response = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)
- except client.exceptions.DBSnapshotNotFoundFault:
- return None
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id))
- return response['DBSnapshots'][0]
-
-
-def snapshot_to_facts(client, module, snapshot):
- try:
- snapshot['Tags'] = boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn'],
- aws_retry=True)['TagList'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['DBSnapshotIdentifier'])
- except KeyError:
- module.fail_json(msg=str(snapshot))
-
- return camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])
-
-
-def wait_for_snapshot_status(client, module, db_snapshot_id, waiter_name):
- if not module.params['wait']:
- return
- timeout = module.params['wait_timeout']
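- # Poll every 5 seconds; adding 2.5 before dividing rounds the number of
- # attempts to the nearest whole poll interval.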
- try:
- client.get_waiter(waiter_name).wait(DBSnapshotIdentifier=db_snapshot_id,
- WaiterConfig=dict(
- Delay=5,
- MaxAttempts=int((timeout + 2.5) / 5)
- ))
- except botocore.exceptions.WaiterError as e:
- if waiter_name == 'db_snapshot_deleted':
- msg = "Failed to wait for DB snapshot {0} to be deleted".format(db_snapshot_id)
- else:
- msg = "Failed to wait for DB snapshot {0} to be available".format(db_snapshot_id)
- module.fail_json_aws(e, msg=msg)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_snapshot_id))
-
-
-def ensure_snapshot_absent(client, module):
- snapshot_name = module.params.get('db_snapshot_identifier')
- changed = False
-
- snapshot = get_snapshot(client, module, snapshot_name)
- if snapshot and snapshot['Status'] != 'deleting':
- try:
- client.delete_db_snapshot(DBSnapshotIdentifier=snapshot_name)
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="trying to delete snapshot")
-
- # If we're not waiting for a delete to complete then we're all done
- # so just return
- if not snapshot or not module.params.get('wait'):
- return dict(changed=changed)
- try:
- wait_for_snapshot_status(client, module, snapshot_name, 'db_snapshot_deleted')
- return dict(changed=changed)
- except client.exceptions.DBSnapshotNotFoundFault:
- return dict(changed=changed)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "awaiting snapshot deletion")
-
-
-def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
- if tags is None:
- return False
- tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
- changed = bool(tags_to_add or tags_to_remove)
- if tags_to_add:
- try:
- client.add_tags_to_resource(ResourceName=resource_arn, Tags=ansible_dict_to_boto3_tag_list(tags_to_add))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't add tags to snapshot {0}".format(resource_arn))
- if tags_to_remove:
- try:
- client.remove_tags_from_resource(ResourceName=resource_arn, TagKeys=tags_to_remove)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't remove tags from snapshot {0}".format(resource_arn))
- return changed
-
-
-def ensure_snapshot_present(client, module):
- db_instance_identifier = module.params.get('db_instance_identifier')
- snapshot_name = module.params.get('db_snapshot_identifier')
- changed = False
- snapshot = get_snapshot(client, module, snapshot_name)
- if not snapshot:
- try:
- snapshot = client.create_db_snapshot(DBSnapshotIdentifier=snapshot_name,
- DBInstanceIdentifier=db_instance_identifier)['DBSnapshot']
- changed = True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="trying to create db snapshot")
-
- if module.params.get('wait'):
- wait_for_snapshot_status(client, module, snapshot_name, 'db_snapshot_available')
-
- existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn'],
- aws_retry=True)['TagList'])
- desired_tags = module.params['tags']
- purge_tags = module.params['purge_tags']
- changed |= ensure_tags(client, module, snapshot['DBSnapshotArn'], existing_tags, desired_tags, purge_tags)
-
- snapshot = get_snapshot(client, module, snapshot_name)
-
- return dict(changed=changed, **snapshot_to_facts(client, module, snapshot))
-
-
-def main():
-
- module = AnsibleAWSModule(
- argument_spec=dict(
- state=dict(choices=['present', 'absent'], default='present'),
- db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True),
- db_instance_identifier=dict(aliases=['instance_id']),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- tags=dict(type='dict'),
- purge_tags=dict(type='bool', default=True),
- ),
- required_if=[['state', 'present', ['db_instance_identifier']]]
- )
-
- client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['DBSnapshotNotFound']))
-
- if module.params['state'] == 'absent':
- ret_dict = ensure_snapshot_absent(client, module)
- else:
- ret_dict = ensure_snapshot_present(client, module)
-
- module.exit_json(**ret_dict)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/rds_snapshot_info.py b/lib/ansible/modules/cloud/amazon/rds_snapshot_info.py
deleted file mode 100644
index 96ea044850..0000000000
--- a/lib/ansible/modules/cloud/amazon/rds_snapshot_info.py
+++ /dev/null
@@ -1,396 +0,0 @@
-#!/usr/bin/python
-# Copyright (c) 2014-2017 Ansible Project
-# Copyright (c) 2017, 2018 Will Thames
-# Copyright (c) 2017, 2018 Michael De La Rue
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'status': ['preview'],
- 'supported_by': 'community',
- 'metadata_version': '1.1'}
-
-DOCUMENTATION = '''
----
-module: rds_snapshot_info
-version_added: "2.6"
-short_description: obtain information about one or more RDS snapshots
-description:
- - Obtain information about one or more RDS snapshots. These can be for unclustered snapshots or snapshots of clustered DBs (Aurora).
- - Aurora snapshot information may be obtained if no identifier parameters are passed or if one of the cluster parameters is passed.
- - This module was called C(rds_snapshot_facts) before Ansible 2.9. The usage did not change.
-options:
- db_snapshot_identifier:
- description:
- - Name of an RDS (unclustered) snapshot.
- - Mutually exclusive with I(db_instance_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier).
- required: false
- aliases:
- - snapshot_name
- type: str
- db_instance_identifier:
- description:
- - RDS instance name for which to find snapshots.
- - Mutually exclusive with I(db_snapshot_identifier), I(db_cluster_identifier), I(db_cluster_snapshot_identifier).
- required: false
- type: str
- db_cluster_identifier:
- description:
- - RDS cluster name for which to find snapshots.
- - Mutually exclusive with I(db_snapshot_identifier), I(db_instance_identifier), I(db_cluster_snapshot_identifier).
- required: false
- type: str
- db_cluster_snapshot_identifier:
- description:
- - Name of an RDS cluster snapshot.
- - Mutually exclusive with I(db_instance_identifier), I(db_snapshot_identifier), I(db_cluster_identifier).
- required: false
- type: str
- snapshot_type:
- description:
- - Type of snapshot to find.
- - By default both automated and manual snapshots will be returned.
- required: false
- choices: ['automated', 'manual', 'shared', 'public']
- type: str
-requirements:
- - "python >= 2.6"
- - "boto3"
-author:
- - "Will Thames (@willthames)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Get information about a snapshot
-- rds_snapshot_info:
- db_snapshot_identifier: snapshot_name
- register: new_database_info
-
-# Get all RDS snapshots for an RDS instance
-- rds_snapshot_info:
- db_instance_identifier: helloworld-rds-master
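-
-# List the snapshots of an Aurora cluster (illustrative sketch; the cluster
-# name is hypothetical and the parameter follows the options documented above)
-- rds_snapshot_info:
- db_cluster_identifier: hello-aurora-cluster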
-'''
-
-RETURN = '''
-snapshots:
- description: List of non-clustered snapshots
- returned: When cluster parameters are not passed
- type: complex
- contains:
- allocated_storage:
- description: How many gigabytes of storage are allocated
- returned: always
- type: int
- sample: 10
- availability_zone:
- description: The availability zone of the database from which the snapshot was taken
- returned: always
- type: str
- sample: us-west-2b
- db_instance_identifier:
- description: Database instance identifier
- returned: always
- type: str
- sample: hello-world-rds
- db_snapshot_arn:
- description: Snapshot ARN
- returned: always
- type: str
- sample: arn:aws:rds:us-west-2:111111111111:snapshot:rds:hello-world-rds-us1-2018-05-16-04-03
- db_snapshot_identifier:
- description: Snapshot name
- returned: always
- type: str
- sample: rds:hello-world-rds-us1-2018-05-16-04-03
- encrypted:
- description: Whether the snapshot was encrypted
- returned: always
- type: bool
- sample: true
- engine:
- description: Database engine
- returned: always
- type: str
- sample: postgres
- engine_version:
- description: Database engine version
- returned: always
- type: str
- sample: 9.5.10
- iam_database_authentication_enabled:
- description: Whether database authentication through IAM is enabled
- returned: always
- type: bool
- sample: false
- instance_create_time:
- description: Time the Instance was created
- returned: always
- type: str
- sample: '2017-10-10T04:00:07.434000+00:00'
- kms_key_id:
- description: ID of the KMS Key encrypting the snapshot
- returned: always
- type: str
- sample: arn:aws:kms:us-west-2:111111111111:key/abcd1234-1234-aaaa-0000-1234567890ab
- license_model:
- description: License model
- returned: always
- type: str
- sample: postgresql-license
- master_username:
- description: Database master username
- returned: always
- type: str
- sample: dbadmin
- option_group_name:
- description: Database option group name
- returned: always
- type: str
- sample: default:postgres-9-5
- percent_progress:
- description: Percent progress of snapshot
- returned: always
- type: int
- sample: 100
- snapshot_create_time:
- description: Time snapshot was created
- returned: always
- type: str
- sample: '2018-05-16T04:03:33.871000+00:00'
- snapshot_type:
- description: Type of snapshot
- returned: always
- type: str
- sample: automated
- status:
- description: Status of snapshot
- returned: always
- type: str
- sample: available
- storage_type:
- description: Storage type of underlying DB
- returned: always
- type: str
- sample: gp2
- tags:
- description: Snapshot tags
- returned: when snapshot is not shared
- type: complex
- contains: {}
- vpc_id:
- description: ID of VPC containing the DB
- returned: always
- type: str
- sample: vpc-abcd1234
-cluster_snapshots:
- description: List of cluster snapshots
- returned: always
- type: complex
- contains:
- allocated_storage:
- description: How many gigabytes of storage are allocated
- returned: always
- type: int
- sample: 1
- availability_zones:
- description: The availability zones of the cluster from which the snapshot was taken
- returned: always
- type: list
- sample:
- - ca-central-1a
- - ca-central-1b
- cluster_create_time:
- description: Date and time the cluster was created
- returned: always
- type: str
- sample: '2018-05-17T00:13:40.223000+00:00'
- db_cluster_identifier:
- description: Database cluster identifier
- returned: always
- type: str
- sample: test-aurora-cluster
- db_cluster_snapshot_arn:
- description: ARN of the database snapshot
- returned: always
- type: str
- sample: arn:aws:rds:ca-central-1:111111111111:cluster-snapshot:test-aurora-snapshot
- db_cluster_snapshot_identifier:
- description: Snapshot identifier
- returned: always
- type: str
- sample: test-aurora-snapshot
- engine:
- description: Database engine
- returned: always
- type: str
- sample: aurora
- engine_version:
- description: Database engine version
- returned: always
- type: str
- sample: 5.6.10a
- iam_database_authentication_enabled:
- description: Whether database authentication through IAM is enabled
- returned: always
- type: bool
- sample: false
- kms_key_id:
- description: ID of the KMS Key encrypting the snapshot
- returned: always
- type: str
- sample: arn:aws:kms:ca-central-1:111111111111:key/abcd1234-abcd-1111-aaaa-0123456789ab
- license_model:
- description: License model
- returned: always
- type: str
- sample: aurora
- master_username:
- description: Database master username
- returned: always
- type: str
- sample: shertel
- percent_progress:
- description: Percent progress of snapshot
- returned: always
- type: int
- sample: 0
- port:
- description: Database port
- returned: always
- type: int
- sample: 0
- snapshot_create_time:
- description: Date and time when the snapshot was created
- returned: always
- type: str
- sample: '2018-05-17T00:23:23.731000+00:00'
- snapshot_type:
- description: Type of snapshot
- returned: always
- type: str
- sample: manual
- status:
- description: Status of snapshot
- returned: always
- type: str
- sample: creating
- storage_encrypted:
- description: Whether the snapshot is encrypted
- returned: always
- type: bool
- sample: true
- tags:
- description: Tags of the snapshot
- returned: when snapshot is not shared
- type: complex
- contains: {}
- vpc_id:
- description: VPC of the database
- returned: always
- type: str
- sample: vpc-abcd1234
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
-
-try:
- import botocore
-except Exception:
- pass # caught by AnsibleAWSModule
-
-
-def common_snapshot_info(module, conn, method, prefix, params):
- paginator = conn.get_paginator(method)
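- # Both the result key and the NotFound error code derive from the prefix,
- # e.g. 'DBSnapshot' gives 'DBSnapshots' and 'DBSnapshotNotFound'.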
- try:
- results = paginator.paginate(**params).build_full_result()['%ss' % prefix]
- except is_boto3_error_code('%sNotFound' % prefix):
- results = []
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, "trying to get snapshot information")
-
- for snapshot in results:
- try:
- if snapshot['SnapshotType'] != 'shared':
- snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix],
- aws_retry=True)['TagList'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix])
-
- return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results]
-
-
-def cluster_snapshot_info(module, conn):
- snapshot_name = module.params.get('db_cluster_snapshot_identifier')
- snapshot_type = module.params.get('snapshot_type')
- instance_name = module.params.get('db_cluster_identifier')
-
- params = dict()
- if snapshot_name:
- params['DBClusterSnapshotIdentifier'] = snapshot_name
- if instance_name:
- params['DBClusterIdentifier'] = instance_name
- if snapshot_type:
- params['SnapshotType'] = snapshot_type
- if snapshot_type == 'public':
- params['IncludePublic'] = True
- elif snapshot_type == 'shared':
- params['IncludeShared'] = True
-
- return common_snapshot_info(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params)
-
-
-def standalone_snapshot_info(module, conn):
- snapshot_name = module.params.get('db_snapshot_identifier')
- snapshot_type = module.params.get('snapshot_type')
- instance_name = module.params.get('db_instance_identifier')
-
- params = dict()
- if snapshot_name:
- params['DBSnapshotIdentifier'] = snapshot_name
- if instance_name:
- params['DBInstanceIdentifier'] = instance_name
- if snapshot_type:
- params['SnapshotType'] = snapshot_type
- if snapshot_type == 'public':
- params['IncludePublic'] = True
- elif snapshot_type == 'shared':
- params['IncludeShared'] = True
-
- return common_snapshot_info(module, conn, 'describe_db_snapshots', 'DBSnapshot', params)
-
-
-def main():
- argument_spec = dict(
- db_snapshot_identifier=dict(aliases=['snapshot_name']),
- db_instance_identifier=dict(),
- db_cluster_identifier=dict(),
- db_cluster_snapshot_identifier=dict(),
- snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public'])
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']]
- )
- if module._name == 'rds_snapshot_facts':
- module.deprecate("The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'", version='2.13')
-
- conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
- results = dict()
- if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']:
- results['snapshots'] = standalone_snapshot_info(module, conn)
- if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']:
- results['cluster_snapshots'] = cluster_snapshot_info(module, conn)
-
- module.exit_json(changed=False, **results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/rds_subnet_group.py b/lib/ansible/modules/cloud/amazon/rds_subnet_group.py
deleted file mode 100644
index 008a7dc259..0000000000
--- a/lib/ansible/modules/cloud/amazon/rds_subnet_group.py
+++ /dev/null
@@ -1,202 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: rds_subnet_group
-version_added: "1.5"
-short_description: manage RDS database subnet groups
-description:
- - Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5.
-options:
- state:
- description:
- - Specifies whether the subnet should be present or absent.
- required: true
- choices: [ 'present', 'absent' ]
- type: str
- name:
- description:
- - Database subnet group identifier.
- required: true
- type: str
- description:
- description:
- - Database subnet group description.
- - Required when I(state=present).
- type: str
- subnets:
- description:
- - List of subnet IDs that make up the database subnet group.
- - Required when I(state=present).
- type: list
-author: "Scott Anderson (@tastychutney)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Add or change a subnet group
-- rds_subnet_group:
- state: present
- name: norwegian-blue
- description: My Fancy Ex Parrot Subnet Group
- subnets:
- - subnet-aaaaaaaa
- - subnet-bbbbbbbb
-
-# Remove a subnet group
-- rds_subnet_group:
- state: absent
- name: norwegian-blue
-'''
-
-RETURN = '''
-subnet_group:
- description: Dictionary of DB subnet group values
- returned: I(state=present)
- type: complex
- contains:
- name:
- description: The name of the DB subnet group
- returned: I(state=present)
- type: str
- description:
- description: The description of the DB subnet group
- returned: I(state=present)
- type: str
- vpc_id:
- description: The VpcId of the DB subnet group
- returned: I(state=present)
- type: str
- subnet_ids:
- description: Contains a list of Subnet IDs
- returned: I(state=present)
- type: list
- status:
- description: The status of the DB subnet group
- returned: I(state=present)
- type: str
-'''
-
-try:
- import boto.rds
- from boto.exception import BotoServerError
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
-
-
-def get_subnet_group_info(subnet_group):
- return dict(
- name=subnet_group.name,
- description=subnet_group.description,
- vpc_id=subnet_group.vpc_id,
- subnet_ids=subnet_group.subnet_ids,
- status=subnet_group.status
- )
-
-
-def create_result(changed, subnet_group=None):
- if subnet_group is None:
- return dict(
- changed=changed
- )
- else:
- return dict(
- changed=changed,
- subnet_group=get_subnet_group_info(subnet_group)
- )
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(required=True, choices=['present', 'absent']),
- name=dict(required=True),
- description=dict(required=False),
- subnets=dict(required=False, type='list'),
- )
- )
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- state = module.params.get('state')
- group_name = module.params.get('name').lower()
- group_description = module.params.get('description')
- group_subnets = module.params.get('subnets') or []
-
- if state == 'present':
- for required in ['description', 'subnets']:
- if not module.params.get(required):
- module.fail_json(msg=str("Parameter %s required for state='present'" % required))
- else:
- for not_allowed in ['description', 'subnets']:
- if module.params.get(not_allowed):
- module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))
-
- # Retrieve any AWS settings from the environment.
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
-
- if not region:
- module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
-
- try:
- conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs)
- except BotoServerError as e:
- module.fail_json(msg=e.error_message)
-
- try:
- exists = False
- result = create_result(False)
-
- try:
- matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100)
- exists = len(matching_groups) > 0
- except BotoServerError as e:
- if e.error_code != 'DBSubnetGroupNotFoundFault':
- module.fail_json(msg=e.error_message)
-
- if state == 'absent':
- if exists:
- conn.delete_db_subnet_group(group_name)
- result = create_result(True)
- else:
- if not exists:
- new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets)
- result = create_result(True, new_group)
- else:
- # Sort the subnet groups before we compare them
- matching_groups[0].subnet_ids.sort()
- group_subnets.sort()
- if (matching_groups[0].name != group_name or
- matching_groups[0].description != group_description or
- matching_groups[0].subnet_ids != group_subnets):
- changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
- result = create_result(True, changed_group)
- else:
- result = create_result(False, matching_groups[0])
- except BotoServerError as e:
- module.fail_json(msg=e.error_message)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/redshift.py b/lib/ansible/modules/cloud/amazon/redshift.py
deleted file mode 100644
index 48d1d46abc..0000000000
--- a/lib/ansible/modules/cloud/amazon/redshift.py
+++ /dev/null
@@ -1,625 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2014 Jens Carl, Hothead Games Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-author:
- - "Jens Carl (@j-carl), Hothead Games Inc."
- - "Rafael Driutti (@rafaeldriutti)"
-module: redshift
-version_added: "2.2"
-short_description: create, delete, or modify an Amazon Redshift instance
-description:
- - Creates, deletes, or modifies Amazon Redshift cluster instances.
-options:
- command:
- description:
- - Specifies the action to take.
- required: true
- choices: [ 'create', 'facts', 'delete', 'modify' ]
- type: str
- identifier:
- description:
- - Redshift cluster identifier.
- required: true
- type: str
- node_type:
- description:
- - The node type of the cluster.
- - Required when I(command=create).
- choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large','dc2.large',
- 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
- type: str
- username:
- description:
- - Master database username.
- - Used only when I(command=create).
- type: str
- password:
- description:
- - Master database password.
- - Used only when I(command=create).
- type: str
- cluster_type:
- description:
- - The type of cluster.
- choices: ['multi-node', 'single-node' ]
- default: 'single-node'
- type: str
- db_name:
- description:
- - Name of the database.
- type: str
- availability_zone:
- description:
- - Availability zone in which to launch cluster.
- aliases: ['zone', 'aws_zone']
- type: str
- number_of_nodes:
- description:
- - Number of nodes.
- - Only used when I(cluster_type=multi-node).
- type: int
- cluster_subnet_group_name:
- description:
- - Which subnet to place the cluster.
- aliases: ['subnet']
- type: str
- cluster_security_groups:
- description:
- - In which security group the cluster belongs.
- type: list
- elements: str
- aliases: ['security_groups']
- vpc_security_group_ids:
- description:
- - VPC security group IDs.
- aliases: ['vpc_security_groups']
- type: list
- elements: str
- skip_final_cluster_snapshot:
- description:
- - Skip a final snapshot before deleting the cluster.
- - Used only when I(command=delete).
- aliases: ['skip_final_snapshot']
- default: false
- version_added: "2.4"
- type: bool
- final_cluster_snapshot_identifier:
- description:
- - Identifier of the final snapshot to be created before deleting the cluster.
- - If this parameter is provided, I(skip_final_cluster_snapshot) must be C(false).
- - Used only when I(command=delete).
- aliases: ['final_snapshot_id']
- version_added: "2.4"
- type: str
- preferred_maintenance_window:
- description:
- - 'Maintenance window in format of C(ddd:hh24:mi-ddd:hh24:mi). (Example: C(Mon:22:00-Mon:23:15))'
- - Times are specified in UTC.
- - If not specified then a random 30 minute maintenance window is assigned.
- aliases: ['maintance_window', 'maint_window']
- type: str
- cluster_parameter_group_name:
- description:
- - Name of the cluster parameter group.
- aliases: ['param_group_name']
- type: str
- automated_snapshot_retention_period:
- description:
- - The number of days that automated snapshots are retained.
- aliases: ['retention_period']
- type: int
- port:
- description:
- - Which port the cluster is listening on.
- type: int
- cluster_version:
- description:
- - Which version the cluster should have.
- aliases: ['version']
- choices: ['1.0']
- type: str
- allow_version_upgrade:
- description:
- - When I(allow_version_upgrade=true) the cluster may be automatically
- upgraded during the maintenance window.
- aliases: ['version_upgrade']
- default: true
- type: bool
- publicly_accessible:
- description:
- - Whether the cluster is publicly accessible.
- default: false
- type: bool
- encrypted:
- description:
- - Whether the cluster is encrypted.
- default: false
- type: bool
- elastic_ip:
- description:
- - An Elastic IP to use for the cluster.
- type: str
- new_cluster_identifier:
- description:
- - Only used when I(command=modify).
- aliases: ['new_identifier']
- type: str
- wait:
- description:
- - When I(command=create), I(command=modify) or I(command=restore) then wait for the database to enter the 'available' state.
- - When I(command=delete) wait for the database to be terminated.
- type: bool
- default: false
- wait_timeout:
- description:
- - When I(wait=true) defines how long in seconds before giving up.
- default: 300
- type: int
- enhanced_vpc_routing:
- description:
- - Whether the cluster should have enhanced VPC routing enabled.
- default: false
- type: bool
-requirements: [ 'boto3' ]
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Basic cluster provisioning example
-- redshift:
- command: create
- node_type: ds1.xlarge
- identifier: new_cluster
- username: cluster_admin
- password: 1nsecure
-
-# Cluster delete example
-- redshift:
- command: delete
- identifier: new_cluster
- skip_final_cluster_snapshot: true
- wait: true
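-
-# Cluster rename example (illustrative sketch based on the options documented
-# above; the identifiers are hypothetical)
-- redshift:
- command: modify
- identifier: new_cluster
- new_cluster_identifier: renamed_cluster
- wait: true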
-'''
-
-RETURN = '''
-cluster:
- description: dictionary containing all the cluster information
- returned: success
- type: complex
- contains:
- identifier:
- description: Id of the cluster.
- returned: success
- type: str
- sample: "new_redshift_cluster"
- create_time:
- description: Time of the cluster creation as timestamp.
- returned: success
- type: float
- sample: 1430158536.308
- status:
- description: Status of the cluster.
- returned: success
- type: str
- sample: "available"
- db_name:
- description: Name of the database.
- returned: success
- type: str
- sample: "new_db_name"
- availability_zone:
- description: Amazon availability zone where the cluster is located. "None" until cluster is available.
- returned: success
- type: str
- sample: "us-east-1b"
- maintenance_window:
- description: Time frame when maintenance/upgrade are done.
- returned: success
- type: str
- sample: "sun:09:30-sun:10:00"
- private_ip_address:
- description: Private IP address of the main node.
- returned: success
- type: str
- sample: "10.10.10.10"
- public_ip_address:
- description: Public IP address of the main node. "None" when enhanced_vpc_routing is enabled.
- returned: success
- type: str
- sample: "0.0.0.0"
- port:
- description: Port of the cluster. "None" until cluster is available.
- returned: success
- type: int
- sample: 5439
- url:
- description: FQDN of the main cluster node. "None" until cluster is available.
- returned: success
- type: str
- sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
- enhanced_vpc_routing:
- description: status of the enhanced vpc routing feature.
- returned: success
- type: bool
-'''
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.ec2 import AWSRetry, snake_dict_to_camel_dict
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-
-
-def _collect_facts(resource):
- """Transform cluster information to dict."""
- facts = {
- 'identifier': resource['ClusterIdentifier'],
- 'status': resource['ClusterStatus'],
- 'username': resource['MasterUsername'],
- 'db_name': resource['DBName'],
- 'maintenance_window': resource['PreferredMaintenanceWindow'],
- 'enhanced_vpc_routing': resource['EnhancedVpcRouting']
-
- }
-
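- # Only the leader (or the shared node of a single-node cluster) carries the
- # addresses clients connect to, so stop at the first matching node.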
- for node in resource['ClusterNodes']:
- if node['NodeRole'] in ('SHARED', 'LEADER'):
- facts['private_ip_address'] = node['PrivateIPAddress']
- if facts['enhanced_vpc_routing'] is False:
- facts['public_ip_address'] = node['PublicIPAddress']
- else:
- facts['public_ip_address'] = None
- break
-
- # Some parameters are not ready instantly if you don't wait for available
- # cluster status
- facts['create_time'] = None
- facts['url'] = None
- facts['port'] = None
- facts['availability_zone'] = None
-
- if resource['ClusterStatus'] != "creating":
- facts['create_time'] = resource['ClusterCreateTime']
- facts['url'] = resource['Endpoint']['Address']
- facts['port'] = resource['Endpoint']['Port']
- facts['availability_zone'] = resource['AvailabilityZone']
-
- return facts
-
-
-@AWSRetry.jittered_backoff()
-def _describe_cluster(redshift, identifier):
- '''
- Basic wrapper around describe_clusters with a retry applied
- '''
- return redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
-
-
-@AWSRetry.jittered_backoff()
-def _create_cluster(redshift, **kwargs):
- '''
- Basic wrapper around create_cluster with a retry applied
- '''
- return redshift.create_cluster(**kwargs)
-
-
-# Simple wrapper around delete, try to avoid throwing an error if some other
-# operation is in progress
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState'])
-def _delete_cluster(redshift, **kwargs):
- '''
- Basic wrapper around delete_cluster with a retry applied.
- Explicitly catches 'InvalidClusterState' (~ Operation in progress) so that
- we can still delete a cluster if some kind of change operation was in
- progress.
- '''
- return redshift.delete_cluster(**kwargs)
-
-
-@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidClusterState'])
-def _modify_cluster(redshift, **kwargs):
- '''
- Basic wrapper around modify_cluster with a retry applied.
- Explicitly catches 'InvalidClusterState' (~ Operation in progress) for cases
- where another modification is still in progress
- '''
- return redshift.modify_cluster(**kwargs)
-
-
-def create_cluster(module, redshift):
- """
- Create a new cluster
-
- module: AnsibleModule object
- redshift: authenticated redshift connection object
-
- Returns:
- """
-
- identifier = module.params.get('identifier')
- node_type = module.params.get('node_type')
- username = module.params.get('username')
- password = module.params.get('password')
- d_b_name = module.params.get('db_name')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- changed = True
- # Package up the optional parameters
- params = {}
- for p in ('cluster_type', 'cluster_security_groups',
- 'vpc_security_group_ids', 'cluster_subnet_group_name',
- 'availability_zone', 'preferred_maintenance_window',
- 'cluster_parameter_group_name',
- 'automated_snapshot_retention_period', 'port',
- 'cluster_version', 'allow_version_upgrade',
- 'number_of_nodes', 'publicly_accessible',
- 'encrypted', 'elastic_ip', 'enhanced_vpc_routing'):
- # https://github.com/boto/boto3/issues/400
- if module.params.get(p) is not None:
- params[p] = module.params.get(p)
-
- if d_b_name:
- params['d_b_name'] = d_b_name
-
- try:
- _describe_cluster(redshift, identifier)
- changed = False
- except is_boto3_error_code('ClusterNotFound'):
- try:
- _create_cluster(redshift,
- ClusterIdentifier=identifier,
- NodeType=node_type,
- MasterUsername=username,
- MasterUserPassword=password,
- **snake_dict_to_camel_dict(params, capitalize_first=True))
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed to create cluster")
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Failed to describe cluster")
- if wait:
- attempts = wait_timeout // 60
- waiter = redshift.get_waiter('cluster_available')
- try:
- waiter.wait(
- ClusterIdentifier=identifier,
- WaiterConfig=dict(MaxAttempts=attempts)
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Timeout waiting for the cluster creation")
- try:
- resource = _describe_cluster(redshift, identifier)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Failed to describe cluster")
-
- return(changed, _collect_facts(resource))
-
-
-def describe_cluster(module, redshift):
- """
- Collect data about the cluster.
-
- module: Ansible module object
- redshift: authenticated redshift connection object
- """
- identifier = module.params.get('identifier')
-
- try:
- resource = _describe_cluster(redshift, identifier)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Error describing cluster")
-
- return(True, _collect_facts(resource))
-
-
-def delete_cluster(module, redshift):
- """
- Delete a cluster.
-
- module: Ansible module object
- redshift: authenticated redshift connection object
- """
-
- identifier = module.params.get('identifier')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- params = {}
- for p in ('skip_final_cluster_snapshot',
- 'final_cluster_snapshot_identifier'):
- if p in module.params:
- # https://github.com/boto/boto3/issues/400
- if module.params.get(p) is not None:
- params[p] = module.params.get(p)
-
- try:
- _delete_cluster(
- redshift,
- ClusterIdentifier=identifier,
- **snake_dict_to_camel_dict(params, capitalize_first=True))
- except is_boto3_error_code('ClusterNotFound'):
- return(False, {})
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Failed to delete cluster")
-
- if wait:
- attempts = wait_timeout // 60
- waiter = redshift.get_waiter('cluster_deleted')
- try:
- waiter.wait(
- ClusterIdentifier=identifier,
- WaiterConfig=dict(MaxAttempts=attempts)
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Timeout deleting the cluster")
-
- return(True, {})
-
-
-def modify_cluster(module, redshift):
- """
- Modify an existing cluster.
-
- module: Ansible module object
- redshift: authenticated redshift connection object
- """
-
- identifier = module.params.get('identifier')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- # Package up the optional parameters
- params = {}
- for p in ('cluster_type', 'cluster_security_groups',
- 'vpc_security_group_ids', 'cluster_subnet_group_name',
- 'availability_zone', 'preferred_maintenance_window',
- 'cluster_parameter_group_name',
- 'automated_snapshot_retention_period', 'port', 'cluster_version',
- 'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
- # https://github.com/boto/boto3/issues/400
- if module.params.get(p) is not None:
- params[p] = module.params.get(p)
-
- # enhanced_vpc_routing parameter change needs an exclusive request
- if module.params.get('enhanced_vpc_routing') is not None:
- try:
- _modify_cluster(
- redshift,
- ClusterIdentifier=identifier,
- EnhancedVpcRouting=module.params.get('enhanced_vpc_routing'))
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
- if wait:
- attempts = wait_timeout // 60
- waiter = redshift.get_waiter('cluster_available')
- try:
- waiter.wait(
- ClusterIdentifier=identifier,
- WaiterConfig=dict(MaxAttempts=attempts)
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e,
- msg="Timeout waiting for cluster enhanced vpc routing modification"
- )
-
- # change the rest
- try:
- _modify_cluster(
- redshift,
- ClusterIdentifier=identifier,
- **snake_dict_to_camel_dict(params, capitalize_first=True))
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
-
- if module.params.get('new_cluster_identifier'):
- identifier = module.params.get('new_cluster_identifier')
-
- if wait:
- attempts = wait_timeout // 60
- waiter2 = redshift.get_waiter('cluster_available')
- try:
- waiter2.wait(
- ClusterIdentifier=identifier,
- WaiterConfig=dict(MaxAttempts=attempts)
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Timeout waiting for cluster modification")
- try:
- resource = _describe_cluster(redshift, identifier)
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
- module.fail_json(e, msg="Couldn't modify redshift cluster %s " % identifier)
-
- return(True, _collect_facts(resource))
-
-
-def main():
- argument_spec = dict(
- command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
- identifier=dict(required=True),
- node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge',
- 'ds2.8xlarge', 'dc1.large', 'dc2.large',
- 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge',
- 'dw2.large', 'dw2.8xlarge'], required=False),
- username=dict(required=False),
- password=dict(no_log=True, required=False),
- db_name=dict(required=False),
- cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'),
- cluster_security_groups=dict(aliases=['security_groups'], type='list'),
- vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
- skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'],
- type='bool', default=False),
- final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
- cluster_subnet_group_name=dict(aliases=['subnet']),
- availability_zone=dict(aliases=['aws_zone', 'zone']),
- preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
- cluster_parameter_group_name=dict(aliases=['param_group_name']),
- automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'),
- port=dict(type='int'),
- cluster_version=dict(aliases=['version'], choices=['1.0']),
- allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
- number_of_nodes=dict(type='int'),
- publicly_accessible=dict(type='bool', default=False),
- encrypted=dict(type='bool', default=False),
- elastic_ip=dict(required=False),
- new_cluster_identifier=dict(aliases=['new_identifier']),
- enhanced_vpc_routing=dict(type='bool', default=False),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- )
-
- required_if = [
- ('command', 'delete', ['skip_final_cluster_snapshot']),
- ('command', 'create', ['node_type',
- 'username',
- 'password'])
- ]
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- required_if=required_if
- )
-
- command = module.params.get('command')
- skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
- final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')
- # can't use module basic required_if check for this case
- if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None:
- module.fail_json(msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False")
-
- conn = module.client('redshift')
-
- changed = True
- if command == 'create':
- (changed, cluster) = create_cluster(module, conn)
-
- elif command == 'facts':
- (changed, cluster) = describe_cluster(module, conn)
-
- elif command == 'delete':
- (changed, cluster) = delete_cluster(module, conn)
-
- elif command == 'modify':
- (changed, cluster) = modify_cluster(module, conn)
-
- module.exit_json(changed=changed, cluster=cluster)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/redshift_cross_region_snapshots.py b/lib/ansible/modules/cloud/amazon/redshift_cross_region_snapshots.py
deleted file mode 100644
index 42d93228e6..0000000000
--- a/lib/ansible/modules/cloud/amazon/redshift_cross_region_snapshots.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/python
-
-# Copyright: (c) 2018, JR Kerkstra <jrkerkstra@example.org>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'status': ['preview'],
- 'supported_by': 'community',
- 'metadata_version': '1.1'}
-
-DOCUMENTATION = '''
----
-module: redshift_cross_region_snapshots
-short_description: Manage Redshift Cross Region Snapshots
-description:
- - Manage Redshift cross-region snapshots. Supports KMS-encrypted snapshots.
- - For more information, see U(https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-snapshots.html#cross-region-snapshot-copy)
-version_added: "2.8"
-author: JR Kerkstra (@captainkerk)
-options:
- cluster_name:
- description:
- - The name of the cluster to configure cross-region snapshots for.
- required: true
- aliases: [ "cluster" ]
- type: str
- state:
- description:
- - Create or remove the cross-region snapshot configuration.
- choices: [ "present", "absent" ]
- default: present
- type: str
- region:
- description:
- - "The cluster's region."
- required: true
- aliases: [ "source" ]
- type: str
- destination_region:
- description:
- - The region to copy snapshots to.
- required: true
- aliases: [ "destination" ]
- type: str
- snapshot_copy_grant:
- description:
- - A grant for Amazon Redshift to use a master key in the I(destination_region).
- - See U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.create_snapshot_copy_grant)
- aliases: [ "copy_grant" ]
- type: str
- snapshot_retention_period:
- description:
- - The number of days to keep cross-region snapshots for.
- required: true
- aliases: [ "retention_period" ]
- type: int
-requirements: [ "botocore", "boto3" ]
-extends_documentation_fragment:
- - ec2
- - aws
-'''
-
-EXAMPLES = '''
-- name: configure cross-region snapshot on cluster `johniscool`
- redshift_cross_region_snapshots:
- cluster_name: johniscool
- state: present
- region: us-east-1
- destination_region: us-west-2
- retention_period: 1
-
-- name: configure cross-region snapshot on kms-encrypted cluster
- redshift_cross_region_snapshots:
- cluster_name: whatever
- state: present
- region: us-east-1
- destination: us-west-2
- copy_grant: 'my-grant-in-destination'
- retention_period: 10
-
-- name: disable cross-region snapshots, necessary before most cluster modifications (rename, resize)
- redshift_cross_region_snapshots:
- cluster_name: whatever
- state: absent
- region: us-east-1
- destination_region: us-west-2
-'''
-
-RETURN = ''' # '''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-
-
-class SnapshotController(object):
-
- def __init__(self, client, cluster_name):
- self.client = client
- self.cluster_name = cluster_name
-
- def get_cluster_snapshot_copy_status(self):
- response = self.client.describe_clusters(
- ClusterIdentifier=self.cluster_name
- )
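- # The key is absent when cross-region snapshot copy is disabled,
- # so .get() returns None in that case.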
- return response['Clusters'][0].get('ClusterSnapshotCopyStatus')
-
- def enable_snapshot_copy(self, destination_region, grant_name, retention_period):
- if grant_name:
- self.client.enable_snapshot_copy(
- ClusterIdentifier=self.cluster_name,
- DestinationRegion=destination_region,
- RetentionPeriod=retention_period,
- SnapshotCopyGrantName=grant_name,
- )
- else:
- self.client.enable_snapshot_copy(
- ClusterIdentifier=self.cluster_name,
- DestinationRegion=destination_region,
- RetentionPeriod=retention_period,
- )
-
- def disable_snapshot_copy(self):
- self.client.disable_snapshot_copy(
- ClusterIdentifier=self.cluster_name
- )
-
- def modify_snapshot_copy_retention_period(self, retention_period):
- self.client.modify_snapshot_copy_retention_period(
- ClusterIdentifier=self.cluster_name,
- RetentionPeriod=retention_period
- )
-
-
-def requesting_unsupported_modifications(actual, requested):
- if (actual['SnapshotCopyGrantName'] != requested['snapshot_copy_grant'] or
- actual['DestinationRegion'] != requested['destination_region']):
- return True
- return False
-
-
-def needs_update(actual, requested):
- if actual['RetentionPeriod'] != requested['snapshot_retention_period']:
- return True
- return False
-
-
-def run_module():
- argument_spec = dict(
- cluster_name=dict(type='str', required=True, aliases=['cluster']),
- state=dict(type='str', choices=['present', 'absent'], default='present'),
- region=dict(type='str', required=True, aliases=['source']),
- destination_region=dict(type='str', required=True, aliases=['destination']),
- snapshot_copy_grant=dict(type='str', aliases=['copy_grant']),
- snapshot_retention_period=dict(type='int', required=True, aliases=['retention_period']),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- result = dict(
- changed=False,
- message=''
- )
- connection = module.client('redshift')
-
- snapshot_controller = SnapshotController(client=connection,
- cluster_name=module.params.get('cluster_name'))
-
- current_config = snapshot_controller.get_cluster_snapshot_copy_status()
- if current_config is not None:
- if module.params.get('state') == 'present':
- if requesting_unsupported_modifications(current_config, module.params):
- message = 'Cannot modify destination_region or grant_name. ' \
- 'Please disable cross-region snapshots, and re-run.'
- module.fail_json(msg=message, **result)
- if needs_update(current_config, module.params):
- result['changed'] = True
- if not module.check_mode:
- snapshot_controller.modify_snapshot_copy_retention_period(
- module.params.get('snapshot_retention_period')
- )
- else:
- result['changed'] = True
- if not module.check_mode:
- snapshot_controller.disable_snapshot_copy()
- else:
- if module.params.get('state') == 'present':
- result['changed'] = True
- if not module.check_mode:
- snapshot_controller.enable_snapshot_copy(module.params.get('destination_region'),
- module.params.get('snapshot_copy_grant'),
- module.params.get('snapshot_retention_period'))
- module.exit_json(**result)
-
-
-def main():
- run_module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/redshift_info.py b/lib/ansible/modules/cloud/amazon/redshift_info.py
deleted file mode 100644
index 693576a2a5..0000000000
--- a/lib/ansible/modules/cloud/amazon/redshift_info.py
+++ /dev/null
@@ -1,354 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: redshift_info
-author: "Jens Carl (@j-carl)"
-short_description: Gather information about Redshift cluster(s)
-description:
- - Gather information about Redshift cluster(s).
- - This module was called C(redshift_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.4"
-requirements: [ boto3 ]
-options:
- cluster_identifier:
- description:
- - The prefix of the cluster identifier of the Redshift cluster(s) you are searching for.
- - "This is a regular expression match with an implicit '^'. Append '$' for a complete match."
- required: false
- aliases: ['name', 'identifier']
- type: str
- tags:
- description:
- - "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' }
- to match against the security group(s) you are searching for."
- required: false
- type: dict
-extends_documentation_fragment:
- - ec2
- - aws
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details; see the AWS guide for details.
-
-# Find all clusters
-- redshift_info:
- register: redshift
-
-# Find cluster(s) with matching tags
-- redshift_info:
- tags:
- env: prd
- stack: monitoring
- register: redshift_tags
-
-# Find cluster(s) with matching name/prefix and tags
-- redshift_info:
- tags:
- env: dev
- stack: web
- name: user-
- register: redshift_web
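-
-# Find a cluster by complete identifier match; the pattern gets an implicit
-# '^' and the trailing '$' anchors the end (identifier is hypothetical)
-- redshift_info:
- name: user-prod$
- register: redshift_exact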
-
-# Fail if no clusters are found
-- redshift_info:
- tags:
- env: stg
- stack: db
- register: redshift_user
- failed_when: redshift_user.results | length == 0
-'''
-
-RETURN = '''
-# For more information see U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters)
----
-cluster_identifier:
- description: Unique key to identify the cluster.
- returned: success
- type: str
- sample: "redshift-identifier"
-node_type:
- description: The node type for nodes in the cluster.
- returned: success
- type: str
- sample: "ds2.xlarge"
-cluster_status:
- description: Current state of the cluster.
- returned: success
- type: str
- sample: "available"
-modify_status:
- description: The status of a modify operation.
- returned: optional
- type: str
- sample: ""
-master_username:
- description: The master user name for the cluster.
- returned: success
- type: str
- sample: "admin"
-db_name:
- description: The name of the initial database that was created when the cluster was created.
- returned: success
- type: str
- sample: "dev"
-endpoint:
- description: The connection endpoint.
- returned: success
- type: dict
- sample: {
- "address": "cluster-ds2.ocmugla0rf.us-east-1.redshift.amazonaws.com",
- "port": 5439
- }
-cluster_create_time:
- description: The date and time that the cluster was created.
- returned: success
- type: str
- sample: "2016-05-10T08:33:16.629000+00:00"
-automated_snapshot_retention_period:
- description: The number of days that automatic cluster snapshots are retained.
- returned: success
- type: int
- sample: 1
-cluster_security_groups:
- description: A list of cluster security groups that are associated with the cluster.
- returned: success
- type: list
- sample: []
-vpc_security_groups:
- description: A list of VPC security groups that are associated with the cluster.
- returned: success
- type: list
- sample: [
- {
- "status": "active",
- "vpc_security_group_id": "sg-12cghhg"
- }
- ]
-cluster_parameter_groups:
- description: The list of cluster parameter groups that are associated with this cluster.
- returned: success
- type: list
- sample: [
- {
- "cluster_parameter_status_list": [
- {
- "parameter_apply_status": "in-sync",
- "parameter_name": "statement_timeout"
- },
- {
- "parameter_apply_status": "in-sync",
- "parameter_name": "require_ssl"
- }
- ],
- "parameter_apply_status": "in-sync",
- "parameter_group_name": "tuba"
- }
- ]
-cluster_subnet_group_name:
- description: The name of the subnet group that is associated with the cluster.
- returned: success
- type: str
- sample: "redshift-subnet"
-vpc_id:
- description: The identifier of the VPC the cluster is in, if the cluster is in a VPC.
- returned: success
- type: str
- sample: "vpc-1234567"
-availability_zone:
- description: The name of the Availability Zone in which the cluster is located.
- returned: success
- type: str
- sample: "us-east-1b"
-preferred_maintenance_window:
- description: The weekly time range, in Coordinated Universal Time (UTC), during which system maintenance can occur.
- returned: success
- type: str
- sample: "tue:07:30-tue:08:00"
-pending_modified_values:
- description: A value that, if present, indicates that changes to the cluster are pending.
- returned: success
- type: dict
- sample: {}
-cluster_version:
- description: The version ID of the Amazon Redshift engine that is running on the cluster.
- returned: success
- type: str
- sample: "1.0"
-allow_version_upgrade:
- description: >
- A Boolean value that, if true, indicates that major version upgrades will be applied
- automatically to the cluster during the maintenance window.
- returned: success
- type: bool
- sample: true|false
-number_of_nodes:
- description: The number of compute nodes in the cluster.
- returned: success
- type: int
- sample: 12
-publicly_accessible:
- description: A Boolean value that, if true, indicates that the cluster can be accessed from a public network.
- returned: success
- type: bool
- sample: true|false
-encrypted:
- description: A Boolean value that, if true, indicates that data in the cluster is encrypted at rest.
- returned: success
- type: bool
- sample: true|false
-restore_status:
- description: A value that describes the status of a cluster restore action.
- returned: success
- type: dict
- sample: {}
-hsm_status:
- description: >
- A value that reports whether the Amazon Redshift cluster has finished applying any hardware
- security module (HSM) settings changes specified in a modify cluster command.
- returned: success
- type: dict
- sample: {}
-cluster_snapshot_copy_status:
- description: A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
- returned: success
- type: dict
- sample: {}
-cluster_public_keys:
- description: The public key for the cluster.
- returned: success
- type: str
- sample: "ssh-rsa anjigfam Amazon-Redshift\n"
-cluster_nodes:
- description: The nodes in the cluster.
- returned: success
- type: list
- sample: [
- {
- "node_role": "LEADER",
- "private_ip_address": "10.0.0.1",
- "public_ip_address": "x.x.x.x"
- },
- {
- "node_role": "COMPUTE-1",
- "private_ip_address": "10.0.0.3",
- "public_ip_address": "x.x.x.x"
- }
- ]
-elastic_ip_status:
- description: The status of the elastic IP (EIP) address.
- returned: success
- type: dict
- sample: {}
-cluster_revision_number:
- description: The specific revision number of the database in the cluster.
- returned: success
- type: str
- sample: "1231"
-tags:
- description: The list of tags for the cluster.
- returned: success
- type: list
- sample: []
-kms_key_id:
- description: The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
- returned: success
- type: str
- sample: ""
-enhanced_vpc_routing:
- description: An option that specifies whether to create the cluster with enhanced VPC routing enabled.
- returned: success
- type: bool
- sample: true|false
-iam_roles:
- description: List of IAM roles attached to the cluster.
- returned: success
- type: list
- sample: []
-'''
-
-import re
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-
-def match_tags(tags_to_match, cluster):
- for key, value in tags_to_match.items():
- for tag in cluster['Tags']:
- if key == tag['Key'] and value == tag['Value']:
- return True
-
- return False
-
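-# A minimal illustration with hypothetical data: the helper returns True as
-# soon as any requested tag matches one of the cluster's tags.
-# match_tags({'env': 'prd'}, {'Tags': [{'Key': 'env', 'Value': 'prd'}]}) # True
-# match_tags({'env': 'prd'}, {'Tags': [{'Key': 'env', 'Value': 'dev'}]}) # False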
-
-def find_clusters(conn, module, identifier=None, tags=None):
-
- try:
- cluster_paginator = conn.get_paginator('describe_clusters')
- clusters = cluster_paginator.paginate().build_full_result()
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to fetch clusters.')
-
- matched_clusters = []
-
- if identifier is not None:
- identifier_prog = re.compile('^' + identifier)
-
- for cluster in clusters['Clusters']:
-
- matched_identifier = True
- if identifier:
- matched_identifier = identifier_prog.search(cluster['ClusterIdentifier'])
-
- matched_tags = True
- if tags:
- matched_tags = match_tags(tags, cluster)
-
- if matched_identifier and matched_tags:
- matched_clusters.append(camel_dict_to_snake_dict(cluster))
-
- return matched_clusters
-
-
-def main():
-
- argument_spec = dict(
- cluster_identifier=dict(type='str', aliases=['identifier', 'name']),
- tags=dict(type='dict')
- )
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
- if module._name == 'redshift_facts':
- module.deprecate("The 'redshift_facts' module has been renamed to 'redshift_info'", version='2.13')
-
- cluster_identifier = module.params.get('cluster_identifier')
- cluster_tags = module.params.get('tags')
-
- redshift = module.client('redshift')
-
- results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags)
- module.exit_json(results=results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/redshift_subnet_group.py b/lib/ansible/modules/cloud/amazon/redshift_subnet_group.py
deleted file mode 100644
index 7885494af8..0000000000
--- a/lib/ansible/modules/cloud/amazon/redshift_subnet_group.py
+++ /dev/null
@@ -1,182 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2014 Jens Carl, Hothead Games Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-author:
- - "Jens Carl (@j-carl), Hothead Games Inc."
-module: redshift_subnet_group
-version_added: "2.2"
-short_description: manage Redshift cluster subnet groups
-description:
- - Creates, modifies, and deletes Redshift cluster subnet groups.
-options:
- state:
- description:
- - Specifies whether the subnet should be present or absent.
- required: true
- choices: ['present', 'absent']
- type: str
- group_name:
- description:
- - Cluster subnet group name.
- required: true
- aliases: ['name']
- type: str
- group_description:
- description:
- - Database subnet group description.
- aliases: ['description']
- type: str
- group_subnets:
- description:
- - List of subnet IDs that make up the cluster subnet group.
- aliases: ['subnets']
- type: list
- elements: str
-requirements: [ 'boto' ]
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Create a Redshift subnet group
-- local_action:
- module: redshift_subnet_group
- state: present
- group_name: redshift-subnet
- group_description: Redshift subnet
- group_subnets:
- - 'subnet-aaaaa'
- - 'subnet-bbbbb'
-
-# Remove subnet group
-- redshift_subnet_group:
- state: absent
- group_name: redshift-subnet
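-
-# Update the subnets of an existing group by re-declaring it with
-# state=present (subnet IDs are hypothetical)
-- redshift_subnet_group:
- state: present
- group_name: redshift-subnet
- group_description: Redshift subnet
- group_subnets:
- - 'subnet-aaaaa'
- - 'subnet-ccccc'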
-'''
-
-RETURN = '''
-group:
- description: dictionary containing all Redshift subnet group information
- returned: success
- type: complex
- contains:
- name:
- description: name of the Redshift subnet group
- returned: success
- type: str
- sample: "redshift_subnet_group_name"
- vpc_id:
- description: Id of the VPC where the subnet is located
- returned: success
- type: str
- sample: "vpc-aabb1122"
-'''
-
-try:
- import boto
- import boto.redshift
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(required=True, choices=['present', 'absent']),
- group_name=dict(required=True, aliases=['name']),
- group_description=dict(required=False, aliases=['description']),
- group_subnets=dict(required=False, aliases=['subnets'], type='list'),
- ))
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO:
- module.fail_json(msg='boto v2.9.0+ required for this module')
-
- state = module.params.get('state')
- group_name = module.params.get('group_name')
- group_description = module.params.get('group_description')
- group_subnets = module.params.get('group_subnets')
-
- if state == 'present':
- for required in ('group_name', 'group_description', 'group_subnets'):
- if not module.params.get(required):
- module.fail_json(msg=str("parameter %s required for state='present'" % required))
- else:
- for not_allowed in ('group_description', 'group_subnets'):
- if module.params.get(not_allowed):
- module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed))
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
- if not region:
- module.fail_json(msg=str("Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file"))
-
- # Connect to the Redshift endpoint.
- try:
- conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
- except boto.exception.JSONResponseError as e:
- module.fail_json(msg=str(e))
-
- try:
- changed = False
- exists = False
- group = None
-
- try:
- matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100)
- exists = len(matching_groups) > 0
- except boto.exception.JSONResponseError as e:
- if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
- module.fail_json(msg=str(e))
-
- if state == 'absent':
- if exists:
- conn.delete_cluster_subnet_group(group_name)
- changed = True
-
- else:
- if not exists:
- new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
- group = {
- 'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
- ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
- 'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
- ['ClusterSubnetGroup']['VpcId'],
- }
- else:
- changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
- group = {
- 'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
- ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
- 'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
- ['ClusterSubnetGroup']['VpcId'],
- }
-
- changed = True
-
- except boto.exception.JSONResponseError as e:
- module.fail_json(msg=str(e))
-
- module.exit_json(changed=changed, group=group)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/route53.py b/lib/ansible/modules/cloud/amazon/route53.py
deleted file mode 100644
index 6d64893700..0000000000
--- a/lib/ansible/modules/cloud/amazon/route53.py
+++ /dev/null
@@ -1,721 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: route53
-version_added: "1.3"
-short_description: Add or delete entries in Amazon's Route 53 DNS service
-description:
- - Creates and deletes DNS records in Amazon's Route 53 service.
-options:
- state:
- description:
- - Specifies the state of the resource record. As of Ansible 2.4, the I(command) option has been renamed
- to I(state), and the choices 'present' and 'absent' have been added; I(command) still works as an alias.
- required: true
- aliases: [ 'command' ]
- choices: [ 'present', 'absent', 'get', 'create', 'delete' ]
- type: str
- zone:
- description:
- - The DNS zone to modify.
- - Required if I(hosted_zone_id) is not supplied.
- type: str
- hosted_zone_id:
- description:
- - The Hosted Zone ID of the DNS zone to modify.
- - Required if I(zone) is not supplied.
- version_added: "2.0"
- type: str
- record:
- description:
- - The full DNS record to create or delete.
- required: true
- type: str
- ttl:
- description:
- - The TTL, in seconds, to give the new record.
- default: 3600
- type: int
- type:
- description:
- - The type of DNS record to create.
- required: true
- choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'SOA' ]
- type: str
- alias:
- description:
- - Indicates if this is an alias record.
- version_added: "1.9"
- type: bool
- default: false
- alias_hosted_zone_id:
- description:
- - The hosted zone identifier.
- version_added: "1.9"
- type: str
- alias_evaluate_target_health:
- description:
- - Whether or not to evaluate the health of the alias target. Useful for aliases to Elastic Load Balancers.
- type: bool
- default: false
- version_added: "2.1"
- value:
- description:
- - The new value when creating a DNS record. YAML lists or multiple comma-spaced values are allowed for non-alias records.
- - When deleting a record all values for the record must be specified or Route53 will not delete it.
- type: list
- overwrite:
- description:
- - Whether an existing record should be overwritten on create if values do not match.
- type: bool
- retry_interval:
- description:
- - In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds.
- If you have many domain names, the default of 500 seconds may be too long.
- default: 500
- type: int
- private_zone:
- description:
- - If set to C(yes), the private zone matching the requested name within the domain will be used if there are both public and private zones.
- The default is to use the public zone.
- type: bool
- default: false
- version_added: "1.9"
- identifier:
- description:
- - An identifier that differentiates among multiple resource record sets that have the same
- combination of DNS name and type. Required for weighted, latency-based, and failover resource record sets only.
- version_added: "2.0"
- type: str
- weight:
- description:
- - Weighted resource record sets only. Among resource record sets that
- have the same combination of DNS name and type, a value that
- determines what portion of traffic for the current resource record set
- is routed to the associated location.
- version_added: "2.0"
- type: int
- region:
- description:
- - Latency-based resource record sets only. Among resource record sets
- that have the same combination of DNS name and type, a value that
- determines which region this should be associated with for
- latency-based routing.
- version_added: "2.0"
- type: str
- health_check:
- description:
- - Health check to associate with this record.
- version_added: "2.0"
- type: str
- failover:
- description:
- - Failover resource record sets only. Whether this is the primary or
- secondary resource record set. Allowed values are PRIMARY and SECONDARY.
- version_added: "2.0"
- type: str
- choices: ['SECONDARY', 'PRIMARY']
- vpc_id:
- description:
- - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC."
- - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs.
- version_added: "2.0"
- type: str
- wait:
- description:
- - Wait until the changes have been replicated to all Amazon Route 53 DNS servers.
- type: bool
- default: false
- version_added: "2.1"
- wait_timeout:
- description:
- - How long to wait for the changes to be replicated, in seconds.
- default: 300
- version_added: "2.1"
- type: int
-author:
-- Bruce Pennypacker (@bpennypacker)
-- Mike Buzzetti (@jimbydamonk)
-extends_documentation_fragment: aws
-'''
-
-RETURN = '''
-nameservers:
- description: Nameservers associated with the zone.
- returned: when state is 'get'
- type: list
- sample:
- - ns-1036.awsdns-00.org.
- - ns-516.awsdns-00.net.
- - ns-1504.awsdns-00.co.uk.
- - ns-1.awsdns-00.com.
-set:
- description: Info specific to the resource record.
- returned: when state is 'get'
- type: complex
- contains:
- alias:
- description: Whether this is an alias.
- returned: always
- type: bool
- sample: false
- failover:
- description: Whether this is the primary or secondary resource record set.
- returned: always
- type: str
- sample: PRIMARY
- health_check:
- description: health_check associated with this record.
- returned: always
- type: str
- identifier:
- description: An identifier that differentiates among multiple resource record sets that have the same combination of DNS name and type.
- returned: always
- type: str
- record:
- description: Domain name for the record set.
- returned: always
- type: str
- sample: new.foo.com.
- region:
- description: Which region this should be associated with for latency-based routing.
- returned: always
- type: str
- sample: us-west-2
- ttl:
- description: Resource record cache TTL.
- returned: always
- type: str
- sample: '3600'
- type:
- description: Resource record set type.
- returned: always
- type: str
- sample: A
- value:
- description: Record value.
- returned: always
- type: str
- sample: 52.43.18.27
- values:
- description: Record Values.
- returned: always
- type: list
- sample:
- - 52.43.18.27
- weight:
- description: Weight of the record.
- returned: always
- type: str
- sample: '3'
- zone:
- description: Zone this record set belongs to.
- returned: always
- type: str
- sample: foo.bar.com.
-'''
-
-EXAMPLES = '''
-# Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated
-- route53:
- state: present
- zone: foo.com
- record: new.foo.com
- type: A
- ttl: 7200
- value: 1.1.1.1,2.2.2.2,3.3.3.3
- wait: yes
-
-# Update new.foo.com as an A record with a list of 3 IPs and wait until the changes have been replicated
-- route53:
- state: present
- zone: foo.com
- record: new.foo.com
- type: A
- ttl: 7200
- value:
- - 1.1.1.1
- - 2.2.2.2
- - 3.3.3.3
- wait: yes
-
-# Retrieve the details for new.foo.com
-- route53:
- state: get
- zone: foo.com
- record: new.foo.com
- type: A
- register: rec
-
-# Delete new.foo.com A record using the results from the get command
-- route53:
- state: absent
- zone: foo.com
- record: "{{ rec.set.record }}"
- ttl: "{{ rec.set.ttl }}"
- type: "{{ rec.set.type }}"
- value: "{{ rec.set.value }}"
-
-# Add an AAAA record. Note that because there are colons in the value,
-# the IPv6 address must be quoted. Also shows using the old form command=create.
-- route53:
- command: create
- zone: foo.com
- record: localhost.foo.com
- type: AAAA
- ttl: 7200
- value: "::1"
-
-# Add a SRV record with multiple fields for a service on port 22222
-# For more information on SRV records see:
-# https://en.wikipedia.org/wiki/SRV_record
-- route53:
- state: present
- zone: foo.com
- record: "_example-service._tcp.foo.com"
- type: SRV
- value: "0 0 22222 host1.foo.com,0 0 22222 host2.foo.com"
-
-# Add a TXT record. Note that TXT and SPF records must be surrounded
-# by quotes when sent to Route 53:
-- route53:
- state: present
- zone: foo.com
- record: localhost.foo.com
- type: TXT
- ttl: 7200
- value: '"bar"'
-
-# Add an alias record that points to an Amazon ELB:
-- route53:
- state: present
- zone: foo.com
- record: elb.foo.com
- type: A
- value: "{{ elb_dns_name }}"
- alias: True
- alias_hosted_zone_id: "{{ elb_zone_id }}"
-
-# Retrieve the details for elb.foo.com
-- route53:
- state: get
- zone: foo.com
- record: elb.foo.com
- type: A
- register: rec
-
-# Delete an alias record using the results from the get command
-- route53:
- state: absent
- zone: foo.com
- record: "{{ rec.set.record }}"
- ttl: "{{ rec.set.ttl }}"
- type: "{{ rec.set.type }}"
- value: "{{ rec.set.value }}"
- alias: True
- alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}"
-
-# Add an alias record that points to an Amazon ELB and evaluates its health:
-- route53:
- state: present
- zone: foo.com
- record: elb.foo.com
- type: A
- value: "{{ elb_dns_name }}"
- alias: True
- alias_hosted_zone_id: "{{ elb_zone_id }}"
- alias_evaluate_target_health: True
-
-# Add an AAAA record with Hosted Zone ID.
-- route53:
- state: present
- zone: foo.com
- hosted_zone_id: Z2AABBCCDDEEFF
- record: localhost.foo.com
- type: AAAA
- ttl: 7200
- value: "::1"
-
-# Use a routing policy to distribute traffic:
-- route53:
- state: present
- zone: foo.com
- record: www.foo.com
- type: CNAME
- value: host1.foo.com
- ttl: 30
- # Routing policy
- identifier: "host1@www"
- weight: 100
- health_check: "d994b780-3150-49fd-9205-356abdd42e75"
-
-# Add a CAA record (RFC 6844):
-- route53:
- state: present
- zone: example.com
- record: example.com
- type: CAA
- value:
- - 0 issue "ca.example.net"
- - 0 issuewild ";"
- - 0 iodef "mailto:security@example.com"
-
-'''
-
-import time
-import distutils.version
-
-try:
- import boto
- import boto.ec2
- from boto.route53 import Route53Connection
- from boto.route53.record import Record, ResourceRecordSets
- from boto.route53.status import Status
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
-
-
-MINIMUM_BOTO_VERSION = '2.28.0'
-WAIT_RETRY_SLEEP = 5 # how many seconds to wait between propagation status polls
-
-
-class TimeoutError(Exception):
- pass
-
-
-def get_zone_id_by_name(conn, module, zone_name, want_private, want_vpc_id):
- """Finds a zone by name or zone_id"""
- for zone in invoke_with_throttling_retries(conn.get_zones):
- # only save this zone id if the private status of the zone matches
- # the private_zone_in boolean specified in the params
- private_zone = module.boolean(zone.config.get('PrivateZone', False))
- if private_zone == want_private and zone.name == zone_name:
- if want_vpc_id:
- # NOTE: These details aren't available in other boto methods, hence the necessary
- # extra API call
- hosted_zone = invoke_with_throttling_retries(conn.get_hosted_zone, zone.id)
- zone_details = hosted_zone['GetHostedZoneResponse']
- # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
- if isinstance(zone_details['VPCs'], dict):
- if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id:
- return zone.id
- else: # Forward compatibility for when boto fixes that bug
- if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
- return zone.id
- else:
- return zone.id
- return None
-
-
-def commit(changes, retry_interval, wait, wait_timeout):
- """Commit changes, but retry PriorRequestNotComplete errors."""
- result = None
- retry = 10
- while True:
- try:
- retry -= 1
- result = changes.commit()
- break
- except boto.route53.exception.DNSServerError as e:
- code = e.body.split("<Code>")[1]
- code = code.split("</Code>")[0]
- if code != 'PriorRequestNotComplete' or retry < 0:
- raise e
- time.sleep(float(retry_interval))
-
- if wait:
- timeout_time = time.time() + wait_timeout
- connection = changes.connection
- change = result['ChangeResourceRecordSetsResponse']['ChangeInfo']
- status = Status(connection, change)
- while status.status != 'INSYNC' and time.time() < timeout_time:
- time.sleep(WAIT_RETRY_SLEEP)
- status.update()
- if time.time() >= timeout_time:
- raise TimeoutError()
- return result
-
-
-# Shamelessly copied over from https://git.io/vgmDG
-IGNORE_CODE = 'Throttling'
-MAX_RETRIES = 5
-
-
-def invoke_with_throttling_retries(function_ref, *argv, **kwargs):
- retries = 0
- while True:
- try:
- retval = function_ref(*argv, **kwargs)
- return retval
- except boto.exception.BotoServerError as e:
- if e.code != IGNORE_CODE or retries == MAX_RETRIES:
- raise e
- time.sleep(5 * (2**retries))
- retries += 1
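-
-# The back-off above sleeps 5 * (2 ** retries) seconds between attempts,
-# i.e. 5, 10, 20, 40 and 80 seconds; a sixth consecutive throttling error
-# is re-raised to the caller.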
-
-
-def decode_name(name):
- # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
- # tripping of things like * and @.
- return name.encode().decode('unicode_escape')
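-
-# For example, a wildcard record comes back from the API as the literal
-# string '\052.foo.com.' (octal escape); decode_name restores '*.foo.com.'.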
-
-
-def to_dict(rset, zone_in, zone_id):
- record = dict()
- record['zone'] = zone_in
- record['type'] = rset.type
- record['record'] = decode_name(rset.name)
- record['ttl'] = str(rset.ttl)
- record['identifier'] = rset.identifier
- record['weight'] = rset.weight
- record['region'] = rset.region
- record['failover'] = rset.failover
- record['health_check'] = rset.health_check
- record['hosted_zone_id'] = zone_id
- if rset.alias_dns_name:
- record['alias'] = True
- record['value'] = rset.alias_dns_name
- record['values'] = [rset.alias_dns_name]
- record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id
- record['alias_evaluate_target_health'] = rset.alias_evaluate_target_health
- else:
- record['alias'] = False
- record['value'] = ','.join(sorted(rset.resource_records))
- record['values'] = sorted(rset.resource_records)
- return record
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(type='str', required=True, choices=['absent', 'create', 'delete', 'get', 'present'], aliases=['command']),
- zone=dict(type='str'),
- hosted_zone_id=dict(type='str'),
- record=dict(type='str', required=True),
- ttl=dict(type='int', default=3600),
- type=dict(type='str', required=True, choices=['A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SPF', 'SRV', 'TXT']),
- alias=dict(type='bool'),
- alias_hosted_zone_id=dict(type='str'),
- alias_evaluate_target_health=dict(type='bool', default=False),
- value=dict(type='list'),
- overwrite=dict(type='bool'),
- retry_interval=dict(type='int', default=500),
- private_zone=dict(type='bool', default=False),
- identifier=dict(type='str'),
- weight=dict(type='int'),
- region=dict(type='str'),
- health_check=dict(type='str'),
- failover=dict(type='str', choices=['PRIMARY', 'SECONDARY']),
- vpc_id=dict(type='str'),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- ))
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- required_one_of=[['zone', 'hosted_zone_id']],
- # If alias is True then you must specify alias_hosted_zone as well
- required_together=[['alias', 'alias_hosted_zone_id']],
- # state=present, absent, create, delete THEN value is required
- required_if=(
- ('state', 'present', ['value']),
- ('state', 'create', ['value']),
- ('state', 'absent', ['value']),
- ('state', 'delete', ['value']),
- ),
- # failover, region and weight are mutually exclusive
- mutually_exclusive=[('failover', 'region', 'weight')],
- # failover, region and weight require identifier
- required_by=dict(
- failover=('identifier',),
- region=('identifier',),
- weight=('identifier',),
- ),
- )
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- if distutils.version.StrictVersion(boto.__version__) < distutils.version.StrictVersion(MINIMUM_BOTO_VERSION):
- module.fail_json(msg='Found boto in version %s, but >= %s is required' % (boto.__version__, MINIMUM_BOTO_VERSION))
-
- if module.params['state'] in ('present', 'create'):
- command_in = 'create'
- elif module.params['state'] in ('absent', 'delete'):
- command_in = 'delete'
- elif module.params['state'] == 'get':
- command_in = 'get'
-
- zone_in = (module.params.get('zone') or '').lower()
- hosted_zone_id_in = module.params.get('hosted_zone_id')
- ttl_in = module.params.get('ttl')
- record_in = module.params.get('record').lower()
- type_in = module.params.get('type')
- value_in = module.params.get('value') or []
- alias_in = module.params.get('alias')
- alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
- alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health')
- retry_interval_in = module.params.get('retry_interval')
-
- if module.params['vpc_id'] is not None:
- private_zone_in = True
- else:
- private_zone_in = module.params.get('private_zone')
-
- identifier_in = module.params.get('identifier')
- weight_in = module.params.get('weight')
- region_in = module.params.get('region')
- health_check_in = module.params.get('health_check')
- failover_in = module.params.get('failover')
- vpc_id_in = module.params.get('vpc_id')
- wait_in = module.params.get('wait')
- wait_timeout_in = module.params.get('wait_timeout')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
-
- if zone_in[-1:] != '.':
- zone_in += "."
-
- if record_in[-1:] != '.':
- record_in += "."
-
- if command_in == 'create' or command_in == 'delete':
- if alias_in and len(value_in) != 1:
- module.fail_json(msg="parameter 'value' must contain a single dns name for alias records")
- if (weight_in is None and region_in is None and failover_in is None) and identifier_in is not None:
- module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region or failover.")
-
- # connect to the route53 endpoint
- try:
- conn = Route53Connection(**aws_connect_kwargs)
- except boto.exception.BotoServerError as e:
- module.fail_json(msg=e.error_message)
-
- # Find the named zone ID
- zone_id = hosted_zone_id_in or get_zone_id_by_name(conn, module, zone_in, private_zone_in, vpc_id_in)
-
- # Verify that the requested zone is already defined in Route53
- if zone_id is None:
- errmsg = "Zone %s does not exist in Route53" % (zone_in or hosted_zone_id_in)
- module.fail_json(msg=errmsg)
-
- record = {}
-
- found_record = False
- wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in,
- identifier=identifier_in, weight=weight_in,
- region=region_in, health_check=health_check_in,
- failover=failover_in)
- for v in value_in:
- if alias_in:
- wanted_rset.set_alias(alias_hosted_zone_id_in, v, alias_evaluate_target_health_in)
- else:
- wanted_rset.add_value(v)
-
- need_to_sort_records = (type_in == 'CAA')
-
- # Sort records for wanted_rset if necessary (keep original list)
- unsorted_records = wanted_rset.resource_records
- if need_to_sort_records:
- wanted_rset.resource_records = sorted(unsorted_records)
-
- sets = invoke_with_throttling_retries(conn.get_all_rrsets, zone_id, name=record_in,
- type=type_in, identifier=identifier_in)
- sets_iter = iter(sets)
- while True:
- try:
- rset = invoke_with_throttling_retries(next, sets_iter)
- except StopIteration:
- break
- # Need to save these changes in rset, because rset.to_xml() is compared with wanted_rset.to_xml() in the next block
- rset.name = decode_name(rset.name)
-
- if identifier_in is not None:
- identifier_in = str(identifier_in)
-
- if rset.type == type_in and rset.name.lower() == record_in.lower() and rset.identifier == identifier_in:
- if need_to_sort_records:
- # Sort records
- rset.resource_records = sorted(rset.resource_records)
- found_record = True
- record = to_dict(rset, zone_in, zone_id)
- if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml():
- module.exit_json(changed=False)
-
- # We need to look only at the first rrset returned by the above call,
- # so break here. The returned elements begin with the one matching our
- # requested name, type, and identifier, if such an element exists,
- # followed by all others that come after it in alphabetical order.
- # Therefore, if the first set does not match, no subsequent set will
- # match either.
- break
-
- if command_in == 'get':
- if type_in == 'NS':
- ns = record.get('values', [])
- else:
- # Retrieve name servers associated to the zone.
- z = invoke_with_throttling_retries(conn.get_zone, zone_in)
- ns = invoke_with_throttling_retries(z.get_nameservers)
-
- module.exit_json(changed=False, set=record, nameservers=ns)
-
- if command_in == 'delete' and not found_record:
- module.exit_json(changed=False)
-
- changes = ResourceRecordSets(conn, zone_id)
-
- if command_in == 'create' or command_in == 'delete':
- if command_in == 'create' and found_record:
- if not module.params['overwrite']:
- module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it")
- command = 'UPSERT'
- else:
- command = command_in.upper()
- # Restore original order of records
- wanted_rset.resource_records = unsorted_records
- changes.add_change_record(command, wanted_rset)
-
- if not module.check_mode:
- try:
- invoke_with_throttling_retries(commit, changes, retry_interval_in, wait_in, wait_timeout_in)
- except boto.route53.exception.DNSServerError as e:
- txt = e.body.split("<Message>")[1]
- txt = txt.split("</Message>")[0]
- if "but it already exists" in txt:
- module.exit_json(changed=False)
- else:
- module.fail_json(msg=txt)
- except TimeoutError:
- module.fail_json(msg='Timeout waiting for changes to replicate')
-
- module.exit_json(
- changed=True,
- diff=dict(
- before=record,
- after=to_dict(wanted_rset, zone_in, zone_id) if command != 'delete' else {},
- ),
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/route53_health_check.py b/lib/ansible/modules/cloud/amazon/route53_health_check.py
deleted file mode 100644
index 0eb30e16cc..0000000000
--- a/lib/ansible/modules/cloud/amazon/route53_health_check.py
+++ /dev/null
@@ -1,375 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: route53_health_check
-short_description: Add or delete health-checks in Amazon's Route 53 DNS service
-description:
- - Creates and deletes DNS health checks in Amazon's Route 53 service.
- - Only the port, resource_path, string_match and request_interval are
- considered when updating existing health-checks.
-version_added: "2.0"
-options:
- state:
- description:
- - Specifies the action to take.
- choices: [ 'present', 'absent' ]
- type: str
- default: 'present'
- ip_address:
- description:
- - IP address of the endpoint to check. Either this or I(fqdn) has to be provided.
- type: str
- port:
- description:
- - The port on the endpoint on which you want Amazon Route 53 to perform
- health checks. Required for TCP checks.
- type: int
- type:
- description:
- - The type of health check that you want to create, which indicates how
- Amazon Route 53 determines whether an endpoint is healthy.
- required: true
- choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ]
- type: str
- resource_path:
- description:
- - The path that you want Amazon Route 53 to request when performing
- health checks. The path can be any value for which your endpoint will
- return an HTTP status code of 2xx or 3xx when the endpoint is healthy,
- for example the file /docs/route53-health-check.html.
- - Required for all checks except TCP.
- - The path must begin with a /
- - Maximum 255 characters.
- type: str
- fqdn:
- description:
- - Domain name of the endpoint to check. Either this or I(ip_address) has
- to be provided. When both are given, the I(fqdn) is used in the C(Host:)
- header of the HTTP request.
- type: str
- string_match:
- description:
- - If the check type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string
- that you want Amazon Route 53 to search for in the response body from
- the specified resource. If the string appears in the first 5120 bytes
- of the response body, Amazon Route 53 considers the resource healthy.
- type: str
- request_interval:
- description:
- - The number of seconds between the time that Amazon Route 53 gets a
- response from your endpoint and the time that it sends the next
- health-check request.
- default: 30
- choices: [ 10, 30 ]
- type: int
- failure_threshold:
- description:
- - The number of consecutive health checks that an endpoint must pass or
- fail for Amazon Route 53 to change the current status of the endpoint
- from unhealthy to healthy or vice versa.
- default: 3
- choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
- type: int
-author: "zimbatm (@zimbatm)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Create a health-check for host1.example.com and use it in record
-- route53_health_check:
- state: present
- fqdn: host1.example.com
- type: HTTP_STR_MATCH
- resource_path: /
- string_match: "Hello"
- request_interval: 10
- failure_threshold: 2
- register: my_health_check
-
-- route53:
- command: create
- zone: "example.com"
- type: CNAME
- record: "www.example.com"
- value: host1.example.com
- ttl: 30
- # Routing policy
- identifier: "host1@www"
- weight: 100
- health_check: "{{ my_health_check.health_check.id }}"
-
-# Delete health-check
-- route53_health_check:
- state: absent
- fqdn: host1.example.com
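-
-# Create a TCP health-check; 'port' is required for TCP checks and no
-# resource_path applies (the address is hypothetical)
-- route53_health_check:
- state: present
- ip_address: 203.0.113.10
- type: TCP
- port: 22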
-
-'''
-
-import uuid
-
-try:
- import boto
- import boto.ec2
- from boto import route53
- from boto.route53 import Route53Connection, exception
- from boto.route53.healthcheck import HealthCheck
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-# import module snippets
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
-
-
-# Things that can't get changed:
-# protocol
-# ip_address or domain
-# request_interval
-# string_match if not previously enabled
-def find_health_check(conn, wanted):
- """Searches for health checks that have the exact same set of immutable values"""
-
- results = conn.get_list_health_checks()
-
- while True:
- for check in results.HealthChecks:
- config = check.HealthCheckConfig
- if (
- config.get('IPAddress') == wanted.ip_addr and
- config.get('FullyQualifiedDomainName') == wanted.fqdn and
- config.get('Type') == wanted.hc_type and
- config.get('RequestInterval') == str(wanted.request_interval) and
- config.get('Port') == str(wanted.port)
- ):
- return check
-
- if results.IsTruncated == 'true':
- results = conn.get_list_health_checks(marker=results.NextMarker)
- else:
- return None
-
-
-def to_health_check(config):
- return HealthCheck(
- config.get('IPAddress'),
- int(config.get('Port')),
- config.get('Type'),
- config.get('ResourcePath'),
- fqdn=config.get('FullyQualifiedDomainName'),
- string_match=config.get('SearchString'),
- request_interval=int(config.get('RequestInterval')),
- failure_threshold=int(config.get('FailureThreshold')),
- )
-
-
-def health_check_diff(a, b):
- a = a.__dict__
- b = b.__dict__
- if a == b:
- return {}
- diff = {}
- for key in set(a.keys()) | set(b.keys()):
- if a.get(key) != b.get(key):
- diff[key] = b.get(key)
- return diff
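-
-# Minimal illustration (hypothetical checks): only keys whose values differ
-# are returned, with the value taken from the second argument, e.g.
-# health_check_diff(existing, wanted) -> {'failure_threshold': 2} when only
-# that attribute changed.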
-
-
-def to_template_params(health_check):
- params = {
- 'ip_addr_part': '',
- 'port': health_check.port,
- 'type': health_check.hc_type,
- 'resource_path_part': '',
- 'fqdn_part': '',
- 'string_match_part': '',
- 'request_interval': health_check.request_interval,
- 'failure_threshold': health_check.failure_threshold,
- }
- if health_check.ip_addr:
- params['ip_addr_part'] = HealthCheck.XMLIpAddrPart % {'ip_addr': health_check.ip_addr}
- if health_check.resource_path:
- params['resource_path_part'] = XMLResourcePathPart % {'resource_path': health_check.resource_path}
- if health_check.fqdn:
- params['fqdn_part'] = HealthCheck.XMLFQDNPart % {'fqdn': health_check.fqdn}
- if health_check.string_match:
- params['string_match_part'] = HealthCheck.XMLStringMatchPart % {'string_match': health_check.string_match}
- return params
-
-
-XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>"""
-
-POSTXMLBody = """
- <CreateHealthCheckRequest xmlns="%(xmlns)s">
- <CallerReference>%(caller_ref)s</CallerReference>
- <HealthCheckConfig>
- %(ip_addr_part)s
- <Port>%(port)s</Port>
- <Type>%(type)s</Type>
- %(resource_path_part)s
- %(fqdn_part)s
- %(string_match_part)s
- <RequestInterval>%(request_interval)s</RequestInterval>
- <FailureThreshold>%(failure_threshold)s</FailureThreshold>
- </HealthCheckConfig>
- </CreateHealthCheckRequest>
- """
-
-UPDATEHCXMLBody = """
- <UpdateHealthCheckRequest xmlns="%(xmlns)s">
- <HealthCheckVersion>%(health_check_version)s</HealthCheckVersion>
- %(ip_addr_part)s
- <Port>%(port)s</Port>
- %(resource_path_part)s
- %(fqdn_part)s
- %(string_match_part)s
- <FailureThreshold>%(failure_threshold)i</FailureThreshold>
- </UpdateHealthCheckRequest>
- """
-
-
-def create_health_check(conn, health_check, caller_ref=None):
- if caller_ref is None:
- caller_ref = str(uuid.uuid4())
- uri = '/%s/healthcheck' % conn.Version
- params = to_template_params(health_check)
- params.update(xmlns=conn.XMLNameSpace, caller_ref=caller_ref)
-
- xml_body = POSTXMLBody % params
- response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
- body = response.read()
- boto.log.debug(body)
- if response.status == 201:
- e = boto.jsonresponse.Element()
- h = boto.jsonresponse.XmlHandler(e, None)
- h.parse(body)
- return e
- else:
- raise exception.DNSServerError(response.status, response.reason, body)
-
-
-def update_health_check(conn, health_check_id, health_check_version, health_check):
- uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
- params = to_template_params(health_check)
- params.update(
- xmlns=conn.XMLNameSpace,
- health_check_version=health_check_version,
- )
- xml_body = UPDATEHCXMLBody % params
- response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
- body = response.read()
- boto.log.debug(body)
- if response.status not in (200, 204):
- raise exception.DNSServerError(response.status,
- response.reason,
- body)
- e = boto.jsonresponse.Element()
- h = boto.jsonresponse.XmlHandler(e, None)
- h.parse(body)
- return e
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- state=dict(choices=['present', 'absent'], default='present'),
- ip_address=dict(),
- port=dict(type='int'),
- type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
- resource_path=dict(),
- fqdn=dict(),
- string_match=dict(),
- request_interval=dict(type='int', choices=[10, 30], default=30),
- failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
- )
- )
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO:
- module.fail_json(msg='boto 2.27.0+ required for this module')
-
- state_in = module.params.get('state')
- ip_addr_in = module.params.get('ip_address')
- port_in = module.params.get('port')
- type_in = module.params.get('type')
- resource_path_in = module.params.get('resource_path')
- fqdn_in = module.params.get('fqdn')
- string_match_in = module.params.get('string_match')
- request_interval_in = module.params.get('request_interval')
- failure_threshold_in = module.params.get('failure_threshold')
-
- if ip_addr_in is None and fqdn_in is None:
- module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required")
-
- # Default port
- if port_in is None:
- if type_in in ['HTTP', 'HTTP_STR_MATCH']:
- port_in = 80
- elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
- port_in = 443
- else:
- module.fail_json(msg="parameter 'port' is required for 'type' TCP")
-
- # string_match in relation with type
- if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
- if string_match_in is None:
- module.fail_json(msg="parameter 'string_match' is required for the HTTP(S)_STR_MATCH types")
- elif len(string_match_in) > 255:
- module.fail_json(msg="parameter 'string_match' is limited to 255 characters max")
- elif string_match_in:
- module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types")
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
- # connect to the route53 endpoint
- try:
- conn = Route53Connection(**aws_connect_kwargs)
- except boto.exception.BotoServerError as e:
- module.fail_json(msg=e.error_message)
-
- changed = False
- action = None
- check_id = None
- wanted_config = HealthCheck(ip_addr_in, port_in, type_in, resource_path_in, fqdn_in, string_match_in, request_interval_in, failure_threshold_in)
- existing_check = find_health_check(conn, wanted_config)
- if existing_check:
- check_id = existing_check.Id
- existing_config = to_health_check(existing_check.HealthCheckConfig)
-
- if state_in == 'present':
- if existing_check is None:
- action = "create"
- check_id = create_health_check(conn, wanted_config).HealthCheck.Id
- changed = True
- else:
- diff = health_check_diff(existing_config, wanted_config)
- if diff:
- action = "update"
- update_health_check(conn, existing_check.Id, int(existing_check.HealthCheckVersion), wanted_config)
- changed = True
- elif state_in == 'absent':
- if check_id:
- action = "delete"
- conn.delete_health_check(check_id)
- changed = True
- else:
- module.fail_json(msg="Logic Error: Unknown state")
-
- module.exit_json(changed=changed, health_check=dict(id=check_id), action=action)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/route53_info.py b/lib/ansible/modules/cloud/amazon/route53_info.py
deleted file mode 100644
index 910bdd1188..0000000000
--- a/lib/ansible/modules/cloud/amazon/route53_info.py
+++ /dev/null
@@ -1,499 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
-module: route53_info
-short_description: Retrieves route53 details using AWS methods
-description:
- - Gets various details related to Route53 zone, record set or health check details.
- - This module was called C(route53_facts) before Ansible 2.9. The usage did not change.
-version_added: "2.0"
-options:
- query:
- description:
- - Specifies the query action to take.
- required: True
- choices: [
- 'change',
- 'checker_ip_range',
- 'health_check',
- 'hosted_zone',
- 'record_sets',
- 'reusable_delegation_set',
- ]
- type: str
- change_id:
- description:
- - The ID of the change batch request.
- - The value that you specify here is the value that
- ChangeResourceRecordSets returned in the Id element
- when you submitted the request.
- - Required if I(query=change).
- required: false
- type: str
- hosted_zone_id:
- description:
- - The Hosted Zone ID of the DNS zone.
- - Required if I(query) is set to I(hosted_zone) and I(hosted_zone_method) is set to I(details).
- - Required if I(query) is set to I(record_sets).
- required: false
- type: str
- max_items:
- description:
- - Maximum number of items to return for various get/list requests.
- required: false
- type: str
- next_marker:
- description:
- - "Some requests such as list_command: hosted_zones will return a maximum
- number of entries - EG 100 or the number specified by I(max_items).
- If the number of entries exceeds this maximum another request can be sent
- using the NextMarker entry from the first response to get the next page
- of results."
- required: false
- type: str
- delegation_set_id:
- description:
- - The DNS Zone delegation set ID.
- required: false
- type: str
- start_record_name:
- description:
- - "The first name in the lexicographic ordering of domain names that you want
- the list_command: record_sets to start listing from."
- required: false
- type: str
- type:
- description:
- - The type of DNS record.
- required: false
- choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS' ]
- type: str
- dns_name:
- description:
- - The first name in the lexicographic ordering of domain names that you want
- the list_command to start listing from.
- required: false
- type: str
- resource_id:
- description:
- - The ID(s) of the specified resource(s).
- - Required if I(query=health_check) and I(health_check_method=tags).
- - Required if I(query=hosted_zone) and I(hosted_zone_method=tags).
- required: false
- aliases: ['resource_ids']
- type: list
- elements: str
- health_check_id:
- description:
- - The ID of the health check.
- - Required if C(query) is set to C(health_check) and
- C(health_check_method) is set to C(details) or C(status) or C(failure_reason).
- required: false
- type: str
- hosted_zone_method:
- description:
- - "This is used in conjunction with query: hosted_zone.
- It allows for listing details, counts or tags of various
- hosted zone details."
- required: false
- choices: [
- 'details',
- 'list',
- 'list_by_name',
- 'count',
- 'tags',
- ]
- default: 'list'
- type: str
- health_check_method:
- description:
- - "This is used in conjunction with query: health_check.
- It allows for listing details, counts or tags of various
- health check details."
- required: false
- choices: [
- 'list',
- 'details',
- 'status',
- 'failure_reason',
- 'count',
- 'tags',
- ]
- default: 'list'
- type: str
-author: Karen Cheng (@Etherdaemon)
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Simple example of listing all hosted zones
-- name: List all hosted zones
- route53_info:
- query: hosted_zone
- register: hosted_zones
-
-# Getting a count of hosted zones
-- name: Return a count of all hosted zones
- route53_info:
- query: hosted_zone
- hosted_zone_method: count
- register: hosted_zone_count
-
-- name: List the first 20 resource record sets in a given hosted zone
- route53_info:
- profile: account_name
- query: record_sets
- hosted_zone_id: ZZZ1111112222
- max_items: 20
- register: record_sets
-
-- name: List first 20 health checks
- route53_info:
- query: health_check
- health_check_method: list
- max_items: 20
- register: health_checks
-
-- name: Get health check last failure_reason
- route53_info:
- query: health_check
- health_check_method: failure_reason
- health_check_id: 00000000-1111-2222-3333-12345678abcd
- register: health_check_failure_reason
-
-- name: Retrieve reusable delegation set details
- route53_info:
- query: reusable_delegation_set
- delegation_set_id: delegation id
- register: delegation_sets
-
-- name: setup of example for using next_marker
- route53_info:
- query: hosted_zone
- max_items: 1
- register: first_info
-
-- name: example for using next_marker
- route53_info:
- query: hosted_zone
- next_marker: "{{ first_info.NextMarker }}"
- max_items: 1
- when: "{{ 'NextMarker' in first_info }}"
-
-- name: retrieve host entries starting with host1.workshop.test.io
- block:
- - name: grab zone id
- route53_zone:
- zone: "test.io"
- register: AWSINFO
-
- - name: grab Route53 record information
- route53_info:
- type: A
- query: record_sets
- hosted_zone_id: "{{ AWSINFO.zone_id }}"
- start_record_name: "host1.workshop.test.io"
- register: RECORDS
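-
-# List hosted zones by name (hosted_zone_method and dns_name are documented
-# options above; the zone is hypothetical)
-- name: List hosted zones starting from example.com
- route53_info:
- query: hosted_zone
- hosted_zone_method: list_by_name
- dns_name: example.com
- register: zones_by_name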
-'''
-try:
- import boto
- import botocore
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-try:
- import boto3
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
-from ansible.module_utils._text import to_native
-
-
-def get_hosted_zone(client, module):
- params = dict()
-
- if module.params.get('hosted_zone_id'):
- params['Id'] = module.params.get('hosted_zone_id')
- else:
- module.fail_json(msg="Hosted Zone Id is required")
-
- return client.get_hosted_zone(**params)
-
-
-def reusable_delegation_set_details(client, module):
- params = dict()
- if not module.params.get('delegation_set_id'):
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('next_marker'):
- params['Marker'] = module.params.get('next_marker')
-
- results = client.list_reusable_delegation_sets(**params)
- else:
- params['DelegationSetId'] = module.params.get('delegation_set_id')
- results = client.get_reusable_delegation_set(**params)
-
- return results
-
-
-def list_hosted_zones(client, module):
- params = dict()
-
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('next_marker'):
- params['Marker'] = module.params.get('next_marker')
-
- if module.params.get('delegation_set_id'):
- params['DelegationSetId'] = module.params.get('delegation_set_id')
-
- paginator = client.get_paginator('list_hosted_zones')
- zones = paginator.paginate(**params).build_full_result()['HostedZones']
- return {
- "HostedZones": zones,
- "list": zones,
- }
-
-
-def list_hosted_zones_by_name(client, module):
- params = dict()
-
- if module.params.get('hosted_zone_id'):
- params['HostedZoneId'] = module.params.get('hosted_zone_id')
-
- if module.params.get('dns_name'):
- params['DNSName'] = module.params.get('dns_name')
-
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- return client.list_hosted_zones_by_name(**params)
-
-
-def change_details(client, module):
- params = dict()
-
- if module.params.get('change_id'):
- params['Id'] = module.params.get('change_id')
- else:
- module.fail_json(msg="change_id is required")
-
- results = client.get_change(**params)
- return results
-
-
-def checker_ip_range_details(client, module):
- return client.get_checker_ip_ranges()
-
-
-def get_count(client, module):
- if module.params.get('query') == 'health_check':
- results = client.get_health_check_count()
- else:
- results = client.get_hosted_zone_count()
-
- return results
-
-
-def get_health_check(client, module):
- params = dict()
-
- if not module.params.get('health_check_id'):
- module.fail_json(msg="health_check_id is required")
- else:
- params['HealthCheckId'] = module.params.get('health_check_id')
-
- if module.params.get('health_check_method') == 'details':
- results = client.get_health_check(**params)
- elif module.params.get('health_check_method') == 'failure_reason':
- results = client.get_health_check_last_failure_reason(**params)
- elif module.params.get('health_check_method') == 'status':
- results = client.get_health_check_status(**params)
-
- return results
-
-
-def get_resource_tags(client, module):
- params = dict()
-
- if module.params.get('resource_id'):
- params['ResourceIds'] = module.params.get('resource_id')
- else:
- module.fail_json(msg="resource_id or resource_ids is required")
-
- if module.params.get('query') == 'health_check':
- params['ResourceType'] = 'healthcheck'
- else:
- params['ResourceType'] = 'hostedzone'
-
- return client.list_tags_for_resources(**params)
-
-
-def list_health_checks(client, module):
- params = dict()
-
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('next_marker'):
- params['Marker'] = module.params.get('next_marker')
-
- paginator = client.get_paginator('list_health_checks')
- health_checks = paginator.paginate(**params).build_full_result()['HealthChecks']
- return {
- "HealthChecks": health_checks,
- "list": health_checks,
- }
-
-
-def record_sets_details(client, module):
- params = dict()
-
- if module.params.get('hosted_zone_id'):
- params['HostedZoneId'] = module.params.get('hosted_zone_id')
- else:
- module.fail_json(msg="Hosted Zone Id is required")
-
- if module.params.get('max_items'):
- params['MaxItems'] = module.params.get('max_items')
-
- if module.params.get('start_record_name'):
- params['StartRecordName'] = module.params.get('start_record_name')
-
- if module.params.get('type') and not module.params.get('start_record_name'):
- module.fail_json(msg="start_record_name must be specified if type is set")
- elif module.params.get('type'):
- params['StartRecordType'] = module.params.get('type')
-
- paginator = client.get_paginator('list_resource_record_sets')
- record_sets = paginator.paginate(**params).build_full_result()['ResourceRecordSets']
- return {
- "ResourceRecordSets": record_sets,
- "list": record_sets,
- }
-
-
-def health_check_details(client, module):
- health_check_invocations = {
- 'list': list_health_checks,
- 'details': get_health_check,
- 'status': get_health_check,
- 'failure_reason': get_health_check,
- 'count': get_count,
- 'tags': get_resource_tags,
- }
-
- results = health_check_invocations[module.params.get('health_check_method')](client, module)
- return results
-
-
-def hosted_zone_details(client, module):
- hosted_zone_invocations = {
- 'details': get_hosted_zone,
- 'list': list_hosted_zones,
- 'list_by_name': list_hosted_zones_by_name,
- 'count': get_count,
- 'tags': get_resource_tags,
- }
-
- results = hosted_zone_invocations[module.params.get('hosted_zone_method')](client, module)
- return results
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- query=dict(choices=[
- 'change',
- 'checker_ip_range',
- 'health_check',
- 'hosted_zone',
- 'record_sets',
- 'reusable_delegation_set',
- ], required=True),
- change_id=dict(),
- hosted_zone_id=dict(),
- max_items=dict(),
- next_marker=dict(),
- delegation_set_id=dict(),
- start_record_name=dict(),
- type=dict(choices=[
- 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS'
- ]),
- dns_name=dict(),
- resource_id=dict(type='list', aliases=['resource_ids']),
- health_check_id=dict(),
- hosted_zone_method=dict(choices=[
- 'details',
- 'list',
- 'list_by_name',
- 'count',
- 'tags'
- ], default='list'),
- health_check_method=dict(choices=[
- 'list',
- 'details',
- 'status',
- 'failure_reason',
- 'count',
- 'tags',
- ], default='list'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[
- ['hosted_zone_method', 'health_check_method'],
- ],
- )
- if module._name == 'route53_facts':
- module.deprecate("The 'route53_facts' module has been renamed to 'route53_info'", version='2.13')
-
- # Validate Requirements
- if not (HAS_BOTO or HAS_BOTO3):
- module.fail_json(msg='boto or boto3 is required.')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- route53 = boto3_conn(module, conn_type='client', resource='route53', region=region, endpoint=ec2_url, **aws_connect_kwargs)
-
- invocations = {
- 'change': change_details,
- 'checker_ip_range': checker_ip_range_details,
- 'health_check': health_check_details,
- 'hosted_zone': hosted_zone_details,
- 'record_sets': record_sets_details,
- 'reusable_delegation_set': reusable_delegation_set_details,
- }
-
- results = dict(changed=False)
- try:
- results = invocations[module.params.get('query')](route53, module)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json(msg=to_native(e))
-
- module.exit_json(**results)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/route53_zone.py b/lib/ansible/modules/cloud/amazon/route53_zone.py
deleted file mode 100644
index f3285da2c5..0000000000
--- a/lib/ansible/modules/cloud/amazon/route53_zone.py
+++ /dev/null
@@ -1,442 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
-module: route53_zone
-short_description: add or delete Route53 zones
-description:
- - Creates and deletes Route53 private and public zones.
-version_added: "2.0"
-requirements: [ boto3 ]
-options:
- zone:
- description:
- - "The DNS zone record (eg: foo.com.)"
- required: true
- type: str
- state:
- description:
- - Whether or not the zone should exist.
- default: present
- choices: [ "present", "absent" ]
- type: str
- vpc_id:
- description:
- - The VPC ID the zone should be a part of (if this is going to be a private zone).
- type: str
- vpc_region:
- description:
- - The VPC Region the zone should be a part of (if this is going to be a private zone).
- type: str
- comment:
- description:
- - Comment associated with the zone.
- default: ''
- type: str
- hosted_zone_id:
- description:
- - The unique zone identifier you want to delete, or "all" if there are many zones with the same domain name.
- - Required if there are multiple zones identified with the above options.
- version_added: 2.4
- type: str
- delegation_set_id:
- description:
- - The reusable delegation set ID to be associated with the zone.
- - Note that you can't associate a reusable delegation set with a private hosted zone.
- version_added: 2.6
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-author: "Christopher Troup (@minichate)"
-'''
-
-EXAMPLES = '''
-- name: create a public zone
- route53_zone:
- zone: example.com
- comment: this is an example
-
-- name: delete a public zone
- route53_zone:
- zone: example.com
- state: absent
-
-- name: create a private zone
- route53_zone:
- zone: devel.example.com
- vpc_id: '{{ myvpc_id }}'
- vpc_region: us-west-2
- comment: developer domain
-
-- name: create a public zone associated with a specific reusable delegation set
- route53_zone:
- zone: example.com
- comment: reusable delegation set example
- delegation_set_id: A1BCDEF2GHIJKL
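-
-# A hedged extra sketch (not in the original docs): removing one specific zone
-# by ID when several zones share the same name; the ID is a placeholder taken
-# from the sample values in the return documentation below.
-- name: delete a zone by its hosted zone ID
- route53_zone:
- zone: example.com
- hosted_zone_id: Z6JQG9820BEFMW
- state: absent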
-'''
-
-RETURN = '''
-comment:
- description: optional hosted zone comment
- returned: when hosted zone exists
- type: str
- sample: "Private zone"
-name:
- description: hosted zone name
- returned: when hosted zone exists
- type: str
- sample: "private.local."
-private_zone:
- description: whether hosted zone is private or public
- returned: when hosted zone exists
- type: bool
- sample: true
-vpc_id:
- description: id of vpc attached to private hosted zone
- returned: for private hosted zone
- type: str
- sample: "vpc-1d36c84f"
-vpc_region:
- description: region of vpc attached to private hosted zone
- returned: for private hosted zone
- type: str
- sample: "eu-west-1"
-zone_id:
- description: hosted zone id
- returned: when hosted zone exists
- type: str
- sample: "Z6JQG9820BEFMW"
-delegation_set_id:
- description: id of the associated reusable delegation set
- returned: for public hosted zones, if they have been associated with a reusable delegation set
- type: str
- sample: "A1BCDEF2GHIJKL"
-'''
-
-import time
-from ansible.module_utils.aws.core import AnsibleAWSModule
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def find_zones(module, client, zone_in, private_zone):
- try:
- paginator = client.get_paginator('list_hosted_zones')
- results = paginator.paginate().build_full_result()
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not list current hosted zones")
- zones = []
- for r53zone in results['HostedZones']:
- if r53zone['Name'] != zone_in:
- continue
- # only save zone names that match the public/private setting
- if (r53zone['Config']['PrivateZone'] and private_zone) or \
- (not r53zone['Config']['PrivateZone'] and not private_zone):
- zones.append(r53zone)
-
- return zones
-
-
-def create(module, client, matching_zones):
- zone_in = module.params.get('zone').lower()
- vpc_id = module.params.get('vpc_id')
- vpc_region = module.params.get('vpc_region')
- comment = module.params.get('comment')
- delegation_set_id = module.params.get('delegation_set_id')
-
- if not zone_in.endswith('.'):
- zone_in += "."
-
- private_zone = bool(vpc_id and vpc_region)
-
- record = {
- 'private_zone': private_zone,
- 'vpc_id': vpc_id,
- 'vpc_region': vpc_region,
- 'comment': comment,
- 'name': zone_in,
- 'delegation_set_id': delegation_set_id,
- 'zone_id': None,
- }
-
- if private_zone:
- changed, result = create_or_update_private(module, client, matching_zones, record)
- else:
- changed, result = create_or_update_public(module, client, matching_zones, record)
-
- return changed, result
-
-
-def create_or_update_private(module, client, matching_zones, record):
- for z in matching_zones:
- try:
- result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPCids
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id'])
- zone_details = result['HostedZone']
- vpc_details = result['VPCs']
- current_vpc_id = None
- current_vpc_region = None
- if isinstance(vpc_details, dict):
- if vpc_details['VPC']['VPCId'] == record['vpc_id']:
- current_vpc_id = vpc_details['VPC']['VPCId']
- current_vpc_region = vpc_details['VPC']['VPCRegion']
- else:
- if record['vpc_id'] in [v['VPCId'] for v in vpc_details]:
- current_vpc_id = record['vpc_id']
- if record['vpc_region'] in [v['VPCRegion'] for v in vpc_details]:
- current_vpc_region = record['vpc_region']
-
- if record['vpc_id'] == current_vpc_id and record['vpc_region'] == current_vpc_region:
- record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
- if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']:
- if not module.check_mode:
- try:
- client.update_hosted_zone_comment(Id=zone_details['Id'], Comment=record['comment'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id'])
- return True, record
- else:
- record['msg'] = "There is already a private hosted zone in the same region with the same VPC \
- you chose. Unable to create a new private hosted zone in the same name space."
- return False, record
-
- if not module.check_mode:
- try:
- result = client.create_hosted_zone(
- Name=record['name'],
- HostedZoneConfig={
- 'Comment': record['comment'] if record['comment'] is not None else "",
- 'PrivateZone': True,
- },
- VPC={
- 'VPCRegion': record['vpc_region'],
- 'VPCId': record['vpc_id'],
- },
- CallerReference="%s-%s" % (record['name'], time.time()),
- )
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not create hosted zone")
-
- hosted_zone = result['HostedZone']
- zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
- record['zone_id'] = zone_id
-
- changed = True
- return changed, record
-
-
-def create_or_update_public(module, client, matching_zones, record):
- zone_details, zone_delegation_set_details = None, {}
- for matching_zone in matching_zones:
- try:
- zone = client.get_hosted_zone(Id=matching_zone['Id'])
- zone_details = zone['HostedZone']
- zone_delegation_set_details = zone.get('DelegationSet', {})
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % matching_zone['Id'])
- if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']:
- if not module.check_mode:
- try:
- client.update_hosted_zone_comment(
- Id=zone_details['Id'],
- Comment=record['comment']
- )
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id'])
- changed = True
- else:
- changed = False
- break
-
- if zone_details is None:
- if not module.check_mode:
- try:
- params = dict(
- Name=record['name'],
- HostedZoneConfig={
- 'Comment': record['comment'] if record['comment'] is not None else "",
- 'PrivateZone': False,
- },
- CallerReference="%s-%s" % (record['name'], time.time()),
- )
-
- if record.get('delegation_set_id') is not None:
- params['DelegationSetId'] = record['delegation_set_id']
-
- result = client.create_hosted_zone(**params)
- zone_details = result['HostedZone']
- zone_delegation_set_details = result.get('DelegationSet', {})
-
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not create hosted zone")
- changed = True
-
- if module.check_mode:
- if zone_details:
- record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
- else:
- record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '')
- record['name'] = zone_details['Name']
- record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '')
-
- return changed, record
-
-
-def delete_private(module, client, matching_zones, vpc_id, vpc_region):
- for z in matching_zones:
- try:
- result = client.get_hosted_zone(Id=z['Id'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id'])
- zone_details = result['HostedZone']
- vpc_details = result['VPCs']
- if isinstance(vpc_details, dict):
- if vpc_details['VPC']['VPCId'] == vpc_id and vpc_region == vpc_details['VPC']['VPCRegion']:
- if not module.check_mode:
- try:
- client.delete_hosted_zone(Id=z['Id'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
- return True, "Successfully deleted %s" % zone_details['Name']
- else:
- if vpc_id in [v['VPCId'] for v in vpc_details] and vpc_region in [v['VPCRegion'] for v in vpc_details]:
- if not module.check_mode:
- try:
- client.delete_hosted_zone(Id=z['Id'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
- return True, "Successfully deleted %s" % zone_details['Name']
-
- return False, "The vpc_id and the vpc_region do not match a private hosted zone."
-
-
-def delete_public(module, client, matching_zones):
- if len(matching_zones) > 1:
- changed = False
- msg = "There are multiple zones that match. Use hosted_zone_id to specify the correct zone."
- else:
- if not module.check_mode:
- try:
- client.delete_hosted_zone(Id=matching_zones[0]['Id'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not get delete hosted zone %s" % matching_zones[0]['Id'])
- changed = True
- msg = "Successfully deleted %s" % matching_zones[0]['Id']
- return changed, msg
-
-
-def delete_hosted_id(module, client, hosted_zone_id, matching_zones):
- if hosted_zone_id == "all":
- deleted = []
- for z in matching_zones:
- deleted.append(z['Id'])
- if not module.check_mode:
- try:
- client.delete_hosted_zone(Id=z['Id'])
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
- changed = True
- msg = "Successfully deleted zones: %s" % deleted
- elif hosted_zone_id in [zo['Id'].replace('/hostedzone/', '') for zo in matching_zones]:
- if not module.check_mode:
- try:
- client.delete_hosted_zone(Id=hosted_zone_id)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Could not delete hosted zone %s" % hosted_zone_id)
- changed = True
- msg = "Successfully deleted zone: %s" % hosted_zone_id
- else:
- changed = False
- msg = "There is no zone to delete that matches hosted_zone_id %s." % hosted_zone_id
- return changed, msg
-
-
-def delete(module, client, matching_zones):
- zone_in = module.params.get('zone').lower()
- vpc_id = module.params.get('vpc_id')
- vpc_region = module.params.get('vpc_region')
- hosted_zone_id = module.params.get('hosted_zone_id')
-
- if not zone_in.endswith('.'):
- zone_in += "."
-
- private_zone = bool(vpc_id and vpc_region)
-
- if zone_in in [z['Name'] for z in matching_zones]:
- if hosted_zone_id:
- changed, result = delete_hosted_id(module, client, hosted_zone_id, matching_zones)
- else:
- if private_zone:
- changed, result = delete_private(module, client, matching_zones, vpc_id, vpc_region)
- else:
- changed, result = delete_public(module, client, matching_zones)
- else:
- changed = False
- result = "No zone to delete."
-
- return changed, result
-
-
-def main():
- argument_spec = dict(
- zone=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- vpc_id=dict(default=None),
- vpc_region=dict(default=None),
- comment=dict(default=''),
- hosted_zone_id=dict(),
- delegation_set_id=dict(),
- )
-
- mutually_exclusive = [
- ['delegation_set_id', 'vpc_id'],
- ['delegation_set_id', 'vpc_region'],
- ]
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=True,
- )
-
- zone_in = module.params.get('zone').lower()
- state = module.params.get('state').lower()
- vpc_id = module.params.get('vpc_id')
- vpc_region = module.params.get('vpc_region')
-
- if not zone_in.endswith('.'):
- zone_in += "."
-
- private_zone = bool(vpc_id and vpc_region)
-
- client = module.client('route53')
-
- zones = find_zones(module, client, zone_in, private_zone)
- if state == 'present':
- changed, result = create(module, client, matching_zones=zones)
- elif state == 'absent':
- changed, result = delete(module, client, matching_zones=zones)
-
- if isinstance(result, dict):
- module.exit_json(changed=changed, result=result, **result)
- else:
- module.exit_json(changed=changed, result=result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/s3_bucket_notification.py b/lib/ansible/modules/cloud/amazon/s3_bucket_notification.py
deleted file mode 100644
index 1955892b9f..0000000000
--- a/lib/ansible/modules/cloud/amazon/s3_bucket_notification.py
+++ /dev/null
@@ -1,265 +0,0 @@
-#!/usr/bin/python
-# (c) 2019, XLAB d.o.o <www.xlab.si>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: s3_bucket_notification
-short_description: Creates, updates or deletes S3 Bucket notification for lambda
-description:
- - This module allows the management of AWS Lambda function bucket event mappings via the
- Ansible framework. Use module M(lambda) to manage the lambda function itself, M(lambda_alias)
- to manage function aliases and M(lambda_policy) to modify lambda permissions.
-notes:
- - This module heavily depends on M(lambda_policy) as you need to allow C(lambda:InvokeFunction)
- permission for your lambda function.
-version_added: "2.9"
-
-author:
- - XLAB d.o.o. (@xlab-si)
- - Aljaz Kosir (@aljazkosir)
- - Miha Plesko (@miha-plesko)
-options:
- event_name:
- description:
- - Unique name for event notification on bucket.
- required: true
- type: str
- lambda_function_arn:
- description:
- - The ARN of the lambda function.
- aliases: ['function_arn']
- type: str
- bucket_name:
- description:
- - S3 bucket name.
- required: true
- type: str
- state:
- description:
- - Describes the desired state.
- default: "present"
- choices: ["present", "absent"]
- type: str
- lambda_alias:
- description:
- - Name of the Lambda function alias.
- - Mutually exclusive with I(lambda_version).
- type: str
- lambda_version:
- description:
- - Version of the Lambda function.
- - Mutually exclusive with I(lambda_alias).
- type: int
- events:
- description:
- - Events that you want to be triggering notifications. You can select multiple events to send
- to the same destination, you can set up different events to send to different destinations,
- and you can set up a prefix or suffix for an event. However, for each bucket,
- individual events cannot have multiple configurations with overlapping prefixes or
- suffixes that could match the same object key.
- - Required when I(state=present).
- choices: ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
- 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
- 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
- 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
- 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject']
- type: list
- elements: str
- prefix:
- description:
- - Optional prefix to limit the notifications to objects with keys that start with matching
- characters.
- type: str
- suffix:
- description:
- - Optional suffix to limit the notifications to objects with keys that end with matching
- characters.
- type: str
-requirements:
- - boto3
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
----
-# Example that creates a lambda event notification for a bucket
-- hosts: localhost
- gather_facts: no
- tasks:
- - name: Process jpg image
- s3_bucket_notification:
- state: present
- event_name: on_file_add_or_remove
- bucket_name: test-bucket
- lambda_function_arn: arn:aws:lambda:us-east-2:526810320200:function:test-lambda
- events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
- prefix: images/
- suffix: .jpg
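-
- # A hedged companion sketch (not in the original docs): per the notes above,
- # the function must allow s3.amazonaws.com to call lambda:InvokeFunction,
- # which lambda_policy can grant; the names and ARNs reuse the placeholder
- # values from the task above.
- - name: Allow the bucket to invoke the function
- lambda_policy:
- state: present
- function_name: test-lambda
- statement_id: allow-s3-invoke
- action: lambda:InvokeFunction
- principal: s3.amazonaws.com
- source_arn: arn:aws:s3:::test-bucket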
-'''
-
-RETURN = '''
-notification_configuration:
- description: list of currently applied notifications
- returned: success
- type: list
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import ClientError, BotoCoreError
-except ImportError:
- pass # will be protected by AnsibleAWSModule
-
-
-class AmazonBucket:
- def __init__(self, client, bucket_name):
- self.client = client
- self.bucket_name = bucket_name
- self._full_config_cache = None
-
- def full_config(self):
- if self._full_config_cache is None:
- self._full_config_cache = [Config.from_api(cfg) for cfg in
- self.client.get_bucket_notification_configuration(
- Bucket=self.bucket_name).get(
- 'LambdaFunctionConfigurations', list())]
- return self._full_config_cache
-
- def current_config(self, config_name):
- for config in self.full_config():
- if config.raw['Id'] == config_name:
- return config
-
- def apply_config(self, desired):
- configs = [cfg.raw for cfg in self.full_config() if cfg.name != desired.raw['Id']]
- configs.append(desired.raw)
- self._upload_bucket_config(configs)
- return configs
-
- def delete_config(self, desired):
- configs = [cfg.raw for cfg in self.full_config() if cfg.name != desired.raw['Id']]
- self._upload_bucket_config(configs)
- return configs
-
- def _upload_bucket_config(self, config):
- self.client.put_bucket_notification_configuration(
- Bucket=self.bucket_name,
- NotificationConfiguration={
- 'LambdaFunctionConfigurations': config
- })
-
-
-class Config:
- def __init__(self, content):
- self._content = content
- self.name = content['Id']
-
- @property
- def raw(self):
- return self._content
-
- def __eq__(self, other):
- if other:
- return self.raw == other.raw
- return False
-
- @classmethod
- def from_params(cls, **params):
- function_arn = params['lambda_function_arn']
-
- qualifier = None
- if params['lambda_version'] > 0:
- qualifier = str(params['lambda_version'])
- elif params['lambda_alias']:
- qualifier = str(params['lambda_alias'])
- if qualifier:
- params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
-
- return cls({
- 'Id': params['event_name'],
- 'LambdaFunctionArn': params['lambda_function_arn'],
- 'Events': sorted(params['events']),
- 'Filter': {
- 'Key': {
- 'FilterRules': [{
- 'Name': 'Prefix',
- 'Value': params['prefix']
- }, {
- 'Name': 'Suffix',
- 'Value': params['suffix']
- }]
- }
- }
- })
-
- @classmethod
- def from_api(cls, config):
- return cls(config)
-
-
-def main():
- event_types = ['s3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
- 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
- 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
- 's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
- 's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject']
- argument_spec = dict(
- state=dict(default='present', choices=['present', 'absent']),
- event_name=dict(required=True),
- lambda_function_arn=dict(aliases=['function_arn']),
- bucket_name=dict(required=True),
- events=dict(type='list', default=[], choices=event_types),
- prefix=dict(default=''),
- suffix=dict(default=''),
- lambda_alias=dict(),
- lambda_version=dict(type='int', default=0),
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[['lambda_alias', 'lambda_version']],
- required_if=[['state', 'present', ['events']]]
- )
-
- bucket = AmazonBucket(module.client('s3'), module.params['bucket_name'])
- current = bucket.current_config(module.params['event_name'])
- desired = Config.from_params(**module.params)
- notification_configuration = [cfg.raw for cfg in bucket.full_config()]
-
- state = module.params['state']
- try:
- if (state == 'present' and current == desired) or (state == 'absent' and not current):
- changed = False
- elif module.check_mode:
- changed = True
- elif state == 'present':
- changed = True
- notification_configuration = bucket.apply_config(desired)
- elif state == 'absent':
- changed = True
- notification_configuration = bucket.delete_config(desired)
- except (ClientError, BotoCoreError) as e:
- module.fail_json(msg='{0}'.format(e))
-
- module.exit_json(**dict(changed=changed,
- notification_configuration=[camel_dict_to_snake_dict(cfg) for cfg in
- notification_configuration]))
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/s3_lifecycle.py b/lib/ansible/modules/cloud/amazon/s3_lifecycle.py
deleted file mode 100644
index 73f89c95e9..0000000000
--- a/lib/ansible/modules/cloud/amazon/s3_lifecycle.py
+++ /dev/null
@@ -1,520 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: s3_lifecycle
-short_description: Manage s3 bucket lifecycle rules in AWS
-description:
- - Manage s3 bucket lifecycle rules in AWS
-version_added: "2.0"
-author: "Rob White (@wimnat)"
-notes:
- - If specifying expiration time as days, then transition time must also be specified in days.
- - If specifying expiration time as a date, then transition time must also be specified as a date.
-requirements:
- - python-dateutil
-options:
- name:
- description:
- - "Name of the s3 bucket"
- required: true
- type: str
- expiration_date:
- description:
- - >
- Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must
- be midnight and a GMT timezone must be specified.
- type: str
- expiration_days:
- description:
- - "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer."
- type: int
- prefix:
- description:
- - "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket."
- type: str
- purge_transitions:
- description:
- - >
- "Whether to replace all the current transition(s) with the new transition(s). When false, the provided transition(s)
- will be added, replacing transitions with the same storage_class. When true, existing transitions will be removed and
- replaced with the new transition(s)
- default: true
- type: bool
- version_added: 2.6
- noncurrent_version_expiration_days:
- description:
- - 'Delete noncurrent versions this many days after they become noncurrent'
- required: false
- version_added: 2.6
- type: int
- noncurrent_version_storage_class:
- description:
- - 'Transition noncurrent versions to this storage class'
- default: glacier
- choices: ['glacier', 'onezone_ia', 'standard_ia']
- required: false
- version_added: 2.6
- type: str
- noncurrent_version_transition_days:
- description:
- - 'Transition noncurrent versions this many days after they become noncurrent'
- required: false
- version_added: 2.6
- type: int
- noncurrent_version_transitions:
- description:
- - >
- A list of transition behaviors to be applied to noncurrent versions for the rule. Each storage class may be used only once. Each transition
- behavior contains these elements:
- I(transition_days)
- I(storage_class)
- version_added: 2.6
- type: list
- rule_id:
- description:
- - "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
- type: str
- state:
- description:
- - "Create or remove the lifecycle rule"
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- status:
- description:
- - "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied."
- default: enabled
- choices: [ 'enabled', 'disabled' ]
- type: str
- storage_class:
- description:
- - "The storage class to transition to. Currently there are two supported values - 'glacier', 'onezone_ia', or 'standard_ia'."
- - "The 'standard_ia' class is only being available from Ansible version 2.2."
- default: glacier
- choices: [ 'glacier', 'onezone_ia', 'standard_ia']
- type: str
- transition_date:
- description:
- - >
- Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class.
- The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified,
- this parameter is required."
- type: str
- transition_days:
- description:
- - "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
- type: int
- transitions:
- description:
- - A list of transition behaviors to be applied to the rule. Each storage class may be used only once. Each transition
- behavior may contain these elements:
- I(transition_days)
- I(transition_date)
- I(storage_class)
- version_added: 2.6
- type: list
- requester_pays:
- description:
- - The I(requester_pays) option does nothing and will be removed in Ansible 2.14.
- type: bool
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
-- s3_lifecycle:
- name: mybucket
- expiration_days: 30
- prefix: logs/
- status: enabled
- state: present
-
-# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
-- s3_lifecycle:
- name: mybucket
- transition_days: 7
- expiration_days: 90
- prefix: logs/
- status: enabled
- state: present
-
-# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 30 Dec 2020 and then delete on 30 Dec 2030.
-# Note that midnight GMT must be specified.
-# Be sure to quote your date strings
-- s3_lifecycle:
- name: mybucket
- transition_date: "2020-12-30T00:00:00.000Z"
- expiration_date: "2030-12-30T00:00:00.000Z"
- prefix: logs/
- status: enabled
- state: present
-
-# Disable the rule created above
-- s3_lifecycle:
- name: mybucket
- prefix: logs/
- status: disabled
- state: present
-
-# Delete the lifecycle rule created above
-- s3_lifecycle:
- name: mybucket
- prefix: logs/
- state: absent
-
-# Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
-- s3_lifecycle:
- name: mybucket
- prefix: backups/
- storage_class: standard_ia
- transition_days: 31
- state: present
- status: enabled
-
-# Configure a lifecycle rule to transition files to infrequent access after 30 days and glacier after 90
-- s3_lifecycle:
- name: mybucket
- prefix: logs/
- state: present
- status: enabled
- transitions:
- - transition_days: 30
- storage_class: standard_ia
- - transition_days: 90
- storage_class: glacier
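-
-# A hedged extra sketch (not in the original docs): expiring noncurrent object
-# versions after 60 days, assuming versioning is already enabled on the bucket
-# (enabling versioning is outside this module's scope).
-- s3_lifecycle:
- name: mybucket
- prefix: logs/
- noncurrent_version_expiration_days: 60
- state: present
- status: enabled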
-'''
-
-from copy import deepcopy
-import datetime
-
-try:
- import dateutil.parser
- HAS_DATEUTIL = True
-except ImportError:
- HAS_DATEUTIL = False
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # handled by AnsibleAwsModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-
-
-def create_lifecycle_rule(client, module):
-
- name = module.params.get("name")
- expiration_date = module.params.get("expiration_date")
- expiration_days = module.params.get("expiration_days")
- noncurrent_version_expiration_days = module.params.get("noncurrent_version_expiration_days")
- noncurrent_version_transition_days = module.params.get("noncurrent_version_transition_days")
- noncurrent_version_transitions = module.params.get("noncurrent_version_transitions")
- noncurrent_version_storage_class = module.params.get("noncurrent_version_storage_class")
- prefix = module.params.get("prefix") or ""
- rule_id = module.params.get("rule_id")
- status = module.params.get("status")
- storage_class = module.params.get("storage_class")
- transition_date = module.params.get("transition_date")
- transition_days = module.params.get("transition_days")
- transitions = module.params.get("transitions")
- purge_transitions = module.params.get("purge_transitions")
- changed = False
-
- # Get the bucket's current lifecycle rules
- try:
- current_lifecycle = client.get_bucket_lifecycle_configuration(Bucket=name)
- current_lifecycle_rules = current_lifecycle['Rules']
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration':
- current_lifecycle_rules = []
- else:
- module.fail_json_aws(e)
- except BotoCoreError as e:
- module.fail_json_aws(e)
-
- rule = dict(Filter=dict(Prefix=prefix), Status=status.title())
- if rule_id is not None:
- rule['ID'] = rule_id
- # Create expiration
- if expiration_days is not None:
- rule['Expiration'] = dict(Days=expiration_days)
- elif expiration_date is not None:
- rule['Expiration'] = dict(Date=expiration_date)
-
- if noncurrent_version_expiration_days is not None:
- rule['NoncurrentVersionExpiration'] = dict(NoncurrentDays=noncurrent_version_expiration_days)
-
- if transition_days is not None:
- rule['Transitions'] = [dict(Days=transition_days, StorageClass=storage_class.upper()), ]
-
- elif transition_date is not None:
- rule['Transitions'] = [dict(Date=transition_date, StorageClass=storage_class.upper()), ]
-
- if transitions is not None:
- if not rule.get('Transitions'):
- rule['Transitions'] = []
- for transition in transitions:
- t_out = dict()
- if transition.get('transition_date'):
- t_out['Date'] = transition['transition_date']
- elif transition.get('transition_days'):
- t_out['Days'] = transition['transition_days']
- if transition.get('storage_class'):
- t_out['StorageClass'] = transition['storage_class'].upper()
- rule['Transitions'].append(t_out)
-
- if noncurrent_version_transition_days is not None:
- rule['NoncurrentVersionTransitions'] = [dict(NoncurrentDays=noncurrent_version_transition_days,
- StorageClass=noncurrent_version_storage_class.upper()), ]
-
- if noncurrent_version_transitions is not None:
- if not rule.get('NoncurrentVersionTransitions'):
- rule['NoncurrentVersionTransitions'] = []
- for noncurrent_version_transition in noncurrent_version_transitions:
- t_out = dict()
- t_out['NoncurrentDays'] = noncurrent_version_transition['transition_days']
- if noncurrent_version_transition.get('storage_class'):
- t_out['StorageClass'] = noncurrent_version_transition['storage_class'].upper()
- rule['NoncurrentVersionTransitions'].append(t_out)
-
- lifecycle_configuration = dict(Rules=[])
- appended = False
- # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule
- if current_lifecycle_rules:
- # If rule ID exists, use that for comparison otherwise compare based on prefix
- for existing_rule in current_lifecycle_rules:
- if rule.get('ID') == existing_rule.get('ID') and rule['Filter']['Prefix'] != existing_rule.get('Filter', {}).get('Prefix', ''):
- existing_rule.pop('ID')
- elif rule_id is None and rule['Filter']['Prefix'] == existing_rule.get('Filter', {}).get('Prefix', ''):
- existing_rule.pop('ID')
- if rule.get('ID') == existing_rule.get('ID'):
- changed_, appended_ = update_or_append_rule(rule, existing_rule, purge_transitions, lifecycle_configuration)
- changed = changed_ or changed
- appended = appended_ or appended
- else:
- lifecycle_configuration['Rules'].append(existing_rule)
-
- # If nothing appended then append now as the rule must not exist
- if not appended:
- lifecycle_configuration['Rules'].append(rule)
- changed = True
- else:
- lifecycle_configuration['Rules'].append(rule)
- changed = True
-
- # Write lifecycle to bucket
- try:
- client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_configuration)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e)
-
- module.exit_json(changed=changed)
-
-
-def update_or_append_rule(new_rule, existing_rule, purge_transitions, lifecycle_obj):
- changed = False
- if existing_rule['Status'] != new_rule['Status']:
- if not new_rule.get('Transitions') and existing_rule.get('Transitions'):
- new_rule['Transitions'] = existing_rule['Transitions']
- if not new_rule.get('Expiration') and existing_rule.get('Expiration'):
- new_rule['Expiration'] = existing_rule['Expiration']
- if not new_rule.get('NoncurrentVersionExpiration') and existing_rule.get('NoncurrentVersionExpiration'):
- new_rule['NoncurrentVersionExpiration'] = existing_rule['NoncurrentVersionExpiration']
- lifecycle_obj['Rules'].append(new_rule)
- changed = True
- appended = True
- else:
- if not purge_transitions:
- merge_transitions(new_rule, existing_rule)
- if compare_rule(new_rule, existing_rule, purge_transitions):
- lifecycle_obj['Rules'].append(new_rule)
- appended = True
- else:
- lifecycle_obj['Rules'].append(new_rule)
- changed = True
- appended = True
- return changed, appended
-
-
-def compare_rule(rule_a, rule_b, purge_transitions):
-
- # Copy objects
- rule1 = deepcopy(rule_a)
- rule2 = deepcopy(rule_b)
-
- if purge_transitions:
- return rule1 == rule2
- else:
- transitions1 = rule1.pop('Transitions', [])
- transitions2 = rule2.pop('Transitions', [])
- noncurrent_transtions1 = rule1.pop('NoncurrentVersionTransitions', [])
- noncurrent_transtions2 = rule2.pop('NoncurrentVersionTransitions', [])
- if rule1 != rule2:
- return False
- for transition in transitions1:
- if transition not in transitions2:
- return False
- for transition in noncurrent_transtions1:
- if transition not in noncurrent_transtions2:
- return False
- return True
-
-
-def merge_transitions(updated_rule, updating_rule):
- # Because of the legal S3 transitions, we know only one can exist for each storage class.
- # So our strategy is to build dicts keyed on storage class, and add the transitions that
- # appear only in updating_rule to updated_rule.
- updated_transitions = {}
- updating_transitions = {}
- for transition in updated_rule.get('Transitions', []):
- updated_transitions[transition['StorageClass']] = transition
- for transition in updating_rule.get('Transitions', []):
- updating_transitions[transition['StorageClass']] = transition
- for storage_class, transition in updating_transitions.items():
- if updated_transitions.get(storage_class) is None:
- updated_rule['Transitions'].append(transition)
-
-
-def destroy_lifecycle_rule(client, module):
-
- name = module.params.get("name")
- prefix = module.params.get("prefix")
- rule_id = module.params.get("rule_id")
- changed = False
-
- if prefix is None:
- prefix = ""
-
- # Get the bucket's current lifecycle rules
- try:
- current_lifecycle_rules = client.get_bucket_lifecycle_configuration(Bucket=name)['Rules']
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchLifecycleConfiguration':
- current_lifecycle_rules = []
- else:
- module.fail_json_aws(e)
- except BotoCoreError as e:
- module.fail_json_aws(e)
-
- # Create lifecycle
- lifecycle_obj = dict(Rules=[])
-
- # Check if rule exists
- # If an ID exists, use that otherwise compare based on prefix
- if rule_id is not None:
- for existing_rule in current_lifecycle_rules:
- if rule_id == existing_rule['ID']:
- # We're not keeping the rule (i.e. deleting) so mark as changed
- changed = True
- else:
- lifecycle_obj['Rules'].append(existing_rule)
- else:
- for existing_rule in current_lifecycle_rules:
- if prefix == existing_rule['Filter']['Prefix']:
- # We're not keeping the rule (i.e. deleting) so mark as changed
- changed = True
- else:
- lifecycle_obj['Rules'].append(existing_rule)
-
- # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration
- try:
- if lifecycle_obj['Rules']:
- client.put_bucket_lifecycle_configuration(Bucket=name, LifecycleConfiguration=lifecycle_obj)
- elif current_lifecycle_rules:
- changed = True
- client.delete_bucket_lifecycle(Bucket=name)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e)
- module.exit_json(changed=changed)
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True, type='str'),
- expiration_days=dict(type='int'),
- expiration_date=dict(),
- noncurrent_version_expiration_days=dict(type='int'),
- noncurrent_version_storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']),
- noncurrent_version_transition_days=dict(type='int'),
- noncurrent_version_transitions=dict(type='list'),
- prefix=dict(),
- requester_pays=dict(type='bool', removed_in_version='2.14'),
- rule_id=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- status=dict(default='enabled', choices=['enabled', 'disabled']),
- storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']),
- transition_days=dict(type='int'),
- transition_date=dict(),
- transitions=dict(type='list'),
- purge_transitions=dict(default='yes', type='bool')
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- mutually_exclusive=[
- ['expiration_days', 'expiration_date'],
- ['expiration_days', 'transition_date'],
- ['transition_days', 'transition_date'],
- ['transition_days', 'expiration_date'],
- ['transition_days', 'transitions'],
- ['transition_date', 'transitions'],
- ['noncurrent_version_transition_days', 'noncurrent_version_transitions'],
- ],)
-
- if not HAS_DATEUTIL:
- module.fail_json(msg='dateutil required for this module')
-
- client = module.client('s3')
-
- expiration_date = module.params.get("expiration_date")
- transition_date = module.params.get("transition_date")
- state = module.params.get("state")
-
- if state == 'present' and module.params["status"] == "enabled": # allow deleting/disabling a rule by id/prefix
-
- required_when_present = ('expiration_date', 'expiration_days', 'transition_date',
- 'transition_days', 'transitions', 'noncurrent_version_expiration_days',
- 'noncurrent_version_transition_days',
- 'noncurrent_version_transitions')
- for param in required_when_present:
- if module.params.get(param):
- break
- else:
- msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present)
- module.fail_json(msg=msg)
- # If expiration_date set, check string is valid
- if expiration_date is not None:
- try:
- datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z")
- except ValueError as e:
- module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
-
- if transition_date is not None:
- try:
- datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z")
- except ValueError as e:
- module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")
-
- if state == 'present':
- create_lifecycle_rule(client, module)
- elif state == 'absent':
- destroy_lifecycle_rule(client, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/s3_logging.py b/lib/ansible/modules/cloud/amazon/s3_logging.py
deleted file mode 100644
index d58a58f40c..0000000000
--- a/lib/ansible/modules/cloud/amazon/s3_logging.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: s3_logging
-short_description: Manage logging facility of an s3 bucket in AWS
-description:
- - Manage logging facility of an s3 bucket in AWS
-version_added: "2.0"
-author: Rob White (@wimnat)
-options:
- name:
- description:
- - "Name of the s3 bucket."
- required: true
- type: str
- state:
- description:
- - "Enable or disable logging."
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- target_bucket:
- description:
- - "The bucket to log to. Required when state=present."
- type: str
- target_prefix:
- description:
- - "The prefix that should be prepended to the generated log files written to the target_bucket."
- default: ""
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
- s3_logging:
- name: mywebsite.com
- target_bucket: mylogs
- target_prefix: logs/mywebsite.com
- state: present
-
-- name: Remove logging on an s3 bucket
- s3_logging:
- name: mywebsite.com
- state: absent
-
-'''
-
-try:
- import boto.ec2
- from boto.s3.connection import OrdinaryCallingFormat, Location
- from boto.exception import S3ResponseError
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
-
-
-def compare_bucket_logging(bucket, target_bucket, target_prefix):
-
- bucket_log_obj = bucket.get_logging_status()
- if bucket_log_obj.target != target_bucket or bucket_log_obj.prefix != target_prefix:
- return False
- else:
- return True
-
-
-def enable_bucket_logging(connection, module):
-
- bucket_name = module.params.get("name")
- target_bucket = module.params.get("target_bucket")
- target_prefix = module.params.get("target_prefix")
- changed = False
-
- try:
- bucket = connection.get_bucket(bucket_name)
- except S3ResponseError as e:
- module.fail_json(msg=e.message)
-
- try:
- if not compare_bucket_logging(bucket, target_bucket, target_prefix):
- # Before we can enable logging we must give the log-delivery group WRITE and READ_ACP permissions to the target bucket
- try:
- target_bucket_obj = connection.get_bucket(target_bucket)
- except S3ResponseError as e:
- if e.status == 301:
- module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged")
- else:
- module.fail_json(msg=e.message)
- target_bucket_obj.set_as_logging_target()
-
- bucket.enable_logging(target_bucket, target_prefix)
- changed = True
-
- except S3ResponseError as e:
- module.fail_json(msg=e.message)
-
- module.exit_json(changed=changed)
-
-
-def disable_bucket_logging(connection, module):
-
- bucket_name = module.params.get("name")
- changed = False
-
- try:
- bucket = connection.get_bucket(bucket_name)
- if not compare_bucket_logging(bucket, None, None):
- bucket.disable_logging()
- changed = True
- except S3ResponseError as e:
- module.fail_json(msg=e.message)
-
- module.exit_json(changed=changed)
-
-
-def main():
-
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- name=dict(required=True),
- target_bucket=dict(required=False, default=None),
- target_prefix=dict(required=False, default=""),
- state=dict(required=False, default='present', choices=['present', 'absent'])
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module)
-
- if region in ('us-east-1', '', None):
- # S3ism for the US Standard region
- location = Location.DEFAULT
- else:
- # Boto uses symbolic names for locations but region strings will
- # actually work fine for everything except us-east-1 (US Standard)
- location = region
- try:
- connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
- # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
- if connection is None:
- connection = boto.connect_s3(**aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
- module.fail_json(msg=str(e))
-
- state = module.params.get("state")
-
- if state == 'present':
- enable_bucket_logging(connection, module)
- elif state == 'absent':
- disable_bucket_logging(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/s3_sync.py b/lib/ansible/modules/cloud/amazon/s3_sync.py
deleted file mode 100644
index faf2617397..0000000000
--- a/lib/ansible/modules/cloud/amazon/s3_sync.py
+++ /dev/null
@@ -1,567 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: s3_sync
-short_description: Efficiently upload multiple files to S3
-description:
- - The S3 module is great, but it is very slow for a large volume of files; even a dozen will be noticeable. In addition to speed, this module handles globbing,
- inclusions/exclusions, mime types, expiration mapping, recursion, cache control and smart directory mapping.
-version_added: "2.3"
-options:
- mode:
- description:
- - sync direction.
- default: 'push'
- choices: [ 'push' ]
- type: str
- file_change_strategy:
- description:
- - Difference determination method to allow changes-only syncing. Unlike rsync, files are not patched; they are fully skipped or fully uploaded.
- - date_size will upload if file sizes don't match or if local file modified date is newer than s3's version
- - checksum will compare etag values based on s3's implementation of chunked md5s.
- - force will always upload all files.
- required: false
- default: 'date_size'
- choices: [ 'force', 'checksum', 'date_size' ]
- type: str
- bucket:
- description:
- - Bucket name.
- required: true
- type: str
- key_prefix:
- description:
- - In addition to the file path, prepend the S3 path with this prefix. The module will add a slash at the end of the prefix if necessary.
- required: false
- type: str
- file_root:
- description:
- - File/directory path for synchronization. This is a local path.
- - This root path is scrubbed from the key name, so subdirectories will remain as keys.
- required: true
- type: path
- permission:
- description:
- - Canned ACL to apply to synced files.
- - Changing this ACL only changes newly synced files, it does not trigger a full reupload.
- required: false
- choices:
- - 'private'
- - 'public-read'
- - 'public-read-write'
- - 'authenticated-read'
- - 'aws-exec-read'
- - 'bucket-owner-read'
- - 'bucket-owner-full-control'
- type: str
- mime_map:
- description:
- - >
- Dict entry from extension to MIME type. This will override any default/sniffed MIME type.
- For example C({".txt": "application/text", ".yml": "application/text"})
- required: false
- type: dict
- include:
- description:
- - Shell pattern-style file matching.
- - Used before exclude to determine eligible files (for instance, only "*.gif")
- - For multiple patterns, comma-separate them.
- required: false
- default: "*"
- type: str
- exclude:
- description:
- - Shell pattern-style file matching.
- - Used after include to remove files (for instance, skip "*.txt")
- - For multiple patterns, comma-separate them.
- required: false
- default: ".*"
- type: str
- cache_control:
- description:
- - Cache-Control header set on uploaded objects.
- - Directives are separated by commas.
- required: false
- version_added: "2.4"
- type: str
- delete:
- description:
- - Remove remote files that exist in bucket but are not present in the file root.
- required: false
- default: no
- version_added: "2.4"
- type: bool
- retries:
- description:
- - The I(retries) option does nothing and will be removed in Ansible 2.14.
- type: str
-
-requirements:
- - boto3 >= 1.4.4
- - botocore
- - python-dateutil
-
-author: Ted Timmons (@tedder)
-extends_documentation_fragment:
-- aws
-- ec2
-'''
-
-EXAMPLES = '''
-- name: basic upload
- s3_sync:
- bucket: tedder
- file_root: roles/s3/files/
-
-- name: all the options
- s3_sync:
- bucket: tedder
- file_root: roles/s3/files
- mime_map:
- .yml: application/text
- .json: application/text
- key_prefix: config_files/web
- file_change_strategy: force
- permission: public-read
- cache_control: "public, max-age=31536000"
- include: "*"
- exclude: "*.txt,.*"
-'''
-
-RETURN = '''
-filelist_initial:
- description: file listing (dicts) from initial globbing
- returned: always
- type: list
- sample: [{
- "bytes": 151,
- "chopped_path": "policy.json",
- "fullpath": "roles/cf/files/policy.json",
- "modified_epoch": 1477416706
- }]
-filelist_local_etag:
- description: file listing (dicts) including calculated local etag
- returned: always
- type: list
- sample: [{
- "bytes": 151,
- "chopped_path": "policy.json",
- "fullpath": "roles/cf/files/policy.json",
- "mime_type": "application/json",
- "modified_epoch": 1477416706,
- "s3_path": "s3sync/policy.json"
- }]
-filelist_s3:
- description: file listing (dicts) including information about previously-uploaded versions
- returned: always
- type: list
- sample: [{
- "bytes": 151,
- "chopped_path": "policy.json",
- "fullpath": "roles/cf/files/policy.json",
- "mime_type": "application/json",
- "modified_epoch": 1477416706,
- "s3_path": "s3sync/policy.json"
- }]
-filelist_typed:
- description: file listing (dicts) with calculated or overridden mime types
- returned: always
- type: list
- sample: [{
- "bytes": 151,
- "chopped_path": "policy.json",
- "fullpath": "roles/cf/files/policy.json",
- "mime_type": "application/json",
- "modified_epoch": 1477416706
- }]
-filelist_actionable:
- description: file listing (dicts) of files that will be uploaded after the strategy decision
- returned: always
- type: list
- sample: [{
- "bytes": 151,
- "chopped_path": "policy.json",
- "fullpath": "roles/cf/files/policy.json",
- "mime_type": "application/json",
- "modified_epoch": 1477931256,
- "s3_path": "s3sync/policy.json",
- "whysize": "151 / 151",
- "whytime": "1477931256 / 1477929260"
- }]
-uploads:
- description: file listing (dicts) of files that were actually uploaded
- returned: always
- type: list
- sample: [{
- "bytes": 151,
- "chopped_path": "policy.json",
- "fullpath": "roles/cf/files/policy.json",
- "s3_path": "s3sync/policy.json",
- "whysize": "151 / 151",
- "whytime": "1477931637 / 1477931489"
- }]
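-removed:
-  description: list of object keys deleted from the bucket because no matching local file exists; only relevant when I(delete) is enabled
-  returned: when I(delete) is true
-  type: list
-  sample: ["s3sync/stale.json"]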
-
-'''
-
-import datetime
-import fnmatch
-import hashlib
-import mimetypes
-import os
-import stat as osstat # os.stat constants
-import traceback
-
-# import module snippets
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn, get_aws_connection_info, HAS_BOTO3, boto_exception
-from ansible.module_utils._text import to_text
-
-try:
- from dateutil import tz
- HAS_DATEUTIL = True
-except ImportError:
- HAS_DATEUTIL = False
-
-try:
- import botocore
-except ImportError:
- # Handled by imported HAS_BOTO3
- pass
-
-
-# the following function, calculate_multipart_etag, is from tlastowka
-# on github and is used under its (compatible) GPL license. So this
-# license applies to the following function.
-# source: https://github.com/tlastowka/calculate_multipart_etag/blob/master/calculate_multipart_etag.py
-#
-# calculate_multipart_etag Copyright (C) 2015
-# Tony Lastowka <tlastowka at gmail dot com>
-# https://github.com/tlastowka
-#
-#
-# calculate_multipart_etag is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# calculate_multipart_etag is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with calculate_multipart_etag. If not, see <http://www.gnu.org/licenses/>.
-
-DEFAULT_CHUNK_SIZE = 5 * 1024 * 1024
-
-
-def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE):
- """
- calculates a multipart upload etag for amazon s3
-
- Arguments:
-
- source_path -- The file to calculate the etag for
- chunk_size -- The chunk size to calculate for.
- """
-
- md5s = []
-
- with open(source_path, 'rb') as fp:
- while True:
-
- data = fp.read(chunk_size)
-
- if not data:
- break
- md5s.append(hashlib.md5(data))
-
- if len(md5s) == 1:
- new_etag = '"{0}"'.format(md5s[0].hexdigest())
- else: # > 1
- digests = b"".join(m.digest() for m in md5s)
-
- new_md5 = hashlib.md5(digests)
- new_etag = '"{0}-{1}"'.format(new_md5.hexdigest(), len(md5s))
-
- return new_etag
-
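-# A minimal usage sketch (editorial, hypothetical path): for a 12 MiB file and
-# the default 5 MiB chunk size, three per-chunk MD5 digests are concatenated
-# and re-hashed:
-#
-#     calculate_multipart_etag('/tmp/12mib.bin')  # -> '"<md5hex>-3"'
-#
-# which matches the etag S3 reports for a multipart upload with 5 MiB parts; a
-# file no larger than one chunk gets the plain quoted MD5 instead.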
-
-def gather_files(fileroot, include=None, exclude=None):
- ret = []
- for (dirpath, dirnames, filenames) in os.walk(fileroot):
- for fn in filenames:
- fullpath = os.path.join(dirpath, fn)
- # include/exclude
- if include:
- found = False
- for x in include.split(','):
- if fnmatch.fnmatch(fn, x):
- found = True
- if not found:
- # not on the include list, so we don't want it.
- continue
-
- if exclude:
- found = False
- for x in exclude.split(','):
- if fnmatch.fnmatch(fn, x):
- found = True
- if found:
- # skip it, even if previously included.
- continue
-
- chopped_path = os.path.relpath(fullpath, start=fileroot)
- fstat = os.stat(fullpath)
- f_size = fstat[osstat.ST_SIZE]
- f_modified_epoch = fstat[osstat.ST_MTIME]
- ret.append({
- 'fullpath': fullpath,
- 'chopped_path': chopped_path,
- 'modified_epoch': f_modified_epoch,
- 'bytes': f_size,
- })
- # dirpath = path *to* the directory
- # dirnames = subdirs *in* our directory
-        # filenames = the files *in* our directory
- return ret
-
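-# A hypothetical walk-through of the include/exclude semantics above: patterns
-# match the bare filename, not the full path. With the defaults (include='*',
-# exclude='.*'), 'logo.gif' is kept and '.hidden' is dropped; with
-# include='*.gif,*.png' and exclude='*.tmp', 'banner.png' is kept while
-# 'notes.txt' never passes the include list in the first place.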
-
-def calculate_s3_path(filelist, key_prefix=''):
- ret = []
- for fileentry in filelist:
- # don't modify the input dict
- retentry = fileentry.copy()
- retentry['s3_path'] = os.path.join(key_prefix, fileentry['chopped_path'])
- ret.append(retentry)
- return ret
-
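-# For example (hypothetical values): key_prefix='config_files/web' plus
-# chopped_path='css/site.css' yields s3_path='config_files/web/css/site.css';
-# with the default empty prefix, os.path.join() returns the chopped path
-# unchanged, so local relative paths map one-to-one onto keys.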
-
-def calculate_local_etag(filelist, key_prefix=''):
- '''Really, "calculate md5", but since AWS uses their own format, we'll just call
- it a "local etag". TODO optimization: only calculate if remote key exists.'''
- ret = []
- for fileentry in filelist:
- # don't modify the input dict
- retentry = fileentry.copy()
- retentry['local_etag'] = calculate_multipart_etag(fileentry['fullpath'])
- ret.append(retentry)
- return ret
-
-
-def determine_mimetypes(filelist, override_map):
- ret = []
- for fileentry in filelist:
- retentry = fileentry.copy()
- localfile = fileentry['fullpath']
-
- # reminder: file extension is '.txt', not 'txt'.
- file_extension = os.path.splitext(localfile)[1]
- if override_map and override_map.get(file_extension):
- # override? use it.
- retentry['mime_type'] = override_map[file_extension]
- else:
- # else sniff it
- retentry['mime_type'], retentry['encoding'] = mimetypes.guess_type(localfile, strict=False)
-
- # might be None or '' from one of the above. Not a great type but better than nothing.
- if not retentry['mime_type']:
- retentry['mime_type'] = 'application/octet-stream'
-
- ret.append(retentry)
-
- return ret
-
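-# For example (hypothetical files): with override_map={'.yml': 'application/text'},
-# 'site.yml' gets mime_type 'application/text'; 'data.json' falls through to
-# mimetypes.guess_type() and becomes 'application/json'; a file with an unknown
-# extension ends up as the 'application/octet-stream' fallback.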
-
-def head_s3(s3, bucket, s3keys):
- retkeys = []
- for entry in s3keys:
- retentry = entry.copy()
- # don't modify the input dict
- try:
- retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path'])
- except botocore.exceptions.ClientError as err:
- if (hasattr(err, 'response') and
- 'ResponseMetadata' in err.response and
- 'HTTPStatusCode' in err.response['ResponseMetadata'] and
- str(err.response['ResponseMetadata']['HTTPStatusCode']) == '404'):
- pass
- else:
- raise Exception(err)
- # error_msg = boto_exception(err)
- # return {'error': error_msg}
- retkeys.append(retentry)
- return retkeys
-
-
-def filter_list(s3, bucket, s3filelist, strategy):
- keeplist = list(s3filelist)
-
- for e in keeplist:
- e['_strategy'] = strategy
-
- # init/fetch info from S3 if we're going to use it for comparisons
-    if strategy != 'force':
- keeplist = head_s3(s3, bucket, s3filelist)
-
- # now actually run the strategies
- if strategy == 'checksum':
- for entry in keeplist:
- if entry.get('s3_head'):
- # since we have a remote s3 object, compare the values.
- if entry['s3_head']['ETag'] == entry['local_etag']:
- # files match, so remove the entry
- entry['skip_flag'] = True
- else:
- # file etags don't match, keep the entry.
- pass
- else: # we don't have an etag, so we'll keep it.
- pass
- elif strategy == 'date_size':
- for entry in keeplist:
- if entry.get('s3_head'):
- # fstat = entry['stat']
- local_modified_epoch = entry['modified_epoch']
- local_size = entry['bytes']
-
- # py2's datetime doesn't have a timestamp() field, so we have to revert to something more awkward.
- # remote_modified_epoch = entry['s3_head']['LastModified'].timestamp()
- remote_modified_datetime = entry['s3_head']['LastModified']
- delta = (remote_modified_datetime - datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc()))
- remote_modified_epoch = delta.seconds + (delta.days * 86400)
-
- remote_size = entry['s3_head']['ContentLength']
-
- entry['whytime'] = '{0} / {1}'.format(local_modified_epoch, remote_modified_epoch)
- entry['whysize'] = '{0} / {1}'.format(local_size, remote_size)
-
- if local_modified_epoch <= remote_modified_epoch and local_size == remote_size:
- entry['skip_flag'] = True
- else:
- entry['why'] = "no s3_head"
-        # else: probably 'force'. Basically we don't skip any files with other strategies.
- else:
- pass
-
- # prune 'please skip' entries, if any.
- return [x for x in keeplist if not x.get('skip_flag')]
-
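-# A worked 'date_size' example using the sample numbers from RETURN above: a
-# local file of 151 bytes modified at epoch 1477931256 versus a remote object
-# of 151 bytes last modified at 1477929260 is kept for upload - the sizes
-# match but the local mtime is newer - and whysize/whytime record both sides
-# of that comparison.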
-
-def upload_files(s3, bucket, filelist, params):
- ret = []
- for entry in filelist:
- args = {
- 'ContentType': entry['mime_type']
- }
- if params.get('permission'):
- args['ACL'] = params['permission']
- if params.get('cache_control'):
- args['CacheControl'] = params['cache_control']
- # if this fails exception is caught in main()
- s3.upload_file(entry['fullpath'], bucket, entry['s3_path'], ExtraArgs=args, Callback=None, Config=None)
- ret.append(entry)
- return ret
-
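-# The ExtraArgs built above map directly onto S3 PutObject parameters. For
-# example (hypothetical values): permission='public-read' and
-# cache_control='public, max-age=300' produce
-# ExtraArgs={'ContentType': ..., 'ACL': 'public-read',
-# 'CacheControl': 'public, max-age=300'} on each boto3 upload_file() call.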
-
-def remove_files(s3, sourcelist, params):
- bucket = params.get('bucket')
- key_prefix = params.get('key_prefix')
- paginator = s3.get_paginator('list_objects_v2')
- current_keys = set(x['Key'] for x in paginator.paginate(Bucket=bucket, Prefix=key_prefix).build_full_result().get('Contents', []))
- keep_keys = set(to_text(source_file['s3_path']) for source_file in sourcelist)
- delete_keys = list(current_keys - keep_keys)
-
- # can delete 1000 objects at a time
- groups_of_keys = [delete_keys[i:i + 1000] for i in range(0, len(delete_keys), 1000)]
- for keys in groups_of_keys:
- s3.delete_objects(Bucket=bucket, Delete={'Objects': [{'Key': key} for key in keys]})
-
- return delete_keys
-
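-# Batching example: 2500 keys to delete are split into groups of 1000, 1000
-# and 500, since S3's DeleteObjects API accepts at most 1000 keys per request.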
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- mode=dict(choices=['push'], default='push'),
- file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'),
- bucket=dict(required=True),
- key_prefix=dict(required=False, default=''),
- file_root=dict(required=True, type='path'),
- permission=dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read',
- 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']),
- retries=dict(required=False, removed_in_version='2.14'),
- mime_map=dict(required=False, type='dict'),
- exclude=dict(required=False, default=".*"),
- include=dict(required=False, default="*"),
- cache_control=dict(required=False, default=''),
- delete=dict(required=False, type='bool', default=False),
- # future options: encoding, metadata, storage_class, retries
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- )
-
- if not HAS_DATEUTIL:
- module.fail_json(msg='dateutil required for this module')
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- result = {}
- mode = module.params['mode']
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- if not region:
- module.fail_json(msg="Region must be specified")
- s3 = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_kwargs)
-
- if mode == 'push':
- try:
- result['filelist_initial'] = gather_files(module.params['file_root'], exclude=module.params['exclude'], include=module.params['include'])
- result['filelist_typed'] = determine_mimetypes(result['filelist_initial'], module.params.get('mime_map'))
- result['filelist_s3'] = calculate_s3_path(result['filelist_typed'], module.params['key_prefix'])
- result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3'])
- result['filelist_actionable'] = filter_list(s3, module.params['bucket'], result['filelist_local_etag'], module.params['file_change_strategy'])
- result['uploads'] = upload_files(s3, module.params['bucket'], result['filelist_actionable'], module.params)
-
- if module.params['delete']:
- result['removed'] = remove_files(s3, result['filelist_local_etag'], module.params)
-
- # mark changed if we actually upload something.
- if result.get('uploads') or result.get('removed'):
- result['changed'] = True
- # result.update(filelist=actionable_filelist)
- except botocore.exceptions.ClientError as err:
- error_msg = boto_exception(err)
- module.fail_json(msg=error_msg, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/s3_website.py b/lib/ansible/modules/cloud/amazon/s3_website.py
deleted file mode 100644
index f8e76b0475..0000000000
--- a/lib/ansible/modules/cloud/amazon/s3_website.py
+++ /dev/null
@@ -1,335 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: s3_website
-short_description: Configure an s3 bucket as a website
-description:
- - Configure an s3 bucket as a website
-version_added: "2.2"
-requirements: [ boto3 ]
-author: Rob White (@wimnat)
-options:
- name:
- description:
- - "Name of the s3 bucket"
- required: true
- type: str
- error_key:
- description:
- - "The object key name to use when a 4XX class error occurs. To remove an error key, set to None."
- type: str
- redirect_all_requests:
- description:
- - "Describes the redirect behavior for every request to this s3 bucket website endpoint"
- type: str
- state:
- description:
- - "Add or remove s3 website configuration"
- choices: [ 'present', 'absent' ]
- required: true
- type: str
- suffix:
- description:
- - >
- Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to
- samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash
- character.
- default: index.html
- type: str
-
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Configure an s3 bucket to redirect all requests to example.com
-- s3_website:
- name: mybucket.com
- redirect_all_requests: example.com
- state: present
-
-# Remove website configuration from an s3 bucket
-- s3_website:
- name: mybucket.com
- state: absent
-
-# Configure an s3 bucket as a website with index and error pages
-- s3_website:
- name: mybucket.com
- suffix: home.htm
- error_key: errors/404.htm
- state: present
-
-'''
-
-RETURN = '''
-index_document:
- description: index document
- type: complex
- returned: always
- contains:
- suffix:
- description: suffix that is appended to a request that is for a directory on the website endpoint
- returned: success
- type: str
- sample: index.html
-error_document:
- description: error document
- type: complex
- returned: always
- contains:
- key:
- description: object key name to use when a 4XX class error occurs
- returned: when error_document parameter set
- type: str
- sample: error.html
-redirect_all_requests_to:
- description: where to redirect requests
- type: complex
- returned: always
- contains:
- host_name:
- description: name of the host where requests will be redirected.
- returned: when redirect all requests parameter set
- type: str
- sample: ansible.com
- protocol:
- description: protocol to use when redirecting requests.
- returned: when redirect all requests parameter set
- type: str
- sample: https
-routing_rules:
- description: routing rules
- type: list
- returned: always
- contains:
- condition:
- type: complex
- description: A container for describing a condition that must be met for the specified redirect to apply.
- contains:
- http_error_code_returned_equals:
- description: The HTTP error code when the redirect is applied.
- returned: always
- type: str
- key_prefix_equals:
- description: object key name prefix when the redirect is applied. For example, to redirect
- requests for ExamplePage.html, the key prefix will be ExamplePage.html
- returned: when routing rule present
- type: str
- sample: docs/
- redirect:
- type: complex
- description: Container for redirect information.
- returned: always
- contains:
- host_name:
- description: name of the host where requests will be redirected.
- returned: when host name set as part of redirect rule
- type: str
- sample: ansible.com
- http_redirect_code:
- description: The HTTP redirect code to use on the response.
- returned: when routing rule present
- type: str
- protocol:
- description: Protocol to use when redirecting requests.
- returned: when routing rule present
- type: str
- sample: http
- replace_key_prefix_with:
- description: object key prefix to use in the redirect request
- returned: when routing rule present
- type: str
- sample: documents/
- replace_key_with:
-          description: object key to use in the redirect request
- returned: when routing rule present
- type: str
- sample: documents/
-'''
-
-import time
-
-try:
- import boto3
- from botocore.exceptions import ClientError, ParamValidationError
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
- get_aws_connection_info)
-
-
-def _create_redirect_dict(url):
-
- redirect_dict = {}
- url_split = url.split(':')
-
- # Did we split anything?
- if len(url_split) == 2:
- redirect_dict[u'Protocol'] = url_split[0]
- redirect_dict[u'HostName'] = url_split[1].replace('//', '')
- elif len(url_split) == 1:
- redirect_dict[u'HostName'] = url_split[0]
- else:
- raise ValueError('Redirect URL appears invalid')
-
- return redirect_dict
-
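-# For example (hypothetical values): 'https://example.com' splits into
-# {'Protocol': 'https', 'HostName': 'example.com'}, a bare 'example.com'
-# becomes {'HostName': 'example.com'}, and a value with two colons such as
-# 'https://example.com:8080' raises ValueError - ports are not supported here.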
-
-def _create_website_configuration(suffix, error_key, redirect_all_requests):
-
- website_configuration = {}
-
- if error_key is not None:
- website_configuration['ErrorDocument'] = {'Key': error_key}
-
- if suffix is not None:
- website_configuration['IndexDocument'] = {'Suffix': suffix}
-
- if redirect_all_requests is not None:
- website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
-
- return website_configuration
-
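-# For example (hypothetical values): suffix='index.html' with
-# error_key='errors/404.htm' and no redirect produces
-# {'IndexDocument': {'Suffix': 'index.html'}, 'ErrorDocument': {'Key': 'errors/404.htm'}},
-# which is the shape BucketWebsite.put(WebsiteConfiguration=...) expects.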
-
-def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
-
- bucket_name = module.params.get("name")
- redirect_all_requests = module.params.get("redirect_all_requests")
-    # If redirect_all_requests is set, don't send the defaulted suffix along with it
- if redirect_all_requests is not None:
- suffix = None
- else:
- suffix = module.params.get("suffix")
- error_key = module.params.get("error_key")
- changed = False
-
- try:
- bucket_website = resource_connection.BucketWebsite(bucket_name)
- except ClientError as e:
- module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
-
- try:
- website_config = client_connection.get_bucket_website(Bucket=bucket_name)
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
- website_config = None
- else:
- module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
-
- if website_config is None:
- try:
- bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
- changed = True
- except (ClientError, ParamValidationError) as e:
- module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
- except ValueError as e:
- module.fail_json(msg=str(e))
- else:
- try:
- if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \
- (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \
- (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)):
-
- try:
- bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
- changed = True
- except (ClientError, ParamValidationError) as e:
- module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
- except KeyError as e:
- try:
- bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
- changed = True
- except (ClientError, ParamValidationError) as e:
- module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
- except ValueError as e:
- module.fail_json(msg=str(e))
-
- # Wait 5 secs before getting the website_config again to give it time to update
- time.sleep(5)
-
- website_config = client_connection.get_bucket_website(Bucket=bucket_name)
- module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config))
-
-
-def disable_bucket_as_website(client_connection, module):
-
- changed = False
- bucket_name = module.params.get("name")
-
- try:
- client_connection.get_bucket_website(Bucket=bucket_name)
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
- module.exit_json(changed=changed)
- else:
- module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
-
- try:
- client_connection.delete_bucket_website(Bucket=bucket_name)
- changed = True
- except ClientError as e:
- module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
-
- module.exit_json(changed=changed)
-
-
-def main():
-
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- name=dict(type='str', required=True),
- state=dict(type='str', required=True, choices=['present', 'absent']),
- suffix=dict(type='str', required=False, default='index.html'),
- error_key=dict(type='str', required=False),
- redirect_all_requests=dict(type='str', required=False)
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- mutually_exclusive=[
- ['redirect_all_requests', 'suffix'],
- ['redirect_all_requests', 'error_key']
- ])
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 required for this module')
-
- region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
-
- if region:
- client_connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
- resource_connection = boto3_conn(module, conn_type='resource', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
- else:
- module.fail_json(msg="region must be specified")
-
- state = module.params.get("state")
-
- if state == 'present':
- enable_or_update_bucket_as_website(client_connection, resource_connection, module)
- elif state == 'absent':
- disable_bucket_as_website(client_connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/sns.py b/lib/ansible/modules/cloud/amazon/sns.py
deleted file mode 100644
index 91ff68b37e..0000000000
--- a/lib/ansible/modules/cloud/amazon/sns.py
+++ /dev/null
@@ -1,237 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2014, Michael J. Schultz <mjschultz@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
-module: sns
-short_description: Send Amazon Simple Notification Service messages
-description:
- - Sends a notification to a topic on your Amazon SNS account.
-version_added: 1.6
-author:
- - Michael J. Schultz (@mjschultz)
- - Paul Arthur (@flowerysong)
-options:
- msg:
- description:
- - Default message for subscriptions without a more specific message.
- required: true
- aliases: [ "default" ]
- type: str
- subject:
- description:
- - Message subject
- type: str
- topic:
- description:
- - The name or ARN of the topic to publish to.
- required: true
- type: str
- email:
- description:
- - Message to send to email subscriptions.
- type: str
- email_json:
- description:
- - Message to send to email-json subscriptions.
- version_added: '2.8'
- type: str
- sqs:
- description:
- - Message to send to SQS subscriptions.
- type: str
- sms:
- description:
- - Message to send to SMS subscriptions.
- type: str
- http:
- description:
- - Message to send to HTTP subscriptions.
- type: str
- https:
- description:
- - Message to send to HTTPS subscriptions.
- type: str
- application:
- description:
- - Message to send to application subscriptions.
- version_added: '2.8'
- type: str
- lambda:
- description:
- - Message to send to Lambda subscriptions.
- version_added: '2.8'
- type: str
- message_attributes:
- description:
- - Dictionary of message attributes. These are optional structured data entries to be sent along to the endpoint.
- - This is in AWS's distinct Name/Type/Value format; see example below.
- type: dict
- message_structure:
- description:
- - The payload format to use for the message.
- - This must be 'json' to support protocol-specific messages (C(http), C(https), C(email), C(sms), C(sqs)).
- - It must be 'string' to support I(message_attributes).
- default: json
- choices: ['json', 'string']
- type: str
-extends_documentation_fragment:
- - ec2
- - aws
-requirements:
- - boto3
- - botocore
-"""
-
-EXAMPLES = """
-- name: Send default notification message via SNS
- sns:
- msg: '{{ inventory_hostname }} has completed the play.'
- subject: Deploy complete!
- topic: deploy
- delegate_to: localhost
-
-- name: Send notification messages via SNS with short message for SMS
- sns:
- msg: '{{ inventory_hostname }} has completed the play.'
- sms: deployed!
- subject: Deploy complete!
- topic: deploy
- delegate_to: localhost
-
-- name: Send message with message_attributes
- sns:
- topic: "deploy"
- msg: "message with extra details!"
- message_attributes:
- channel:
- data_type: String
- string_value: "mychannel"
- color:
- data_type: String
- string_value: "green"
- delegate_to: localhost
-"""
-
-RETURN = """
-msg:
- description: Human-readable diagnostic information
- returned: always
- type: str
- sample: OK
-message_id:
- description: The message ID of the submitted message
- returned: when success
- type: str
- sample: 2f681ef0-6d76-5c94-99b2-4ae3996ce57b
-"""
-
-import json
-import traceback
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # Handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-
-
-def arn_topic_lookup(module, client, short_topic):
- lookup_topic = ':{0}'.format(short_topic)
-
- try:
- paginator = client.get_paginator('list_topics')
- topic_iterator = paginator.paginate()
- for response in topic_iterator:
- for topic in response['Topics']:
- if topic['TopicArn'].endswith(lookup_topic):
- return topic['TopicArn']
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to look up topic ARN')
-
- return None
-
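-# For example (hypothetical account): a short name 'deploy' is matched by ARN
-# suffix, so 'arn:aws:sns:us-east-1:123456789012:deploy' is returned; the
-# leading ':' in the lookup prevents a topic like 'blue-deploy' from matching.
-# None means no topic in the account ends with ':deploy'.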
-
-def main():
- protocols = [
- 'http',
- 'https',
- 'email',
- 'email_json',
- 'sms',
- 'sqs',
- 'application',
- 'lambda',
- ]
-
- argument_spec = dict(
- msg=dict(required=True, aliases=['default']),
- subject=dict(),
- topic=dict(required=True),
- message_attributes=dict(type='dict'),
- message_structure=dict(choices=['json', 'string'], default='json'),
- )
-
- for p in protocols:
- argument_spec[p] = dict()
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
-
-    sns_kwargs = dict(
-        Message=module.params['msg'],
-        MessageStructure=module.params['message_structure'],
-    )
-
-    # botocore's parameter validation rejects None, so only pass Subject
-    # through to publish() when one was actually supplied.
-    if module.params['subject']:
-        sns_kwargs['Subject'] = module.params['subject']
-
- if module.params['message_attributes']:
- if module.params['message_structure'] != 'string':
- module.fail_json(msg='message_attributes is only supported when the message_structure is "string".')
- sns_kwargs['MessageAttributes'] = module.params['message_attributes']
-
- dict_msg = {
- 'default': sns_kwargs['Message']
- }
-
- for p in protocols:
- if module.params[p]:
- if sns_kwargs['MessageStructure'] != 'json':
- module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".')
- dict_msg[p.replace('_', '-')] = module.params[p]
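-            # e.g. the 'email_json' Ansible option becomes the 'email-json'
-            # key that SNS expects inside a json-structured message body.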
-
- client = module.client('sns')
-
- topic = module.params['topic']
- if ':' in topic:
- # Short names can't contain ':' so we'll assume this is the full ARN
- sns_kwargs['TopicArn'] = topic
- else:
- sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic)
-
- if not sns_kwargs['TopicArn']:
- module.fail_json(msg='Could not find topic: {0}'.format(topic))
-
- if sns_kwargs['MessageStructure'] == 'json':
- sns_kwargs['Message'] = json.dumps(dict_msg)
-
- try:
- result = client.publish(**sns_kwargs)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg='Failed to publish message')
-
- module.exit_json(msg='OK', message_id=result['MessageId'])
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/sns_topic.py b/lib/ansible/modules/cloud/amazon/sns_topic.py
deleted file mode 100644
index a247dfdd3f..0000000000
--- a/lib/ansible/modules/cloud/amazon/sns_topic.py
+++ /dev/null
@@ -1,529 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
-module: sns_topic
-short_description: Manages AWS SNS topics and subscriptions
-description:
- - The M(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
-  - As of 2.6, this module can be used to subscribe to and unsubscribe from topics outside of your AWS account.
-version_added: 2.0
-author:
- - "Joel Thompson (@joelthompson)"
- - "Fernando Jose Pando (@nand0p)"
- - "Will Thames (@willthames)"
-options:
- name:
- description:
- - The name or ARN of the SNS topic to manage.
- required: true
- type: str
- state:
- description:
- - Whether to create or destroy an SNS topic.
- default: present
- choices: ["absent", "present"]
- type: str
- display_name:
- description:
- - Display name of the topic.
- type: str
- policy:
- description:
- - Policy to apply to the SNS topic.
- type: dict
- delivery_policy:
- description:
- - Delivery policy to apply to the SNS topic.
- type: dict
- subscriptions:
- description:
- - List of subscriptions to apply to the topic. Note that AWS requires
- subscriptions to be confirmed, so you will need to confirm any new
- subscriptions.
- suboptions:
- endpoint:
- description: Endpoint of subscription.
- required: true
- protocol:
- description: Protocol of subscription.
- required: true
- type: list
- elements: dict
- default: []
- purge_subscriptions:
- description:
- - "Whether to purge any subscriptions not listed here. NOTE: AWS does not
- allow you to purge any PendingConfirmation subscriptions, so if any
- exist and would be purged, they are silently skipped. This means that
- somebody could come back later and confirm the subscription. Sorry.
- Blame Amazon."
- default: true
- type: bool
-extends_documentation_fragment:
- - aws
- - ec2
-requirements: [ "boto3", "botocore" ]
-"""
-
-EXAMPLES = """
-
-- name: Create alarm SNS topic
- sns_topic:
- name: "alarms"
- state: present
- display_name: "alarm SNS topic"
- delivery_policy:
- http:
- defaultHealthyRetryPolicy:
- minDelayTarget: 2
- maxDelayTarget: 4
- numRetries: 3
- numMaxDelayRetries: 5
- backoffFunction: "<linear|arithmetic|geometric|exponential>"
- disableSubscriptionOverrides: True
- defaultThrottlePolicy:
- maxReceivesPerSecond: 10
- subscriptions:
- - endpoint: "my_email_address@example.com"
- protocol: "email"
- - endpoint: "my_mobile_number"
- protocol: "sms"
-
-"""
-
-RETURN = '''
-sns_arn:
- description: The ARN of the topic you are modifying
- type: str
- returned: always
- sample: "arn:aws:sns:us-east-2:111111111111:my_topic_name"
-sns_topic:
- description: Dict of sns topic details
- type: complex
- returned: always
- contains:
- attributes_set:
- description: list of attributes set during this run
- returned: always
- type: list
- sample: []
- check_mode:
- description: whether check mode was on
- returned: always
- type: bool
- sample: false
- delivery_policy:
- description: Delivery policy for the SNS topic
- returned: when topic is owned by this AWS account
- type: str
- sample: >
- {"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0,
- "numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}}
- display_name:
- description: Display name for SNS topic
- returned: when topic is owned by this AWS account
- type: str
- sample: My topic name
- name:
- description: Topic name
- returned: always
- type: str
- sample: ansible-test-dummy-topic
- owner:
- description: AWS account that owns the topic
- returned: when topic is owned by this AWS account
- type: str
- sample: '111111111111'
- policy:
- description: Policy for the SNS topic
- returned: when topic is owned by this AWS account
- type: str
- sample: >
- {"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111111111:root"},
- "Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]}
- state:
- description: whether the topic is present or absent
- returned: always
- type: str
- sample: present
- subscriptions:
- description: List of subscribers to the topic in this AWS account
- returned: always
- type: list
- sample: []
- subscriptions_added:
- description: List of subscribers added in this run
- returned: always
- type: list
- sample: []
- subscriptions_confirmed:
- description: Count of confirmed subscriptions
- returned: when topic is owned by this AWS account
- type: str
- sample: '0'
- subscriptions_deleted:
- description: Count of deleted subscriptions
- returned: when topic is owned by this AWS account
- type: str
- sample: '0'
- subscriptions_existing:
- description: List of existing subscriptions
- returned: always
- type: list
- sample: []
- subscriptions_new:
- description: List of new subscriptions
- returned: always
- type: list
- sample: []
- subscriptions_pending:
- description: Count of pending subscriptions
- returned: when topic is owned by this AWS account
- type: str
- sample: '0'
- subscriptions_purge:
- description: Whether or not purge_subscriptions was set
- returned: always
- type: bool
- sample: true
- topic_arn:
- description: ARN of the SNS topic (equivalent to sns_arn)
- returned: when topic is owned by this AWS account
- type: str
- sample: arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic
- topic_created:
- description: Whether the topic was created
- returned: always
- type: bool
- sample: false
- topic_deleted:
- description: Whether the topic was deleted
- returned: always
- type: bool
- sample: false
-'''
-
-import json
-import re
-import copy
-
-try:
- import botocore
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import compare_policies, AWSRetry, camel_dict_to_snake_dict
-
-
-class SnsTopicManager(object):
- """ Handles SNS Topic creation and destruction """
-
- def __init__(self,
- module,
- name,
- state,
- display_name,
- policy,
- delivery_policy,
- subscriptions,
- purge_subscriptions,
- check_mode):
-
- self.connection = module.client('sns')
- self.module = module
- self.name = name
- self.state = state
- self.display_name = display_name
- self.policy = policy
- self.delivery_policy = delivery_policy
- self.subscriptions = subscriptions
- self.subscriptions_existing = []
- self.subscriptions_deleted = []
- self.subscriptions_added = []
- self.purge_subscriptions = purge_subscriptions
- self.check_mode = check_mode
- self.topic_created = False
- self.topic_deleted = False
- self.topic_arn = None
- self.attributes_set = []
-
- @AWSRetry.jittered_backoff()
- def _list_topics_with_backoff(self):
- paginator = self.connection.get_paginator('list_topics')
- return paginator.paginate().build_full_result()['Topics']
-
- @AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
- def _list_topic_subscriptions_with_backoff(self):
- paginator = self.connection.get_paginator('list_subscriptions_by_topic')
- return paginator.paginate(TopicArn=self.topic_arn).build_full_result()['Subscriptions']
-
- @AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
- def _list_subscriptions_with_backoff(self):
- paginator = self.connection.get_paginator('list_subscriptions')
- return paginator.paginate().build_full_result()['Subscriptions']
-
- def _list_topics(self):
- try:
- topics = self._list_topics_with_backoff()
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't get topic list")
- return [t['TopicArn'] for t in topics]
-
- def _topic_arn_lookup(self):
- # topic names cannot have colons, so this captures the full topic name
- all_topics = self._list_topics()
- lookup_topic = ':%s' % self.name
- for topic in all_topics:
- if topic.endswith(lookup_topic):
- return topic
-
-    def _create_topic(self):
-        if not self.check_mode:
-            try:
-                response = self.connection.create_topic(Name=self.name)
-            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-                self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name)
-            self.topic_arn = response['TopicArn']
-        # record the creation so get_info()'s topic_created return value is accurate
-        self.topic_created = True
-        return True
-
- def _compare_delivery_policies(self, policy_a, policy_b):
- _policy_a = copy.deepcopy(policy_a)
- _policy_b = copy.deepcopy(policy_b)
- # AWS automatically injects disableSubscriptionOverrides if you set an
- # http policy
- if 'http' in policy_a:
- if 'disableSubscriptionOverrides' not in policy_a['http']:
- _policy_a['http']['disableSubscriptionOverrides'] = False
- if 'http' in policy_b:
- if 'disableSubscriptionOverrides' not in policy_b['http']:
- _policy_b['http']['disableSubscriptionOverrides'] = False
- comparison = (_policy_a != _policy_b)
- return comparison
-
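-    # (Editorial note: despite its name, the method above returns True when
-    # the two policies differ after normalising the disableSubscriptionOverrides
-    # flag that AWS injects automatically.)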
- def _set_topic_attrs(self):
- changed = False
- try:
- topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn)
-
- if self.display_name and self.display_name != topic_attributes['DisplayName']:
- changed = True
- self.attributes_set.append('display_name')
- if not self.check_mode:
- try:
- self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DisplayName',
- AttributeValue=self.display_name)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't set display name")
-
- if self.policy and compare_policies(self.policy, json.loads(topic_attributes['Policy'])):
- changed = True
- self.attributes_set.append('policy')
- if not self.check_mode:
- try:
- self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='Policy',
- AttributeValue=json.dumps(self.policy))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't set topic policy")
-
- if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
- self._compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))):
- changed = True
- self.attributes_set.append('delivery_policy')
- if not self.check_mode:
- try:
- self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DeliveryPolicy',
- AttributeValue=json.dumps(self.delivery_policy))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy")
- return changed
-
- def _canonicalize_endpoint(self, protocol, endpoint):
- if protocol == 'sms':
- return re.sub('[^0-9]*', '', endpoint)
- return endpoint
-
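-    # For example (hypothetical number): an 'sms' endpoint of '+1 (555) 010-0000'
-    # is canonicalised to '15550100000', so it compares equal to the digits-only
-    # form AWS returns for an existing SMS subscription.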
- def _set_topic_subs(self):
- changed = False
- subscriptions_existing_list = set()
- desired_subscriptions = [(sub['protocol'],
- self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
- self.subscriptions]
-
- for sub in self._list_topic_subscriptions():
- sub_key = (sub['Protocol'], sub['Endpoint'])
- subscriptions_existing_list.add(sub_key)
- if (self.purge_subscriptions and sub_key not in desired_subscriptions and
- sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')):
- changed = True
- self.subscriptions_deleted.append(sub_key)
- if not self.check_mode:
- try:
- self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
-
- for protocol, endpoint in set(desired_subscriptions).difference(subscriptions_existing_list):
- changed = True
- self.subscriptions_added.append((protocol, endpoint))
- if not self.check_mode:
- try:
- self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn)
- return changed
-
- def _list_topic_subscriptions(self):
- try:
- return self._list_topic_subscriptions_with_backoff()
- except is_boto3_error_code('AuthorizationError'):
- try:
- # potentially AuthorizationError when listing subscriptions for third party topic
- return [sub for sub in self._list_subscriptions_with_backoff()
- if sub['TopicArn'] == self.topic_arn]
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
-
- def _delete_subscriptions(self):
-        # NOTE: subscriptions in 'PendingConfirmation' time out after 3 days
- # https://forums.aws.amazon.com/thread.jspa?threadID=85993
- subscriptions = self._list_topic_subscriptions()
- if not subscriptions:
- return False
- for sub in subscriptions:
- if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'):
- self.subscriptions_deleted.append(sub['SubscriptionArn'])
- if not self.check_mode:
- try:
- self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
- return True
-
- def _delete_topic(self):
- self.topic_deleted = True
- if not self.check_mode:
- try:
- self.connection.delete_topic(TopicArn=self.topic_arn)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- self.module.fail_json_aws(e, msg="Couldn't delete topic %s" % self.topic_arn)
- return True
-
- def _name_is_arn(self):
- return self.name.startswith('arn:')
-
- def ensure_ok(self):
- changed = False
- if self._name_is_arn():
- self.topic_arn = self.name
- else:
- self.topic_arn = self._topic_arn_lookup()
- if not self.topic_arn:
- changed = self._create_topic()
- if self.topic_arn in self._list_topics():
- changed |= self._set_topic_attrs()
- elif self.display_name or self.policy or self.delivery_policy:
- self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account")
- changed |= self._set_topic_subs()
- return changed
-
- def ensure_gone(self):
- changed = False
- if self._name_is_arn():
- self.topic_arn = self.name
- else:
- self.topic_arn = self._topic_arn_lookup()
- if self.topic_arn:
- if self.topic_arn not in self._list_topics():
- self.module.fail_json(msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe")
- changed = self._delete_subscriptions()
- changed |= self._delete_topic()
- return changed
-
- def get_info(self):
- info = {
- 'name': self.name,
- 'state': self.state,
- 'subscriptions_new': self.subscriptions,
- 'subscriptions_existing': self.subscriptions_existing,
- 'subscriptions_deleted': self.subscriptions_deleted,
- 'subscriptions_added': self.subscriptions_added,
- 'subscriptions_purge': self.purge_subscriptions,
- 'check_mode': self.check_mode,
- 'topic_created': self.topic_created,
- 'topic_deleted': self.topic_deleted,
- 'attributes_set': self.attributes_set,
- }
- if self.state != 'absent':
- if self.topic_arn in self._list_topics():
- info.update(camel_dict_to_snake_dict(self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']))
- info['delivery_policy'] = info.pop('effective_delivery_policy')
- info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in self._list_topic_subscriptions()]
-
- return info
-
-
-def main():
- argument_spec = dict(
- name=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- display_name=dict(),
- policy=dict(type='dict'),
- delivery_policy=dict(type='dict'),
- subscriptions=dict(default=[], type='list'),
- purge_subscriptions=dict(type='bool', default=True),
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec,
- supports_check_mode=True)
-
- name = module.params.get('name')
- state = module.params.get('state')
- display_name = module.params.get('display_name')
- policy = module.params.get('policy')
- delivery_policy = module.params.get('delivery_policy')
- subscriptions = module.params.get('subscriptions')
- purge_subscriptions = module.params.get('purge_subscriptions')
- check_mode = module.check_mode
-
- sns_topic = SnsTopicManager(module,
- name,
- state,
- display_name,
- policy,
- delivery_policy,
- subscriptions,
- purge_subscriptions,
- check_mode)
-
- if state == 'present':
- changed = sns_topic.ensure_ok()
-
- elif state == 'absent':
- changed = sns_topic.ensure_gone()
-
- sns_facts = dict(changed=changed,
- sns_arn=sns_topic.topic_arn,
- sns_topic=sns_topic.get_info())
-
- module.exit_json(**sns_facts)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/sqs_queue.py b/lib/ansible/modules/cloud/amazon/sqs_queue.py
deleted file mode 100644
index 6e8b680190..0000000000
--- a/lib/ansible/modules/cloud/amazon/sqs_queue.py
+++ /dev/null
@@ -1,481 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: sqs_queue
-short_description: Creates or deletes AWS SQS queues.
-description:
- - Create or delete AWS SQS queues.
- - Update attributes on existing queues.
-version_added: "2.0"
-author:
- - Alan Loi (@loia)
- - Fernando Jose Pando (@nand0p)
- - Nadir Lloret (@nadirollo)
- - Dennis Podkovyrin (@sbj-ss)
-requirements:
- - boto3
-options:
- state:
- description:
- - Create or delete the queue.
- choices: ['present', 'absent']
- default: 'present'
- type: str
- name:
- description:
- - Name of the queue.
- required: true
- type: str
- queue_type:
- description:
- - Standard or FIFO queue.
- - I(queue_type) can only be set at queue creation and will otherwise be
- ignored.
- choices: ['standard', 'fifo']
- default: 'standard'
- version_added: "2.10"
- type: str
- visibility_timeout:
- description:
- - The default visibility timeout in seconds.
- aliases: [default_visibility_timeout]
- type: int
- message_retention_period:
- description:
- - The message retention period in seconds.
- type: int
- maximum_message_size:
- description:
- - The maximum message size in bytes.
- type: int
- delay_seconds:
- description:
- - The delivery delay in seconds.
- aliases: [delivery_delay]
- type: int
- receive_message_wait_time_seconds:
- description:
- - The receive message wait time in seconds.
- aliases: [receive_message_wait_time]
- type: int
- policy:
- description:
- - The JSON dict policy to attach to queue.
- version_added: "2.1"
- type: dict
- redrive_policy:
- description:
- - JSON dict with the redrive_policy (see example).
- version_added: "2.2"
- type: dict
- kms_master_key_id:
- description:
- - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK.
- version_added: "2.10"
- type: str
- kms_data_key_reuse_period_seconds:
- description:
- - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.
- aliases: [kms_data_key_reuse_period]
- version_added: "2.10"
- type: int
- content_based_deduplication:
- type: bool
- description: Enables content-based deduplication. Used for FIFOs only.
- version_added: "2.10"
- default: false
- tags:
- description:
- - Tag dict to apply to the queue (requires botocore 1.5.40 or above).
- - To remove all tags set I(tags={}) and I(purge_tags=true).
- version_added: "2.10"
- type: dict
- purge_tags:
- description:
- - Remove tags not listed in I(tags).
- type: bool
- default: false
- version_added: "2.10"
-extends_documentation_fragment:
- - aws
- - ec2
-"""
-
-RETURN = '''
-content_based_deduplication:
- description: Enables content-based deduplication. Used for FIFOs only.
- type: bool
- returned: always
- sample: True
-visibility_timeout:
- description: The default visibility timeout in seconds.
- type: int
- returned: always
- sample: 30
-delay_seconds:
- description: The delivery delay in seconds.
- type: int
- returned: always
- sample: 0
-kms_master_key_id:
- description: The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK.
- type: str
- returned: always
- sample: alias/MyAlias
-kms_data_key_reuse_period_seconds:
- description: The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.
- type: int
- returned: always
- sample: 300
-maximum_message_size:
- description: The maximum message size in bytes.
- type: int
- returned: always
- sample: 262144
-message_retention_period:
- description: The message retention period in seconds.
- type: int
- returned: always
- sample: 345600
-name:
- description: Name of the SQS Queue
- type: str
- returned: always
- sample: "queuename-987d2de0"
-queue_arn:
- description: The queue's Amazon resource name (ARN).
- type: str
- returned: on success
- sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0'
-queue_url:
- description: URL to access the queue
- type: str
- returned: on success
- sample: 'https://queue.amazonaws.com/123456789012/MyQueue'
-receive_message_wait_time_seconds:
- description: The receive message wait time in seconds.
- type: int
- returned: always
- sample: 0
-region:
- description: Region that the queue was created within
- type: str
- returned: always
- sample: 'us-east-1'
-tags:
- description: List of queue tags
- type: dict
- returned: always
- sample: '{"Env": "prod"}'
-'''
-
-EXAMPLES = '''
-# Create SQS queue with redrive policy
-- sqs_queue:
- name: my-queue
- region: ap-southeast-2
- default_visibility_timeout: 120
- message_retention_period: 86400
- maximum_message_size: 1024
- delivery_delay: 30
- receive_message_wait_time: 20
- policy: "{{ json_dict }}"
- redrive_policy:
- maxReceiveCount: 5
- deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue
-
-# Drop redrive policy
-- sqs_queue:
- name: my-queue
- region: ap-southeast-2
- redrive_policy: {}
-
-# Create FIFO queue
-- sqs_queue:
- name: fifo-queue
- region: ap-southeast-2
- queue_type: fifo
- content_based_deduplication: yes
-
-# Tag queue
-- sqs_queue:
- name: fifo-queue
- region: ap-southeast-2
- tags:
- example: SomeValue
-
-# Configure Encryption, automatically uses a new data key every hour
-- sqs_queue:
- name: fifo-queue
- region: ap-southeast-2
- kms_master_key_id: alias/MyQueueKey
- kms_data_key_reuse_period_seconds: 3600
-
-# Delete SQS queue
-- sqs_queue:
- name: my-queue
- region: ap-southeast-2
- state: absent
-'''
-
-import json
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags, snake_dict_to_camel_dict, compare_policies
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError, ParamValidationError
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-
-def get_queue_name(module, is_fifo=False):
- name = module.params.get('name')
- if not is_fifo or name.endswith('.fifo'):
- return name
- return name + '.fifo'
-
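-# For example: name='my-queue' with queue_type='fifo' becomes 'my-queue.fifo',
-# a name already ending in '.fifo' is passed through unchanged, and standard
-# queues always keep their name as-is.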
-
-# NonExistentQueue is explicitly expected when a queue doesn't exist
-@AWSRetry.jittered_backoff()
-def get_queue_url(client, name):
- try:
- return client.get_queue_url(QueueName=name)['QueueUrl']
- except ClientError as e:
- if e.response['Error']['Code'] == 'AWS.SimpleQueueService.NonExistentQueue':
- return None
- raise
-
-
-def describe_queue(client, queue_url):
- """
-    Describe a queue's attributes, converted to snake_case keys
- """
- attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
- description = dict(attributes)
- description.pop('Policy', None)
- description.pop('RedrivePolicy', None)
- description = camel_dict_to_snake_dict(description)
- description['policy'] = attributes.get('Policy', None)
- description['redrive_policy'] = attributes.get('RedrivePolicy', None)
-
- # Boto3 returns everything as a string, convert them back to integers/dicts if
- # that's what we expected.
- for key, value in description.items():
- if value is None:
- continue
-
- if key in ['policy', 'redrive_policy']:
- policy = json.loads(value)
- description[key] = policy
- continue
-
-        if key == 'content_based_deduplication':
-            # AWS returns the strings 'true'/'false'; bool() on any non-empty
-            # string is always True, so compare the text instead.
-            description[key] = (str(value).lower() == 'true')
-
- try:
- if value == str(int(value)):
- description[key] = int(value)
- except (TypeError, ValueError):
- pass
-
- return description
-
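-# For example (hypothetical attributes): {'VisibilityTimeout': '30',
-# 'Policy': '{"Version": ...}'} comes back as {'visibility_timeout': 30,
-# 'policy': {...}} - integer-looking strings are cast back to int and the two
-# policy documents are parsed into dicts.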
-
-def create_or_update_sqs_queue(client, module):
- is_fifo = (module.params.get('queue_type') == 'fifo')
- queue_name = get_queue_name(module, is_fifo)
- result = dict(
- name=queue_name,
- region=module.params.get('region'),
- changed=False,
- )
-
- queue_url = get_queue_url(client, queue_name)
- result['queue_url'] = queue_url
-
- if not queue_url:
- create_attributes = {'FifoQueue': 'true'} if is_fifo else {}
- result['changed'] = True
- if module.check_mode:
- return result
- queue_url = client.create_queue(QueueName=queue_name, Attributes=create_attributes, aws_retry=True)['QueueUrl']
-
- changed, arn = update_sqs_queue(module, client, queue_url)
- result['changed'] |= changed
- result['queue_arn'] = arn
-
- changed, tags = update_tags(client, queue_url, module)
- result['changed'] |= changed
- result['tags'] = tags
-
- result.update(describe_queue(client, queue_url))
-
- COMPATIBILITY_KEYS = dict(
- delay_seconds='delivery_delay',
- receive_message_wait_time_seconds='receive_message_wait_time',
- visibility_timeout='default_visibility_timeout',
- kms_data_key_reuse_period_seconds='kms_data_key_reuse_period',
- )
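- # e.g. a queue reporting delay_seconds=30 is also returned with
- # delivery_delay=30 (illustrative; only keys present in the result are
- # mirrored).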
- for key in list(result.keys()):
-
- # The return values changed between boto and boto3, add the old keys too
- # for backwards compatibility
- return_name = COMPATIBILITY_KEYS.get(key)
- if return_name:
- result[return_name] = result.get(key)
-
- return result
-
-
-def update_sqs_queue(module, client, queue_url):
- check_mode = module.check_mode
- changed = False
- existing_attributes = client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
- new_attributes = snake_dict_to_camel_dict(module.params, capitalize_first=True)
- attributes_to_set = dict()
-
- # Boto3 SQS deals with policies as strings, we want to deal with them as
- # dicts
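- # compare_policies() returns True when the two documents differ
- # semantically (key order is ignored). Illustrative:
- #   compare_policies({'Id': 'a'}, {'Id': 'b'}) -> True   (update needed)
- #   compare_policies({'Id': 'a'}, {'Id': 'a'}) -> False  (no change)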
- if module.params.get('policy') is not None:
- policy = module.params.get('policy')
- current_value = existing_attributes.get('Policy', '{}')
- current_policy = json.loads(current_value)
- if compare_policies(current_policy, policy):
- attributes_to_set['Policy'] = json.dumps(policy)
- changed = True
- if module.params.get('redrive_policy') is not None:
- policy = module.params.get('redrive_policy')
- current_value = existing_attributes.get('RedrivePolicy', '{}')
- current_policy = json.loads(current_value)
- if compare_policies(current_policy, policy):
- attributes_to_set['RedrivePolicy'] = json.dumps(policy)
- changed = True
-
- for attribute, value in existing_attributes.items():
- # We handle these as a special case because they're IAM policies
- if attribute in ['Policy', 'RedrivePolicy']:
- continue
-
- if attribute not in new_attributes.keys():
- continue
-
- if new_attributes.get(attribute) is None:
- continue
-
- new_value = new_attributes[attribute]
-
- if isinstance(new_value, bool):
- new_value = str(new_value).lower()
- value = str(value).lower()
-
- if new_value == value:
- continue
-
- # Boto3 expects strings
- attributes_to_set[attribute] = str(new_value)
- changed = True
-
- if changed and not check_mode:
- client.set_queue_attributes(QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True)
-
- # AWS returns attribute keys in CamelCase ('QueueArn', not 'queue_arn')
- return changed, existing_attributes.get('QueueArn')
-
-
-def delete_sqs_queue(client, module):
- is_fifo = (module.params.get('queue_type') == 'fifo')
- queue_name = get_queue_name(module, is_fifo)
- result = dict(
- name=queue_name,
- region=module.params.get('region'),
- changed=False
- )
-
- queue_url = get_queue_url(client, queue_name)
- if not queue_url:
- return result
-
- result['changed'] = bool(queue_url)
- if not module.check_mode:
- AWSRetry.jittered_backoff()(client.delete_queue)(QueueUrl=queue_url)
-
- return result
-
-
-def update_tags(client, queue_url, module):
- new_tags = module.params.get('tags')
- purge_tags = module.params.get('purge_tags')
- if new_tags is None:
- return False, {}
-
- try:
- existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True)['Tags']
- except (ClientError, KeyError):
- existing_tags = {}
-
- tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
-
- if not module.check_mode:
- if tags_to_remove:
- client.untag_queue(QueueUrl=queue_url, TagKeys=tags_to_remove, aws_retry=True)
- if tags_to_add:
- client.tag_queue(QueueUrl=queue_url, Tags=tags_to_add, aws_retry=True)
- existing_tags = client.list_queue_tags(QueueUrl=queue_url, aws_retry=True).get('Tags', {})
- else:
- existing_tags = new_tags
-
- changed = bool(tags_to_remove) or bool(tags_to_add)
- return changed, existing_tags
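-# Sketch of the comparison performed above (tag names are hypothetical):
-#   existing={'Env': 'dev', 'Team': 'a'}, new={'Env': 'prod'}, purge_tags=True
-#   -> tags_to_add={'Env': 'prod'}, tags_to_remove=['Team']
-# With purge_tags=False, the unrelated 'Team' tag would be left in place.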
-
-
-def main():
-
- argument_spec = dict(
- state=dict(type='str', default='present', choices=['present', 'absent']),
- name=dict(type='str', required=True),
- queue_type=dict(type='str', default='standard', choices=['standard', 'fifo']),
- delay_seconds=dict(type='int', aliases=['delivery_delay']),
- maximum_message_size=dict(type='int'),
- message_retention_period=dict(type='int'),
- policy=dict(type='dict'),
- receive_message_wait_time_seconds=dict(type='int', aliases=['receive_message_wait_time']),
- redrive_policy=dict(type='dict'),
- visibility_timeout=dict(type='int', aliases=['default_visibility_timeout']),
- kms_master_key_id=dict(type='str'),
- kms_data_key_reuse_period_seconds=dict(type='int', aliases=['kms_data_key_reuse_period']),
- content_based_deduplication=dict(type='bool'),
- tags=dict(type='dict'),
- purge_tags=dict(type='bool', default=False),
- )
- module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
-
- state = module.params.get('state')
- retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=['AWS.SimpleQueueService.NonExistentQueue'])
- try:
- client = module.client('sqs', retry_decorator=retry_decorator)
- if state == 'present':
- result = create_or_update_sqs_queue(client, module)
- elif state == 'absent':
- result = delete_sqs_queue(client, module)
- except (BotoCoreError, ClientError, ParamValidationError) as e:
- module.fail_json_aws(e, msg='Failed to control sqs queue')
- else:
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/sts_assume_role.py b/lib/ansible/modules/cloud/amazon/sts_assume_role.py
deleted file mode 100644
index cd82a549cb..0000000000
--- a/lib/ansible/modules/cloud/amazon/sts_assume_role.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: sts_assume_role
-short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
-description:
- - Assume a role using AWS Security Token Service and obtain temporary credentials.
-version_added: "2.0"
-author:
- - Boris Ekelchik (@bekelchik)
- - Marek Piatek (@piontas)
-options:
- role_arn:
- description:
- - The Amazon Resource Name (ARN) of the role that the caller is
- assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs).
- required: true
- type: str
- role_session_name:
- description:
- - Name of the role's session - will be used by CloudTrail.
- required: true
- type: str
- policy:
- description:
- - Supplemental policy to use in addition to assumed role's policies.
- type: str
- duration_seconds:
- description:
- - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours).
- - The max depends on the IAM role's sessions duration setting.
- - By default, the value is set to 3600 seconds.
- type: int
- external_id:
- description:
- - A unique identifier that is used by third parties to assume a role in their customers' accounts.
- type: str
- mfa_serial_number:
- description:
- - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
- type: str
- mfa_token:
- description:
- - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
- type: str
-notes:
- - In order to use the assumed role in a subsequent playbook task you must pass the I(access_key), I(secret_key) and I(session_token) values from the returned I(sts_creds).
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - boto3
- - botocore
- - python >= 2.6
-'''
-
-RETURN = '''
-sts_creds:
- description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token
- returned: always
- type: dict
- sample:
- access_key: XXXXXXXXXXXXXXXXXXXX
- expiration: 2017-11-11T11:11:11+00:00
- secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-sts_user:
- description: The Amazon Resource Name (ARN) and the assumed role ID
- returned: always
- type: dict
- sample:
- assumed_role_id: arn:aws:sts::123456789012:assumed-role/demo/Bob
- arn: ARO123EXAMPLE123:Bob
-changed:
- description: True if obtaining the credentials succeeds
- type: bool
- returned: always
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
-- sts_assume_role:
- role_arn: "arn:aws:iam::123456789012:role/someRole"
- role_session_name: "someRoleSession"
- register: assumed_role
-
-# Use the assumed role above to tag an instance in account 123456789012
-- ec2_tag:
- aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
- aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
- security_token: "{{ assumed_role.sts_creds.session_token }}"
- resource: i-xyzxyz01
- state: present
- tags:
- MyNewTag: value
-
-'''
-
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-
-try:
- from botocore.exceptions import ClientError, ParamValidationError
-except ImportError:
- pass # caught by AnsibleAWSModule
-
-
-def _parse_response(response):
- credentials = response.get('Credentials', {})
- user = response.get('AssumedRoleUser', {})
-
- sts_cred = {
- 'access_key': credentials.get('AccessKeyId'),
- 'secret_key': credentials.get('SecretAccessKey'),
- 'session_token': credentials.get('SessionToken'),
- 'expiration': credentials.get('Expiration')
-
- }
- sts_user = camel_dict_to_snake_dict(user)
- return sts_cred, sts_user
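-# Shape of the translation above, assuming a typical AssumeRole response
-# (values are placeholders):
-#   {'Credentials': {'AccessKeyId': 'AKIA...', 'SecretAccessKey': '...',
-#                    'SessionToken': '...', 'Expiration': ...},
-#    'AssumedRoleUser': {'AssumedRoleId': 'ARO...:session', 'Arn': 'arn:...'}}
-#   becomes sts_cred={'access_key': 'AKIA...', ...} and
-#   sts_user={'assumed_role_id': 'ARO...:session', 'arn': 'arn:...'}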
-
-
-def assume_role_policy(connection, module):
- params = {
- 'RoleArn': module.params.get('role_arn'),
- 'RoleSessionName': module.params.get('role_session_name'),
- 'Policy': module.params.get('policy'),
- 'DurationSeconds': module.params.get('duration_seconds'),
- 'ExternalId': module.params.get('external_id'),
- 'SerialNumber': module.params.get('mfa_serial_number'),
- 'TokenCode': module.params.get('mfa_token')
- }
- changed = False
-
- kwargs = dict((k, v) for k, v in params.items() if v is not None)
-
- try:
- response = connection.assume_role(**kwargs)
- changed = True
- except (ClientError, ParamValidationError) as e:
- module.fail_json_aws(e)
-
- sts_cred, sts_user = _parse_response(response)
- module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user)
-
-
-def main():
- argument_spec = dict(
- role_arn=dict(required=True),
- role_session_name=dict(required=True),
- duration_seconds=dict(required=False, default=None, type='int'),
- external_id=dict(required=False, default=None),
- policy=dict(required=False, default=None),
- mfa_serial_number=dict(required=False, default=None),
- mfa_token=dict(required=False, default=None)
- )
-
- module = AnsibleAWSModule(argument_spec=argument_spec)
-
- connection = module.client('sts')
-
- assume_role_policy(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/amazon/sts_session_token.py b/lib/ansible/modules/cloud/amazon/sts_session_token.py
deleted file mode 100644
index 4606448dbf..0000000000
--- a/lib/ansible/modules/cloud/amazon/sts_session_token.py
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: sts_session_token
-short_description: Obtain a session token from the AWS Security Token Service
-description:
- - Obtain a session token from the AWS Security Token Service.
-version_added: "2.2"
-author: Victor Costan (@pwnall)
-options:
- duration_seconds:
- description:
- - The duration, in seconds, of the session token.
- See U(https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters)
- for acceptable and default values.
- type: int
- mfa_serial_number:
- description:
- - The identification number of the MFA device that is associated with the user who is making the GetSessionToken call.
- type: str
- mfa_token:
- description:
- - The value provided by the MFA device, if the trust policy of the user requires MFA.
- type: str
-notes:
- - In order to use the session token in a subsequent playbook task you must pass the I(access_key), I(secret_key) and I(session_token) values from the returned I(sts_creds).
-extends_documentation_fragment:
- - aws
- - ec2
-requirements:
- - boto3
- - botocore
- - python >= 2.6
-'''
-
-RETURN = """
-sts_creds:
- description: The Credentials object returned by the AWS Security Token Service
- returned: always
- type: dict
- sample:
- access_key: ASXXXXXXXXXXXXXXXXXX
- expiration: "2016-04-08T11:59:47+00:00"
- secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-changed:
- description: True if obtaining the credentials succeeds
- type: bool
- returned: always
-"""
-
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Get a session token (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
-- sts_session_token:
- duration_seconds: 3600
- register: session_credentials
-
-# Use the session token obtained above to tag an instance in account 123456789012
-- ec2_tag:
- aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
- aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
- security_token: "{{ session_credentials.sts_creds.session_token }}"
- resource: i-xyzxyz01
- state: present
- tags:
- MyNewTag: value
-
-'''
-
-try:
- import boto3
- from botocore.exceptions import ClientError
- HAS_BOTO3 = True
-except ImportError:
- HAS_BOTO3 = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
-
-
-def normalize_credentials(credentials):
- access_key = credentials.get('AccessKeyId', None)
- secret_key = credentials.get('SecretAccessKey', None)
- session_token = credentials.get('SessionToken', None)
- expiration = credentials.get('Expiration', None)
- return {
- 'access_key': access_key,
- 'secret_key': secret_key,
- 'session_token': session_token,
- 'expiration': expiration
- }
-
-
-def get_session_token(connection, module):
- duration_seconds = module.params.get('duration_seconds')
- mfa_serial_number = module.params.get('mfa_serial_number')
- mfa_token = module.params.get('mfa_token')
- changed = False
-
- args = {}
- if duration_seconds is not None:
- args['DurationSeconds'] = duration_seconds
- if mfa_serial_number is not None:
- args['SerialNumber'] = mfa_serial_number
- if mfa_token is not None:
- args['TokenCode'] = mfa_token
-
- try:
- response = connection.get_session_token(**args)
- changed = True
- except ClientError as e:
- module.fail_json(msg=str(e))
-
- credentials = normalize_credentials(response.get('Credentials', {}))
- module.exit_json(changed=changed, sts_creds=credentials)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- duration_seconds=dict(required=False, default=None, type='int'),
- mfa_serial_number=dict(required=False, default=None),
- mfa_token=dict(required=False, default=None)
- )
- )
-
- module = AnsibleModule(argument_spec=argument_spec)
-
- if not HAS_BOTO3:
- module.fail_json(msg='boto3 and botocore are required.')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- if region:
- connection = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- else:
- module.fail_json(msg="region must be specified")
-
- get_session_token(connection, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/plugins/connection/aws_ssm.py b/lib/ansible/plugins/connection/aws_ssm.py
deleted file mode 100644
index 359db76f3d..0000000000
--- a/lib/ansible/plugins/connection/aws_ssm.py
+++ /dev/null
@@ -1,557 +0,0 @@
-# Based on the ssh connection plugin by Michael DeHaan
-#
-# Copyright: (c) 2018, Pat Sharkey <psharkey@cleo.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
-author:
-- Pat Sharkey (@psharkey) <psharkey@cleo.com>
-- HanumanthaRao MVL (@hanumantharaomvl) <hanumanth@flux7.com>
-- Gaurav Ashtikar (@gau1991 )<gaurav.ashtikar@flux7.com>
-connection: aws_ssm
-short_description: execute via AWS Systems Manager
-description:
-- This connection plugin allows Ansible to execute tasks on an EC2 instance via the AWS SSM session manager plugin.
-version_added: "2.10"
-requirements:
-- The remote EC2 instance must be running the AWS Systems Manager Agent (SSM Agent).
-- The control machine must have the AWS session manager plugin installed.
-- The remote EC2 Linux instance must have curl installed.
-options:
- instance_id:
- description: The EC2 instance ID.
- vars:
- - name: ansible_aws_ssm_instance_id
- region:
- description: The region the EC2 instance is located in.
- vars:
- - name: ansible_aws_ssm_region
- default: 'us-east-1'
- bucket_name:
- description: The name of the S3 bucket used for file transfers.
- vars:
- - name: ansible_aws_ssm_bucket_name
- plugin:
- description: This defines the location of the session-manager-plugin binary.
- vars:
- - name: ansible_aws_ssm_plugin
- default: '/usr/local/bin/session-manager-plugin'
- retries:
- description: Number of attempts to connect.
- default: 3
- type: integer
- vars:
- - name: ansible_aws_ssm_retries
- timeout:
- description: Connection timeout seconds.
- default: 60
- type: integer
- vars:
- - name: ansible_aws_ssm_timeout
-"""
-
-EXAMPLES = r'''
-
-# Stop Spooler Process on Windows Instances
-- name: Stop Spooler Service on Windows Instances
- vars:
- ansible_connection: aws_ssm
- ansible_shell_type: powershell
- ansible_aws_ssm_bucket_name: nameofthebucket
- ansible_aws_ssm_region: us-east-1
- tasks:
- - name: Stop spooler service
- win_service:
- name: spooler
- state: stopped
-
-# Install a Nginx Package on Linux Instance
-- name: Install a Nginx Package
- vars:
- ansible_connection: aws_ssm
- ansible_aws_ssm_bucket_name: nameofthebucket
- ansible_aws_ssm_region: us-west-2
- tasks:
- - name: Install a Nginx Package
- yum:
- name: nginx
- state: present
-
-# Create a directory in Windows Instances
-- name: Create a directory in Windows Instance
- vars:
- ansible_connection: aws_ssm
- ansible_shell_type: powershell
- ansible_aws_ssm_bucket_name: nameofthebucket
- ansible_aws_ssm_region: us-east-1
- tasks:
- - name: Create a Directory
- win_file:
- path: C:\Windows\temp
- state: directory
-
-# Making use of Dynamic Inventory Plugin
-# =======================================
-# aws_ec2.yml (Dynamic Inventory - Linux)
-# This will return the Instance IDs matching the filter
-#plugin: aws_ec2
-#regions:
-# - us-east-1
-#hostnames:
-# - instance-id
-#filters:
-# tag:SSMTag: ssmlinux
-# -----------------------
-- name: install aws-cli
- hosts: all
- gather_facts: false
- vars:
- ansible_connection: aws_ssm
- ansible_aws_ssm_bucket_name: nameofthebucket
- ansible_aws_ssm_region: us-east-1
- tasks:
- - name: aws-cli
- raw: yum install -y awscli
- tags: aws-cli
-# Execution: ansible-playbook linux.yaml -i aws_ec2.yml
-# The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection.
-# =====================================================
-# aws_ec2.yml (Dynamic Inventory - Windows)
-#plugin: aws_ec2
-#regions:
-# - us-east-1
-#hostnames:
-# - instance-id
-#filters:
-# tag:SSMTag: ssmwindows
-# -----------------------
-- name: Create a dir.
- hosts: all
- gather_facts: false
- vars:
- ansible_connection: aws_ssm
- ansible_shell_type: powershell
- ansible_aws_ssm_bucket_name: nameofthebucket
- ansible_aws_ssm_region: us-east-1
- tasks:
- - name: Create the directory
- win_file:
- path: C:\Temp\SSM_Testing5
- state: directory
-# Execution: ansible-playbook win_file.yaml -i aws_ec2.yml
-# The playbook tasks will get executed on the instance ids returned from the dynamic inventory plugin using ssm connection.
-'''
-
-import getpass
-import json
-import os
-import pty
-import random
-import re
-import select
-import string
-import subprocess
-import time
-
-try:
- import boto3
- HAS_BOTO_3 = True
-except ImportError as e:
- HAS_BOTO_3_ERROR = str(e)
- HAS_BOTO_3 = False
-
-from functools import wraps
-from ansible import constants as C
-from ansible.errors import AnsibleConnectionFailure, AnsibleError, AnsibleFileNotFound
-from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils.six import PY3
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils._text import to_bytes, to_native, to_text
-from ansible.plugins.connection import ConnectionBase
-from ansible.plugins.shell.powershell import _common_args
-from ansible.utils.display import Display
-
-display = Display()
-
-
-def _ssm_retry(func):
- """
- Decorator to retry in the case of a connection failure
- Will retry if:
- * an exception is caught
- Will not retry if
- * remaining_tries is <2
- * retries limit reached
- """
- @wraps(func)
- def wrapped(self, *args, **kwargs):
- remaining_tries = int(self.get_option('retries')) + 1
- cmd_summary = "%s..." % args[0]
- for attempt in range(remaining_tries):
-
- try:
- return_tuple = func(self, *args, **kwargs)
- display.vvv(return_tuple, host=self.host)
- break
-
- except Exception as e:  # AnsibleConnectionFailure is an Exception subclass
- if attempt == remaining_tries - 1:
- raise
- else:
- pause = 2 ** attempt - 1
- if pause > 30:
- pause = 30
-
- if isinstance(e, AnsibleConnectionFailure):
- msg = "ssm_retry: attempt: %d, cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause)
- else:
- msg = "ssm_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause)
-
- display.vv(msg, host=self.host)
-
- time.sleep(pause)
-
- # Do not attempt to reuse the existing session on retries
- self.close()
-
- continue
-
- return return_tuple
- return wrapped
-
-
-def chunks(lst, n):
- """Yield successive n-sized chunks from lst."""
- for i in range(0, len(lst), n):
- yield lst[i:i + n]
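-# e.g. list(chunks('abcdef', 4)) -> ['abcd', 'ef']; exec_command() uses this
-# to feed the wrapped command to the plugin's stdin in 1024-byte slices.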
-
-
-class Connection(ConnectionBase):
- ''' AWS SSM based connections '''
-
- transport = 'aws_ssm'
- allow_executable = False
- allow_extras = True
- has_pipelining = False
- is_windows = False
- _client = None
- _session = None
- _stdout = None
- _session_id = ''
- _timeout = False
- MARK_LENGTH = 26
-
- def __init__(self, *args, **kwargs):
- if not HAS_BOTO_3:
- raise AnsibleError('{0}: {1}'.format(missing_required_lib("boto3"), HAS_BOTO_3_ERROR))
-
- super(Connection, self).__init__(*args, **kwargs)
- self.host = self._play_context.remote_addr
-
- if getattr(self._shell, "SHELL_FAMILY", '') == 'powershell':
- self.delegate = None
- self.has_native_async = True
- self.always_pipeline_modules = True
- self.module_implementation_preferences = ('.ps1', '.exe', '')
- self.protocol = None
- self.shell_id = None
- self._shell_type = 'powershell'
- self.is_windows = True
-
- def _connect(self):
- ''' connect to the host via ssm '''
-
- self._play_context.remote_user = getpass.getuser()
-
- if not self._session_id:
- self.start_session()
- return self
-
- def start_session(self):
- ''' start ssm session '''
-
- if self.get_option('instance_id') is None:
- self.instance_id = self.host
- else:
- self.instance_id = self.get_option('instance_id')
-
- display.vvv(u"ESTABLISH SSM CONNECTION TO: {0}".format(self.instance_id), host=self.host)
-
- executable = self.get_option('plugin')
- if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
- raise AnsibleError("failed to find the executable specified %s."
- " Please verify if the executable exists and re-try." % executable)
-
- profile_name = ''
- region_name = self.get_option('region')
- ssm_parameters = dict()
-
- client = boto3.client('ssm', region_name=region_name)
- self._client = client
- response = client.start_session(Target=self.instance_id, Parameters=ssm_parameters)
- self._session_id = response['SessionId']
-
- cmd = [
- executable,
- json.dumps(response),
- region_name,
- "StartSession",
- profile_name,
- json.dumps({"Target": self.instance_id}),
- client.meta.endpoint_url
- ]
-
- display.vvvv(u"SSM COMMAND: {0}".format(to_text(cmd)), host=self.host)
-
- stdout_r, stdout_w = pty.openpty()
- session = subprocess.Popen(
- cmd,
- stdin=subprocess.PIPE,
- stdout=stdout_w,
- stderr=subprocess.PIPE,
- close_fds=True,
- bufsize=0,
- )
-
- os.close(stdout_w)
- self._stdout = os.fdopen(stdout_r, 'rb', 0)
- self._session = session
- self._poll_stdout = select.poll()
- self._poll_stdout.register(self._stdout, select.POLLIN)
-
- # Disable command echo and prompt.
- self._prepare_terminal()
-
- display.vvv(u"SSM CONNECTION ID: {0}".format(self._session_id), host=self.host)
-
- return session
-
- @_ssm_retry
- def exec_command(self, cmd, in_data=None, sudoable=True):
- ''' run a command on the ssm host '''
-
- super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
-
- display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self.host)
-
- session = self._session
-
- mark_begin = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])
- if self.is_windows:
- mark_start = mark_begin + " $LASTEXITCODE"
- else:
- mark_start = mark_begin
- mark_end = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])
-
- # Wrap command in markers accordingly for the shell used
- cmd = self._wrap_command(cmd, sudoable, mark_start, mark_end)
-
- self._flush_stderr(session)
-
- for chunk in chunks(cmd, 1024):
- session.stdin.write(to_bytes(chunk, errors='surrogate_or_strict'))
-
- # Read stdout between the markers
- stdout = ''
- win_line = ''
- begin = False
- stop_time = int(round(time.time())) + self.get_option('timeout')
- while session.poll() is None:
- remaining = stop_time - int(round(time.time()))
- if remaining < 1:
- self._timeout = True
- display.vvvv(u"EXEC timeout stdout: {0}".format(to_text(stdout)), host=self.host)
- raise AnsibleConnectionFailure("SSM exec_command timeout on host: %s"
- % self.instance_id)
- if self._poll_stdout.poll(1000):
- line = self._filter_ansi(self._stdout.readline())
- display.vvvv(u"EXEC stdout line: {0}".format(to_text(line)), host=self.host)
- else:
- display.vvvv(u"EXEC remaining: {0}".format(remaining), host=self.host)
- continue
-
- if not begin and self.is_windows:
- win_line = win_line + line
- line = win_line
-
- if mark_start in line:
- begin = True
- if not line.startswith(mark_start):
- stdout = ''
- continue
- if begin:
- if mark_end in line:
- display.vvvv(u"POST_PROCESS: {0}".format(to_text(stdout)), host=self.host)
- returncode, stdout = self._post_process(stdout, mark_begin)
- break
- else:
- stdout = stdout + line
-
- stderr = self._flush_stderr(session)
-
- return (returncode, stdout, stderr)
-
- def _prepare_terminal(self):
- ''' perform any one-time terminal settings '''
-
- if not self.is_windows:
- cmd = "stty -echo\n" + "PS1=''\n"
- cmd = to_bytes(cmd, errors='surrogate_or_strict')
- self._session.stdin.write(cmd)
-
- def _wrap_command(self, cmd, sudoable, mark_start, mark_end):
- ''' wrap command so stdout and status can be extracted '''
-
- if self.is_windows:
- if not cmd.startswith(" ".join(_common_args) + " -EncodedCommand"):
- cmd = self._shell._encode_script(cmd, preserve_rc=True)
- cmd = cmd + "; echo " + mark_start + "\necho " + mark_end + "\n"
- else:
- if sudoable:
- cmd = "sudo " + cmd
- cmd = "echo " + mark_start + "\n" + cmd + "\necho $'\\n'$?\n" + "echo " + mark_end + "\n"
-
- display.vvvv(u"_wrap_command: '{0}'".format(to_text(cmd)), host=self.host)
- return cmd
-
- def _post_process(self, stdout, mark_begin):
- ''' extract command status and strip unwanted lines '''
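- # The wrapped command echoes the exit status between the command output
- # and the end marker (see _wrap_command above), so both branches below
- # recover the return code from the tail of the buffer and then strip the
- # trailer lines from stdout.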
-
- if self.is_windows:
- # Value of $LASTEXITCODE will be the line after the mark
- trailer = stdout[stdout.rfind(mark_begin):]
- last_exit_code = trailer.splitlines()[1]
- # isdigit must be called; the bare method object is always truthy
- if last_exit_code.isdigit():
- returncode = int(last_exit_code)
- else:
- returncode = -1
- # output to keep will be before the mark
- stdout = stdout[:stdout.rfind(mark_begin)]
-
- # If it looks like JSON remove any newlines
- if stdout.startswith('{'):
- stdout = stdout.replace('\n', '')
-
- return (returncode, stdout)
- else:
- # Get command return code
- returncode = int(stdout.splitlines()[-2])
-
- # Throw away ending lines
- for x in range(0, 3):
- stdout = stdout[:stdout.rfind('\n')]
-
- return (returncode, stdout)
-
- def _filter_ansi(self, line):
- ''' remove any ANSI terminal control codes '''
- line = to_text(line)
-
- if self.is_windows:
- osc_filter = re.compile(r'\x1b\][^\x07]*\x07')
- line = osc_filter.sub('', line)
- ansi_filter = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
- line = ansi_filter.sub('', line)
-
- # Replace or strip sequence (at terminal width)
- line = line.replace('\r\r\n', '\n')
- if len(line) == 201:
- line = line[:-1]
-
- return line
-
- def _flush_stderr(self, subprocess):
- ''' read and return stderr with minimal blocking '''
-
- poll_stderr = select.poll()
- poll_stderr.register(subprocess.stderr, select.POLLIN)
- stderr = ''
-
- while subprocess.poll() is None:
- if poll_stderr.poll(1):
- line = subprocess.stderr.readline()
- display.vvvv(u"stderr line: {0}".format(to_text(line)), host=self.host)
- stderr = stderr + line
- else:
- break
-
- return stderr
-
- def _get_url(self, client_method, bucket_name, out_path, http_method):
- ''' Generate URL for get_object / put_object '''
- client = boto3.client('s3')
- return client.generate_presigned_url(client_method, Params={'Bucket': bucket_name, 'Key': out_path}, ExpiresIn=3600, HttpMethod=http_method)
-
- @_ssm_retry
- def _file_transport_command(self, in_path, out_path, ssm_action):
- ''' transfer a file from using an intermediate S3 bucket '''
-
- s3_path = out_path.replace('\\', '/')
- bucket_url = 's3://%s/%s' % (self.get_option('bucket_name'), s3_path)
-
- if self.is_windows:
- put_command = "Invoke-WebRequest -Method PUT -InFile '%s' -Uri '%s' -UseBasicParsing" % (
- in_path, self._get_url('put_object', self.get_option('bucket_name'), s3_path, 'PUT'))
- get_command = "Invoke-WebRequest '%s' -OutFile '%s'" % (
- self._get_url('get_object', self.get_option('bucket_name'), s3_path, 'GET'), out_path)
- else:
- put_command = "curl --request PUT --upload-file '%s' '%s'" % (
- in_path, self._get_url('put_object', self.get_option('bucket_name'), s3_path, 'PUT'))
- get_command = "curl '%s' -o '%s'" % (
- self._get_url('get_object', self.get_option('bucket_name'), s3_path, 'GET'), out_path)
-
- client = boto3.client('s3')
- if ssm_action == 'get':
- (returncode, stdout, stderr) = self.exec_command(put_command, in_data=None, sudoable=False)
- with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as data:
- client.download_fileobj(self.get_option('bucket_name'), s3_path, data)
- else:
- with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as data:
- client.upload_fileobj(data, self.get_option('bucket_name'), s3_path)
- (returncode, stdout, stderr) = self.exec_command(get_command, in_data=None, sudoable=False)
-
- # Check the return code
- if returncode == 0:
- return (returncode, stdout, stderr)
- else:
- raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
- (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to remote '''
-
- super(Connection, self).put_file(in_path, out_path)
-
- display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
- if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
- raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
-
- return self._file_transport_command(in_path, out_path, 'put')
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from remote to local '''
-
- super(Connection, self).fetch_file(in_path, out_path)
-
- display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
- return self._file_transport_command(in_path, out_path, 'get')
-
- def close(self):
- ''' terminate the connection '''
- if self._session_id:
-
- display.vvv(u"CLOSING SSM CONNECTION TO: {0}".format(self.instance_id), host=self.host)
- if self._timeout:
- self._session.terminate()
- else:
- cmd = b"\nexit\n"
- self._session.communicate(cmd)
-
- display.vvvv(u"TERMINATE SSM SESSION: {0}".format(self._session_id), host=self.host)
- self._client.terminate_session(SessionId=self._session_id)
- self._session_id = ''
diff --git a/test/integration/targets/aws_acm/aliases b/test/integration/targets/aws_acm/aliases
deleted file mode 100644
index c5a973f85c..0000000000
--- a/test/integration/targets/aws_acm/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/aws
-aws_acm_info
-shippable/aws/group2
-unstable
diff --git a/test/integration/targets/aws_acm/defaults/main.yml b/test/integration/targets/aws_acm/defaults/main.yml
deleted file mode 100644
index 5d3648f8e6..0000000000
--- a/test/integration/targets/aws_acm/defaults/main.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-# we'll generate 3 certificates locally for the test
-# Upload the first
-# overwrite it with the second
-# and the third is unrelated, to check we only get info about the first when we want
-local_certs:
- - priv_key: "{{ remote_tmp_dir }}/private-1.pem"
- cert: "{{ remote_tmp_dir }}/public-1.pem"
- csr: "{{ remote_tmp_dir }}/csr-1.csr"
- domain: "acm1.{{ aws_acm_test_uuid }}.ansible.com"
- name: "{{ resource_prefix }}_{{ aws_acm_test_uuid }}_1"
-
- - priv_key: "{{ remote_tmp_dir }}/private-2.pem"
- cert: "{{ remote_tmp_dir }}/public-2.pem"
- csr: "{{ remote_tmp_dir }}/csr-2.csr"
- domain: "acm2.{{ aws_acm_test_uuid }}.ansible.com"
- name: "{{ resource_prefix }}_{{ aws_acm_test_uuid }}_2"
-
- - priv_key: "{{ remote_tmp_dir }}/private-3.pem"
- cert: "{{ remote_tmp_dir }}/public-3.pem"
- csr: "{{ remote_tmp_dir }}/csr-3.csr"
- domain: "acm3.{{ aws_acm_test_uuid }}.ansible.com"
- name: "{{ resource_prefix }}_{{ aws_acm_test_uuid }}_3"
-
-# we'll have one private key
-# make 2 chains using it
-# so we can test what happens when you change just the chain
-# not the domain or key
-chained_cert:
- priv_key: "{{ remote_tmp_dir }}/private-ch-0.pem"
- domain: "acm-ch.{{ aws_acm_test_uuid }}.ansible.com"
- name: "{{ resource_prefix }}_{{ aws_acm_test_uuid }}_4"
- chains:
- - cert: "{{ remote_tmp_dir }}/public-ch-0.pem"
- csr: "{{ remote_tmp_dir }}/csr-ch-0.csr"
- ca: 0 # index into local_certs
- - cert: "{{ remote_tmp_dir }}/public-ch-1.pem"
- csr: "{{ remote_tmp_dir }}/csr-ch-1.csr"
- ca: 1 # index into local_certs
- \ No newline at end of file
diff --git a/test/integration/targets/aws_acm/meta/main.yml b/test/integration/targets/aws_acm/meta/main.yml
deleted file mode 100644
index 1810d4bec9..0000000000
--- a/test/integration/targets/aws_acm/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_remote_tmp_dir
diff --git a/test/integration/targets/aws_acm/tasks/full_acm_test.yml b/test/integration/targets/aws_acm/tasks/full_acm_test.yml
deleted file mode 100644
index 3647531958..0000000000
--- a/test/integration/targets/aws_acm/tasks/full_acm_test.yml
+++ /dev/null
@@ -1,482 +0,0 @@
-- name: AWS ACM integration test
- module_defaults:
- group/aws:
- aws_region: "{{ aws_region }}"
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- block:
- # just check this task doesn't fail
- # I'm not sure if I can assume there aren't already other certs in this account
- - name: list certs
- aws_acm_info:
- register: list_all
- failed_when: list_all.certificates is not defined
-
- - name: ensure absent cert which doesn't exist - first time
- aws_acm:
- name_tag: "{{ item.name }}"
- state: absent
- with_items: "{{ local_certs }}"
-
- # just in case it actually existed and was deleted last task
- # check we don't fail when deleting nothing
- - name: ensure absent cert which doesn't exist - second time
- aws_acm:
- name_tag: "{{ item.name }}"
- state: absent
- with_items: "{{ local_certs }}"
- register: absent_start_two
- failed_when: absent_start_two.changed
-
- - name: list cert which shouldn't exist
- aws_acm_info:
- tags:
- Name: "{{ item.name }}"
- register: list_tag
- with_items: "{{ local_certs }}"
- failed_when: list_tag.certificates | length > 0
-
- - name: check directory was made
- assert:
- that:
- - remote_tmp_dir is defined
-
- # https://github.com/vbotka/ansible-certificate/blob/master/tasks/cert-self-signed.yml
- - name: Generate private key for local certs
- openssl_privatekey:
- path: "{{ item.priv_key }}"
- type: RSA
- size: 2048 # ACM doesn't work properly with 4096
- with_items: "{{ local_certs }}"
-
- - name: Generate an OpenSSL Certificate Signing Request for own certs
- openssl_csr:
- path: "{{ item.csr }}"
- privatekey_path: "{{ item.priv_key }}"
- common_name: "{{ item.domain }}"
- with_items: "{{ local_certs }}"
-
- - name: Generate a Self Signed OpenSSL certificate for own certs
- openssl_certificate:
- provider: selfsigned
- path: "{{ item.cert }}"
- csr_path: "{{ item.csr }}"
- privatekey_path: "{{ item.priv_key }}"
- signature_algorithms:
- - 'sha256WithRSAEncryption'
- # - 'sha512WithRSAEncryption'
- with_items: "{{ local_certs }}"
-
- # now upload that certificate
- - name: upload certificates first time
- aws_acm:
- name_tag: "{{ item.name }}"
- certificate: "{{ lookup('file', item.cert ) }}"
- private_key: "{{ lookup('file', item.priv_key ) }}"
- state: present
- register: upload
- with_items: "{{ local_certs }}"
- until: upload is succeeded
- retries: 5
- delay: 10
-
- - assert:
- that:
- - prev_task.certificate.arn is defined
- - ('arn:aws:acm:123' | regex_search( 'arn:aws:acm:' )) is defined # check this works like s.startswith('arn')
- - (prev_task.certificate.arn | regex_search( 'arn:aws:acm:' )) is defined
- - prev_task.certificate.domain_name == original_cert.domain
- - prev_task.changed
- with_items: "{{ upload.results }}"
- vars:
- original_cert: "{{ item.item }}"
- prev_task: "{{ item }}"
-
- - name: fetch data about cert just uploaded, by ARN
- aws_acm_info:
- certificate_arn: "{{ item.certificate.arn }}"
- register: fetch_after_up
- with_items: "{{ upload.results }}"
-
- - name: check output of prior task (fetch data about cert just uploaded, by ARN)
- assert:
- that:
- - fetch_after_up_result.certificates | length == 1
- - fetch_after_up_result.certificates[0].certificate_arn == upload_result.certificate.arn
- - fetch_after_up_result.certificates[0].domain_name == original_cert.domain
- - (fetch_after_up_result.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', ''))
- ==
- (lookup( 'file', original_cert.cert ) | replace( ' ', '' ) | replace( '\n', '' ))
- - "'Name' in fetch_after_up_result.certificates[0].tags"
- - fetch_after_up_result.certificates[0].tags['Name'] == original_cert.name
- with_items: "{{ fetch_after_up.results }}"
- vars:
- fetch_after_up_result: "{{ item }}" # corresponding result from task registered as fetch_after_up
- upload_result: "{{ item.item }}" # corresponding result from task registered as upload
- original_cert: "{{ item.item.item }}"
-
- - name: fetch data about cert just uploaded, by name
- aws_acm_info:
- tags:
- Name: "{{ original_cert.name }}"
- register: fetch_after_up_name
- with_items: "{{ upload.results }}"
- vars:
- upload_result: "{{ item }}"
- original_cert: "{{ item.item }}"
-
- - name: check fetched data of cert we just uploaded
- assert:
- that:
- - fetch_after_up_name_result.certificates | length == 1
- - fetch_after_up_name_result.certificates[0].certificate_arn == upload_result.certificate.arn
- - fetch_after_up_name_result.certificates[0].domain_name == original_cert.domain
- - (fetch_after_up_name_result.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', ''))
- ==
- (lookup('file', original_cert.cert ) | replace( ' ', '' ) | replace( '\n', ''))
- - "'Name' in fetch_after_up_name_result.certificates[0].tags"
- - fetch_after_up_name_result.certificates[0].tags['Name'] == original_cert.name
- with_items: "{{ fetch_after_up_name.results }}"
- vars:
- fetch_after_up_name_result: "{{ item }}" # corresponding result from task registered as fetch_after_up_name
- upload_result: "{{ item.item }}" # corresponding result from task registered as upload
- original_cert: "{{ item.item.item }}"
-
-
- - name: fetch data about cert just uploaded, by domain name
- aws_acm_info:
- domain_name: "{{ original_cert.domain }}"
- register: fetch_after_up_domain
- with_items: "{{ upload.results }}"
- vars:
- original_cert: "{{ item.item }}"
-
- - name: compare fetched data of cert just uploaded to upload task
- assert:
- that:
- - fetch_after_up_domain_result.certificates | length == 1
- - fetch_after_up_domain_result.certificates[0].certificate_arn == upload_result.certificate.arn
- - fetch_after_up_domain_result.certificates[0].domain_name == original_cert.domain
- - (fetch_after_up_domain_result.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', ''))
- ==
- (lookup('file', original_cert.cert ) | replace( ' ', '' ) | replace( '\n', ''))
- - "'Name' in fetch_after_up_domain_result.certificates[0].tags"
- - fetch_after_up_domain_result.certificates[0].tags['Name'] == original_cert.name
- with_items: "{{ fetch_after_up_domain.results }}"
- vars:
- fetch_after_up_domain_result: "{{ item }}"
- upload_result: "{{ item.item }}"
- original_cert: "{{ item.item.item }}"
-
-
- # now upload that certificate
- - name: upload certificates again, check not changed
- aws_acm:
- name_tag: "{{ item.name }}"
- certificate: "{{ lookup('file', item.cert ) }}"
- private_key: "{{ lookup('file', item.priv_key ) }}"
- state: present
- register: upload2
- with_items: "{{ local_certs }}"
- failed_when: upload2.changed
-
-
- - name: update first cert with body of the second, first time
- aws_acm:
- state: present
- name_tag: "{{ local_certs[0].name }}"
- certificate: "{{ lookup('file', local_certs[1].cert ) }}"
- private_key: "{{ lookup('file', local_certs[1].priv_key ) }}"
- register: overwrite
-
- - name: check output of previous task (update first cert with body of the second, first time)
- assert:
- that:
- - overwrite.certificate.arn is defined
- - overwrite.certificate.arn | regex_search( 'arn:aws:acm:' ) is defined
- - overwrite.certificate.arn == upload.results[0].certificate.arn
- - overwrite.certificate.domain_name == local_certs[1].domain
- - overwrite.changed
-
- - name: check update was successful
- aws_acm_info:
- tags:
- Name: "{{ local_certs[0].name }}"
- register: fetch_after_overwrite
-
- - name: check output of update fetch
- assert:
- that:
- - fetch_after_overwrite.certificates | length == 1
- - fetch_after_overwrite.certificates[0].certificate_arn == fetch_after_up.results[0].certificates[0].certificate_arn
- - fetch_after_overwrite.certificates[0].domain_name == local_certs[1].domain
- - (fetch_after_overwrite.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == (lookup('file', local_certs[1].cert )| replace( ' ', '' ) | replace( '\n', ''))
- - "'Name' in fetch_after_overwrite.certificates[0].tags"
- - fetch_after_overwrite.certificates[0].tags['Name'] == local_certs[0].name
-
- - name: fetch other cert
- aws_acm_info:
- tags:
- Name: "{{ local_certs[1].name }}"
- register: check_after_overwrite
-
- - name: check other cert unaffected
- assert:
- that:
- - check_after_overwrite.certificates | length == 1
- - check_after_overwrite.certificates[0].certificate_arn == fetch_after_up.results[1].certificates[0].certificate_arn
- - check_after_overwrite.certificates[0].domain_name == local_certs[1].domain
- - (check_after_overwrite.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', '')) == (lookup('file', local_certs[1].cert ) | replace( ' ', '' ) | replace( '\n', ''))
- - "'Name' in check_after_overwrite.certificates[0].tags"
- - check_after_overwrite.certificates[0].tags['Name'] == local_certs[1].name
-
- - name: update first cert with body of the second again
- aws_acm:
- state: present
- name_tag: "{{ local_certs[0].name }}"
- certificate: "{{ lookup('file', local_certs[1].cert ) }}"
- private_key: "{{ lookup('file', local_certs[1].priv_key ) }}"
- register: overwrite2
-
- - name: check output of previous task (update first cert with body of the second again)
- assert:
- that:
- - overwrite2.certificate.arn is defined
- - overwrite2.certificate.arn | regex_search( 'arn:aws:acm:' ) is defined
- - overwrite2.certificate.arn == upload.results[0].certificate.arn
- - overwrite2.certificate.domain_name == local_certs[1].domain
- - not overwrite2.changed
-
- - name: delete certs 1 and 2
- aws_acm:
- state: absent
- domain_name: "{{ local_certs[1].domain }}"
- register: delete_both
-
- - name: test prev task
- assert:
- that:
- - delete_both.arns is defined
- - check_after_overwrite.certificates[0].certificate_arn in delete_both.arns
- - upload.results[0].certificate.arn in delete_both.arns
- - delete_both.changed
-
- - name: fetch info for certs 1 and 2
- aws_acm_info:
- tags:
- Name: "{{ local_certs[item].name }}"
- register: check_del_one
- with_items:
- - 0
- - 1
- # There is a chance that we're querying while the deletion is still in
- # progress; this could trigger a ResourceNotFoundException, so allow a
- # retry to cope with it.
- retries: 2
- until:
- - check_del_one is not failed
- - check_del_one.certificates | length == 0
- delay: 10
-
- - name: check certs 1 and 2 were already deleted
- with_items: "{{ check_del_one.results }}"
- assert:
- that: item.certificates | length == 0
-
- - name: check cert 3 not deleted
- aws_acm_info:
- tags:
- Name: "{{ local_certs[2].name }}"
- register: check_del_one_remain
- failed_when: check_del_one_remain.certificates | length != 1
-
- - name: delete cert 3
- aws_acm:
- state: absent
- domain_name: "{{ local_certs[2].domain }}"
- register: delete_third
-
- - name: check cert 3 deletion went as expected
- assert:
- that:
- - delete_third.arns is defined
- - delete_third.arns | length == 1
- - delete_third.arns[0] == upload.results[2].certificate.arn
- - delete_third.changed
-
- - name: check cert 3 was deleted
- aws_acm_info:
- tags:
- Name: "{{ local_certs[2].name }}"
- register: check_del_three
- failed_when: check_del_three.certificates | length != 0
-
- - name: delete cert 3 again
- aws_acm:
- state: absent
- domain_name: "{{ local_certs[2].domain }}"
- register: delete_third
-
- - name: check deletion of cert 3 not changed, because already deleted
- assert:
- that:
- - delete_third.arns is defined
- - delete_third.arns | length == 0
- - not delete_third.changed
-
- - name: check directory was made
- assert:
- that:
- - remote_tmp_dir is defined
-
- - name: Generate private key for cert to be chained
- openssl_privatekey:
- path: "{{ chained_cert.priv_key }}"
- type: RSA
- size: 2048 # ACM doesn't work properly with 4096
-
- - name: Generate two OpenSSL Certificate Signing Requests for cert to be chained
- openssl_csr:
- path: "{{ item.csr }}"
- privatekey_path: "{{ chained_cert.priv_key }}"
- common_name: "{{ chained_cert.domain }}"
- with_items: "{{ chained_cert.chains }}"
-
-
- - name: Sign new certs with cert 0 and 1
- openssl_certificate:
- provider: ownca
- path: "{{ item.cert }}"
- csr_path: "{{ item.csr }}"
- ownca_path: "{{ local_certs[item.ca].cert }}"
- ownca_privatekey_path: "{{ local_certs[item.ca].priv_key }}"
- signature_algorithms:
- - 'sha256WithRSAEncryption'
- # - 'sha512WithRSAEncryption'
- with_items: "{{ chained_cert.chains }}"
-
- - name: check files exist (for next task)
- file:
- path: "{{ item }}"
- state: file
- with_items:
- - "{{ local_certs[chained_cert.chains[0].ca].cert }}"
- - "{{ local_certs[chained_cert.chains[1].ca].cert }}"
- - "{{ chained_cert.chains[0].cert }}"
- - "{{ chained_cert.chains[1].cert }}"
-
- - name: Find chains
- certificate_complete_chain:
- input_chain: "{{ lookup('file', item.cert ) }}"
- root_certificates:
- - "{{ local_certs[item.ca].cert }}"
- with_items: "{{ chained_cert.chains }}"
- register: chains
-
- - name: upload chained cert, first chain, first time
- aws_acm:
- name_tag: "{{ chained_cert.name }}"
- certificate: "{{ lookup('file', chained_cert.chains[0].cert ) }}"
- certificate_chain: "{{ chains.results[0].complete_chain | join('\n') }}"
- private_key: "{{ lookup('file', chained_cert.priv_key ) }}"
- state: present
- register: upload_chain
- failed_when: not upload_chain.changed
-
- - name: fetch chain of cert we just uploaded
- aws_acm_info:
- tags:
- Name: "{{ chained_cert.name }}"
- register: check_chain
-
- - name: check chain of cert we just uploaded
- assert:
- that:
- - (check_chain.certificates[0].certificate_chain | replace( ' ', '' ) | replace( '\n', ''))
- ==
- ( chains.results[0].complete_chain | join( '\n' ) | replace( ' ', '' ) | replace( '\n', '') )
- - (check_chain.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', ''))
- ==
- ( lookup('file', chained_cert.chains[0].cert ) | replace( ' ', '' ) | replace( '\n', '') )
-
- - name: upload chained cert again, check not changed
- aws_acm:
- name_tag: "{{ chained_cert.name }}"
- certificate: "{{ lookup('file', chained_cert.chains[0].cert ) }}"
- certificate_chain: "{{ chains.results[0].complete_chain | join('\n') }}"
- private_key: "{{ lookup('file', chained_cert.priv_key ) }}"
- state: present
- register: upload_chain_2
-
- - name: check previous task not changed
- assert:
- that:
- - upload_chain_2.certificate.arn == upload_chain.certificate.arn
- - not upload_chain_2.changed
-
- - name: upload chained cert, different chain
- aws_acm:
- name_tag: "{{ chained_cert.name }}"
- certificate: "{{ lookup('file', chained_cert.chains[1].cert ) }}"
- certificate_chain: "{{ chains.results[1].complete_chain | join('\n') }}"
- private_key: "{{ lookup('file', chained_cert.priv_key ) }}"
- state: present
- register: upload_chain_3
-
- - name: check uploading with different chain is changed
- assert:
- that:
- - upload_chain_3.changed
- - upload_chain_3.certificate.arn == upload_chain.certificate.arn
-
- - name: fetch info about chain of cert we just updated
- aws_acm_info:
- tags:
- Name: "{{ chained_cert.name }}"
- register: check_chain_2
-
- - name: check chain of cert we just uploaded
- assert:
- that:
- - (check_chain_2.certificates[0].certificate_chain | replace( ' ', '' ) | replace( '\n', ''))
- ==
- ( chains.results[1].complete_chain | join( '\n' ) | replace( ' ', '' ) | replace( '\n', '') )
- - (check_chain_2.certificates[0].certificate | replace( ' ', '' ) | replace( '\n', ''))
- ==
- ( lookup('file', chained_cert.chains[1].cert ) | replace( ' ', '' ) | replace( '\n', '') )
-
- - name: delete chained cert
- aws_acm:
- name_tag: "{{ chained_cert.name }}"
- state: absent
- register: delete_chain_3
-
- - name: check deletion of chained cert 3 is changed
- assert:
- that:
- - delete_chain_3.changed
- - upload_chain.certificate.arn in delete_chain_3.arns
-
-
- always:
-
- - name: delete first bunch of certificates
- aws_acm:
- name_tag: "{{ item.name }}"
- state: absent
- with_items: "{{ local_certs }}"
- ignore_errors: yes
-
- - name: delete chained cert
- aws_acm:
- state: absent
- name_tag: "{{ chained_cert.name }}"
- ignore_errors: yes
-
-
- - name: delete local directory with test artefacts
- file:
- path: "{{ remote_tmp_dir }}"
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/aws_acm/tasks/main.yml b/test/integration/targets/aws_acm/tasks/main.yml
deleted file mode 100644
index 7b85a29b6b..0000000000
--- a/test/integration/targets/aws_acm/tasks/main.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-- name: AWS ACM integration test virtualenv wrapper
- block:
- - set_fact:
- virtualenv: "{{ remote_tmp_dir }}/virtualenv"
- virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
-
- - set_fact:
- virtualenv_interpreter: "{{ virtualenv }}/bin/python"
-
- # The CI runs many of these tests in parallel
- # Use this random ID to differentiate which resources
- # are from which test
- - set_fact:
- aws_acm_test_uuid: "{{ (10**9) | random }}"
-
- - pip:
- name: virtualenv
-
- - pip:
- name:
- - 'botocore<1.13.0,>=1.12.211'
- - boto3
- - coverage
- - jinja2
- - pyyaml
- - 'pyopenssl>=0.15'
- - 'cryptography>=1.6'
- virtualenv: "{{ virtualenv }}"
- virtualenv_command: "{{ virtualenv_command }}"
- virtualenv_site_packages: no
-
- - include_tasks: full_acm_test.yml
- vars:
- ansible_python_interpreter: "{{ virtualenv_interpreter }}"
-
- always:
- - file:
- path: "{{ virtualenv }}"
- state: absent
diff --git a/test/integration/targets/aws_api_gateway/aliases b/test/integration/targets/aws_api_gateway/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/aws_api_gateway/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/aws_api_gateway/meta/main.yml b/test/integration/targets/aws_api_gateway/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/aws_api_gateway/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/aws_api_gateway/tasks/main.yml b/test/integration/targets/aws_api_gateway/tasks/main.yml
deleted file mode 100644
index 5c6047c33f..0000000000
--- a/test/integration/targets/aws_api_gateway/tasks/main.yml
+++ /dev/null
@@ -1,207 +0,0 @@
-- block:
-
- # ====================== testing failure cases: ==================================
-
- - name: test with no parameters
- aws_api_gateway:
- register: result
- ignore_errors: true
-
- - name: assert failure when called with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("The aws_api_gateway module requires a region")'
-
- - name: test with minimal parameters but no region
- aws_api_gateway:
- api_id: 'fake-api-doesnt-exist'
- register: result
- ignore_errors: true
-
- - name: assert failure when called with minimal parameters but no region
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("The aws_api_gateway module requires a region")'
-
- - name: test for disallowing multiple swagger sources
- aws_api_gateway:
- api_id: 'fake-api-doesnt-exist'
- region: '{{ec2_region}}'
- swagger_file: foo.yml
- swagger_text: "this is not really an API"
- register: result
- ignore_errors: true
-
- - name: assert failure when called with multiple swagger sources
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("parameters are mutually exclusive")'
-
-
- # ====================== regular testing: ===================================
-
- - name: build API file
- template:
- src: minimal-swagger-api.yml.j2
- dest: "{{output_dir}}/minimal-swagger-api.yml"
-
- - name: deploy new API
- aws_api_gateway:
- api_file: "{{output_dir}}/minimal-swagger-api.yml"
- stage: "minimal"
- endpoint_type: 'REGIONAL'
- state: present
- region: '{{ec2_region}}'
- aws_access_key: '{{ec2_access_key}}'
- aws_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- register: create_result
-
- - name: assert deploy new API worked
- assert:
- that:
- - 'create_result.changed == True'
- - 'create_result.failed == False'
- - 'create_result.deploy_response.description == "Automatic deployment by Ansible."'
- - 'create_result.configure_response.id == create_result.api_id'
- - '"apigateway:CreateRestApi" in create_result.resource_actions'
- - 'create_result.configure_response.endpoint_configuration.types.0 == "REGIONAL"'
-
- - name: check if API endpoint works
- uri: url="https://{{create_result.api_id}}.execute-api.{{ec2_region}}.amazonaws.com/minimal"
- register: uri_result
-
- - name: assert API endpoint returns 200
- assert:
- that:
- - 'uri_result.status == 200'
-
- - name: check if nonexistent endpoint causes error
- uri: url="https://{{create_result.api_id}}.execute-api.{{ec2_region}}.amazonaws.com/nominal"
- register: bad_uri_result
- ignore_errors: true
-
- - name: assert nonexistent endpoint fails
- assert:
- that:
- - bad_uri_result is failed
-
- - name: Update API to test params effect
- aws_api_gateway:
- api_id: '{{create_result.api_id}}'
- api_file: "{{output_dir}}/minimal-swagger-api.yml"
- cache_enabled: true
- cache_size: '1.6'
- tracing_enabled: true
- state: present
- region: '{{ec2_region}}'
- aws_access_key: '{{ec2_access_key}}'
- aws_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- register: update_result
-
- - name: assert update result
- assert:
- that:
- - 'update_result.changed == True'
- - 'update_result.failed == False'
- - '"apigateway:PutRestApi" in update_result.resource_actions'
-
- # ==== additional create/delete tests ====
-
- - name: deploy first API
- aws_api_gateway:
- api_file: "{{output_dir}}/minimal-swagger-api.yml"
- stage: "minimal"
- cache_enabled: false
- state: present
- region: '{{ec2_region}}'
- aws_access_key: '{{ec2_access_key}}'
- aws_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- register: create_result_1
-
- - name: deploy second API rapidly after first
- aws_api_gateway:
- api_file: "{{output_dir}}/minimal-swagger-api.yml"
- stage: "minimal"
- state: present
- region: '{{ec2_region}}'
- aws_access_key: '{{ec2_access_key}}'
- aws_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- register: create_result_2
-
- - name: assert both APIs deployed successfully
- assert:
- that:
- - 'create_result_1.changed == True'
- - 'create_result_2.changed == True'
- - '"api_id" in create_result_1'
- - '"api_id" in create_result_1'
- - 'create_result_1.configure_response.endpoint_configuration.types.0 == "EDGE"'
-
- - name: destroy first API
- aws_api_gateway:
- state: absent
- api_id: '{{create_result_1.api_id}}'
- region: '{{ec2_region}}'
- aws_access_key: '{{ec2_access_key}}'
- aws_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- register: destroy_result_1
-
- - name: destroy second API rapidly after first
- aws_api_gateway:
- state: absent
- api_id: '{{create_result_2.api_id}}'
- region: '{{ec2_region}}'
- aws_access_key: '{{ec2_access_key}}'
- aws_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- register: destroy_result_2
-
- - name: assert both APIs destroyed successfully
- assert:
- that:
- - 'destroy_result_1.changed == True'
- - 'destroy_result_2.changed == True'
- - '"apigateway:DeleteRestApi" in destroy_result_1.resource_actions'
- - '"apigateway:DeleteRestApi" in destroy_result_2.resource_actions'
-
- # ================= end testing ====================================
-
- always:
-
- - name: Ensure cleanup of API deploy
- aws_api_gateway:
- state: absent
- api_id: '{{create_result.api_id}}'
- ec2_region: '{{ec2_region}}'
- aws_access_key: '{{ec2_access_key}}'
- aws_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- ignore_errors: true
-
- - name: Ensure cleanup of API deploy 1
- aws_api_gateway:
- state: absent
- api_id: '{{create_result_1.api_id}}'
- ec2_region: '{{ec2_region}}'
- aws_access_key: '{{ec2_access_key}}'
- aws_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- ignore_errors: true
-
- - name: Ensure cleanup of API deploy 2
- aws_api_gateway:
- state: absent
- api_id: '{{create_result_2.api_id}}'
- ec2_region: '{{ec2_region}}'
- aws_access_key: '{{ec2_access_key}}'
- aws_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- ignore_errors: true
diff --git a/test/integration/targets/aws_api_gateway/templates/minimal-swagger-api.yml.j2 b/test/integration/targets/aws_api_gateway/templates/minimal-swagger-api.yml.j2
deleted file mode 100644
index 8c5c058106..0000000000
--- a/test/integration/targets/aws_api_gateway/templates/minimal-swagger-api.yml.j2
+++ /dev/null
@@ -1,33 +0,0 @@
----
-swagger: "2.0"
-info:
- version: "2017-05-11T12:14:59Z"
- title: "{{resource_prefix}}Empty_API"
-host: "fakeexample.execute-api.us-east-1.amazonaws.com"
-basePath: "/minimal"
-schemes:
-- "https"
-paths:
- /:
- get:
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 200:
- description: "200 response"
- schema:
- $ref: "#/definitions/Empty"
- x-amazon-apigateway-integration:
- responses:
- default:
- statusCode: "200"
- requestTemplates:
- application/json: "{\"statusCode\": 200}"
- passthroughBehavior: "when_no_match"
- type: "mock"
-definitions:
- Empty:
- type: "object"
- title: "Empty Schema"
diff --git a/test/integration/targets/aws_codebuild/aliases b/test/integration/targets/aws_codebuild/aliases
deleted file mode 100644
index a112c3d1bb..0000000000
--- a/test/integration/targets/aws_codebuild/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group1
diff --git a/test/integration/targets/aws_codebuild/defaults/main.yml b/test/integration/targets/aws_codebuild/defaults/main.yml
deleted file mode 100644
index a36eb3de72..0000000000
--- a/test/integration/targets/aws_codebuild/defaults/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# defaults file for aws_codebuild
-
-# IAM role names have to be less than 64 characters
-# The 8 digit identifier at the end of resource_prefix helps determine during
-# which test something was created and allows tests to be run in parallel
-# Shippable resource_prefixes are in the format shippable-123456-123, so in those cases
-# we need both sets of digits to keep the resource name unique
-unique_id: "{{ resource_prefix | regex_search('(\\d+-?)(\\d+)$') }}"
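-# For example, a resource_prefix of "shippable-123456-123" would yield a
-# unique_id of "123456-123" from the regex above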
-iam_role_name: "ansible-test-sts-{{ unique_id }}-codebuild-service-role"
diff --git a/test/integration/targets/aws_codebuild/files/codebuild_iam_trust_policy.json b/test/integration/targets/aws_codebuild/files/codebuild_iam_trust_policy.json
deleted file mode 100644
index 3af7c64120..0000000000
--- a/test/integration/targets/aws_codebuild/files/codebuild_iam_trust_policy.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "Service": "codebuild.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/aws_codebuild/tasks/main.yml b/test/integration/targets/aws_codebuild/tasks/main.yml
deleted file mode 100644
index 953aaeaad8..0000000000
--- a/test/integration/targets/aws_codebuild/tasks/main.yml
+++ /dev/null
@@ -1,119 +0,0 @@
----
-# tasks file for aws_codebuild
-
-- name: Run aws_codebuild integration tests.
-
- block:
-
- # ==================== preparations ========================================
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create IAM role needed for CodeBuild
- iam_role:
- name: "{{ iam_role_name }}"
- description: Role with permissions for CodeBuild actions.
- assume_role_policy_document: "{{ lookup('file', 'codebuild_iam_trust_policy.json') }}"
- state: present
- <<: *aws_connection_info
- register: codebuild_iam_role
-
- - name: Set variable with aws account id
- set_fact:
- aws_account_id: "{{ codebuild_iam_role.iam_role.arn.split(':')[4] }}"
-
- # ================== integration test ==========================================
-
- - name: create CodeBuild project
- aws_codebuild:
- name: "{{ resource_prefix }}-test-ansible-codebuild"
- description: Build project for testing the Ansible aws_codebuild module
- service_role: "{{ codebuild_iam_role.iam_role.arn }}"
- timeout_in_minutes: 30
- source:
- type: CODEPIPELINE
- buildspec: ''
- artifacts:
- namespace_type: NONE
- packaging: NONE
- type: CODEPIPELINE
- name: test
- environment:
- compute_type: BUILD_GENERAL1_SMALL
- privileged_mode: true
- image: 'aws/codebuild/docker:17.09.0'
- type: LINUX_CONTAINER
- environment_variables:
- - { name: 'FOO_ENV', value: 'other' }
- tags:
- - { key: 'purpose', value: 'ansible-test' }
- state: present
- <<: *aws_connection_info
- register: output
- retries: 10
- delay: 5
- until: output is success
-
- - assert:
- that:
- - "output.project.description == 'Build project for testing the Ansible aws_codebuild module'"
-
- - name: idempotence check rerunning same Codebuild task
- aws_codebuild:
- name: "{{ resource_prefix }}-test-ansible-codebuild"
- description: Build project for testing the Ansible aws_codebuild module
- service_role: "{{ codebuild_iam_role.iam_role.arn }}"
- timeout_in_minutes: 30
- source:
- type: CODEPIPELINE
- buildspec: ''
- artifacts:
- namespace_type: NONE
- packaging: NONE
- type: CODEPIPELINE
- name: test
- encryption_key: 'arn:aws:kms:{{ aws_region }}:{{ aws_account_id }}:alias/aws/s3'
- environment:
- compute_type: BUILD_GENERAL1_SMALL
- privileged_mode: true
- image: 'aws/codebuild/docker:17.09.0'
- type: LINUX_CONTAINER
- environment_variables:
- - { name: 'FOO_ENV', value: 'other' }
- tags:
- - { key: 'purpose', value: 'ansible-test' }
- state: present
- <<: *aws_connection_info
- register: rerun_test_output
-
- - assert:
- that:
- - "rerun_test_output.project.created == output.project.created"
-
- - name: delete CodeBuild project
- aws_codebuild:
- name: "{{ output.project.name }}"
- source:
- type: CODEPIPELINE
- buildspec: ''
- artifacts: {}
- state: absent
- <<: *aws_connection_info
- async: 300
-
- # ============================== cleanup ======================================
-
- always:
-
- - name: cleanup IAM role created for CodeBuild test
- iam_role:
- name: "{{ iam_role_name }}"
- state: absent
- <<: *aws_connection_info
diff --git a/test/integration/targets/aws_codebuild/vars/main.yml b/test/integration/targets/aws_codebuild/vars/main.yml
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/aws_codebuild/vars/main.yml
+++ /dev/null
diff --git a/test/integration/targets/aws_codecommit/aliases b/test/integration/targets/aws_codecommit/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/aws_codecommit/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/aws_codecommit/tasks/main.yml b/test/integration/targets/aws_codecommit/tasks/main.yml
deleted file mode 100644
index 29b9f6b27e..0000000000
--- a/test/integration/targets/aws_codecommit/tasks/main.yml
+++ /dev/null
@@ -1,105 +0,0 @@
----
-- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- # ============================================================
- - name: Create a repository (CHECK MODE)
- aws_codecommit:
- name: "{{ resource_prefix }}_repo"
- description: original comment
- state: present
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: Create a repository
- aws_codecommit:
- name: "{{ resource_prefix }}_repo"
- description: original comment
- state: present
- register: output
- - assert:
- that:
- - output is changed
- - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
- - output.repository_metadata.repository_description == 'original comment'
-
- - name: No-op update to repository
- aws_codecommit:
- name: "{{ resource_prefix }}_repo"
- description: original comment
- state: present
- register: output
- - assert:
- that:
- - output is not changed
- - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
- - output.repository_metadata.repository_description == 'original comment'
-
- - name: Update repository description (CHECK MODE)
- aws_codecommit:
- name: "{{ resource_prefix }}_repo"
- description: new comment
- state: present
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
- - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
- - output.repository_metadata.repository_description == 'original comment'
-
- - name: Update repository description
- aws_codecommit:
- name: "{{ resource_prefix }}_repo"
- description: new comment
- state: present
- register: output
- - assert:
- that:
- - output is changed
- - output.repository_metadata.repository_name == '{{ resource_prefix }}_repo'
- - output.repository_metadata.repository_description == 'new comment'
-
- # ============================================================
- - name: Delete a repository (CHECK MODE)
- aws_codecommit:
- name: "{{ resource_prefix }}_repo"
- state: absent
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: Delete a repository
- aws_codecommit:
- name: "{{ resource_prefix }}_repo"
- state: absent
- register: output
- - assert:
- that:
- - output is changed
-
- - name: Delete a non-existent repository
- aws_codecommit:
- name: "{{ resource_prefix }}_repo"
- state: absent
- register: output
- - assert:
- that:
- - output is not changed
-
- always:
- ###### TEARDOWN STARTS HERE ######
- - name: Delete a repository
- aws_codecommit:
- name: "{{ resource_prefix }}_repo"
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/aws_codepipeline/aliases b/test/integration/targets/aws_codepipeline/aliases
deleted file mode 100644
index a112c3d1bb..0000000000
--- a/test/integration/targets/aws_codepipeline/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group1
diff --git a/test/integration/targets/aws_codepipeline/defaults/main.yml b/test/integration/targets/aws_codepipeline/defaults/main.yml
deleted file mode 100644
index 5f735ba6df..0000000000
--- a/test/integration/targets/aws_codepipeline/defaults/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# defaults file for aws_codepipeline
-
-codepipeline_name: "{{ resource_prefix }}-test-codepipeline"
-
-# IAM role names have to be less than 64 characters
-# The 8 digit identifier at the end of resource_prefix helps determine during
-# which test something was created and allows tests to be run in parallel
-# Shippable resource_prefixes are in the format shippable-123456-123, so in those cases
-# we need both sets of digits to keep the resource name unique
-unique_id: "{{ resource_prefix | regex_search('(\\d+-?)(\\d+)$') }}"
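-# e.g. a resource_prefix of "shippable-123456-123" would give unique_id "123456-123"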
-codepipeline_service_role_name: "ansible-test-sts-{{ unique_id }}-codepipeline-role"
diff --git a/test/integration/targets/aws_codepipeline/files/codepipeline_iam_trust_policy.json b/test/integration/targets/aws_codepipeline/files/codepipeline_iam_trust_policy.json
deleted file mode 100644
index 9be3f72b62..0000000000
--- a/test/integration/targets/aws_codepipeline/files/codepipeline_iam_trust_policy.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "Service": "codepipeline.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/aws_codepipeline/tasks/main.yml b/test/integration/targets/aws_codepipeline/tasks/main.yml
deleted file mode 100644
index f5fe7b4166..0000000000
--- a/test/integration/targets/aws_codepipeline/tasks/main.yml
+++ /dev/null
@@ -1,156 +0,0 @@
----
-# tasks file for aws_codepipeline
-
-- name: Run aws_codepipeline module integration tests
-
- block:
-
- # ==================== preparations ========================================
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create IAM role needed for CodePipeline test
- iam_role:
- name: "{{ codepipeline_service_role_name }}"
- description: Role with permissions for CodePipeline actions.
- assume_role_policy_document: "{{ lookup('file', 'codepipeline_iam_trust_policy.json') }}"
- state: present
- <<: *aws_connection_info
- register: codepipeline_iam_role
-
- # ================== integration test ==========================================
-
- - name: create CodePipeline
- aws_codepipeline:
- name: "{{ codepipeline_name }}"
- role_arn: "{{ codepipeline_iam_role.iam_role.arn }}"
- artifact_store:
- type: S3
- location: foo
- stages:
- - name: step_1
- actions:
- - name: action
- actionTypeId:
- category: Source
- owner: AWS
- provider: S3
- version: '1'
- configuration:
- S3Bucket: foo
- S3ObjectKey: bar
- outputArtifacts:
- - { name: step_one_output }
- - name: step_2
- actions:
- - name: action
- actionTypeId:
- category: Build
- owner: AWS
- provider: CodeBuild
- version: '1'
- inputArtifacts:
- - { name: step_one_output }
- outputArtifacts:
- - { name: step_two_output }
- configuration:
- ProjectName: foo
- state: present
- <<: *aws_connection_info
- register: output
- retries: 10
- delay: 5
- until: output is success
-
- - assert:
- that:
- - output.changed == True
- - output.pipeline.name == "{{ codepipeline_name }}"
- - output.pipeline.stages|length > 1
-
- - name: idempotence check rerunning same CodePipeline task
- aws_codepipeline:
- name: "{{ codepipeline_name }}"
- role_arn: "{{ codepipeline_iam_role.iam_role.arn }}"
- artifact_store:
- type: S3
- location: foo
- stages:
- - name: step_1
- actions:
- - name: action
- actionTypeId:
- category: Source
- owner: AWS
- provider: S3
- version: '1'
- configuration:
- S3Bucket: foo
- S3ObjectKey: bar
- outputArtifacts:
- - { name: step_one_output }
- - name: step_2
- actions:
- - name: action
- actionTypeId:
- category: Build
- owner: AWS
- provider: CodeBuild
- version: '1'
- inputArtifacts:
- - { name: step_one_output }
- outputArtifacts:
- - { name: step_two_output }
- configuration:
- ProjectName: foo
- state: present
- <<: *aws_connection_info
- register: rerun_test_output
-
- - assert:
- that:
- - rerun_test_output.changed == False
- - rerun_test_output.pipeline == output.pipeline
-
- - name: Test deletion of CodePipeline
- aws_codepipeline:
- name: "{{ codepipeline_name }}"
- role_arn: ''
- artifact_store: {}
- stages: []
- state: absent
- <<: *aws_connection_info
- register: absent_test_output
-
- - assert:
- that:
- - absent_test_output.changed == True
- - absent_test_output.pipeline is undefined
-
- # ==================== cleanup =======================
-
- always:
-
- - name: Cleanup - delete test CodePipeline
- aws_codepipeline:
- name: "{{ codepipeline_name }}"
- role_arn: ''
- artifact_store: {}
- stages: []
- state: absent
- <<: *aws_connection_info
- ignore_errors: true
-
- - name: Cleanup - delete IAM role needed for CodePipeline test
- iam_role:
- name: "{{ codepipeline_name }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: true
diff --git a/test/integration/targets/aws_config/aliases b/test/integration/targets/aws_config/aliases
deleted file mode 100644
index c598f3e8b4..0000000000
--- a/test/integration/targets/aws_config/aliases
+++ /dev/null
@@ -1,8 +0,0 @@
-cloud/aws
-disabled
-shippable/aws/group2
-aws_config_aggregation_authorization
-aws_config_aggregator
-aws_config_delivery_channel
-aws_config_recorder
-aws_config_rule
diff --git a/test/integration/targets/aws_config/defaults/main.yaml b/test/integration/targets/aws_config/defaults/main.yaml
deleted file mode 100644
index da7b735dfd..0000000000
--- a/test/integration/targets/aws_config/defaults/main.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-config_s3_bucket: '{{ resource_prefix }}-config-records'
-config_sns_name: '{{ resource_prefix }}-delivery-channel-test-topic'
-config_role_name: 'config-recorder-test-{{ resource_prefix }}'
diff --git a/test/integration/targets/aws_config/files/config-trust-policy.json b/test/integration/targets/aws_config/files/config-trust-policy.json
deleted file mode 100644
index 532b3ed5a4..0000000000
--- a/test/integration/targets/aws_config/files/config-trust-policy.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "",
- "Effect": "Allow",
- "Principal": {
- "Service": "config.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/aws_config/tasks/main.yaml b/test/integration/targets/aws_config/tasks/main.yaml
deleted file mode 100644
index 34e3449fc7..0000000000
--- a/test/integration/targets/aws_config/tasks/main.yaml
+++ /dev/null
@@ -1,405 +0,0 @@
----
-- block:
-
- # ============================================================
- # Prerequisites
- # ============================================================
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: true
-
- - name: ensure IAM role exists
- iam_role:
- <<: *aws_connection_info
- name: '{{ config_role_name }}'
- assume_role_policy_document: "{{ lookup('file','config-trust-policy.json') }}"
- state: present
- create_instance_profile: no
- managed_policy:
- - 'arn:aws:iam::aws:policy/service-role/AWSConfigRole'
- register: config_iam_role
-
- - name: ensure SNS topic exists
- sns_topic:
- <<: *aws_connection_info
- name: '{{ config_sns_name }}'
- state: present
- subscriptions:
- - endpoint: "rando_email_address@rando.com"
- protocol: "email"
- register: config_sns_topic
-
- - name: ensure S3 bucket exists
- s3_bucket:
- <<: *aws_connection_info
- name: "{{ config_s3_bucket }}"
-
- - name: ensure S3 access for IAM role
- iam_policy:
- <<: *aws_connection_info
- iam_type: role
- iam_name: '{{ config_role_name }}'
- policy_name: AwsConfigRecorderTestRoleS3Policy
- state: present
- policy_json: "{{ lookup( 'template', 'config-s3-policy.json.j2') }}"
-
- # ============================================================
- # Module requirement testing
- # ============================================================
- - name: test rule with no source parameter
- aws_config_rule:
- <<: *aws_connection_info
- name: random_name
- state: present
- register: output
- ignore_errors: true
-
- - name: assert failure when called with no source parameter
- assert:
- that:
- - output.failed
- - 'output.msg.startswith("missing required arguments:")'
-
- - name: test resource_type delivery_channel with no s3_bucket parameter
- aws_config_delivery_channel:
- <<: *aws_connection_info
- name: random_name
- state: present
- register: output
- ignore_errors: true
-
- - name: assert failure when called with no s3_bucket parameter
- assert:
- that:
- - output.failed
- - 'output.msg.startswith("missing required arguments:")'
-
- - name: test resource_type configuration_recorder with no role_arn parameter
- aws_config_recorder:
- <<: *aws_connection_info
- name: random_name
- state: present
- register: output
- ignore_errors: true
-
- - name: assert failure when called with no role_arn parameter
- assert:
- that:
- - output.failed
- - 'output.msg.startswith("state is present but all of the following are missing")'
-
- - name: test resource_type configuration_recorder with no recording_group parameter
- aws_config_recorder:
- <<: *aws_connection_info
- name: random_name
- state: present
- role_arn: 'arn:aws:iam::123456789012:role/AwsConfigRecorder'
- register: output
- ignore_errors: true
-
- - name: assert failure when called with no recording_group parameter
- assert:
- that:
- - output.failed
- - 'output.msg.startswith("state is present but all of the following are missing")'
-
- - name: test resource_type aggregation_authorization with no authorized_account_id parameter
- aws_config_aggregation_authorization:
- state: present
- <<: *aws_connection_info
- register: output
- ignore_errors: true
-
- - name: assert failure when called with no authorized_account_id parameter
- assert:
- that:
- - output.failed
- - 'output.msg.startswith("missing required arguments:")'
-
- - name: test resource_type aggregation_authorization with no authorized_aws_region parameter
- aws_config_aggregation_authorization:
- <<: *aws_connection_info
- state: present
- authorized_account_id: '123456789012'
- register: output
- ignore_errors: true
-
- - name: assert failure when called with no authorized_aws_region parameter
- assert:
- that:
- - output.failed
- - 'output.msg.startswith("missing required arguments:")'
-
- - name: test resource_type configuration_aggregator with no account_sources parameter
- aws_config_aggregator:
- <<: *aws_connection_info
- name: random_name
- state: present
- register: output
- ignore_errors: true
-
- - name: assert failure when called with no account_sources parameter
- assert:
- that:
- - output.failed
- - 'output.msg.startswith("missing required arguments: account_sources")'
-
- - name: test resource_type configuration_aggregator with no organization_source parameter
- aws_config_aggregator:
- <<: *aws_connection_info
- name: random_name
- state: present
- account_sources: []
- register: output
- ignore_errors: true
-
- - name: assert failure when called with no organization_source parameter
- assert:
- that:
- - output.failed
- - 'output.msg.startswith("missing required arguments: organization_source")'
-
- # ============================================================
- # Creation testing
- # ============================================================
- - name: Create Configuration Recorder for AWS Config
- aws_config_recorder:
- <<: *aws_connection_info
- name: test_configuration_recorder
- state: present
- role_arn: "{{ config_iam_role.arn }}"
- recording_group:
- all_supported: true
- include_global_types: true
- register: output
-
- - assert:
- that:
- - output.changed
-
- - name: Create Delivery Channel for AWS Config
- aws_config_delivery_channel:
- <<: *aws_connection_info
- name: test_delivery_channel
- state: present
- s3_bucket: "{{ config_s3_bucket }}"
- s3_prefix: "foo/bar"
- sns_topic_arn: "{{ config_sns_topic.sns_arn }}"
- delivery_frequency: 'Twelve_Hours'
- register: output
-
- - assert:
- that:
- - output.changed
-
- - name: Create Config Rule for AWS Config
- aws_config_rule:
- <<: *aws_connection_info
- name: test_config_rule
- state: present
- description: 'This AWS Config rule checks for public write access on S3 buckets'
- scope:
- compliance_types:
- - 'AWS::S3::Bucket'
- source:
- owner: AWS
- identifier: 'S3_BUCKET_PUBLIC_WRITE_PROHIBITED'
- register: output
-
- - assert:
- that:
- - output.changed
-
- # ============================================================
- # Update testing
- # ============================================================
- - name: Update Configuration Recorder
- aws_config_recorder:
- <<: *aws_connection_info
- name: test_configuration_recorder
- state: present
- role_arn: "{{ config_iam_role.arn }}"
- recording_group:
- all_supported: false
- include_global_types: false
- resource_types:
- - 'AWS::S3::Bucket'
- register: output
-
- - assert:
- that:
- - output.changed
-
- - name: Update Delivery Channel
- aws_config_delivery_channel:
- <<: *aws_connection_info
- name: test_delivery_channel
- state: present
- s3_bucket: "{{ config_s3_bucket }}"
- sns_topic_arn: "{{ config_sns_topic.sns_arn }}"
- delivery_frequency: 'TwentyFour_Hours'
- register: output
-
- - assert:
- that:
- - output.changed
-
- - name: Update Config Rule
- aws_config_rule:
- <<: *aws_connection_info
- name: test_config_rule
- state: present
- description: 'This AWS Config rule checks for public write access on S3 buckets'
- scope:
- compliance_types:
- - 'AWS::S3::Bucket'
- source:
- owner: AWS
- identifier: 'S3_BUCKET_PUBLIC_READ_PROHIBITED'
- register: output
-
- - assert:
- that:
- - output.changed
-
- # ============================================================
- # Read testing
- # ============================================================
- - name: Don't update Configuration Recorder
- aws_config_recorder:
- <<: *aws_connection_info
- name: test_configuration_recorder
- state: present
- role_arn: "{{ config_iam_role.arn }}"
- recording_group:
- all_supported: false
- include_global_types: false
- resource_types:
- - 'AWS::S3::Bucket'
- register: output
-
- - assert:
- that:
- - not output.changed
-
- - name: Don't update Delivery Channel
- aws_config_delivery_channel:
- <<: *aws_connection_info
- name: test_delivery_channel
- state: present
- s3_bucket: "{{ config_s3_bucket }}"
- sns_topic_arn: "{{ config_sns_topic.sns_arn }}"
- delivery_frequency: 'TwentyFour_Hours'
- register: output
-
- - assert:
- that:
- - not output.changed
-
- - name: Don't update Config Rule
- aws_config_rule:
- <<: *aws_connection_info
- name: test_config_rule
- state: present
- description: 'This AWS Config rule checks for public write access on S3 buckets'
- scope:
- compliance_types:
- - 'AWS::S3::Bucket'
- source:
- owner: AWS
- identifier: 'S3_BUCKET_PUBLIC_READ_PROHIBITED'
- register: output
-
- - assert:
- that:
- - not output.changed
-
- always:
- # ============================================================
- # Destroy testing
- # ============================================================
- - name: Destroy Configuration Recorder
- aws_config_recorder:
- <<: *aws_connection_info
- name: test_configuration_recorder
- state: absent
- register: output
- ignore_errors: yes
-
-# - assert:
-# that:
-# - output.changed
-
- - name: Destroy Delivery Channel
- aws_config_delivery_channel:
- <<: *aws_connection_info
- name: test_delivery_channel
- state: absent
- s3_bucket: "{{ config_s3_bucket }}"
- sns_topic_arn: "{{ config_sns_topic.sns_arn }}"
- delivery_frequency: 'TwentyFour_Hours'
- register: output
- ignore_errors: yes
-
-# - assert:
-# that:
-# - output.changed
-
- - name: Destroy Config Rule
- aws_config_rule:
- <<: *aws_connection_info
- name: test_config_rule
- state: absent
- description: 'This AWS Config rule checks for public write access on S3 buckets'
- scope:
- compliance_types:
- - 'AWS::S3::Bucket'
- source:
- owner: AWS
- identifier: 'S3_BUCKET_PUBLIC_READ_PROHIBITED'
- register: output
- ignore_errors: yes
-
-# - assert:
-# that:
-# - output.changed
-
- # ============================================================
- # Clean up prerequisites
- # ============================================================
- - name: remove S3 access from IAM role
- iam_policy:
- <<: *aws_connection_info
- iam_type: role
- iam_name: '{{ config_role_name }}'
- policy_name: AwsConfigRecorderTestRoleS3Policy
- state: absent
- policy_json: "{{ lookup( 'template', 'config-s3-policy.json.j2') }}"
- ignore_errors: yes
-
- - name: remove IAM role
- iam_role:
- <<: *aws_connection_info
- name: '{{ config_role_name }}'
- state: absent
- ignore_errors: yes
-
- - name: remove SNS topic
- sns_topic:
- <<: *aws_connection_info
- name: '{{ config_sns_name }}'
- state: absent
- ignore_errors: yes
-
- - name: remove S3 bucket
- s3_bucket:
- <<: *aws_connection_info
- name: "{{ config_s3_bucket }}"
- state: absent
- force: yes
- ignore_errors: yes
diff --git a/test/integration/targets/aws_config/templates/config-s3-policy.json.j2 b/test/integration/targets/aws_config/templates/config-s3-policy.json.j2
deleted file mode 100644
index 5309330008..0000000000
--- a/test/integration/targets/aws_config/templates/config-s3-policy.json.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Action": "sns:Publish",
- "Resource": "{{ config_sns_topic.sns_arn }}",
- "Effect": "Allow",
- "Sid": "PublishToSNS"
- },
- {
- "Action": "s3:PutObject",
- "Resource": "arn:aws:s3:::{{ config_s3_bucket }}/*",
- "Effect": "Allow",
- "Sid": "AllowPutS3Object"
- },
- {
- "Action": "s3:GetBucketAcl",
- "Resource": "arn:aws:s3:::{{ config_s3_bucket }}",
- "Effect": "Allow",
- "Sid": "AllowGetS3Acl"
- }
- ]
-}
diff --git a/test/integration/targets/aws_eks_cluster/aliases b/test/integration/targets/aws_eks_cluster/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/aws_eks_cluster/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/aws_eks_cluster/defaults/main.yml b/test/integration/targets/aws_eks_cluster/defaults/main.yml
deleted file mode 100644
index 214f249611..0000000000
--- a/test/integration/targets/aws_eks_cluster/defaults/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-eks_cluster_name: "{{ resource_prefix }}"
-eks_subnets:
- - zone: a
- cidr: 10.0.1.0/24
- - zone: b
- cidr: 10.0.2.0/24
- - zone: c
- cidr: 10.0.3.0/24
-
-eks_security_groups:
- - name: "{{ eks_cluster_name }}-control-plane-sg"
- description: "EKS Control Plane Security Group"
- rules:
- - group_name: "{{ eks_cluster_name }}-workers-sg"
- group_desc: "EKS Worker Security Group"
- ports: 443
- proto: tcp
- rules_egress:
- - group_name: "{{ eks_cluster_name }}-workers-sg"
- group_desc: "EKS Worker Security Group"
- from_port: 1025
- to_port: 65535
- proto: tcp
- - name: "{{ eks_cluster_name }}-workers-sg"
- description: "EKS Worker Security Group"
- rules:
- - group_name: "{{ eks_cluster_name }}-workers-sg"
- proto: tcp
- from_port: 1
- to_port: 65535
- - group_name: "{{ eks_cluster_name }}-control-plane-sg"
- ports: 10250
- proto: tcp
diff --git a/test/integration/targets/aws_eks_cluster/files/eks-trust-policy.json b/test/integration/targets/aws_eks_cluster/files/eks-trust-policy.json
deleted file mode 100644
index 85cfb59dd2..0000000000
--- a/test/integration/targets/aws_eks_cluster/files/eks-trust-policy.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "Service": "eks.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/aws_eks_cluster/meta/main.yml b/test/integration/targets/aws_eks_cluster/meta/main.yml
deleted file mode 100644
index 1810d4bec9..0000000000
--- a/test/integration/targets/aws_eks_cluster/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_remote_tmp_dir
diff --git a/test/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.10.1.yml b/test/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.10.1.yml
deleted file mode 100644
index e4c4b31fe5..0000000000
--- a/test/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.10.1.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-- name: try and use aws_eks_cluster module
- aws_eks_cluster:
- state: absent
- name: my_cluster
- ignore_errors: yes
- register: aws_eks_cluster
-
-- name: ensure that aws_eks fails with friendly error message
- assert:
- that:
- - '"msg" in aws_eks_cluster'
- - aws_eks_cluster is failed
diff --git a/test/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.12.38.yml b/test/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.12.38.yml
deleted file mode 100644
index 4feb7ab48f..0000000000
--- a/test/integration/targets/aws_eks_cluster/tasks/botocore_lt_1.12.38.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-- name: try using aws_eks_cluster wait with state=absent
- aws_eks_cluster:
- state: absent
- name: my_cluster
- wait: yes
- ignore_errors: yes
- register: aws_eks_cluster
-
-- name: ensure that aws_eks fails with friendly error message
- assert:
- that:
- - '"msg" in aws_eks_cluster'
- - aws_eks_cluster is failed
diff --git a/test/integration/targets/aws_eks_cluster/tasks/full_test.yml b/test/integration/targets/aws_eks_cluster/tasks/full_test.yml
deleted file mode 100644
index a48abd45f3..0000000000
--- a/test/integration/targets/aws_eks_cluster/tasks/full_test.yml
+++ /dev/null
@@ -1,245 +0,0 @@
----
-# tasks file for aws_eks modules
-
-- block:
- # If us-west-1 does become supported, change this test to use an unsupported region
- # or if all regions are supported, delete this test
- - name: attempt to use eks in unsupported region
- aws_eks_cluster:
- name: "{{ eks_cluster_name }}"
- state: absent
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: us-west-1
- register: aws_eks_unsupported_region
- ignore_errors: yes
-
- - name: check that aws_eks_cluster did nothing
- assert:
- that:
- - aws_eks_unsupported_region is failed
- - '"msg" in aws_eks_unsupported_region'
-
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: delete an as yet non-existent EKS cluster
- aws_eks_cluster:
- name: "{{ eks_cluster_name }}"
- state: absent
- <<: *aws_connection_info
- register: aws_eks_delete_non_existent
-
- - name: check that aws_eks_cluster did nothing
- assert:
- that:
- - aws_eks_delete_non_existent is not changed
-
- - name: ensure IAM instance role exists
- iam_role:
- name: aws_eks_cluster_role
- assume_role_policy_document: "{{ lookup('file','eks-trust-policy.json') }}"
- state: present
- create_instance_profile: no
- managed_policies:
- - AmazonEKSServicePolicy
- - AmazonEKSClusterPolicy
- <<: *aws_connection_info
- register: iam_role
-
- - name: create a VPC to work in
- ec2_vpc_net:
- cidr_block: 10.0.0.0/16
- state: present
- name: '{{ resource_prefix }}_aws_eks'
- resource_tags:
- Name: '{{ resource_prefix }}_aws_eks'
- <<: *aws_connection_info
- register: setup_vpc
-
- - name: create subnets
- ec2_vpc_subnet:
- az: '{{ aws_region }}{{ item.zone }}'
- tags:
- Name: '{{ resource_prefix }}_aws_eks-subnet-{{ item.zone }}'
- vpc_id: '{{ setup_vpc.vpc.id }}'
- cidr: "{{ item.cidr }}"
- state: present
- <<: *aws_connection_info
- register: setup_subnets
- with_items:
- - "{{ eks_subnets }}"
-
- - name: create security groups to use for EKS
- ec2_group:
- name: "{{ item.name }}"
- description: "{{ item.description }}"
- state: present
- rules: "{{ item.rules }}"
- rules_egress: "{{ item.rules_egress|default(omit) }}"
- vpc_id: '{{ setup_vpc.vpc.id }}'
- <<: *aws_connection_info
- with_items: "{{ eks_security_groups }}"
- register: setup_security_groups
-
- - name: create EKS cluster
- aws_eks_cluster:
- name: "{{ eks_cluster_name }}"
- security_groups: "{{ eks_security_groups | json_query('[].name') }}"
- subnets: "{{ setup_subnets.results | json_query('[].subnet.id') }}"
- role_arn: "{{ iam_role.arn }}"
- <<: *aws_connection_info
- register: eks_create
-
- - name: check that EKS cluster was created
- assert:
- that:
- - eks_create is changed
- - eks_create.name == eks_cluster_name
-
- - name: create EKS cluster with same details but wait for it to become active
- aws_eks_cluster:
- name: "{{ eks_cluster_name }}"
- security_groups: "{{ eks_security_groups | json_query('[].name') }}"
- subnets: "{{ setup_subnets.results | json_query('[].subnet.id') }}"
- role_arn: "{{ iam_role.arn }}"
- wait: yes
- <<: *aws_connection_info
- register: eks_create
-
- - name: Check that EKS cluster is active and has CA and endpoint data
- assert:
- that:
- - eks_create is not changed
- - eks_create.name == eks_cluster_name
- - eks_create.status == "ACTIVE"
- - eks_create.certificate_authority.data is defined
- - eks_create.certificate_authority.data != ""
- - eks_create.endpoint is defined
- - eks_create.endpoint != ""
-
- - name: create EKS cluster with same details but using SG ids
- aws_eks_cluster:
- name: "{{ eks_cluster_name }}"
- security_groups: "{{ setup_security_groups.results | json_query('[].group_id') }}"
- subnets: "{{ setup_subnets.results | json_query('[].subnet.id') }}"
- role_arn: "{{ iam_role.arn }}"
- <<: *aws_connection_info
- register: eks_create
-
- - name: check that EKS cluster did not change
- assert:
- that:
- - eks_create is not changed
- - eks_create.name == eks_cluster_name
-
- - name: remove EKS cluster, waiting until complete
- aws_eks_cluster:
- name: "{{ eks_cluster_name }}"
- state: absent
- wait: yes
- <<: *aws_connection_info
- register: eks_delete
-
- - name: check that EKS cluster was removed
- assert:
- that:
- - eks_delete is changed
-
- - name: create EKS cluster with same details but wait for it to become active
- aws_eks_cluster:
- name: "{{ eks_cluster_name }}"
- security_groups: "{{ eks_security_groups | json_query('[].name') }}"
- subnets: "{{ setup_subnets.results | json_query('[].subnet.id') }}"
- role_arn: "{{ iam_role.arn }}"
- wait: yes
- <<: *aws_connection_info
- register: eks_create
-
- - name: check that EKS cluster was created
- assert:
- that:
- - eks_create is changed
- - eks_create.name == eks_cluster_name
-
- - name: remove EKS cluster, without waiting this time
- aws_eks_cluster:
- name: "{{ eks_cluster_name }}"
- state: absent
- <<: *aws_connection_info
- register: eks_delete
-
- - name: check that EKS cluster remove has started
- assert:
- that:
- - eks_delete is changed
-
- always:
- - name: Announce teardown start
- debug:
- msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"
-
- - name: remove EKS cluster
- aws_eks_cluster:
- name: "{{ eks_cluster_name }}"
- state: absent
- wait: yes
- <<: *aws_connection_info
- register: eks_delete
- ignore_errors: yes
-
- - debug:
- msg: "{{ eks_security_groups|reverse|list }}"
-
- - name: create list of all additional EKS security groups
- set_fact:
- additional_eks_sg:
- - name: "{{ eks_cluster_name }}-workers-sg"
-
- - name: set all security group rule lists to empty to remove circular dependency
- ec2_group:
- name: "{{ item.name }}"
- description: "{{ item.description }}"
- state: present
- rules: []
- rules_egress: []
- purge_rules: yes
- purge_rules_egress: yes
- vpc_id: '{{ setup_vpc.vpc.id }}'
- <<: *aws_connection_info
- with_items: "{{ eks_security_groups }}"
- ignore_errors: yes
-
- - name: remove security groups
- ec2_group:
- name: '{{ item.name }}'
- state: absent
- vpc_id: '{{ setup_vpc.vpc.id }}'
- <<: *aws_connection_info
- with_items: "{{ eks_security_groups|reverse|list + additional_eks_sg }}"
- ignore_errors: yes
-
- - name: remove setup subnet
- ec2_vpc_subnet:
- az: '{{ aws_region }}{{ item.zone }}'
- vpc_id: '{{ setup_vpc.vpc.id }}'
- cidr: "{{ item.cidr}}"
- state: absent
- <<: *aws_connection_info
- with_items: "{{ eks_subnets }}"
- ignore_errors: yes
-
- - name: remove setup VPC
- ec2_vpc_net:
- cidr_block: 10.0.0.0/16
- state: absent
- name: '{{ resource_prefix }}_aws_eks'
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/aws_eks_cluster/tasks/main.yml b/test/integration/targets/aws_eks_cluster/tasks/main.yml
deleted file mode 100644
index da65e18c66..0000000000
--- a/test/integration/targets/aws_eks_cluster/tasks/main.yml
+++ /dev/null
@@ -1,66 +0,0 @@
-- set_fact:
- virtualenv: "{{ remote_tmp_dir }}/virtualenv"
- virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
-
-- set_fact:
- virtualenv_interpreter: "{{ virtualenv }}/bin/python"
-
-- pip:
- name: virtualenv
-
-# Test graceful failure when botocore is too old (<1.10.1) for aws_eks_cluster
-
-- pip:
- name:
- - 'botocore<1.10.1'
- - boto3
- - coverage
- virtualenv: "{{ virtualenv }}"
- virtualenv_command: "{{ virtualenv_command }}"
- virtualenv_site_packages: no
-
-- include_tasks: botocore_lt_1.10.1.yml
- vars:
- ansible_python_interpreter: "{{ virtualenv_interpreter }}"
-
-- file:
- path: "{{ virtualenv }}"
- state: absent
-
-# Test graceful failures when botocore<1.12.38
-
-- pip:
- name:
- - 'botocore>1.10.1,<1.12.38'
- - boto3
- - coverage
- virtualenv: "{{ virtualenv }}"
- virtualenv_command: "{{ virtualenv_command }}"
- virtualenv_site_packages: no
-
-- include_tasks: botocore_lt_1.12.38.yml
- vars:
- ansible_python_interpreter: "{{ virtualenv_interpreter }}"
-
-- file:
- path: "{{ virtualenv }}"
- state: absent
-
-# Run the full test suite with a current botocore
-
-- pip:
- name:
- - 'botocore>=1.10.1'
- - boto3
- virtualenv: "{{ virtualenv }}"
- virtualenv_command: "{{ virtualenv_command }}"
- virtualenv_site_packages: no
-
-- include_tasks: full_test.yml
- vars:
- ansible_python_interpreter: "{{ virtualenv_interpreter }}"
- playbook_namespace: ansible-test-k8s-validate
-
-- file:
- path: "{{ virtualenv }}"
- state: absent
diff --git a/test/integration/targets/aws_elasticbeanstalk_app/aliases b/test/integration/targets/aws_elasticbeanstalk_app/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/aws_elasticbeanstalk_app/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/aws_elasticbeanstalk_app/defaults/main.yml b/test/integration/targets/aws_elasticbeanstalk_app/defaults/main.yml
deleted file mode 100644
index 3f38e1a85a..0000000000
--- a/test/integration/targets/aws_elasticbeanstalk_app/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-# defaults file for aws_elasticbeanstalk_app
-app_name: '{{ resource_prefix }}_eb_ansible_test'
-description: 'eb_ansible_test app description'
-alternate_description: 'eb_ansible_test app alternate_description'
diff --git a/test/integration/targets/aws_elasticbeanstalk_app/meta/main.yml b/test/integration/targets/aws_elasticbeanstalk_app/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/aws_elasticbeanstalk_app/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/aws_elasticbeanstalk_app/tasks/main.yml b/test/integration/targets/aws_elasticbeanstalk_app/tasks/main.yml
deleted file mode 100644
index 56f77d3dbe..0000000000
--- a/test/integration/targets/aws_elasticbeanstalk_app/tasks/main.yml
+++ /dev/null
@@ -1,156 +0,0 @@
----
-# tasks file for aws_elasticbeanstalk_app
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- # ============================================================
- - name: test with no parameters
- aws_elasticbeanstalk_app:
- register: result
- ignore_errors: true
-
- - name: assert failure when called with no parameters
- assert:
- that:
- - 'result.failed'
-
- # ============================================================
- - name: test create app
- aws_elasticbeanstalk_app:
- app_name: "{{ app_name }}"
- description: "{{ description }}"
- state: present
- <<: *aws_connection_info
- register: result
-
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
-
- # ============================================================
- - name: test create when app already exists
- aws_elasticbeanstalk_app:
- app_name: "{{ app_name }}"
- description: "{{ description }}"
- state: present
- <<: *aws_connection_info
- register: result
-
- - name: assert changed is False since the app already exists
- assert:
- that:
- - result.changed == False
-
- # ============================================================
- - name: make an update to an existing app
- aws_elasticbeanstalk_app:
- app_name: "{{ app_name }}"
- description: "{{ alternate_description }}"
- state: present
- <<: *aws_connection_info
- register: result
-
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
-
-# # ============================================================
-# - name: fail deleting an app that has environments that exist
-# aws_elasticbeanstalk_app:
-# app_name: "non_app"
-# state: absent
-# <<: *aws_connection_info
-# register: result
-# ignore_errors: true
-#
-# - name: assert deleting app with running environments fails
-# assert:
-# that:
-# - result.changed == False
-
-# # ============================================================
-# - name: deleting an app that has environments that exist with terminate_by_force True
-# aws_elasticbeanstalk_app:
-# app_name: "non_app"
-# state: absent
-# terminate_by_force: True
-# <<: *aws_connection_info
-# register: result
-#
-# - name: assert deleting app with running environments with terminate_by_force True
-# assert:
-# that:
-# - result.changed == True
-#
- # ============================================================
-# - name: retrieve a list of apps
-# aws_elasticbeanstalk_app_facts:
-# <<: *aws_connection_info
-# register: result
-
-# - name: assert changed is True
-# assert:
-# that:
-# - result is success
-
-# # ============================================================
-# - name: deleting an app that has environments that exist with terminate_by_force True
-# aws_elasticbeanstalk_app:
-# app_name: "non_app"
-# state: absent
-# terminate_by_force: True
-# <<: *aws_connection_info
-# register: result
-#
-# - name: assert deleting app with running environments with terminate_by_force True
-# assert:
-# that:
-# - result.changed == True
-#
- # ============================================================
- - name: delete non existent app
- aws_elasticbeanstalk_app:
- app_name: "non_app"
- state: absent
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: assert deleting non-existent app fails
- assert:
- that:
- - result.changed == False
- - 'result.output.startswith("Application not found")'
-
- # ============================================================
- - name: delete existing app
- aws_elasticbeanstalk_app:
- app_name: "{{ app_name }}"
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
-
- # ============================================================
-
- always:
-
- - name: delete existing app
- aws_elasticbeanstalk_app:
- app_name: "{{ app_name }}"
- state: absent
- <<: *aws_connection_info
diff --git a/test/integration/targets/aws_glue_connection/aliases b/test/integration/targets/aws_glue_connection/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/aws_glue_connection/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/aws_glue_connection/tasks/main.yml b/test/integration/targets/aws_glue_connection/tasks/main.yml
deleted file mode 100644
index 84ca6e5eff..0000000000
--- a/test/integration/targets/aws_glue_connection/tasks/main.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-- block:
-
- # TODO: description, match_criteria, security_groups, and subnet_id are unused module options
-
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create glue connection
- aws_glue_connection:
- name: "{{ resource_prefix }}"
- connection_properties:
- JDBC_CONNECTION_URL: "jdbc:mysql://mydb:3306/{{ resource_prefix }}"
- USERNAME: my-username
- PASSWORD: my-password
- state: present
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- - name: test idempotence creating glue connection
- aws_glue_connection:
- name: "{{ resource_prefix }}"
- connection_properties:
- JDBC_CONNECTION_URL: "jdbc:mysql://mydb:3306/{{ resource_prefix }}"
- USERNAME: my-username
- PASSWORD: my-password
- state: present
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
-
- - name: test updating JDBC connection url
- aws_glue_connection:
- name: "{{ resource_prefix }}"
- connection_properties:
- JDBC_CONNECTION_URL: "jdbc:mysql://mydb:3306/{{ resource_prefix }}-updated"
- USERNAME: my-username
- PASSWORD: my-password
- state: present
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- - name: delete glue connection
- aws_glue_connection:
- name: "{{ resource_prefix }}"
- state: absent
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- - name: test idempotence removing glue connection
- aws_glue_connection:
- name: "{{ resource_prefix }}"
- state: absent
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
-
- always:
-
- - name: delete glue connection
- aws_glue_connection:
- name: "{{ resource_prefix }}"
- state: absent
- <<: *aws_connection_info
diff --git a/test/integration/targets/aws_inspector_target/aliases b/test/integration/targets/aws_inspector_target/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/aws_inspector_target/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/aws_inspector_target/defaults/main.yml b/test/integration/targets/aws_inspector_target/defaults/main.yml
deleted file mode 100644
index 8777873f07..0000000000
--- a/test/integration/targets/aws_inspector_target/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-
-aws_inspector_scan_name: "aws_inspector_scan-{{ ansible_date_time.epoch }}"
diff --git a/test/integration/targets/aws_inspector_target/tasks/main.yml b/test/integration/targets/aws_inspector_target/tasks/main.yml
deleted file mode 100644
index 36a3cfca9e..0000000000
--- a/test/integration/targets/aws_inspector_target/tasks/main.yml
+++ /dev/null
@@ -1,96 +0,0 @@
----
-
-- name: Set Connection Information for All Tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
-- block:
- - name: Create AWS Inspector Target Group
- aws_inspector_target:
- name: "{{ aws_inspector_scan_name }}"
- state: present
- tags:
- Name: "{{ aws_inspector_scan_name }}"
- changed: "no"
- <<: *aws_connection_info
- register: target_group_create
-
- - name: Create AWS Inspector Target Group (Verify)
- aws_inspector_target:
- name: "{{ aws_inspector_scan_name }}"
- state: present
- tags:
- Name: "{{ aws_inspector_scan_name }}"
- changed: "no"
- <<: *aws_connection_info
- register: target_group_create_verify
-
- - name: Assert Successful AWS Inspector Target Group Creation
- assert:
- that:
- - target_group_create is changed
- - target_group_create.name == aws_inspector_scan_name
- - target_group_create.tags.Name == aws_inspector_scan_name
- - target_group_create.tags.changed == "no"
- - target_group_create_verify is not changed
- - target_group_create_verify.name == aws_inspector_scan_name
- - target_group_create_verify.tags.Name == aws_inspector_scan_name
- - target_group_create_verify.tags.changed == "no"
-
- - name: Change AWS Inspector Target Group Tags
- aws_inspector_target:
- name: "{{ aws_inspector_scan_name }}"
- state: present
- tags:
- Name: "{{ aws_inspector_scan_name }}"
- changed: "yes"
- <<: *aws_connection_info
- register: target_group_tag_change
-
- - name: Change AWS Inspector Target Group Tags (Verify)
- aws_inspector_target:
- name: "{{ aws_inspector_scan_name }}"
- state: present
- tags:
- Name: "{{ aws_inspector_scan_name }}"
- changed: "yes"
- <<: *aws_connection_info
- register: target_group_tag_change_verify
-
- - name: Assert Successful AWS Inspector Target Group Tag Change
- assert:
- that:
- - target_group_tag_change is changed
- - target_group_tag_change.name == aws_inspector_scan_name
- - target_group_tag_change.tags.Name == aws_inspector_scan_name
- - target_group_tag_change.tags.changed == "yes"
- - target_group_tag_change_verify is not changed
- - target_group_tag_change_verify.name == aws_inspector_scan_name
- - target_group_tag_change_verify.tags.Name == aws_inspector_scan_name
- - target_group_tag_change_verify.tags.changed == "yes"
-
- always:
- - name: Delete AWS Inspector Target Group
- aws_inspector_target:
- name: "{{ aws_inspector_scan_name }}"
- state: absent
- <<: *aws_connection_info
- register: target_group_delete
-
- - name: Delete AWS Inspector Target Group (Verify)
- aws_inspector_target:
- name: "{{ aws_inspector_scan_name }}"
- state: absent
- <<: *aws_connection_info
- register: target_group_delete_verify
-
- - name: Assert Successful AWS Inspector Target Group Deletion
- assert:
- that:
- - target_group_delete is changed
- - target_group_delete_verify is not changed
diff --git a/test/integration/targets/aws_kms/aliases b/test/integration/targets/aws_kms/aliases
deleted file mode 100644
index 7ed9252cad..0000000000
--- a/test/integration/targets/aws_kms/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-aws_kms_info
-unsupported
diff --git a/test/integration/targets/aws_kms/meta/main.yml b/test/integration/targets/aws_kms/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/aws_kms/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/aws_kms/tasks/main.yml b/test/integration/targets/aws_kms/tasks/main.yml
deleted file mode 100644
index 38810601af..0000000000
--- a/test/integration/targets/aws_kms/tasks/main.yml
+++ /dev/null
@@ -1,371 +0,0 @@
-- module_defaults:
- group/aws:
- region: "{{ aws_region }}"
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- block:
- # ============================================================
- # PREPARATION
- #
- # Get some information about who we are before starting our tests
- # we'll need this as soon as we start working on the policies
- - name: get ARN of calling user
- aws_caller_info:
- register: aws_caller_info
-
-  # IAM role creation can return before the role is fully instantiated; create it
-  # here to ensure it exists when we need it for updating the policies
- - name: create an IAM role that can do nothing
- iam_role:
- name: "{{ resource_prefix }}-kms-role"
- state: present
- assume_role_policy_document: '{"Version": "2012-10-17", "Statement": {"Action": "sts:AssumeRole", "Principal": {"Service": "ec2.amazonaws.com"}, "Effect": "Deny"} }'
- register: iam_role_result
- # ============================================================
- # TESTS
-  - name: see whether the key exists and get its current state
- aws_kms_info:
- filters:
- alias: "{{ resource_prefix }}-kms"
-
- - name: create a key
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- tags:
- Hello: World
- state: present
- enabled: yes
- register: create_kms
-
- - name: assert that state is enabled
- assert:
- that:
- - create_kms.key_state == "Enabled"
- - create_kms.tags['Hello'] == 'World'
- - create_kms.enable_key_rotation == false
-
- - name: enable key rotation
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- tags:
- Hello: World
- state: present
- enabled: yes
- enable_key_rotation: yes
- register: create_kms
-
- - name: assert that key rotation is enabled
- assert:
- that:
- - create_kms.key_state == "Enabled"
- - create_kms.tags['Hello'] == 'World'
- - create_kms.enable_key_rotation == true
-
- - name: find facts about the key
- aws_kms_info:
- filters:
- alias: "{{ resource_prefix }}-kms"
- register: new_key
-
- - name: check that a key was found
- assert:
- that:
- - new_key["keys"]|length == 1
- - new_key["keys"][0]["enable_key_rotation"] == true
-
-  - name: Update Policy on key to match an AWS Console generated policy
- aws_kms:
- key_id: '{{ new_key["keys"][0]["key_id"] }}'
- policy: "{{ lookup('template', 'console-policy.j2') | to_json }}"
- register: kms_policy_changed
-
- - name: Policy should have been changed
- assert:
- that:
- - kms_policy_changed is changed
-
- - name: Attempt to re-assert the same policy
- aws_kms:
- alias: "alias/{{ resource_prefix }}-kms"
- policy: "{{ lookup('template', 'console-policy.j2') | to_json }}"
- register: kms_policy_changed
-
- - name: Policy should not have changed since it was last set
- assert:
- that:
-        - kms_policy_changed is not changed
-
- - name: grant user-style access to production secrets
- aws_kms:
- mode: grant
- alias: "alias/{{ resource_prefix }}-kms"
- role_name: "{{ resource_prefix }}-kms-role"
- grant_types: "role,role grant"
-
- - name: find facts about the key
- aws_kms_info:
- filters:
- alias: "{{ resource_prefix }}-kms"
- register: new_key
-
- - name: remove access to production secrets from role
- aws_kms:
- mode: deny
- alias: "alias/{{ resource_prefix }}-kms"
- role_arn: "{{ iam_role_result.iam_role.arn }}"
-
- - name: find facts about the key
- aws_kms_info:
- filters:
- alias: "{{ resource_prefix }}-kms"
- register: new_key
-
- - name: Allow the IAM role to use a specific Encryption Context
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: present
- purge_grants: yes
- purge_tags: yes
- grants:
- - name: test_grant
- grantee_principal: "{{ iam_role_result.iam_role.arn }}"
- retiring_principal: "{{ aws_caller_info.arn }}"
- constraints:
- encryption_context_equals:
- environment: test
- application: testapp
- operations:
- - Decrypt
- - RetireGrant
- register: grant_one
-
- - name: assert grant added
- assert:
- that:
- - grant_one.changed
- - grant_one.grants|length == 1
-
- - name: Add a second grant
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: present
- grants:
- - name: another_grant
- grantee_principal: "{{ iam_role_result.iam_role.arn }}"
- retiring_principal: "{{ aws_caller_info.arn }}"
- constraints:
- encryption_context_equals:
- Environment: second
- Application: anotherapp
- operations:
- - Decrypt
- - RetireGrant
- register: grant_two
-
- - name: assert grant added
- assert:
- that:
- - grant_two.changed
- - grant_two.grants|length == 2
-
- - name: Add a second grant again
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: present
- grants:
- - name: another_grant
- grantee_principal: "{{ iam_role_result.iam_role.arn }}"
- retiring_principal: "{{ aws_caller_info.arn }}"
- constraints:
- encryption_context_equals:
- Environment: second
- Application: anotherapp
- operations:
- - Decrypt
- - RetireGrant
- register: grant_two_again
-
- - name: assert grant added
- assert:
- that:
- - not grant_two_again.changed
- - grant_two_again.grants|length == 2
-
- - name: Update the grants with purge_grants set
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: present
- purge_grants: yes
- grants:
- - name: third_grant
- grantee_principal: "{{ iam_role_result.iam_role.arn }}"
- retiring_principal: "{{ aws_caller_info.arn }}"
- constraints:
- encryption_context_equals:
- environment: third
- application: onemoreapp
- operations:
- - Decrypt
- - RetireGrant
- register: grant_three
-
- - name: assert grants replaced
- assert:
- that:
- - grant_three.changed
- - grant_three.grants|length == 1
-
-  - name: update third grant to change encryption_context_equals to encryption_context_subset
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: present
- grants:
- - name: third_grant
- grantee_principal: "{{ iam_role_result.iam_role.arn }}"
- retiring_principal: "{{ aws_caller_info.arn }}"
- constraints:
- encryption_context_subset:
- environment: third
- application: onemoreapp
- operations:
- - Decrypt
- - RetireGrant
- register: grant_three_update
-
-  - name: assert grant constraints updated
- assert:
- that:
- - "grant_three_update.changed"
- - "grant_three_update.grants|length == 1"
- - "'encryption_context_equals' not in grant_three_update.grants[0].constraints"
- - "'encryption_context_subset' in grant_three_update.grants[0].constraints"
-
- - name: tag encryption key
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: present
- tags:
- tag_one: tag_one
- tag_two: tag_two
- register: tag_kms
-
- - name: assert tags added and grants remain in place
- assert:
- that:
- - "tag_kms.changed"
- - "tag_kms.grants|length == 1"
- - "'tag_one' in tag_kms.tags"
- - "'tag_two' in tag_kms.tags"
-
- - name: add, replace, remove tags
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: present
- purge_tags: yes
- tags:
- tag_two: tag_two_updated
- tag_three: tag_three
- register: tag_kms_update
-
- - name: assert tags correctly changed
- assert:
- that:
- - "tag_kms_update.changed"
- - "'tag_one' not in tag_kms_update.tags"
- - "'tag_two' in tag_kms_update.tags"
- - "tag_kms_update.tags.tag_two == 'tag_two_updated'"
- - "'tag_three' in tag_kms_update.tags"
-
- - name: make no real tag change
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: present
- register: tag_kms_no_update
-
- - name: assert no change to tags
- assert:
- that:
- - "not tag_kms_no_update.changed"
- - "'tag_one' not in tag_kms_no_update.tags"
- - "'tag_two' in tag_kms_no_update.tags"
- - "tag_kms_no_update.tags.tag_two == 'tag_two_updated'"
- - "'tag_three' in tag_kms_no_update.tags"
-
- - name: update the key's description and disable it
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: present
- description: test key for testing
- enabled: no
- register: update_key
-
-  - name: assert that state is disabled
- assert:
- that:
- - update_key.description == "test key for testing"
- - update_key.key_state == "Disabled"
- - update_key.changed
-
- - name: delete the key
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: absent
- register: delete_kms
-
- - name: assert that state is pending deletion
- assert:
- that:
- - delete_kms.key_state == "PendingDeletion"
- - delete_kms.changed
-
- - name: re-delete the key
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: absent
- register: delete_kms
-
- - name: assert that state is pending deletion
- assert:
- that:
- - delete_kms.key_state == "PendingDeletion"
- - delete_kms is not changed
-
- - name: undelete and enable the key
- aws_kms:
- alias: "{{ resource_prefix }}-kms"
- state: present
- enabled: yes
- register: undelete_kms
-
- - name: assert that state is enabled
- assert:
- that:
- - undelete_kms.key_state == "Enabled"
- - undelete_kms.changed
-
-  - name: delete a non-existent key
- aws_kms:
- key_id: '00000000-0000-0000-0000-000000000000'
- state: absent
- register: delete_kms
-
- - name: assert that state is unchanged
- assert:
- that:
- - delete_kms is not changed
-
- always:
- # ============================================================
- # CLEAN-UP
- - name: finish off by deleting key
- aws_kms:
- state: absent
- alias: "{{ resource_prefix }}-kms"
- register: destroy_result
-
- - name: remove the IAM role
- iam_role:
- name: "{{ resource_prefix }}-kms-role"
- state: absent
- register: iam_role_result
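
The grant tasks above drive the KMS CreateGrant API underneath. A hedged boto3 sketch of the first grant in that sequence; the key id, principal ARNs and context values are placeholders, not resources the test manages:

import boto3

kms = boto3.client("kms", region_name="us-east-1")

response = kms.create_grant(
    KeyId="00000000-0000-0000-0000-000000000000",  # placeholder key id
    GranteePrincipal="arn:aws:iam::123456789012:role/example-role",
    RetiringPrincipal="arn:aws:iam::123456789012:user/example-user",
    Operations=["Decrypt", "RetireGrant"],
    Constraints={
        "EncryptionContextEquals": {
            "environment": "test",
            "application": "testapp",
        }
    },
    Name="test_grant",
)
print(response["GrantId"])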
diff --git a/test/integration/targets/aws_kms/templates/console-policy.j2 b/test/integration/targets/aws_kms/templates/console-policy.j2
deleted file mode 100644
index 4b60ba5889..0000000000
--- a/test/integration/targets/aws_kms/templates/console-policy.j2
+++ /dev/null
@@ -1,72 +0,0 @@
-{
- "Id": "key-consolepolicy-3",
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "Enable IAM User Permissions",
- "Effect": "Allow",
- "Principal": {
- "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root"
- },
- "Action": "kms:*",
- "Resource": "*"
- },
- {
- "Sid": "Allow access for Key Administrators",
- "Effect": "Allow",
- "Principal": {
- "AWS": "{{ aws_caller_info.arn }}"
- },
- "Action": [
- "kms:Create*",
- "kms:Describe*",
- "kms:Enable*",
- "kms:List*",
- "kms:Put*",
- "kms:Update*",
- "kms:Revoke*",
- "kms:Disable*",
- "kms:Get*",
- "kms:Delete*",
- "kms:TagResource",
- "kms:UntagResource",
- "kms:ScheduleKeyDeletion",
- "kms:CancelKeyDeletion"
- ],
- "Resource": "*"
- },
- {
- "Sid": "Allow use of the key",
- "Effect": "Allow",
- "Principal": {
- "AWS": "{{ aws_caller_info.arn }}"
- },
- "Action": [
- "kms:Encrypt",
- "kms:Decrypt",
- "kms:ReEncrypt*",
- "kms:GenerateDataKey*",
- "kms:DescribeKey"
- ],
- "Resource": "*"
- },
- {
- "Sid": "Allow attachment of persistent resources",
- "Effect": "Allow",
- "Principal": {
- "AWS": "{{ aws_caller_info.arn }}"
- },
- "Action": [
- "kms:CreateGrant",
- "kms:ListGrants",
- "kms:RevokeGrant"
- ],
- "Resource": "*",
- "Condition": {
- "Bool": {
- "kms:GrantIsForAWSResource": "true"
- }
- }
- }
- ]
-}
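
The playbook renders this template with lookup('template', 'console-policy.j2') | to_json. A small sketch, assuming Jinja2 is installed, of the equivalent render-then-parse step; a trimmed single-statement policy stands in for the full document:

import json
from jinja2 import Template

# Trimmed stand-in for console-policy.j2; only one statement kept.
TEMPLATE = (
    '{"Version": "2012-10-17", "Statement": ['
    '{"Sid": "Enable IAM User Permissions", "Effect": "Allow",'
    ' "Principal": {"AWS": "arn:aws:iam::{{ account }}:root"},'
    ' "Action": "kms:*", "Resource": "*"}]}'
)

rendered = Template(TEMPLATE).render(account="123456789012")
policy = json.loads(rendered)  # fails loudly if the rendered JSON is invalid
print(json.dumps(policy, indent=2))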
diff --git a/test/integration/targets/aws_lambda/aliases b/test/integration/targets/aws_lambda/aliases
deleted file mode 100644
index 67404b7652..0000000000
--- a/test/integration/targets/aws_lambda/aliases
+++ /dev/null
@@ -1,5 +0,0 @@
-cloud/aws
-shippable/aws/group2
-execute_lambda
-lambda
-lambda_info
diff --git a/test/integration/targets/aws_lambda/defaults/main.yml b/test/integration/targets/aws_lambda/defaults/main.yml
deleted file mode 100644
index d227210344..0000000000
--- a/test/integration/targets/aws_lambda/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# defaults file for aws_lambda test
-lambda_function_name: '{{resource_prefix}}'
diff --git a/test/integration/targets/aws_lambda/files/mini_lambda.py b/test/integration/targets/aws_lambda/files/mini_lambda.py
deleted file mode 100644
index b499888ed9..0000000000
--- a/test/integration/targets/aws_lambda/files/mini_lambda.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from __future__ import print_function
-import json
-import os
-
-
-def handler(event, context):
- """
- The handler function is the function which gets called each time
- the lambda is run.
- """
-    # printing goes to the CloudWatch log, making it simple to debug the lambda
-    # if we can find the log entry.
- print("got event:\n" + json.dumps(event))
-
-    # if the name parameter isn't present this raises an exception,
-    # which results in an AWS-generated failure from the lambda;
-    # that is completely fine for this test.
-
- name = event["name"]
-
- # we can use environment variables as part of the configuration of the lambda
- # which can change the behaviour of the lambda without needing a new upload
-
- extra = os.environ.get("EXTRA_MESSAGE")
- if extra is not None and len(extra) > 0:
- greeting = "hello {0}. {1}".format(name, extra)
- else:
- greeting = "hello " + name
-
- return {"message": greeting}
-
-
-def main():
- """
-    This main function is never called during normal lambda use.
-    It is here only for testing the lambda program locally.
- """
- event = {"name": "james"}
- context = None
- print(handler(event, context))
-
-
-if __name__ == '__main__':
- main()
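
The execute_lambda tasks elsewhere in this migration invoke this handler through the Lambda API. A hedged boto3 sketch of the same call; the function name and region are placeholders:

import json
import boto3

client = boto3.client("lambda", region_name="us-east-1")

resp = client.invoke(
    FunctionName="mini-lambda-example",  # placeholder name
    InvocationType="RequestResponse",
    Payload=json.dumps({"name": "james"}).encode("utf-8"),
)
body = json.loads(resp["Payload"].read())
print(body)  # for this handler: {"message": "hello james"}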
diff --git a/test/integration/targets/aws_lambda/meta/main.yml b/test/integration/targets/aws_lambda/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/aws_lambda/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/aws_lambda/tasks/main.yml b/test/integration/targets/aws_lambda/tasks/main.yml
deleted file mode 100644
index 3ebd3c028f..0000000000
--- a/test/integration/targets/aws_lambda/tasks/main.yml
+++ /dev/null
@@ -1,515 +0,0 @@
----
-# tasks file for aws_lambda test
-
-- name: set connection information for AWS modules and run tests
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
-
- block:
-
- # ============================================================
- - name: test with no parameters
- lambda:
- register: result
- ignore_errors: true
-
- - name: assert failure when called with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("missing required arguments: name")'
-
- # ============================================================
- - name: test with no parameters except state absent
- lambda:
- state: absent
- register: result
- ignore_errors: true
-
- - name: assert failure when called with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("missing required arguments: name")'
-
- # ============================================================
- - name: test with no role or handler
- lambda:
- name: ansible-testing-fake-should-not-be-created
- runtime: "python2.7"
- register: result
- ignore_errors: true
-
-  - name: assert failure when called without a handler
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("state is present but all of the following are missing: handler")'
-
- # ============================================================
- - name: test with all module required variables but no region
- lambda:
- name: ansible-testing-fake-should-not-be-created
- runtime: "python2.7"
- handler: "no-handler"
- role: "arn:fake-role-doesnt-exist"
- region:
- register: result
- ignore_errors: true
-
-  - name: assert failure when called without a region
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "region must be specified"'
-
- # ============================================================
- - name: test with all module required variables, no region and all possible variables set to blank
- lambda:
- name: ansible-testing-fake-should-not-be-created
- state: present
- runtime: "python2.7"
- role: arn:fake-role-doesnt-exist
- handler:
- s3_bucket:
- s3_key:
- s3_object_version:
- description:
- vpc_subnet_ids:
- vpc_security_group_ids:
- environment_variables:
- dead_letter_arn:
- region:
- register: result
- ignore_errors: true
-
-  - name: assert failure when called without a region
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "region must be specified"'
-
- # ============================================================
- # direct zip file upload
- - name: move lambda into place for archive module
- copy:
- src: "mini_lambda.py"
- dest: "{{output_dir}}/mini_lambda.py"
-
- - name: bundle lambda into a zip
- archive:
- format: zip
- path: "{{output_dir}}/mini_lambda.py"
- dest: "{{output_dir}}/mini_lambda.zip"
- register: zip_res
-
- - name: test state=present - upload the lambda
- lambda:
- name: "{{lambda_function_name}}"
- runtime: "python2.7"
- handler: "mini_lambda.handler"
- role: "ansible_lambda_role"
- zip_file: "{{zip_res.dest}}"
- register: result
-
- - name: assert lambda upload succeeded
- assert:
- that:
- - result is not failed
- - result.configuration.tracing_config.mode == "PassThrough"
-
- - name: test lambda works
- execute_lambda:
- name: "{{lambda_function_name}}"
- payload:
- name: "Mr Ansible Tests"
- register: result
-
- - name: assert lambda manages to respond as expected
- assert:
- that:
- - 'result is not failed'
- - 'result.result.output.message == "hello Mr Ansible Tests"'
-
- - name: test lambda config updates
- lambda:
- name: "{{lambda_function_name}}"
- runtime: "nodejs10.x"
- tracing_mode: 'Active'
- handler: "mini_lambda.handler"
- role: "ansible_lambda_role"
- register: update_result
-
- - name: assert that update succeeded
- assert:
- that:
- - update_result is not failed
- - update_result.changed == True
- - update_result.configuration.runtime == 'nodejs10.x'
- - update_result.configuration.tracing_config.mode == 'Active'
-
- - name: test no changes are made with the same parameters
- lambda:
- name: "{{lambda_function_name}}"
- runtime: "nodejs10.x"
- tracing_mode: 'Active'
- handler: "mini_lambda.handler"
- role: "ansible_lambda_role"
- register: update_result
-
- - name: assert that update succeeded
- assert:
- that:
- - update_result is not failed
- - update_result.changed == False
- - update_result.configuration.runtime == 'nodejs10.x'
- - update_result.configuration.tracing_config.mode == 'Active'
-
- - name: reset config updates for the following tests
- lambda:
- name: "{{lambda_function_name}}"
- runtime: "python2.7"
- tracing_mode: 'PassThrough'
- handler: "mini_lambda.handler"
- role: "ansible_lambda_role"
- register: result
-
- - name: assert that reset succeeded
- assert:
- that:
- - result is not failed
- - result.changed == True
- - result.configuration.runtime == 'python2.7'
- - result.configuration.tracing_config.mode == 'PassThrough'
-
-  - name: lambda_info | Gather all info for the given lambda function
- lambda_info:
- name: "{{ lambda_function_name }}"
- query: all
- register: lambda_infos_all
-
-  - name: lambda_info | Assert successful retrieval of all information
- assert:
- that:
- - lambda_infos_all is not failed
- - lambda_infos_all.function[lambda_function_name].function_name == lambda_function_name
- - lambda_infos_all.function[lambda_function_name].runtime == "python2.7"
- - lambda_infos_all.function[lambda_function_name].versions is defined
- - lambda_infos_all.function[lambda_function_name].aliases is defined
- - lambda_infos_all.function[lambda_function_name].policy is defined
- - lambda_infos_all.function[lambda_function_name].mappings is defined
- - lambda_infos_all.function[lambda_function_name].description == ""
- - lambda_infos_all.function[lambda_function_name].function_arn is defined
- - lambda_infos_all.function[lambda_function_name].handler == "mini_lambda.handler"
-
-  - name: lambda_info | Gather version info for the given lambda function
- lambda_info:
- name: "{{ lambda_function_name }}"
- query: versions
- register: lambda_infos_versions
-
-  - name: lambda_info | Assert successful retrieval of versions information
- assert:
- that:
- - lambda_infos_versions is not failed
- - lambda_infos_versions.function[lambda_function_name].versions|length > 0
- - lambda_infos_versions.function[lambda_function_name].function_name is undefined
-
-  - name: lambda_info | Gather config info for the given lambda function
- lambda_info:
- name: "{{ lambda_function_name }}"
- query: config
- register: lambda_infos_config
-
-  - name: lambda_info | Assert successful retrieval of config information
- assert:
- that:
- - lambda_infos_config is not failed
- - lambda_infos_config.function[lambda_function_name].function_name == lambda_function_name
- - lambda_infos_config.function[lambda_function_name].description is defined
- - lambda_infos_config.function[lambda_function_name].versions is undefined
-
-  - name: lambda_info | Gather policy info for the given lambda function
- lambda_info:
- name: "{{ lambda_function_name }}"
- query: policy
- register: lambda_infos_policy
-
-  - name: lambda_info | Assert successful retrieval of policy information
- assert:
- that:
- - lambda_infos_policy is not failed
- - lambda_infos_policy.function[lambda_function_name].policy is defined
- - lambda_infos_policy.function[lambda_function_name].versions is undefined
-
-  - name: lambda_info | Gather aliases info for the given lambda function
- lambda_info:
- name: "{{ lambda_function_name }}"
- query: aliases
- register: lambda_infos_aliases
-
-  - name: lambda_info | Assert successful retrieval of aliases information
- assert:
- that:
- - lambda_infos_aliases is not failed
- - lambda_infos_aliases.function[lambda_function_name].aliases is defined
-
-  - name: lambda_info | Gather mappings info for the given lambda function
- lambda_info:
- name: "{{ lambda_function_name }}"
- query: mappings
- register: lambda_infos_mappings
-
-  - name: lambda_info | Assert successful retrieval of mappings information
- assert:
- that:
- - lambda_infos_mappings is not failed
- - lambda_infos_mappings.function[lambda_function_name].mappings is defined
-
- # ============================================================
- - name: test state=present with security group but no vpc
- lambda:
- name: "{{lambda_function_name}}"
- runtime: "python2.7"
- role: "ansible_lambda_role"
- zip_file: "{{zip_res.dest}}"
- handler:
- description:
- vpc_subnet_ids:
- vpc_security_group_ids: sg-FA6E
- environment_variables:
- dead_letter_arn:
- register: result
- ignore_errors: true
-
- - name: assert lambda fails with proper message
- assert:
- that:
- - 'result is failed'
- - 'result.msg != "MODULE FAILURE"'
- - 'result.changed == False'
- - '"requires at least one security group and one subnet" in result.msg'
-
- # ============================================================
- - name: test state=present with all nullable variables explicitly set to null
- lambda:
- name: "{{lambda_function_name}}"
- runtime: "python2.7"
- role: "ansible_lambda_role"
- zip_file: "{{zip_res.dest}}"
- handler: "mini_lambda.handler"
-# These are not allowed because they are mutually exclusive with zip_file.
-# s3_bucket:
-# s3_key:
-# s3_object_version:
- description:
- vpc_subnet_ids:
- vpc_security_group_ids:
- environment_variables:
- dead_letter_arn:
- register: result
-
- - name: assert lambda remains as before
- assert:
- that:
- - 'result is not failed'
- - 'result.changed == False'
-
- # ============================================================
- - name: test putting an environment variable changes lambda
- lambda:
- name: "{{lambda_function_name}}"
- runtime: "python2.7"
- handler: "mini_lambda.handler"
- role: "ansible_lambda_role"
- zip_file: "{{zip_res.dest}}"
- environment_variables:
- EXTRA_MESSAGE: "I think you are great!!"
- register: result
-
- - name: assert lambda upload succeeded
- assert:
- that:
- - 'result is not failed'
- - 'result.changed == True'
-
- - name: test lambda works
- execute_lambda:
- name: "{{lambda_function_name}}"
- payload:
- name: "Mr Ansible Tests"
- security_token: '{{security_token}}'
- register: result
-
- - name: assert lambda manages to respond as expected
- assert:
- that:
- - 'result is not failed'
- - 'result.result.output.message == "hello Mr Ansible Tests. I think you are great!!"'
-
- # ============================================================
- - name: test state=present triggering a network exception due to bad url
- lambda:
- name: "{{lambda_function_name}}"
- runtime: "python2.7"
- role: "ansible_lambda_role"
- ec2_url: https://noexist.example.com
- ec2_region: '{{ec2_region}}'
- ec2_access_key: 'iamnotreallyanaccesskey'
- ec2_secret_key: 'thisisabadsecretkey'
- security_token: 'andthisisabadsecuritytoken'
- zip_file: "{{zip_res.dest}}"
- register: result
- ignore_errors: true
-
-  - name: assert the lambda call fails as expected
- assert:
- that:
- - 'result is failed'
- - 'result.changed == False'
-
- # ============================================================
-  - name: test state=absent (expect changed=True)
- lambda:
- name: "{{lambda_function_name}}"
- state: absent
- register: result
-
- - name: assert state=absent
- assert:
- that:
- - 'result is not failed'
- - 'result.changed == True'
-
- # ============================================================
- # parallel lambda creation
-
- - name: parallel lambda creation 1/4
- lambda:
- name: "{{lambda_function_name}}_1"
- runtime: "python2.7"
- handler: "mini_lambda.handler"
- role: "ansible_lambda_role"
- zip_file: "{{zip_res.dest}}"
- async: 1000
- register: async_1
-
- - name: parallel lambda creation 2/4
- lambda:
- name: "{{lambda_function_name}}_2"
- runtime: "python2.7"
- handler: "mini_lambda.handler"
- role: "ansible_lambda_role"
- zip_file: "{{zip_res.dest}}"
- async: 1000
- register: async_2
-
- - name: parallel lambda creation 3/4
- lambda:
- name: "{{lambda_function_name}}_3"
- runtime: "python2.7"
- handler: "mini_lambda.handler"
- role: "ansible_lambda_role"
- zip_file: "{{zip_res.dest}}"
- async: 1000
- register: async_3
-
- - name: parallel lambda creation 4/4
- lambda:
- name: "{{lambda_function_name}}_4"
- runtime: "python2.7"
- handler: "mini_lambda.handler"
- role: "ansible_lambda_role"
- zip_file: "{{zip_res.dest}}"
- register: result
-
-  - name: assert parallel lambda creation succeeded
- assert:
- that:
- - 'result is not failed'
-
- - name: wait for async job 1
- async_status: jid={{ async_1.ansible_job_id }}
- register: job_result
- until: job_result is finished
- retries: 30
-
- - name: wait for async job 2
-    async_status: jid={{ async_2.ansible_job_id }}
- register: job_result
- until: job_result is finished
- retries: 30
-
- - name: wait for async job 3
- async_status: jid={{ async_3.ansible_job_id }}
- register: job_result
- until: job_result is finished
- retries: 30
-
-
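
The async/async_status pairs above fan the function creations out in parallel and then poll each job until it finishes. A rough Python equivalent of that fan-out-then-wait pattern, with a placeholder standing in for the real creation call:

from concurrent.futures import ThreadPoolExecutor

def create_function(name):
    # Placeholder for the real creation call (e.g. a boto3 Lambda client).
    return "created %s" % name

names = ["fn_1", "fn_2", "fn_3"]
with ThreadPoolExecutor(max_workers=3) as pool:
    futures = [pool.submit(create_function, n) for n in names]
    # The moral equivalent of `until: job_result is finished`:
    results = [f.result() for f in futures]
print(results)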
- - name: parallel lambda deletion 1/4
- lambda:
- name: "{{lambda_function_name}}_1"
- state: absent
- zip_file: "{{zip_res.dest}}"
- async: 1000
- register: async_1
-
- - name: parallel lambda deletion 2/4
- lambda:
- name: "{{lambda_function_name}}_2"
- state: absent
- zip_file: "{{zip_res.dest}}"
- async: 1000
- register: async_2
-
- - name: parallel lambda deletion 3/4
- lambda:
- name: "{{lambda_function_name}}_3"
- state: absent
- zip_file: "{{zip_res.dest}}"
- async: 1000
- register: async_3
-
- - name: parallel lambda deletion 4/4
- lambda:
- name: "{{lambda_function_name}}_4"
- state: absent
- zip_file: "{{zip_res.dest}}"
- register: result
-
-  - name: assert lambda deletion has succeeded
- assert:
- that:
- - 'result is not failed'
-
- - name: wait for async job 1
- async_status: jid={{ async_1.ansible_job_id }}
- register: job_result
- until: job_result is finished
- retries: 30
-
- - name: wait for async job 2
-    async_status: jid={{ async_2.ansible_job_id }}
- register: job_result
- until: job_result is finished
- retries: 30
-
- - name: wait for async job 3
- async_status: jid={{ async_3.ansible_job_id }}
- register: job_result
- until: job_result is finished
- retries: 30
-
- # ============================================================
- always:
-
- - name: ensure function is absent at end of test
- lambda:
- name: "{{lambda_function_name}}"
- state: absent
- ignore_errors: true
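
The "bundle lambda into a zip" step near the top of this file can be reproduced with the standard library alone; the paths below are assumptions:

import zipfile

with zipfile.ZipFile("mini_lambda.zip", "w", zipfile.ZIP_DEFLATED) as zf:
    # Lambda unpacks the archive at the root, so store the file without
    # any leading directory components.
    zf.write("mini_lambda.py", arcname="mini_lambda.py")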
diff --git a/test/integration/targets/aws_secret/aliases b/test/integration/targets/aws_secret/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/aws_secret/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/aws_secret/defaults/main.yaml b/test/integration/targets/aws_secret/defaults/main.yaml
deleted file mode 100644
index cfdab55249..0000000000
--- a/test/integration/targets/aws_secret/defaults/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-super_secret_string: 'Test12345'
-secret_manager_role: "{{ resource_prefix }}-secrets-manager"
-secret_name: "{{ resource_prefix }}-test-secret-string"
-lambda_name: "{{ resource_prefix }}-hello-world"
diff --git a/test/integration/targets/aws_secret/files/hello_world.zip b/test/integration/targets/aws_secret/files/hello_world.zip
deleted file mode 100644
index 8fd9e058f4..0000000000
--- a/test/integration/targets/aws_secret/files/hello_world.zip
+++ /dev/null
Binary files differ
diff --git a/test/integration/targets/aws_secret/files/secretsmanager-trust-policy.json b/test/integration/targets/aws_secret/files/secretsmanager-trust-policy.json
deleted file mode 100644
index c53e309641..0000000000
--- a/test/integration/targets/aws_secret/files/secretsmanager-trust-policy.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "Service": "lambda.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- },
- {
- "Effect": "Allow",
- "Principal": {
- "Service": "secretsmanager.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
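
The tasks below attach this trust policy when creating the IAM role. A hedged boto3 sketch of that step; the role name is a placeholder:

import json
import boto3

iam = boto3.client("iam")
trust_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {"Effect": "Allow",
         "Principal": {"Service": "lambda.amazonaws.com"},
         "Action": "sts:AssumeRole"},
        {"Effect": "Allow",
         "Principal": {"Service": "secretsmanager.amazonaws.com"},
         "Action": "sts:AssumeRole"},
    ],
}
iam.create_role(
    RoleName="example-secrets-manager-role",  # placeholder name
    AssumeRolePolicyDocument=json.dumps(trust_policy),
)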
diff --git a/test/integration/targets/aws_secret/tasks/main.yaml b/test/integration/targets/aws_secret/tasks/main.yaml
deleted file mode 100644
index a7d02b0b60..0000000000
--- a/test/integration/targets/aws_secret/tasks/main.yaml
+++ /dev/null
@@ -1,250 +0,0 @@
----
-- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- - name: retrieve caller facts
- aws_caller_info:
- register: test_caller_facts
-
- - name: ensure IAM role exists
- iam_role:
- name: "{{ secret_manager_role }}"
- assume_role_policy_document: "{{ lookup('file','secretsmanager-trust-policy.json') }}"
- state: present
- create_instance_profile: no
- managed_policy:
- - 'arn:aws:iam::aws:policy/SecretsManagerReadWrite'
- register: iam_role
- ignore_errors: yes
-
- - name: wait 10 seconds for role to become available
- pause:
- seconds: 10
- when: iam_role.changed
-
-  # CI does not remove the role, and comparing policies has a bug on Python 3; fall back to using iam_role_info
- - name: get IAM role
- iam_role_info:
- name: "{{ secret_manager_role }}"
- register: iam_role_info
-
- - name: set iam_role_output
- set_fact:
- iam_role_output: "{{ iam_role_info.iam_roles[0] }}"
- when: iam_role_info is defined
-
- - name: create a temporary directory
- tempfile:
- state: directory
- register: tmp
-
- - name: move lambda into place for upload
- copy:
- src: "files/hello_world.zip"
- dest: "{{ tmp.path }}/hello_world.zip"
-
- - name: dummy lambda for testing
- lambda:
- name: "{{ lambda_name }}"
- state: present
- zip_file: "{{ tmp.path }}/hello_world.zip"
- runtime: 'python2.7'
- role: "{{ iam_role_output.arn }}"
- handler: 'hello_world.lambda_handler'
- register: lambda_output
- until: not lambda_output.failed
- retries: 10
- delay: 5
-
- - debug:
- var: lambda_output
-
- # ============================================================
- # Module parameter testing
- # ============================================================
- - name: test with no parameters
- aws_secret:
- register: result
- ignore_errors: true
- check_mode: true
-
- - name: assert failure when called with no parameters
- assert:
- that:
- - result.failed
- - 'result.msg.startswith("missing required arguments:")'
-
- # ============================================================
- # Creation/Deletion testing
- # ============================================================
- - name: add secret to AWS Secrets Manager
- aws_secret:
- name: "{{ secret_name }}"
- state: present
- secret_type: 'string'
- secret: "{{ super_secret_string }}"
- register: result
-
- - name: assert correct keys are returned
- assert:
- that:
- - result.changed
- - result.arn is not none
- - result.name is not none
- - result.tags is not none
- - result.version_ids_to_stages is not none
-
- - name: no changes to secret
- aws_secret:
- name: "{{ secret_name }}"
- state: present
- secret_type: 'string'
- secret: "{{ super_secret_string }}"
- register: result
-
- - name: assert correct keys are returned
- assert:
- that:
- - not result.changed
- - result.arn is not none
-
- - name: make change to secret
- aws_secret:
- name: "{{ secret_name }}"
- description: 'this is a change to this secret'
- state: present
- secret_type: 'string'
- secret: "{{ super_secret_string }}"
- register: result
-
- - debug:
- var: result
-
- - name: assert correct keys are returned
- assert:
- that:
- - result.changed
- - result.arn is not none
- - result.name is not none
- - result.tags is not none
- - result.version_ids_to_stages is not none
-
- - name: add tags to secret
- aws_secret:
- name: "{{ secret_name }}"
- description: 'this is a change to this secret'
- state: present
- secret_type: 'string'
- secret: "{{ super_secret_string }}"
- tags:
- Foo: 'Bar'
- Test: 'Tag'
- register: result
-
- - name: assert correct keys are returned
- assert:
- that:
- - result.changed
-
- - name: remove tags from secret
- aws_secret:
- name: "{{ secret_name }}"
- description: 'this is a change to this secret'
- state: present
- secret_type: 'string'
- secret: "{{ super_secret_string }}"
- register: result
-
- - name: assert correct keys are returned
- assert:
- that:
- - result.changed
-
- - name: lambda policy for secrets manager
- lambda_policy:
- state: present
- function_name: "{{ lambda_name }}"
- statement_id: LambdaSecretsManagerTestPolicy
- action: 'lambda:InvokeFunction'
- principal: "secretsmanager.amazonaws.com"
-
- - name: add rotation lambda to secret
- aws_secret:
- name: "{{ secret_name }}"
- description: 'this is a change to this secret'
- state: present
- secret_type: 'string'
- secret: "{{ super_secret_string }}"
- rotation_lambda: "arn:aws:lambda:{{ aws_region }}:{{ test_caller_facts.account }}:function:{{ lambda_name }}"
- register: result
- retries: 100
- delay: 5
- until: not result.failed
-
- - name: assert correct keys are returned
- assert:
- that:
- - result.changed
-
- - name: remove rotation lambda from secret
- aws_secret:
- name: "{{ secret_name }}"
- description: 'this is a change to this secret'
- state: present
- secret_type: 'string'
- secret: "{{ super_secret_string }}"
- register: result
-
- - name: assert correct keys are returned
- assert:
- that:
- - result.changed
-
- always:
- - name: remove secret
- aws_secret:
- name: "{{ secret_name }}"
- state: absent
- secret_type: 'string'
- secret: "{{ super_secret_string }}"
- recovery_window: 0
- ignore_errors: yes
-
- - name: remove lambda policy
- lambda_policy:
- state: absent
- function_name: "{{ lambda_name }}"
-      statement_id: LambdaSecretsManagerTestPolicy
- action: lambda:InvokeFunction
- principal: secretsmanager.amazonaws.com
- ignore_errors: yes
-
- - name: remove dummy lambda
- lambda:
- name: "{{ lambda_name }}"
- state: absent
- zip_file: "{{ tmp.path }}/hello_world.zip"
- runtime: 'python2.7'
- role: "{{ secret_manager_role }}"
- handler: 'hello_world.lambda_handler'
- ignore_errors: yes
-
- # CI does not remove the IAM role
- - name: remove IAM role
- iam_role:
- name: "{{ secret_manager_role }}"
- assume_role_policy_document: "{{ lookup('file','secretsmanager-trust-policy.json') }}"
- state: absent
- create_instance_profile: no
- managed_policy:
- - 'arn:aws:iam::aws:policy/SecretsManagerReadWrite'
- ignore_errors: yes
-
- - name: remove temporary dir
- file:
- path: "{{ tmp.path }}"
- state: absent
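
The create/update/delete cycle above maps onto a handful of Secrets Manager API calls. A hedged boto3 sketch of that flow; the secret name and region are placeholders:

import boto3

sm = boto3.client("secretsmanager", region_name="us-east-1")

created = sm.create_secret(Name="example-test-secret", SecretString="Test12345")
sm.update_secret(
    SecretId=created["ARN"],
    Description="this is a change to this secret",
)
# recovery_window: 0 in the cleanup task corresponds to a forced delete:
sm.delete_secret(SecretId=created["ARN"], ForceDeleteWithoutRecovery=True)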
diff --git a/test/integration/targets/aws_ses_identity/aliases b/test/integration/targets/aws_ses_identity/aliases
deleted file mode 100644
index 157ce0c9d4..0000000000
--- a/test/integration/targets/aws_ses_identity/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group3
diff --git a/test/integration/targets/aws_ses_identity/defaults/main.yaml b/test/integration/targets/aws_ses_identity/defaults/main.yaml
deleted file mode 100644
index f36d01793e..0000000000
--- a/test/integration/targets/aws_ses_identity/defaults/main.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-email_identity: "{{ resource_prefix }}@example.com"
-domain_identity: "{{ resource_prefix }}.example.com"
-notification_queue_name: "{{ resource_prefix }}-notification-queue"
diff --git a/test/integration/targets/aws_ses_identity/meta/main.yaml b/test/integration/targets/aws_ses_identity/meta/main.yaml
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/aws_ses_identity/meta/main.yaml
+++ /dev/null
diff --git a/test/integration/targets/aws_ses_identity/tasks/assert_defaults.yaml b/test/integration/targets/aws_ses_identity/tasks/assert_defaults.yaml
deleted file mode 100644
index 0f74d2f05d..0000000000
--- a/test/integration/targets/aws_ses_identity/tasks/assert_defaults.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-- name: assert returned identity
- assert:
- that:
- - result.identity == identity
-- name: assert returned identity_arn
- assert:
- that:
- - "result.identity_arn|regex_search('^arn:aws:ses:' + ec2_region + ':[0-9]*:identity/' + identity + '$')"
- msg: "'{{ result.identity_arn}}' doesn't match regex '^arn:aws:ses:{{ ec2_region }}:[0-9]*:identity/{{ identity }}'"
-- name: assert verification_attributes.verification_status == 'Pending'
- assert:
- that:
- - result.verification_attributes.verification_status == 'Pending'
-- name: assert notification defaults
- assert:
- that:
- - result.notification_attributes.forwarding_enabled == True
- - result.notification_attributes.headers_in_bounce_notifications_enabled == False
- - result.notification_attributes.headers_in_complaint_notifications_enabled == False
- - result.notification_attributes.headers_in_delivery_notifications_enabled == False
- - "'bounce_topic' not in result.notification_attributes"
- - "'complaint_topic' not in result.notification_attributes"
- - "'delivery_topic' not in result.notification_attributes"
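
The identity_arn check above is an anchored regular expression. The same test written as a plain Python regex, with placeholder values for the region, account and identity:

import re

region = "us-east-1"              # placeholder
identity = "example.example.com"  # placeholder
arn = "arn:aws:ses:us-east-1:123456789012:identity/example.example.com"

pattern = (r"^arn:aws:ses:" + re.escape(region)
           + r":[0-9]*:identity/" + re.escape(identity) + r"$")
assert re.search(pattern, arn)
print("identity ARN matches")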
diff --git a/test/integration/targets/aws_ses_identity/tasks/main.yaml b/test/integration/targets/aws_ses_identity/tasks/main.yaml
deleted file mode 100644
index 1be6c95422..0000000000
--- a/test/integration/targets/aws_ses_identity/tasks/main.yaml
+++ /dev/null
@@ -1,648 +0,0 @@
----
-# ============================================================
-- name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
-- name: test register email identity
- block:
- - name: register email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- <<: *aws_connection_info
- register: result
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
- - import_tasks: assert_defaults.yaml
- vars:
- identity: "{{ email_identity }}"
- always:
- - name: cleanup email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test register domain identity
- block:
- - name: register domain identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: result
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
- - import_tasks: assert_defaults.yaml
- vars:
- identity: "{{ domain_identity }}"
- - name: assert verification_attributes.verification_token is defined
- assert:
- that:
- - result.verification_attributes.verification_token
- always:
- - name: cleanup domain identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test email_identity unchanged when already existing
- block:
- - name: register identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- <<: *aws_connection_info
- - name: duplicate register identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- <<: *aws_connection_info
- register: result
- - name: assert changed is False
- assert:
- that:
- - result.changed == False
- - import_tasks: assert_defaults.yaml
- vars:
- identity: "{{ email_identity }}"
- always:
- - name: cleanup identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test domain_identity unchanged when already existing
- block:
- - name: register identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- - name: duplicate register identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: result
- - name: assert changed is False
- assert:
- that:
- - result.changed == False
- - import_tasks: assert_defaults.yaml
- vars:
- identity: "{{ domain_identity }}"
- always:
- - name: cleanup identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-# Test for https://github.com/ansible/ansible/issues/51531
-# Because the AWS region is used explicitly rather than just to
-# obtain a connection, make sure this still works when the
-# region comes from the environment rather than a parameter.
-- name: test register identity without explicit region
- block:
- - name: register email identity without explicit region
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- <<: *aws_connection_info
- region: "{{ omit }}"
- register: result
- environment:
- AWS_DEFAULT_REGION: "{{ aws_region }}"
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
- - import_tasks: assert_defaults.yaml
- vars:
- identity: "{{ email_identity }}"
- always:
- - name: cleanup email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test register email identity check mode
- block:
- - name: register email identity check mode
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- <<: *aws_connection_info
- register: result
- check_mode: True
-
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
-
- - import_tasks: assert_defaults.yaml
- vars:
- identity: "{{ email_identity }}"
-
- always:
- - name: cleanup email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: assert nothing to clean up since check mode
- assert:
- that:
- - result.changed == False
-# ============================================================
-- name: test register domain identity check mode
- block:
- - name: register domain identity check mode
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: result
- check_mode: True
-
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
-
- - import_tasks: assert_defaults.yaml
- vars:
- identity: "{{ domain_identity }}"
-
- always:
- - name: cleanup domain identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: assert nothing to clean up since check mode
- assert:
- that:
- - result.changed == False
-# ============================================================
-- name: remove non-existent email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
- register: result
-- name: assert changed is False
- assert:
- that:
- - result.changed == False
-# ============================================================
-- name: remove non-existent domain identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
- register: result
-- name: assert changed is False
- assert:
- that:
- - result.changed == False
-# ============================================================
-- name: test remove email identity check mode
- block:
- - name: register email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- <<: *aws_connection_info
- register: result
-
- - name: remove email identity check mode
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
- register: result
- check_mode: True
-
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
- always:
- - name: cleanup email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: assert something to clean up since remove was check mode
- assert:
- that:
- - result.changed == True
-# ============================================================
-- name: test remove domain identity check mode
- block:
- - name: register domain identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: result
-
- - name: remove domain identity check mode
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
- register: result
- check_mode: True
-
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
- always:
- - name: cleanup domain identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: assert something to clean up since remove was check mode
- assert:
- that:
- - result.changed == True
-# ============================================================
-- name: test set notification queues
- block:
- - name: test topic
- sns_topic:
- name: "{{ notification_queue_name }}-{{ item }}"
- state: present
- <<: *aws_connection_info
- register: topic_info
- with_items:
- - bounce
- - complaint
- - delivery
- - name: register email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- bounce_notifications:
- topic: "{{ topic_info.results[0].sns_arn }}"
- complaint_notifications:
- topic: "{{ topic_info.results[1].sns_arn }}"
- delivery_notifications:
- topic: "{{ topic_info.results[2].sns_arn }}"
- <<: *aws_connection_info
- register: result
- - name: assert notification settings
- assert:
- that:
- - result.notification_attributes.bounce_topic == topic_info.results[0].sns_arn
- - result.notification_attributes.complaint_topic == topic_info.results[1].sns_arn
- - result.notification_attributes.delivery_topic == topic_info.results[2].sns_arn
- - name: assert notification headers unchanged
- assert:
- that:
- - result.notification_attributes.headers_in_bounce_notifications_enabled == False
- - result.notification_attributes.headers_in_complaint_notifications_enabled == False
- - result.notification_attributes.headers_in_delivery_notifications_enabled == False
- always:
- - name: cleanup topics
- sns_topic:
- name: "{{ notification_queue_name }}-{{ item }}"
- state: absent
- <<: *aws_connection_info
- with_items:
- - bounce
- - complaint
- - delivery
- - name: cleanup email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test change notification queues after create
- block:
- - name: test topic
- sns_topic:
- name: "{{ notification_queue_name }}-{{ item }}"
- state: present
- <<: *aws_connection_info
- register: topic_info
- with_items:
- - bounce
- - complaint
- - delivery
- - name: register email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- <<: *aws_connection_info
- - name: set notification topics
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- bounce_notifications:
- topic: "{{ topic_info.results[0].sns_arn }}"
- complaint_notifications:
- topic: "{{ topic_info.results[1].sns_arn }}"
- delivery_notifications:
- topic: "{{ topic_info.results[2].sns_arn }}"
- <<: *aws_connection_info
- register: result
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
- - name: assert notification settings
- assert:
- that:
- - result.notification_attributes.bounce_topic == topic_info.results[0].sns_arn
- - result.notification_attributes.complaint_topic == topic_info.results[1].sns_arn
- - result.notification_attributes.delivery_topic == topic_info.results[2].sns_arn
- always:
- - name: cleanup topics
- sns_topic:
- name: "{{ notification_queue_name }}-{{ item }}"
- state: absent
- <<: *aws_connection_info
- with_items:
- - bounce
- - complaint
- - delivery
- - name: cleanup email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test change notification settings check mode
- block:
- - name: test topic
- sns_topic:
- name: "{{ notification_queue_name }}-{{ item }}"
- state: present
- <<: *aws_connection_info
- register: topic_info
- with_items:
- - bounce
- - complaint
- - delivery
-
- - name: register email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- <<: *aws_connection_info
-
- - name: set notification settings check mode
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- bounce_notifications:
- topic: "{{ topic_info.results[0].sns_arn }}"
- include_headers: Yes
- complaint_notifications:
- topic: "{{ topic_info.results[1].sns_arn }}"
- include_headers: Yes
- delivery_notifications:
- topic: "{{ topic_info.results[2].sns_arn }}"
- include_headers: Yes
- feedback_forwarding: No
- <<: *aws_connection_info
- register: result
- check_mode: True
-
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
-
- - name: assert notification settings
- assert:
- that:
- - result.notification_attributes.bounce_topic == topic_info.results[0].sns_arn
- - result.notification_attributes.headers_in_bounce_notifications_enabled == True
- - result.notification_attributes.delivery_topic == topic_info.results[2].sns_arn
- - result.notification_attributes.headers_in_delivery_notifications_enabled == True
- - result.notification_attributes.complaint_topic == topic_info.results[1].sns_arn
- - result.notification_attributes.headers_in_complaint_notifications_enabled == True
- - result.notification_attributes.forwarding_enabled == False
-
- - name: re-register base email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- <<: *aws_connection_info
- register: result
-
- - name: assert no change since notifications were check mode
- assert:
- that:
- - result.changed == False
- - "'bounce_topic' not in result.notification_attributes"
- - result.notification_attributes.headers_in_bounce_notifications_enabled == False
- - "'delivery_topic' not in result.notification_attributes"
- - result.notification_attributes.headers_in_delivery_notifications_enabled == False
- - "'complaint_topic' not in result.notification_attributes"
- - result.notification_attributes.headers_in_complaint_notifications_enabled == False
- - result.notification_attributes.forwarding_enabled == True
-
- always:
- - name: cleanup topics
- sns_topic:
- name: "{{ notification_queue_name }}-{{ item }}"
- state: absent
- <<: *aws_connection_info
- with_items:
- - bounce
- - complaint
- - delivery
-
- - name: cleanup email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test include headers on notification queues
- block:
- - name: register email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- bounce_notifications:
- include_headers: Yes
- complaint_notifications:
- include_headers: Yes
- delivery_notifications:
- include_headers: Yes
- <<: *aws_connection_info
- register: result
- - name: assert notification headers enabled
- assert:
- that:
- - result.notification_attributes.headers_in_bounce_notifications_enabled == True
- - result.notification_attributes.headers_in_complaint_notifications_enabled == True
- - result.notification_attributes.headers_in_delivery_notifications_enabled == True
- always:
- - name: cleanup email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test disable feedback forwarding
- block:
- - name: test topic
- sns_topic:
- name: "{{ notification_queue_name }}-{{ item }}"
- state: present
- <<: *aws_connection_info
- register: topic_info
- with_items:
- - bounce
- - complaint
- - name: register email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- bounce_notifications:
- topic: "{{ topic_info.results[0].sns_arn }}"
- complaint_notifications:
- topic: "{{ topic_info.results[1].sns_arn }}"
- feedback_forwarding: No
- <<: *aws_connection_info
- register: result
- - name: assert feedback_forwarding == False
- assert:
- that:
- - result.notification_attributes.forwarding_enabled == False
- always:
- - name: cleanup topics
- sns_topic:
- name: "{{ notification_queue_name }}-{{ item }}"
- state: absent
- <<: *aws_connection_info
- with_items:
- - bounce
- - complaint
- - name: cleanup email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test disable feedback forwarding fails if no topics
- block:
- - name: register identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- feedback_forwarding: No
- <<: *aws_connection_info
- register: result
- failed_when: result.failed == False
-  - name: assert error message contains "Invalid Parameter Value"
- assert:
- that:
- - '"Invalid Parameter Value" in result.msg'
- always:
- - name: cleanup identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test disable feedback forwarding fails if no complaint topic
- block:
- - name: test topic
- sns_topic:
- name: "{{ notification_queue_name }}-bounce"
- state: present
- <<: *aws_connection_info
- register: topic_info
- - name: register email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- bounce_notifications:
- topic: "{{ topic_info.sns_arn }}"
- feedback_forwarding: No
- <<: *aws_connection_info
- register: result
- failed_when: result.failed == False
-  - name: assert error message contains "Invalid Parameter Value"
- assert:
- that:
- - '"Invalid Parameter Value" in result.msg'
- always:
- - name: cleanup topics
- sns_topic:
- name: "{{ notification_queue_name }}-bounce"
- state: absent
- <<: *aws_connection_info
- - name: cleanup identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test disable feedback forwarding fails if no bounce topic
- block:
- - name: test topic
- sns_topic:
- name: "{{ notification_queue_name }}-complaint"
- state: present
- <<: *aws_connection_info
- register: topic_info
- - name: register email identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: present
- complaint_notifications:
- topic: "{{ topic_info.sns_arn }}"
- feedback_forwarding: No
- <<: *aws_connection_info
- register: result
- failed_when: result.failed == False
-  - name: assert error message contains "Invalid Parameter Value"
- assert:
- that:
- - '"Invalid Parameter Value" in result.msg'
- always:
- - name: cleanup topics
- sns_topic:
- name: "{{ notification_queue_name }}-complaint"
- state: absent
- <<: *aws_connection_info
- - name: cleanup identity
- aws_ses_identity:
- identity: "{{ email_identity }}"
- state: absent
- <<: *aws_connection_info
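
The register/verify/cleanup cycle these tasks exercise corresponds to three SES API calls. A hedged boto3 sketch with a placeholder address and region:

import boto3

ses = boto3.client("ses", region_name="us-east-1")

ses.verify_email_identity(EmailAddress="test@example.com")
attrs = ses.get_identity_verification_attributes(
    Identities=["test@example.com"]
)
# A freshly registered identity reports a Pending verification status.
print(attrs["VerificationAttributes"]["test@example.com"]["VerificationStatus"])
ses.delete_identity(Identity="test@example.com")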
diff --git a/test/integration/targets/aws_ses_identity_policy/aliases b/test/integration/targets/aws_ses_identity_policy/aliases
deleted file mode 100644
index a112c3d1bb..0000000000
--- a/test/integration/targets/aws_ses_identity_policy/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group1
diff --git a/test/integration/targets/aws_ses_identity_policy/defaults/main.yaml b/test/integration/targets/aws_ses_identity_policy/defaults/main.yaml
deleted file mode 100644
index e77f32d08a..0000000000
--- a/test/integration/targets/aws_ses_identity_policy/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-domain_identity: "{{ resource_prefix }}.example.com"
-policy_name: "TestPolicy"
diff --git a/test/integration/targets/aws_ses_identity_policy/tasks/main.yaml b/test/integration/targets/aws_ses_identity_policy/tasks/main.yaml
deleted file mode 100644
index ee10c0b830..0000000000
--- a/test/integration/targets/aws_ses_identity_policy/tasks/main.yaml
+++ /dev/null
@@ -1,334 +0,0 @@
----
-# ============================================================
-- name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-# ============================================================
-- name: test add identity policy
- block:
- - name: register identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: identity_info
-
- - name: register identity policy
- aws_ses_identity_policy:
- identity: "{{ domain_identity }}"
- policy_name: "{{ policy_name }}"
- policy: "{{ lookup('template', 'policy.json.j2') }}"
- state: present
- <<: *aws_connection_info
- register: result
-
- - name: assert result.changed == True
- assert:
- that:
- - result.changed == True
-
- - name: assert result.policies contains only policy
- assert:
- that:
- - result.policies|length == 1
- - result.policies|select('equalto', policy_name)|list|length == 1
-
- always:
- - name: clean-up identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test add duplicate identity policy
- block:
- - name: register identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: identity_info
-
- - name: register identity policy
- aws_ses_identity_policy:
- identity: "{{ domain_identity }}"
- policy_name: "{{ policy_name }}"
- policy: "{{ lookup('template', 'policy.json.j2') }}"
- state: present
- <<: *aws_connection_info
-
- - name: register duplicate identity policy
- aws_ses_identity_policy:
- identity: "{{ domain_identity }}"
- policy_name: "{{ policy_name }}"
- policy: "{{ lookup('template', 'policy.json.j2') }}"
- state: present
- <<: *aws_connection_info
- register: result
-
- - name: assert result.changed == False
- assert:
- that:
- - result.changed == False
-
- - name: assert result.policies contains only policy
- assert:
- that:
- - result.policies|length == 1
- - result.policies|select('equalto', policy_name)|list|length == 1
-
- always:
- - name: clean-up identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test add identity policy by identity arn
- block:
- - name: register identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: identity_info
-
- - name: register identity policy
- aws_ses_identity_policy:
- identity: "{{ identity_info.identity_arn }}"
- policy_name: "{{ policy_name }}"
- policy: "{{ lookup('template', 'policy.json.j2') }}"
- state: present
- <<: *aws_connection_info
- register: result
-
- - name: assert result.changed == True
- assert:
- that:
- - result.changed == True
-
- - name: assert result.policies contains only policy
- assert:
- that:
- - result.policies|length == 1
- - result.policies|select('equalto', policy_name)|list|length == 1
-
- always:
- - name: clean-up identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test add multiple identity policies
- block:
- - name: register identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: identity_info
-
- - name: register identity policy
- aws_ses_identity_policy:
- identity: "{{ domain_identity }}"
- policy_name: "{{ policy_name }}-{{ item }}"
- policy: "{{ lookup('template', 'policy.json.j2') }}"
- state: present
- <<: *aws_connection_info
- with_items:
- - 1
- - 2
- register: result
-
- - name: assert result.policies contains policies
- assert:
- that:
- - result.results[1].policies|length == 2
- - result.results[1].policies|select('equalto', policy_name + '-1')|list|length == 1
- - result.results[1].policies|select('equalto', policy_name + '-2')|list|length == 1
-
- always:
- - name: clean-up identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test add inline identity policy
- block:
- - name: register identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: identity_info
-
- - name: register identity policy
- aws_ses_identity_policy:
- identity: "{{ domain_identity }}"
- policy_name: "{{ policy_name }}"
- policy:
- Id: SampleAuthorizationPolicy
- Version: "2012-10-17"
- Statement:
- - Sid: DenyAll
- Effect: Deny
- Resource: "{{ identity_info.identity_arn }}"
- Principal: "*"
- Action: "*"
- state: present
- <<: *aws_connection_info
- register: result
-
- - name: assert result.changed == True
- assert:
- that:
- - result.changed == True
-
- - name: assert result.policies contains only policy
- assert:
- that:
- - result.policies|length == 1
- - result.policies|select('equalto', policy_name)|list|length == 1
-
- - name: register duplicate identity policy
- aws_ses_identity_policy:
- identity: "{{ domain_identity }}"
- policy_name: "{{ policy_name }}"
- policy:
- Id: SampleAuthorizationPolicy
- Version: "2012-10-17"
- Statement:
- - Sid: DenyAll
- Effect: Deny
- Resource: "{{ identity_info.identity_arn }}"
- Principal: "*"
- Action: "*"
- state: present
- <<: *aws_connection_info
- register: result
-
- - name: assert result.changed == False
- assert:
- that:
- - result.changed == False
-
- always:
- - name: clean-up identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test remove identity policy
- block:
- - name: register identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: identity_info
-
- - name: register identity policy
- aws_ses_identity_policy:
- identity: "{{ domain_identity }}"
- policy_name: "{{ policy_name }}"
- policy: "{{ lookup('template', 'policy.json.j2') }}"
- state: present
- <<: *aws_connection_info
-
- - name: delete identity policy
- aws_ses_identity_policy:
- identity: "{{ domain_identity }}"
- policy_name: "{{ policy_name }}"
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: assert result.changed == True
- assert:
- that:
- - result.changed == True
-
- - name: assert result.policies empty
- assert:
- that:
- - result.policies|length == 0
-
- always:
- - name: clean-up identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test remove missing identity policy
- block:
- - name: register identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: identity_info
-
- - name: delete identity policy
- aws_ses_identity_policy:
- identity: "{{ domain_identity }}"
- policy_name: "{{ policy_name }}"
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: assert result.changed == False
- assert:
- that:
- - result.changed == False
-
- - name: assert result.policies empty
- assert:
- that:
- - result.policies|length == 0
-
- always:
- - name: clean-up identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- name: test add identity policy with invalid policy
- block:
- - name: register identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: present
- <<: *aws_connection_info
- register: identity_info
-
- - name: register identity policy
- aws_ses_identity_policy:
- identity: "{{ domain_identity }}"
- policy_name: "{{ policy_name }}"
- policy: '{"noSuchAttribute": 2}'
- state: present
- <<: *aws_connection_info
- register: result
- failed_when: result.failed == False
-
- - name: assert error.code == InvalidPolicy
- assert:
- that:
- - result.error.code == 'InvalidPolicy'
-
- always:
- - name: clean-up identity
- aws_ses_identity:
- identity: "{{ domain_identity }}"
- state: absent
- <<: *aws_connection_info
diff --git a/test/integration/targets/aws_ses_identity_policy/templates/policy.json.j2 b/test/integration/targets/aws_ses_identity_policy/templates/policy.json.j2
deleted file mode 100644
index b198e38f7f..0000000000
--- a/test/integration/targets/aws_ses_identity_policy/templates/policy.json.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "Id": "SampleAuthorizationPolicy",
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "DenyAll",
- "Effect": "Deny",
- "Resource": "{{ identity_info.identity_arn }}",
- "Principal": "*",
- "Action": "*"
- }
- ]
-}
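For reference, with a hypothetical identity ARN substituted for identity_info.identity_arn, the template above renders to the deny-all sending authorization policy that the tasks register, re-register, and delete:

    {
      "Id": "SampleAuthorizationPolicy",
      "Version": "2012-10-17",
      "Statement": [
        {
          "Sid": "DenyAll",
          "Effect": "Deny",
          "Resource": "arn:aws:ses:us-east-1:123456789012:identity/ansible-test.example.com",
          "Principal": "*",
          "Action": "*"
        }
      ]
    }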
diff --git a/test/integration/targets/aws_ses_rule_set/aliases b/test/integration/targets/aws_ses_rule_set/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/aws_ses_rule_set/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/aws_ses_rule_set/defaults/main.yaml b/test/integration/targets/aws_ses_rule_set/defaults/main.yaml
deleted file mode 100644
index f9fecf7bdf..0000000000
--- a/test/integration/targets/aws_ses_rule_set/defaults/main.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-default_rule_set: "{{ resource_prefix }}-default-rule-set"
-second_rule_set: "{{ resource_prefix }}-second-rule-set"
-
-# See comment in obtain-lock.yaml for definitions of these variables
-max_obtain_lock_attempts: 10
-obtain_lock_delay_seconds: 30
-lock_timeout_seconds: 900
-lock_log_group_prefix: "ansible-testing-locks/aws_ses_rule_set"
diff --git a/test/integration/targets/aws_ses_rule_set/tasks/active-rule-set-tests.yaml b/test/integration/targets/aws_ses_rule_set/tasks/active-rule-set-tests.yaml
deleted file mode 100644
index 99655e85d5..0000000000
--- a/test/integration/targets/aws_ses_rule_set/tasks/active-rule-set-tests.yaml
+++ /dev/null
@@ -1,349 +0,0 @@
----
-# ============================================================
-# These tests all rely on making rule sets active. There can only be
-# a single active rule set, so multiple builds must not run these tests
-# in parallel or they will fail intermittently.
-# See the locking block in main.yaml for how this restriction is enforced.
-# ============================================================
-
-- name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
-# ============================================================
-- name: mark rule set active
- block:
- - name: create rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- <<: *aws_connection_info
- - name: mark rule set active
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- register: result
- - name: assert changed to active
- assert:
- that:
- - result.changed == True
- - result.active == True
- - name: remark rule set active
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- register: result
- - name: assert changed is False
- assert:
- that:
- - result.changed == False
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
-
-# ============================================================
-- name: create rule set active
- block:
- - name: create rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- register: result
- - name: assert changed to existing and active
- assert:
- that:
- - result.changed == True
- - result.active == True
- - "default_rule_set in result.rule_sets|map(attribute='name')"
- - name: remark rule set active
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- register: result
- - name: assert changed is False
- assert:
- that:
- - result.changed == False
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
-
-# ============================================================
-- name: mark rule set inactive
- block:
- - name: create active rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- - name: mark rule set inactive
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: False
- <<: *aws_connection_info
- register: result
- - name: assert changed to inactive
- assert:
- that:
- - result.changed == True
- - result.active == False
- - name: remark rule set inactive
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: False
- <<: *aws_connection_info
- register: result
- - name: assert changed is False
- assert:
- that:
- - result.changed == False
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
-
-# ============================================================
-- name: Absent active flag does not change active status
- block:
- - name: create active rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- - name: recreate rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- <<: *aws_connection_info
- register: result
- - name: assert not changed and still active
- assert:
- that:
- - result.changed == False
- - result.active == True
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
-
-# ============================================================
-- name: Cannot Remove Active Rule Set
- block:
- - name: create active rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- - name: remove rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- <<: *aws_connection_info
- register: result
- failed_when: "result.error.code != 'CannotDelete'"
- - name: assert error code is CannotDelete
- assert:
- that:
- - "result.error.code == 'CannotDelete'"
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
-
-# ============================================================
-- name: Remove Active Rule Set with Force
- block:
- - name: create active rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- - name: force remove rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
- register: result
- - name: assert changed and absent
- assert:
- that:
- - result.changed == True
- - "default_rule_set not in result.rule_sets|map(attribute='name')"
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
-
-# ============================================================
-- name: Force Remove of Inactive Rule Set does Not Affect Active Rule Set
- block:
- - name: create active rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- - name: create inactive rule set
- aws_ses_rule_set:
- name: "{{ second_rule_set }}"
- active: False
- <<: *aws_connection_info
- - name: force remove inactive rule set
- aws_ses_rule_set:
- name: "{{ second_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
- register: result
- - name: assert changed and absent
- assert:
- that:
- - result.changed == True
- - "second_rule_set not in result.rule_sets|map(attribute='name')"
- - name: remark active rule set active
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- register: result
- - name: assert no change
- assert:
- that:
- - result.changed == False
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ item }}"
- state: absent
- force: True
- <<: *aws_connection_info
- loop:
- - "{{ default_rule_set }}"
- - "{{ second_rule_set }}"
-
-# ============================================================
-- name: mark rule set inactive in check mode
- block:
- - name: create rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- - name: mark rule set inactive in check mode
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: False
- <<: *aws_connection_info
- register: result
- check_mode: True
- - name: assert changed to inactive
- assert:
- that:
- - result.changed == True
- - result.active == False
- - name: remark rule set inactive
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: False
- <<: *aws_connection_info
- register: result
- - name: assert changed is True since previous inactive was in check mode
- assert:
- that:
- - result.changed == True
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
-
-# ============================================================
-- name: Cannot Remove Active Rule Set in check mode
- block:
- - name: create active rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- - name: remove rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- <<: *aws_connection_info
- register: result
- failed_when: "result.error.code != 'CannotDelete'"
- check_mode: True
- - name: assert error code is CannotDelete
- assert:
- that:
- - "result.error.code == 'CannotDelete'"
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
-
-# ============================================================
-- name: Remove Active Rule Set with Force in check mode
- block:
- - name: create active rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- - name: force remove rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
- register: result
- check_mode: True
- - name: assert changed and absent
- assert:
- that:
- - result.changed == True
- - "default_rule_set not in result.rule_sets|map(attribute='name')"
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
- register: result
- - name: assert changed is True since previous removal was in check mode
- assert:
- that:
- - result.changed == True
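The check-mode tests in this file share one verification trick: run the mutation with check_mode: True, assert that a change is reported, then run the real mutation and assert on its changed flag to prove the dry run applied nothing. Reduced to a skeleton, with a hypothetical rule set name:

    - name: mutate in check mode only
      aws_ses_rule_set:
        name: example-rule-set        # hypothetical name
        active: False
      check_mode: True
      register: dry

    - assert:
        that: dry.changed             # the would-be change is reported

    - name: real run proves the dry run applied nothing
      aws_ses_rule_set:
        name: example-rule-set
        active: False
      register: wet

    - assert:
        that: wet.changed             # still a change, so check mode changed nothing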
diff --git a/test/integration/targets/aws_ses_rule_set/tasks/cleanup-lock.yaml b/test/integration/targets/aws_ses_rule_set/tasks/cleanup-lock.yaml
deleted file mode 100644
index 155bf472e4..0000000000
--- a/test/integration/targets/aws_ses_rule_set/tasks/cleanup-lock.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-# ============================================================
-# Release a lock obtained using obtain-lock.yaml
-# This should be included in the always clause of a block to
-# ensure the lock is released. See obtain-lock.yaml for more
-# details of how the locking works.
-# ============================================================
-
-- cloudwatchlogs_log_group:
- log_group_name: "{{ lock_attempt_log_group_name }}"
- state: absent
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
diff --git a/test/integration/targets/aws_ses_rule_set/tasks/inactive-rule-set-tests.yaml b/test/integration/targets/aws_ses_rule_set/tasks/inactive-rule-set-tests.yaml
deleted file mode 100644
index 4bd5250a73..0000000000
--- a/test/integration/targets/aws_ses_rule_set/tasks/inactive-rule-set-tests.yaml
+++ /dev/null
@@ -1,187 +0,0 @@
----
-# ============================================================
-# These tests work on rule sets without making them active,
-# so multiple builds can safely run these tests in parallel.
-#
-# DO NOT ADD TESTS THAT RELY ON ACTIVE RULE SETS TO THIS FILE
-#
-# Any test that makes rule sets active must be added to
-# active-rule-set-tests.yaml, or you will see intermittent failures
-# from multiple builds interacting.
-# ============================================================
-
-- name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
-# ============================================================
-- name: test create rule sets
- block:
- - name: create rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- <<: *aws_connection_info
- register: result
- - name: assert changed to exists inactive
- assert:
- that:
- - result.changed == True
- - result.active == False
- - "default_rule_set in result.rule_sets|map(attribute='name')"
- - name: recreate rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- <<: *aws_connection_info
- register: result
- - name: assert changed is False
- assert:
- that:
- - result.changed == False
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
-# ============================================================
-- name: Remove No Such Rule Set
- block:
- - name: remove ruleset
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- <<: *aws_connection_info
- register: result
- - name: assert not changed and absent
- assert:
- that:
- - result.changed == False
- - "default_rule_set not in result.rule_sets|map(attribute='name')"
-# ============================================================
-- name: Remove Inactive Rule Set
- block:
- - name: create rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- <<: *aws_connection_info
- - name: remove rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- <<: *aws_connection_info
- register: result
- - name: assert changed and removed
- assert:
- that:
- - result.changed == True
- - "default_rule_set not in result.rule_sets|map(attribute='name')"
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
-# ============================================================
-- name: test create in check mode
- block:
- - name: create rule set in check mode
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- <<: *aws_connection_info
- register: result
- check_mode: True
- - name: assert changed inactive and present
- assert:
- that:
- - result.changed == True
- - result.active == False
- - "default_rule_set in result.rule_sets|map(attribute='name')"
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
- register: result
- - name: assert nothing to clean up since create was in check mode
- assert:
- that:
- - result.changed == False
-# ============================================================
-- name: mark rule set active in check mode
- block:
- - name: create rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- <<: *aws_connection_info
- - name: mark rule set active in check mode
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: True
- <<: *aws_connection_info
- register: result
- check_mode: True
- - name: assert changed and active
- assert:
- that:
- - result.changed == True
- - result.active == True
- # We check the rule set is still inactive rather than making
- # it active again, so that this test can safely be run in
- # parallel.
- - name: Ensure rule set is inactive
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- active: False
- <<: *aws_connection_info
- register: result
- - name: assert unchanged since activation was in check mode
- assert:
- that:
- - result.changed == False
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
-# ============================================================
-- name: Remove Inactive Rule Set in check mode
- block:
- - name: create rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- <<: *aws_connection_info
- - name: remove rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- <<: *aws_connection_info
- register: result
- check_mode: True
- - name: assert changed and removed
- assert:
- that:
- - result.changed == True
- - "default_rule_set not in result.rule_sets|map(attribute='name')"
- always:
- - name: cleanup rule set
- aws_ses_rule_set:
- name: "{{ default_rule_set }}"
- state: absent
- force: True
- <<: *aws_connection_info
- register: result
- - name: assert changed is True since previous removal was in check mode
- assert:
- that:
- - result.changed == True
diff --git a/test/integration/targets/aws_ses_rule_set/tasks/main.yaml b/test/integration/targets/aws_ses_rule_set/tasks/main.yaml
deleted file mode 100644
index fe0e787797..0000000000
--- a/test/integration/targets/aws_ses_rule_set/tasks/main.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- include_tasks: inactive-rule-set-tests.yaml
-
-# ============================================================
-# There can only be a single active rule set, so tests that
-# rely on the active state of a rule set cannot be run in
-# parallel.
-# To prevent failures due to parallel runs in the integration
-# builds, the below block creates a lock to ensure that only
-# one process will be running these tests in the same region
-# and same AWS account.
-# See obtain-lock.yaml for explanation of how the lock is
-# constructed.
-# ============================================================
-- name: Active Rule Set Tests
- block:
- - name: Obtain Lock
- include_tasks: obtain-lock-wrapper.yaml
- # Use of loop here is a workaround for lack of support for
- # do-until loops on includes. See:
- # https://github.com/ansible/ansible/issues/17098
- loop: "{{ range(0, max_obtain_lock_attempts, 1)|list }}"
- loop_control:
- loop_var: obtain_lock_attempt
-
- # Because of the above workaround we have to explicitly check
- # that the lock was obtained
- - name: Check Obtained Lock
- assert:
- msg: "Could not obtain lock after {{ max_obtain_lock_attempts }} attempts."
- that: won_lock|bool
-
- - include_tasks: active-rule-set-tests.yaml
-
- always:
- - include_tasks: cleanup-lock.yaml
diff --git a/test/integration/targets/aws_ses_rule_set/tasks/obtain-lock-wrapper.yaml b/test/integration/targets/aws_ses_rule_set/tasks/obtain-lock-wrapper.yaml
deleted file mode 100644
index 36969897cd..0000000000
--- a/test/integration/targets/aws_ses_rule_set/tasks/obtain-lock-wrapper.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# ============================================================
-# Do-until loops cannot be used on task includes.
-# See: https://github.com/ansible/ansible/issues/17098
-#
-# So as a workaround we use a regular loop to repeatedly attempt
-# obtaining a lock.
-#
-# For this to work we need to skip the subsequent iterations
-# once we get a lock, and delay between iterations if we
-# did not obtain the lock.
-#
-# This file encapsulates this logic to reduce the spam from
-# skipped tasks in the ansible log.
-# ============================================================
-
-- include_tasks: obtain-lock.yaml
- # Skip obtaining a lock if we've already succeeded in getting it
- when: "not won_lock|default(False)|bool"
-
-- name: Lock Retry Delay
- wait_for:
- # Add some random jitter to the delay to reduce lock contention
- timeout: "{{ obtain_lock_delay_seconds + 15|random }}"
- # Only delay if we're retrying, so skip the delay if we're
- # on the last attempt or have got the lock
- when: "obtain_lock_attempt < (max_obtain_lock_attempts - 1) and not won_lock|bool"
diff --git a/test/integration/targets/aws_ses_rule_set/tasks/obtain-lock.yaml b/test/integration/targets/aws_ses_rule_set/tasks/obtain-lock.yaml
deleted file mode 100644
index d12c57c6a6..0000000000
--- a/test/integration/targets/aws_ses_rule_set/tasks/obtain-lock.yaml
+++ /dev/null
@@ -1,126 +0,0 @@
-# ============================================================
-# This file attempts to obtain a global lock (for a given
-# region / account combination.
-#
-# This makes one attempt to get the lock and will set the
-# won_lock variable to True or False to indicate whether
-# or not we got the lock.
-#
-# It's expected that this will be executed in a retry loop
-# so that if we don't get the lock we delay then try again.
-#
-# This should only be used in a block with cleanup-lock.yaml
-# included in the always clause to ensure the lock is released.
-#
-# There are several variables that control the locking behaviour:
-# * lock_timeout_seconds
-# How old a lock must be before it's assumed to be an expired
-# lock that was not cleaned up by the owner. Any locks older
-# than this will not prevent a lock being obtained and will
-# be deleted when a new process obtains the lock.
-# * lock_log_group_prefix
-# The log_group prefix that represents the lock being obtained.
-# This must be the same across all processes trying to obtain
-# the lock.
-# * lock_process_id
-# A unique identifier of this process. Each process that might
-#   attempt to obtain the lock must have a different identifier.
-# This defaults to the resource_prefix which is generally
-# appropriate.
-# * max_obtain_lock_attempts
-# How many attempts to make to get the lock before giving up
-# NB: This is actually done in main.yaml
-# * obtain_lock_delay_seconds:
-# How long to delay after failing to get the lock before
-# trying again.
-# NB: This is actually done in obtain-lock-wrapper.yaml
-#
-# The locking here is based around creating cloudwatch log groups.
-# This resource was chosen because:
-# A) it's free
-# B) we have a built in grouping concept because of the hierarchy
-# that allows us to easily group attempts for the same lock
-# C) the creation time is tracked and returned which gives us
-# a mechanism for deterministically picking a winner
-#
-# Each lock is represented by a log group prefix. Each attempt
-# to obtain the lock is a log group of the lock_process_id below
-# that prefix.
-#
-# The winning lock is the one with the earliest creation time.
-#
-# To prevent a hanging lock from permanently hanging the build
-# lock attempts older than the lock timeout are ignored and
-# cleaned up by the next process to win the lock.
-# ============================================================
-
-- name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
-- name: Set lock_attempt_log_group_name
- set_fact:
- lock_attempt_log_group_name: "{{ lock_log_group_prefix }}/{{ lock_process_id|default(resource_prefix) }}"
-
- # Note the overwrite below to ensure that the creation time
- # is updated. This is important as we calculate expiry relative
- # to the attempt creation.
- #
- # Because of this it's important that we delete the attempt
- # if we don't get the lock. Otherwise we can get a deadlock
- # where the stale attempt from one process wins, but then,
- # because that process updates the creation date, it doesn't
- # consider itself to have won.
-- name: Create Lock Attempt Log Group
- cloudwatchlogs_log_group:
- log_group_name: "{{ lock_attempt_log_group_name }}"
- state: present
- overwrite: True
- <<: *aws_connection_info
- register: lock_attempt_log_group_result
-
-- name: Get Lock Attempt Lock Groups
- cloudwatchlogs_log_group_info:
- log_group_name: "{{ lock_log_group_prefix }}/"
- <<: *aws_connection_info
- register: lock_attempt_log_groups
-
-- name: Calculate Expired Lock Attempt Timestamp
- set_fact:
- expired_lock_timestamp: "{{ lock_attempt_log_group_result.creation_time - (lock_timeout_seconds * 1000) }}"
-
-- name: Get Expired and Active Lock Attempts
- set_fact:
- expired_lock_attempts: "{{ lock_attempt_log_groups.log_groups|selectattr('creation_time', 'lt', expired_lock_timestamp|int)|list }}"
- active_lock_attempts: "{{ lock_attempt_log_groups.log_groups|selectattr('creation_time', 'ge', expired_lock_timestamp|int)|list }}"
-
-- name: Pick Winning Lock Attempt
- set_fact:
- winning_lock_attempt: "{{ active_lock_attempts|sort(attribute='creation_time')|first }}"
-
-- name: Determine if Won Lock
- set_fact:
- won_lock: "{{ winning_lock_attempt.log_group_name == lock_attempt_log_group_name }}"
-
- # Remove the lock attempt if we didn't get the lock. This prevents
- # our stale lock attempt blocking another process from getting the lock.
- # See more detailed comment above Create Lock Attempt Log Group
-- name: Remove Failed Lock Attempt Log Group
- cloudwatchlogs_log_group:
- log_group_name: "{{ lock_attempt_log_group_name }}"
- state: absent
- <<: *aws_connection_info
- when: "not won_lock|bool"
-
-- name: Delete Expired Lock Attempts
- cloudwatchlogs_log_group:
- log_group_name: "{{ item.log_group_name }}"
- state: absent
- <<: *aws_connection_info
- when: "won_lock|bool"
- loop: "{{ expired_lock_attempts }}"
diff --git a/test/integration/targets/aws_ssm_parameter_store/aliases b/test/integration/targets/aws_ssm_parameter_store/aliases
deleted file mode 100644
index 72a9fb4f57..0000000000
--- a/test/integration/targets/aws_ssm_parameter_store/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group4
diff --git a/test/integration/targets/aws_ssm_parameter_store/defaults/main.yml b/test/integration/targets/aws_ssm_parameter_store/defaults/main.yml
deleted file mode 100644
index 13f8ba31ae..0000000000
--- a/test/integration/targets/aws_ssm_parameter_store/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# defaults file for the aws_ssm_parameter_store tests (based on the aws_lambda tests)
-ssm_key_prefix: '{{resource_prefix}}'
diff --git a/test/integration/targets/aws_ssm_parameter_store/tasks/main.yml b/test/integration/targets/aws_ssm_parameter_store/tasks/main.yml
deleted file mode 100644
index 7bfb105e93..0000000000
--- a/test/integration/targets/aws_ssm_parameter_store/tasks/main.yml
+++ /dev/null
@@ -1,131 +0,0 @@
----
-#
-# Author: Michael De La Rue
-# based on aws_lambda test cases
-- block:
-
- # ============================================================
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
- # ============================================================
- - name: Create or update key/value pair in aws parameter store
- aws_ssm_parameter_store:
- name: "/{{ssm_key_prefix}}/Hello"
- description: "This is your first key"
- value: "World"
- <<: *aws_connection_info
-
- - name: Check that parameter was stored correctly
- assert:
- that:
- - "'{{lookup('aws_ssm', '/' ~ ssm_key_prefix ~ '/Hello', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token )}}' == 'World'"
-
- # ============================================================
- - name: Create or update key/value pair in aws parameter store
- aws_ssm_parameter_store:
- name: "/{{ssm_key_prefix}}/path/wonvar"
- description: "This is your first key"
- value: "won value"
- <<: *aws_connection_info
-
- - name: Create or update key/value pair in aws parameter store
- aws_ssm_parameter_store:
- name: "/{{ssm_key_prefix}}/path/toovar"
- description: "This is your first key"
- value: "too value"
- <<: *aws_connection_info
-
- - name: Create or update key/value pair in aws parameter store
- aws_ssm_parameter_store:
- name: "/{{ssm_key_prefix}}/path/tree/treevar"
- description: "This is your first key"
- value: "tree value"
- <<: *aws_connection_info
-
- # ============================================================
- - name: Create or update key/value pair in aws parameter store
- aws_ssm_parameter_store:
- name: "/{{ssm_key_prefix}}/deeppath/wondir/samevar"
- description: "This is your first key"
- value: "won value"
- <<: *aws_connection_info
-
- - name: Create or update key/value pair in aws parameter store
- aws_ssm_parameter_store:
- name: "/{{ssm_key_prefix}}/deeppath/toodir/samevar"
- description: "This is your first key"
- value: "too value"
- <<: *aws_connection_info
-
- # ============================================================
- - name: debug the lookup
- debug:
- msg: "{{lookup('aws_ssm', '/' ~ ssm_key_prefix ~ '/path', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token, bypath=True )}}'"
-
- - name: Check that parameter path is stored and retrieved
- assert:
- that:
- - "'{{lookup('aws_ssm', '/' ~ ssm_key_prefix ~ '/path', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token, bypath=True, shortnames=true ) | to_json }}' == '{\"toovar\": \"too value\", \"wonvar\": \"won value\"}'"
-
- # ============================================================
- - name: Returns empty value in case we don't find a named parameter and default filter works
- assert:
- that:
- - "'{{lookup('aws_ssm', '/' ~ ssm_key_prefix ~ '/Goodbye', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token )}}' == ''"
- - "'{{lookup('aws_ssm', '/' ~ ssm_key_prefix ~ '/Goodbye', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token ) | default('I_can_has_default', true)}}' == 'I_can_has_default'"
-
- # ============================================================
- - name: Handle multiple paths with one that doesn't exist - default to full names.
- assert:
- that:
- - "'{{lookup('aws_ssm', '/' ~ ssm_key_prefix ~ '/path', '/' ~ ssm_key_prefix ~ '/dont_create_this_path_you_will_break_the_ansible_tests', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token, bypath=True ) | to_json }}' in ( '[{\"/' ~ ssm_key_prefix ~ '/path/toovar\": \"too value\", \"/' ~ ssm_key_prefix ~ '/path/wonvar\": \"won value\"}, {}]', '[{\"/' ~ ssm_key_prefix ~ '/path/wonvar\": \"won value\", \"/' ~ ssm_key_prefix ~ '/path/toovar\": \"too value\"}, {}]' )"
-
-
- # ============================================================
- # this may be a bit of a nasty test case; we should perhaps accept _either_ value that was stored
- # in the two variables named 'samevar'
-
- - name: Handle multiple paths with one that doesn't exist - shortnames - including overlap.
- assert:
- that:
- - "'{{lookup('aws_ssm', '/' ~ ssm_key_prefix ~ '/path', '/' ~ ssm_key_prefix ~ '/dont_create_this_path_you_will_break_the_ansible_tests', '/' ~ ssm_key_prefix ~ '/deeppath', region=ec2_region, aws_access_key=ec2_access_key, aws_secret_key=ec2_secret_key, aws_security_token=security_token, bypath=True, shortnames=true, recursive=true ) | to_json }}' == '[{\"toovar\": \"too value\", \"treevar\": \"tree value\", \"wonvar\": \"won value\"}, {}, {\"samevar\": \"won value\"}]'"
-
-
- # ============================================================
- - name: Delete key/value pair in aws parameter store
- aws_ssm_parameter_store:
- name: "/{{ssm_key_prefix}}/Hello"
- state: absent
- <<: *aws_connection_info
-
- # ============================================================
- - name: Attempt delete key/value pair in aws parameter store again
- aws_ssm_parameter_store:
- name: "/{{ssm_key_prefix}}/Hello"
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: assert that changed is False since parameter should be deleted
- assert:
- that:
- - result.changed == False
- always:
- # ============================================================
- - name: Delete remaining key/value pairs in aws parameter store
- aws_ssm_parameter_store:
- name: "{{item}}"
- state: absent
- <<: *aws_connection_info
- with_items:
- - "/{{ssm_key_prefix}}/Hello"
- - "/{{ssm_key_prefix}}/path/wonvar"
- - "/{{ssm_key_prefix}}/path/toovar"
- - "/{{ssm_key_prefix}}/path/tree/treevar"
- - "/{{ssm_key_prefix}}/deeppath/wondir/samevar"
diff --git a/test/integration/targets/aws_step_functions_state_machine/aliases b/test/integration/targets/aws_step_functions_state_machine/aliases
deleted file mode 100644
index 65b315eb47..0000000000
--- a/test/integration/targets/aws_step_functions_state_machine/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-shippable/aws/group2
-aws_step_functions_state_machine_execution
diff --git a/test/integration/targets/aws_step_functions_state_machine/defaults/main.yml b/test/integration/targets/aws_step_functions_state_machine/defaults/main.yml
deleted file mode 100644
index 273a0c783b..0000000000
--- a/test/integration/targets/aws_step_functions_state_machine/defaults/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-# the random_num is generated in a set_fact task at the start of the testsuite
-state_machine_name: "{{ resource_prefix }}_step_functions_state_machine_ansible_test_{{ random_num }}"
-step_functions_role_name: "ansible-test-sts-{{ resource_prefix }}-step_functions-role"
-execution_name: "{{ resource_prefix }}_sfn_execution"
diff --git a/test/integration/targets/aws_step_functions_state_machine/files/alternative_state_machine.json b/test/integration/targets/aws_step_functions_state_machine/files/alternative_state_machine.json
deleted file mode 100644
index 7b51bebb1a..0000000000
--- a/test/integration/targets/aws_step_functions_state_machine/files/alternative_state_machine.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "StartAt": "HelloWorld",
- "States": {
- "HelloWorld": {
- "Type": "Pass",
- "Result": "Some other result",
- "Next": "Wait"
- },
- "Wait": {
- "Type": "Wait",
- "Seconds": 30,
- "End": true
- }
- }
-} \ No newline at end of file
diff --git a/test/integration/targets/aws_step_functions_state_machine/files/state_machine.json b/test/integration/targets/aws_step_functions_state_machine/files/state_machine.json
deleted file mode 100644
index c07d5cebad..0000000000
--- a/test/integration/targets/aws_step_functions_state_machine/files/state_machine.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "StartAt": "HelloWorld",
- "States": {
- "HelloWorld": {
- "Type": "Pass",
- "Result": "Hello World!",
- "End": true
- }
- }
-} \ No newline at end of file
diff --git a/test/integration/targets/aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json b/test/integration/targets/aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json
deleted file mode 100644
index 48d627220f..0000000000
--- a/test/integration/targets/aws_step_functions_state_machine/files/state_machines_iam_trust_policy.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "Service": "states.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-} \ No newline at end of file
diff --git a/test/integration/targets/aws_step_functions_state_machine/tasks/main.yml b/test/integration/targets/aws_step_functions_state_machine/tasks/main.yml
deleted file mode 100644
index 0a28ca3624..0000000000
--- a/test/integration/targets/aws_step_functions_state_machine/tasks/main.yml
+++ /dev/null
@@ -1,300 +0,0 @@
----
-
-- name: Integration test for AWS Step Function state machine module
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
-
- # ==== Setup ==================================================
-
- - name: Create IAM service role needed for Step Functions
- iam_role:
- name: "{{ step_functions_role_name }}"
- description: Role with permissions for AWS Step Functions actions.
- assume_role_policy_document: "{{ lookup('file', 'state_machines_iam_trust_policy.json') }}"
- state: present
- register: step_functions_role
-
- - name: Pause a few seconds to ensure IAM role is available to next task
- pause:
- seconds: 10
-
- # ==== Tests ===================================================
-
- - name: Create a random component for state machine name
- set_fact:
- random_num: "{{ 999999999 | random }}"
-
- - name: Create a new state machine -- check_mode
- aws_step_functions_state_machine:
- name: "{{ state_machine_name }}"
- definition: "{{ lookup('file','state_machine.json') }}"
- role_arn: "{{ step_functions_role.iam_role.arn }}"
- tags:
- project: helloWorld
- state: present
- register: creation_check
- check_mode: yes
-
- - assert:
- that:
- - creation_check.changed == True
- - creation_check.output == 'State machine would be created.'
-
- - name: Create a new state machine
- aws_step_functions_state_machine:
- name: "{{ state_machine_name }}"
- definition: "{{ lookup('file','state_machine.json') }}"
- role_arn: "{{ step_functions_role.iam_role.arn }}"
- tags:
- project: helloWorld
- state: present
- register: creation_output
-
- - assert:
- that:
- - creation_output.changed == True
-
- - name: Pause a few seconds to ensure state machine role is available
- pause:
- seconds: 5
-
- - name: Idempotent rerun of same state function -- check_mode
- aws_step_functions_state_machine:
- name: "{{ state_machine_name }}"
- definition: "{{ lookup('file','state_machine.json') }}"
- role_arn: "{{ step_functions_role.iam_role.arn }}"
- tags:
- project: helloWorld
- state: present
- register: result
- check_mode: yes
-
- - assert:
- that:
- - result.changed == False
- - result.output == 'State is up-to-date.'
-
- - name: Idempotent rerun of same state function
- aws_step_functions_state_machine:
- name: "{{ state_machine_name }}"
- definition: "{{ lookup('file','state_machine.json') }}"
- role_arn: "{{ step_functions_role.iam_role.arn }}"
- tags:
- project: helloWorld
- state: present
- register: result
-
- - assert:
- that:
- - result.changed == False
-
- - name: Update an existing state machine -- check_mode
- aws_step_functions_state_machine:
- name: "{{ state_machine_name }}"
- definition: "{{ lookup('file','alternative_state_machine.json') }}"
- role_arn: "{{ step_functions_role.iam_role.arn }}"
- tags:
- differentTag: different_tag
- state: present
- register: update_check
- check_mode: yes
-
- - assert:
- that:
- - update_check.changed == True
- - "update_check.output == 'State machine would be updated: {{ creation_output.state_machine_arn }}'"
-
- - name: Update an existing state machine
- aws_step_functions_state_machine:
- name: "{{ state_machine_name }}"
- definition: "{{ lookup('file','alternative_state_machine.json') }}"
- role_arn: "{{ step_functions_role.iam_role.arn }}"
- tags:
- differentTag: different_tag
- state: present
- register: update_output
-
- - assert:
- that:
- - update_output.changed == True
- - update_output.state_machine_arn == creation_output.state_machine_arn
-
- - name: Start execution of state machine -- check_mode
- aws_step_functions_state_machine_execution:
- name: "{{ execution_name }}"
- execution_input: "{}"
- state_machine_arn: "{{ creation_output.state_machine_arn }}"
- register: start_execution_output
- check_mode: yes
-
- - assert:
- that:
- - start_execution_output.changed == True
- - "start_execution_output.output == 'State machine execution would be started.'"
-
- - name: Start execution of state machine
- aws_step_functions_state_machine_execution:
- name: "{{ execution_name }}"
- execution_input: "{}"
- state_machine_arn: "{{ creation_output.state_machine_arn }}"
- register: start_execution_output
-
- - assert:
- that:
- - start_execution_output.changed
- - "'execution_arn' in start_execution_output"
- - "'start_date' in start_execution_output"
-
- - name: Start execution of state machine (check for idempotency) (check mode)
- aws_step_functions_state_machine_execution:
- name: "{{ execution_name }}"
- execution_input: "{}"
- state_machine_arn: "{{ creation_output.state_machine_arn }}"
- register: start_execution_output_idem_check
- check_mode: yes
-
- - assert:
- that:
- - not start_execution_output_idem_check.changed
- - "start_execution_output_idem_check.output == 'State machine execution already exists.'"
-
- - name: Start execution of state machine (check for idempotency)
- aws_step_functions_state_machine_execution:
- name: "{{ execution_name }}"
- execution_input: "{}"
- state_machine_arn: "{{ creation_output.state_machine_arn }}"
- register: start_execution_output_idem
-
- - assert:
- that:
- - not start_execution_output_idem.changed
-
- - name: Stop execution of state machine -- check_mode
- aws_step_functions_state_machine_execution:
- action: stop
- execution_arn: "{{ start_execution_output.execution_arn }}"
- cause: "cause of the failure"
- error: "error code of the failure"
- register: stop_execution_output
- check_mode: yes
-
- - assert:
- that:
- - stop_execution_output.changed
- - "stop_execution_output.output == 'State machine execution would be stopped.'"
-
- - name: Stop execution of state machine
- aws_step_functions_state_machine_execution:
- action: stop
- execution_arn: "{{ start_execution_output.execution_arn }}"
- cause: "cause of the failure"
- error: "error code of the failure"
- register: stop_execution_output
-
- - assert:
- that:
- - stop_execution_output.changed
- - "'stop_date' in stop_execution_output"
-
- - name: Stop execution of state machine (check for idempotency)
- aws_step_functions_state_machine_execution:
- action: stop
- execution_arn: "{{ start_execution_output.execution_arn }}"
- cause: "cause of the failure"
- error: "error code of the failure"
- register: stop_execution_output
-
- - assert:
- that:
- - not stop_execution_output.changed
-
- - name: Try stopping a non-running execution -- check_mode
- aws_step_functions_state_machine_execution:
- action: stop
- execution_arn: "{{ start_execution_output.execution_arn }}"
- cause: "cause of the failure"
- error: "error code of the failure"
- register: stop_execution_output
- check_mode: yes
-
- - assert:
- that:
- - not stop_execution_output.changed
- - "stop_execution_output.output == 'State machine execution is not running.'"
-
- - name: Try stopping a non-running execution
- aws_step_functions_state_machine_execution:
- action: stop
- execution_arn: "{{ start_execution_output.execution_arn }}"
- cause: "cause of the failure"
- error: "error code of the failure"
- register: stop_execution_output
-
- - assert:
- that:
- - not stop_execution_output.changed
-
- - name: Start execution of state machine with the same execution name
- aws_step_functions_state_machine_execution:
- name: "{{ execution_name }}"
- state_machine_arn: "{{ creation_output.state_machine_arn }}"
- register: start_execution_output_again
-
- - assert:
- that:
- - not start_execution_output_again.changed
-
- - name: Remove state machine -- check_mode
- aws_step_functions_state_machine:
- name: "{{ state_machine_name }}"
- state: absent
- register: deletion_check
- check_mode: yes
-
- - assert:
- that:
- - deletion_check.changed == True
- - "deletion_check.output == 'State machine would be deleted: {{ creation_output.state_machine_arn }}'"
-
- - name: Remove state machine
- aws_step_functions_state_machine:
- name: "{{ state_machine_name }}"
- state: absent
- register: deletion_output
-
- - assert:
- that:
- - deletion_output.changed == True
- - deletion_output.state_machine_arn == creation_output.state_machine_arn
-
- - name: Non-existent state machine is absent
- aws_step_functions_state_machine:
- name: "non_existing_state_machine"
- state: absent
- register: result
-
- - assert:
- that:
- - result.changed == False
-
- # ==== Cleanup ====================================================
-
- always:
-
- - name: Cleanup - delete state machine
- aws_step_functions_state_machine:
- name: "{{ state_machine_name }}"
- state: absent
- ignore_errors: true
-
- - name: Cleanup - delete IAM role needed for Step Functions test
- iam_role:
- name: "{{ step_functions_role_name }}"
- state: absent
- ignore_errors: true
diff --git a/test/integration/targets/aws_waf_web_acl/aliases b/test/integration/targets/aws_waf_web_acl/aliases
deleted file mode 100644
index ed29166723..0000000000
--- a/test/integration/targets/aws_waf_web_acl/aliases
+++ /dev/null
@@ -1,6 +0,0 @@
-cloud/aws
-aws_waf_info
-aws_waf_web_acl
-aws_waf_web_match
-aws_waf_web_rule
-unsupported
diff --git a/test/integration/targets/aws_waf_web_acl/tasks/main.yml b/test/integration/targets/aws_waf_web_acl/tasks/main.yml
deleted file mode 100644
index 6b58e0b392..0000000000
--- a/test/integration/targets/aws_waf_web_acl/tasks/main.yml
+++ /dev/null
@@ -1,1199 +0,0 @@
-- block:
- - name: set yaml anchor
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- no_log: yes
-
-
- ##################################################
- # aws_waf_condition tests
- ##################################################
-
- - name: create WAF IP condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- filters:
- - ip_address: "10.0.0.0/8"
- type: ip
- <<: *aws_connection_info
- register: create_waf_ip_condition
-
- - name: add an IP address to WAF condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- filters:
- - ip_address: "10.0.0.0/8"
- - ip_address: "192.168.0.0/24"
- type: ip
- <<: *aws_connection_info
- register: add_ip_address_to_waf_condition
-
- - name: check expected waf filter length
- assert:
- that:
- - add_ip_address_to_waf_condition.condition.ip_set_descriptors|length == 2
-
- - name: add an IP address to WAF condition (rely on purge_filters defaulting to false)
- aws_waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- filters:
- - ip_address: "192.168.10.0/24"
- type: ip
- <<: *aws_connection_info
- register: add_ip_address_to_waf_condition_no_purge
-
- - name: check waf filter length has increased
- assert:
- that:
- - add_ip_address_to_waf_condition_no_purge.condition.ip_set_descriptors|length == 3
- - add_ip_address_to_waf_condition_no_purge.changed
-
- - name: add an IP address to WAF condition (set purge_filters)
- aws_waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- filters:
- - ip_address: "192.168.20.0/24"
- purge_filters: yes
- type: ip
- <<: *aws_connection_info
- register: add_ip_address_to_waf_condition_purge
-
- - name: check waf filter length has reduced
- assert:
- that:
- - add_ip_address_to_waf_condition_purge.condition.ip_set_descriptors|length == 1
- - add_ip_address_to_waf_condition_purge.changed
-
- - name: create WAF byte condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_byte_condition"
- filters:
- - field_to_match: header
- position: STARTS_WITH
- target_string: Hello
- header: Content-type
- type: byte
- <<: *aws_connection_info
- register: create_waf_byte_condition
-
- - name: recreate WAF byte condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_byte_condition"
- filters:
- - field_to_match: header
- position: STARTS_WITH
- target_string: Hello
- header: Content-type
- type: byte
- <<: *aws_connection_info
- register: recreate_waf_byte_condition
-
- - name: assert that no change was made
- assert:
- that:
- - not recreate_waf_byte_condition.changed
-
- - name: create WAF geo condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_geo_condition"
- filters:
- - country: US
- - country: AU
- - country: AT
- type: geo
- <<: *aws_connection_info
- register: create_waf_geo_condition
-
- - name: create WAF size condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_size_condition"
- filters:
- - field_to_match: query_string
- size: 300
- comparison: GT
- type: size
- <<: *aws_connection_info
- register: create_waf_size_condition
-
- - name: create WAF sql condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_sql_condition"
- filters:
- - field_to_match: query_string
- transformation: url_decode
- type: sql
- <<: *aws_connection_info
- register: create_waf_sql_condition
-
- - name: create WAF xss condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_xss_condition"
- filters:
- - field_to_match: query_string
- transformation: url_decode
- type: xss
- <<: *aws_connection_info
- register: create_waf_xss_condition
-
- - name: create WAF regex condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition"
- filters:
- - field_to_match: query_string
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
- <<: *aws_connection_info
- register: create_waf_regex_condition
-
- - name: create a second WAF regex condition with the same regex
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition_part_2"
- filters:
- - field_to_match: header
- header: cookie
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
- <<: *aws_connection_info
- register: create_second_waf_regex_condition
-
- - name: check that the pattern is shared
- assert:
- that:
- - >
- create_waf_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id ==
- create_second_waf_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id
- - create_second_waf_regex_condition.changed
-
-
- - name: delete first WAF regex condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition"
- filters:
- - field_to_match: query_string
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
- state: absent
- <<: *aws_connection_info
- register: delete_waf_regex_condition
-
- - name: delete second WAF regex condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition_part_2"
- filters:
- - field_to_match: header
- header: cookie
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
- state: absent
- <<: *aws_connection_info
- register: delete_second_waf_regex_condition
-
- - name: create WAF regex condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition"
- filters:
- - field_to_match: query_string
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
- <<: *aws_connection_info
- register: recreate_waf_regex_condition
-
- - name: check that a new pattern is created (because the first pattern should have been deleted once unused)
- assert:
- that:
- - >
- recreate_waf_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id !=
- create_waf_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id
-
- - name: create WAF Regional IP condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- filters:
- - ip_address: "10.0.0.0/8"
- type: ip
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: create_waf_regional_ip_condition
-
- - name: add an IP address to WAF Regional condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- filters:
- - ip_address: "10.0.0.0/8"
- - ip_address: "192.168.0.0/24"
- type: ip
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: add_ip_address_to_waf_regional_condition
-
- - name: check expected WAF Regional filter length
- assert:
- that:
- - add_ip_address_to_waf_regional_condition.condition.ip_set_descriptors|length == 2
-
- - name: add an IP address to WAF Regional condition (rely on purge_filters defaulting to false)
- aws_waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- filters:
- - ip_address: "192.168.10.0/24"
- type: ip
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: add_ip_address_to_waf_regional_condition_no_purge
-
- - name: check WAF Regional filter length has increased
- assert:
- that:
- - add_ip_address_to_waf_regional_condition_no_purge.condition.ip_set_descriptors|length == 3
- - add_ip_address_to_waf_regional_condition_no_purge.changed
-
- - name: add an IP address to WAF Regional condition (set purge_filters)
- aws_waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- filters:
- - ip_address: "192.168.20.0/24"
- purge_filters: yes
- type: ip
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: add_ip_address_to_waf_regional_condition_purge
-
- - name: check WAF Regional filter length has decreased
- assert:
- that:
- - add_ip_address_to_waf_regional_condition_purge.condition.ip_set_descriptors|length == 1
- - add_ip_address_to_waf_regional_condition_purge.changed
-
- - name: create WAF Regional byte condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_byte_condition"
- filters:
- - field_to_match: header
- position: STARTS_WITH
- target_string: Hello
- header: Content-type
- type: byte
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: create_waf_regional_byte_condition
-
- - name: recreate WAF Regional byte condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_byte_condition"
- filters:
- - field_to_match: header
- position: STARTS_WITH
- target_string: Hello
- header: Content-type
- type: byte
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: recreate_waf_regional_byte_condition
-
- - name: assert that no change was made
- assert:
- that:
- - not recreate_waf_regional_byte_condition.changed
-
- - name: create WAF Regional geo condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_geo_condition"
- filters:
- - country: US
- - country: AU
- - country: AT
- type: geo
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: create_waf_regional_geo_condition
-
- - name: create WAF Regional size condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_size_condition"
- filters:
- - field_to_match: query_string
- size: 300
- comparison: GT
- type: size
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: create_waf_regional_size_condition
-
- - name: create WAF Regional sql condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_sql_condition"
- filters:
- - field_to_match: query_string
- transformation: url_decode
- type: sql
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: create_waf_regional_sql_condition
-
- - name: create WAF Regional xss condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_xss_condition"
- filters:
- - field_to_match: query_string
- transformation: url_decode
- type: xss
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: create_waf_regional_xss_condition
-
- - name: create WAF Regional regex condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition"
- filters:
- - field_to_match: query_string
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: create_waf_regional_regex_condition
-
- - name: create a second WAF Regional regex condition with the same regex
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition_part_2"
- filters:
- - field_to_match: header
- header: cookie
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: create_second_waf_regional_regex_condition
-
- - name: check that the pattern is shared
- assert:
- that:
- - >
- create_waf_regional_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id ==
- create_second_waf_regional_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id
- - create_second_waf_regional_regex_condition.changed
-
-
- - name: delete first WAF Regional regex condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition"
- filters:
- - field_to_match: query_string
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: delete_waf_regional_regex_condition
-
- - name: delete second WAF Regional regex condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition_part_2"
- filters:
- - field_to_match: header
- header: cookie
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: delete_second_waf_regional_regex_condition
-
- - name: create WAF Regional regex condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition"
- filters:
- - field_to_match: query_string
- regex_pattern:
- name: greetings
- regex_strings:
- - '[hH]ello'
- - '^Hi there'
- - '.*Good Day to You'
- type: regex
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: recreate_waf_regional_regex_condition
-
- - name: check that a new pattern is created (because the first pattern should have been deleted once unused)
- assert:
- that:
- - >
- recreate_waf_regional_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id !=
- create_waf_regional_regex_condition.condition.regex_match_tuples[0].regex_pattern_set_id
-
- ##################################################
- # aws_waf_rule tests
- ##################################################
-
- - name: create WAF rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- conditions:
- - name: "{{ resource_prefix }}_regex_condition"
- type: regex
- negated: no
- - name: "{{ resource_prefix }}_geo_condition"
- type: geo
- negated: no
- - name: "{{ resource_prefix }}_byte_condition"
- type: byte
- negated: no
- purge_conditions: yes
- <<: *aws_connection_info
- register: create_aws_waf_rule
-
- - name: check WAF rule
- assert:
- that:
- - create_aws_waf_rule.changed
- - create_aws_waf_rule.rule.predicates|length == 3
-
- - name: recreate WAF rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- conditions:
- - name: "{{ resource_prefix }}_regex_condition"
- type: regex
- negated: no
- - name: "{{ resource_prefix }}_geo_condition"
- type: geo
- negated: no
- - name: "{{ resource_prefix }}_byte_condition"
- type: byte
- negated: no
- <<: *aws_connection_info
- register: create_aws_waf_rule
-
- - name: check WAF rule did not change
- assert:
- that:
- - not create_aws_waf_rule.changed
- - create_aws_waf_rule.rule.predicates|length == 3
-
- - name: add further conditions to the WAF rule, relying on purge_conditions defaulting to false
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- conditions:
- - name: "{{ resource_prefix }}_ip_condition"
- type: ip
- negated: yes
- - name: "{{ resource_prefix }}_sql_condition"
- type: sql
- negated: no
- - name: "{{ resource_prefix }}_xss_condition"
- type: xss
- negated: no
- <<: *aws_connection_info
- register: add_conditions_to_aws_waf_rule
-
- - name: check conditions were added to the WAF rule
- assert:
- that:
- - add_conditions_to_aws_waf_rule.changed
- - add_conditions_to_aws_waf_rule.rule.predicates|length == 6
-
- - name: remove some conditions from the WAF rule by purging
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- conditions:
- - name: "{{ resource_prefix }}_ip_condition"
- type: ip
- negated: yes
- - name: "{{ resource_prefix }}_xss_condition"
- type: xss
- negated: no
- - name: "{{ resource_prefix }}_byte_condition"
- type: byte
- negated: no
- - name: "{{ resource_prefix }}_size_condition"
- type: size
- negated: no
- purge_conditions: yes
- <<: *aws_connection_info
- register: add_and_remove_waf_rule_conditions
-
- - name: check WAF rule conditions were updated as expected
- assert:
- that:
- - add_and_remove_waf_rule_conditions.changed
- - add_and_remove_waf_rule_conditions.rule.predicates|length == 4
-
- - name: attempt to remove an in-use condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_size_condition"
- type: size
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
- register: remove_in_use_condition
-
- - name: check failure was sensible
- assert:
- that:
- - remove_in_use_condition.failed
- - "'Condition {{ resource_prefix }}_size_condition is in use' in remove_in_use_condition.msg"
-
- - name: create WAF Regional rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- conditions:
- - name: "{{ resource_prefix }}_regex_condition"
- type: regex
- negated: no
- - name: "{{ resource_prefix }}_geo_condition"
- type: geo
- negated: no
- - name: "{{ resource_prefix }}_byte_condition"
- type: byte
- negated: no
- purge_conditions: yes
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: create_aws_waf_regional_rule
-
- - name: check WAF Regional rule
- assert:
- that:
- - create_aws_waf_regional_rule.changed
- - create_aws_waf_regional_rule.rule.predicates|length == 3
-
- - name: recreate WAF Regional rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- conditions:
- - name: "{{ resource_prefix }}_regex_condition"
- type: regex
- negated: no
- - name: "{{ resource_prefix }}_geo_condition"
- type: geo
- negated: no
- - name: "{{ resource_prefix }}_byte_condition"
- type: byte
- negated: no
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: create_aws_waf_regional_rule
-
- - name: check WAF Regional rule did not change
- assert:
- that:
- - not create_aws_waf_regional_rule.changed
- - create_aws_waf_regional_rule.rule.predicates|length == 3
-
- - name: add further conditions to the WAF Regional rule, relying on purge_conditions defaulting to false
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- conditions:
- - name: "{{ resource_prefix }}_ip_condition"
- type: ip
- negated: yes
- - name: "{{ resource_prefix }}_sql_condition"
- type: sql
- negated: no
- - name: "{{ resource_prefix }}_xss_condition"
- type: xss
- negated: no
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: add_conditions_to_aws_waf_regional_rule
-
- - name: check conditions were added to the WAF Regional rule
- assert:
- that:
- - add_conditions_to_aws_waf_regional_rule.changed
- - add_conditions_to_aws_waf_regional_rule.rule.predicates|length == 6
-
- - name: remove some conditions from the WAF Regional rule by purging
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- conditions:
- - name: "{{ resource_prefix }}_ip_condition"
- type: ip
- negated: yes
- - name: "{{ resource_prefix }}_xss_condition"
- type: xss
- negated: no
- - name: "{{ resource_prefix }}_byte_condition"
- type: byte
- negated: no
- - name: "{{ resource_prefix }}_size_condition"
- type: size
- negated: no
- purge_conditions: yes
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: add_and_remove_waf_regional_rule_conditions
-
- - name: check WAF Regional rule conditions were updated as expected
- assert:
- that:
- - add_and_remove_waf_regional_rule_conditions.changed
- - add_and_remove_waf_regional_rule_conditions.rule.predicates|length == 4
-
- - name: attempt to remove an in-use WAF Regional condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_size_condition"
- type: size
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
- register: remove_in_use_condition
-
- - name: check failure was sensible
- assert:
- that:
- - remove_in_use_condition.failed
- - "'Condition {{ resource_prefix }}_size_condition is in use' in remove_in_use_condition.msg"
-
- ##################################################
- # aws_waf_web_acl tests
- ##################################################
-
- - name: create web ACL
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- rules:
- - name: "{{ resource_prefix }}_rule"
- priority: 1
- action: block
- default_action: block
- purge_rules: yes
- state: present
- <<: *aws_connection_info
- register: create_web_acl
-
- - name: recreate web ACL
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- rules:
- - name: "{{ resource_prefix }}_rule"
- priority: 1
- action: block
- default_action: block
- state: present
- <<: *aws_connection_info
- register: recreate_web_acl
-
- - name: check web acl was not changed
- assert:
- that:
- - not recreate_web_acl.changed
- - recreate_web_acl.web_acl.rules|length == 1
-
- - name: create a second WAF rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule_2"
- conditions:
- - name: "{{ resource_prefix }}_ip_condition"
- type: ip
- negated: yes
- - name: "{{ resource_prefix }}_sql_condition"
- type: sql
- negated: no
- - name: "{{ resource_prefix }}_xss_condition"
- type: xss
- negated: no
- <<: *aws_connection_info
-
- - name: add a new rule to the web acl
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- rules:
- - name: "{{ resource_prefix }}_rule_2"
- priority: 2
- action: allow
- default_action: block
- state: present
- <<: *aws_connection_info
- register: web_acl_add_rule
-
- - name: check that rule was added to the web acl
- assert:
- that:
- - web_acl_add_rule.changed
- - web_acl_add_rule.web_acl.rules|length == 2
-
- - name: use purge_rules to remove the first rule
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- rules:
- - name: "{{ resource_prefix }}_rule_2"
- priority: 2
- action: allow
- purge_rules: yes
- default_action: block
- state: present
- <<: *aws_connection_info
- register: web_acl_add_rule
-
- - name: check that rule was removed from the web acl
- assert:
- that:
- - web_acl_add_rule.changed
- - web_acl_add_rule.web_acl.rules|length == 1
-
- - name: swap two rules of the same priority
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- rules:
- - name: "{{ resource_prefix }}_rule"
- priority: 2
- action: allow
- purge_rules: yes
- default_action: block
- state: present
- <<: *aws_connection_info
- register: web_acl_swap_rule
-
- - name: attempt to delete the in-use first rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
- register: remove_inuse_rule
-
- - name: check that removing in-use rule fails
- assert:
- that:
- - remove_inuse_rule.failed
-
- - name: delete the web acl
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- state: absent
- <<: *aws_connection_info
- register: delete_web_acl
-
- - name: check that web acl was deleted
- assert:
- that:
- - delete_web_acl.changed
- - not delete_web_acl.web_acl
-
- - name: delete the first rule now that it is no longer in use
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- state: absent
- <<: *aws_connection_info
-
- - name: create WAF Regional web ACL
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- rules:
- - name: "{{ resource_prefix }}_rule"
- priority: 1
- action: block
- default_action: block
- purge_rules: yes
- state: present
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: create_waf_regional_web_acl
-
- - name: recreate WAF Regional web ACL
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- rules:
- - name: "{{ resource_prefix }}_rule"
- priority: 1
- action: block
- default_action: block
- state: present
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: recreate_waf_regional_web_acl
-
- - name: check WAF Regional web acl was not changed
- assert:
- that:
- - not recreate_waf_regional_web_acl.changed
- - recreate_waf_regional_web_acl.web_acl.rules|length == 1
-
- - name: create a second WAF Regional rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule_2"
- conditions:
- - name: "{{ resource_prefix }}_ip_condition"
- type: ip
- negated: yes
- - name: "{{ resource_prefix }}_sql_condition"
- type: sql
- negated: no
- - name: "{{ resource_prefix }}_xss_condition"
- type: xss
- negated: no
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
-
- - name: add a new rule to the WAF Regional web acl
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- rules:
- - name: "{{ resource_prefix }}_rule_2"
- priority: 2
- action: allow
- default_action: block
- state: present
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: waf_regional_web_acl_add_rule
-
- - name: check that rule was added to the WAF Regional web acl
- assert:
- that:
- - waf_regional_web_acl_add_rule.changed
- - waf_regional_web_acl_add_rule.web_acl.rules|length == 2
-
- - name: use purge_rules to remove the first WAF Regional rule
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- rules:
- - name: "{{ resource_prefix }}_rule_2"
- priority: 2
- action: allow
- purge_rules: yes
- default_action: block
- state: present
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: waf_regional_web_acl_add_rule
-
- - name: check that rule was removed from the WAF Regional web acl
- assert:
- that:
- - waf_regional_web_acl_add_rule.changed
- - waf_regional_web_acl_add_rule.web_acl.rules|length == 1
-
- - name: swap two WAF Regional rules of the same priority
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- rules:
- - name: "{{ resource_prefix }}_rule"
- priority: 2
- action: allow
- purge_rules: yes
- default_action: block
- state: present
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: waf_regional_web_acl_swap_rule
-
- - name: attempt to delete the in-use WAF Regional first rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
- register: remove_waf_regional_inuse_rule
-
- - name: check that removing WAF Regional in-use rule fails
- assert:
- that:
- - remove_waf_regional_inuse_rule.failed
-
- - name: delete the WAF Regional web acl
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- register: delete_waf_regional_web_acl
-
- - name: check that WAF Regional web acl was deleted
- assert:
- that:
- - delete_waf_regional_web_acl.changed
- - not delete_waf_regional_web_acl.web_acl
-
- - name: delete the WAF Regional first rule now that it is no longer in use
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
-
- ##################################################
- # TEARDOWN
- ##################################################
-
- always:
- - debug:
- msg: "****** TEARDOWN STARTS HERE ******"
-
- - name: delete the web acl
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- state: absent
- purge_rules: yes
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove second WAF rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule_2"
- state: absent
- purge_conditions: yes
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove WAF rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- state: absent
- purge_conditions: yes
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove XSS condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_xss_condition"
- type: xss
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove SQL condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_sql_condition"
- type: sql
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove size condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_size_condition"
- type: size
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove geo condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_geo_condition"
- type: geo
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove byte condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_byte_condition"
- type: byte
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove IP address condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- type: ip
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove regex part 2 condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition_part_2"
- type: regex
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove first regex condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition"
- type: regex
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: delete the WAF Regional web acl
- aws_waf_web_acl:
- name: "{{ resource_prefix }}_web_acl"
- state: absent
- purge_rules: yes
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove second WAF Regional rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule_2"
- state: absent
- purge_conditions: yes
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove WAF Regional rule
- aws_waf_rule:
- name: "{{ resource_prefix }}_rule"
- state: absent
- purge_conditions: yes
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove WAF Regional XSS condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_xss_condition"
- type: xss
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove WAF Regional SQL condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_sql_condition"
- type: sql
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove WAF Regional size condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_size_condition"
- type: size
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove WAF Regional geo condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_geo_condition"
- type: geo
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove WAF Regional byte condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_byte_condition"
- type: byte
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove WAF Regional IP address condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_ip_condition"
- type: ip
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove WAF Regional regex part 2 condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition_part_2"
- type: regex
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove first WAF Regional regex condition
- aws_waf_condition:
- name: "{{ resource_prefix }}_regex_condition"
- type: regex
- state: absent
- region: "{{ aws_region }}"
- waf_regional: true
- <<: *aws_connection_info
- ignore_errors: yes
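The tasks above exercise the full WAF object graph in dependency order: conditions are attached to rules, rules are attached to the web ACL, and teardown has to walk the same chain in reverse because each layer refuses to delete an in-use object (hence the "is in use" assertions). A condensed sketch of the create path, using only options already exercised above:

- name: create a condition, then a rule that uses it, then an ACL that uses the rule
  aws_waf_condition:
    name: "{{ resource_prefix }}_ip_condition"
    type: ip
    filters:
      - ip_address: "10.0.0.0/8"

- aws_waf_rule:
    name: "{{ resource_prefix }}_rule"
    conditions:
      - name: "{{ resource_prefix }}_ip_condition"
        type: ip
        negated: no

- aws_waf_web_acl:
    name: "{{ resource_prefix }}_web_acl"
    default_action: block
    rules:
      - name: "{{ resource_prefix }}_rule"
        priority: 1
        action: block
    state: present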
diff --git a/test/integration/targets/cloudformation_exports_info/aliases b/test/integration/targets/cloudformation_exports_info/aliases
deleted file mode 100644
index 157ce0c9d4..0000000000
--- a/test/integration/targets/cloudformation_exports_info/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group3
diff --git a/test/integration/targets/cloudformation_exports_info/defaults/main.yml b/test/integration/targets/cloudformation_exports_info/defaults/main.yml
deleted file mode 100644
index 4edd7475e9..0000000000
--- a/test/integration/targets/cloudformation_exports_info/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
-stack_name: "{{ resource_prefix }}"
diff --git a/test/integration/targets/cloudformation_exports_info/files/test_stack.yml b/test/integration/targets/cloudformation_exports_info/files/test_stack.yml
deleted file mode 100644
index f1dcba3034..0000000000
--- a/test/integration/targets/cloudformation_exports_info/files/test_stack.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-AWSTemplateFormatVersion: 2010-09-09
-Description: Create an item in Exports
-Parameters:
- TestParamValue:
- Type: String
- Description: A param Value to be placed in Exports
- TestParamName:
- Type: String
- Description: A param Name for SSM Parameter Store
- BucketSuffix:
- Type: String
-Resources:
- TestBucket:
- Type: AWS::S3::Bucket
- Properties:
- BucketName:
- Fn::Sub: "cf-export-${BucketSuffix}"
-Outputs:
- TestParamValue:
- Value:
- Ref: TestParamValue
- Export:
- Name:
- Fn::Sub: "${TestParamName}"
diff --git a/test/integration/targets/cloudformation_exports_info/tasks/main.yml b/test/integration/targets/cloudformation_exports_info/tasks/main.yml
deleted file mode 100644
index 87823ccafe..0000000000
--- a/test/integration/targets/cloudformation_exports_info/tasks/main.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-- name: set connection information for aws modules and run tasks
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
-
- block:
- - name: Create a minimal stack with an export set by parameter
- cloudformation:
- stack_name: "{{ stack_name }}"
- template_body: "{{ lookup('file','test_stack.yml') }}"
- template_parameters:
- TestParamName: "cf-exports-param"
- TestParamValue: "Set By CF Exports"
- BucketSuffix: "{{ resource_prefix }}"
- register: cf_stack
- - name: Read from Exports
- cloudformation_exports_info:
- region: "{{ aws_region }}"
- register: exports_result
- - set_fact:
- export_items: "{{ exports_result['export_items'] }}"
- - assert:
- that:
- - export_items is defined
- - export_items['cf-exports-param'] is defined
- # - export_items | length == 1
-
-
-# Cleanup
- always:
-
- - name: delete stack
- cloudformation:
- stack_name: "{{ stack_name }}"
- state: absent
- ignore_errors: yes
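As used above, cloudformation_exports_info takes no filter arguments beyond the connection options: it returns the whole region-wide Exports map under export_items, and individual values are read back by export name. A sketch of consuming the export the stack above creates:

- name: read one export back by the name passed as TestParamName
  debug:
    msg: "{{ exports_result.export_items['cf-exports-param'] }}"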
diff --git a/test/integration/targets/cloudformation_stack_set/aliases b/test/integration/targets/cloudformation_stack_set/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/cloudformation_stack_set/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/cloudformation_stack_set/files/test_bucket_stack.yml b/test/integration/targets/cloudformation_stack_set/files/test_bucket_stack.yml
deleted file mode 100644
index dfbc522419..0000000000
--- a/test/integration/targets/cloudformation_stack_set/files/test_bucket_stack.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-AWSTemplateFormatVersion: "2010-09-09"
-Parameters: {}
-Resources:
- Bukkit:
- Type: "AWS::S3::Bucket"
- Properties: {}
diff --git a/test/integration/targets/cloudformation_stack_set/files/test_modded_bucket_stack.yml b/test/integration/targets/cloudformation_stack_set/files/test_modded_bucket_stack.yml
deleted file mode 100644
index 68df61c617..0000000000
--- a/test/integration/targets/cloudformation_stack_set/files/test_modded_bucket_stack.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-AWSTemplateFormatVersion: "2010-09-09"
-Parameters: {}
-Resources:
- Bukkit:
- Type: "AWS::S3::Bucket"
- Properties: {}
- other:
- Type: "AWS::SNS::Topic"
- Properties: {}
diff --git a/test/integration/targets/cloudformation_stack_set/playbooks/full_test.yml b/test/integration/targets/cloudformation_stack_set/playbooks/full_test.yml
deleted file mode 100644
index 257e1e48a5..0000000000
--- a/test/integration/targets/cloudformation_stack_set/playbooks/full_test.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-- hosts: localhost
- connection: local
- environment: "{{ ansible_test.environment }}"
-
- roles:
- - ../../cloudformation_stack_set
diff --git a/test/integration/targets/cloudformation_stack_set/runme.sh b/test/integration/targets/cloudformation_stack_set/runme.sh
deleted file mode 100755
index d499c679b2..0000000000
--- a/test/integration/targets/cloudformation_stack_set/runme.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-# Run full test suite
-source virtualenv.sh
-pip install 'botocore>1.10.26' boto3
-ansible-playbook -i ../../inventory -v playbooks/full_test.yml "$@"
diff --git a/test/integration/targets/cloudformation_stack_set/tasks/main.yml b/test/integration/targets/cloudformation_stack_set/tasks/main.yml
deleted file mode 100644
index 1b11ce603d..0000000000
--- a/test/integration/targets/cloudformation_stack_set/tasks/main.yml
+++ /dev/null
@@ -1,186 +0,0 @@
----
-# tasks file for cloudformation_stack_set module tests
-# These tests require access to two separate AWS accounts
-
-- name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- aws_secondary_connection_info: &aws_secondary_connection_info
- aws_access_key: "{{ secondary_aws_access_key }}"
- aws_secret_key: "{{ secondary_aws_secret_key }}"
- security_token: "{{ secondary_security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
-- block:
- - name: Get current account ID
- aws_caller_info:
- <<: *aws_connection_info
- register: whoami
- - name: Get target (secondary) account ID
- aws_caller_info:
- <<: *aws_secondary_connection_info
- register: target_acct
-
- - name: Create policy to allow assuming the stack set execution role
- iam_managed_policy:
- policy_name: AssumeCfnStackSetExecRole
- state: present
- <<: *aws_connection_info
- policy:
- Version: '2012-10-17'
- Statement:
- - Action: 'sts:AssumeRole'
- Effect: Allow
- Resource: arn:aws:iam::*:role/CfnStackSetExecRole
- policy_description: Assume CfnStackSetExecRole
-
- - name: Create an execution role for us to use
- iam_role:
- name: CfnStackSetExecRole
- <<: *aws_secondary_connection_info
- assume_role_policy_document:
- Version: '2012-10-17'
- Statement:
- - Action: 'sts:AssumeRole'
- Effect: Allow
- Principal:
- AWS: '{{ whoami.account }}'
- managed_policy:
- - arn:aws:iam::aws:policy/PowerUserAccess
-
- - name: Create an administration role for us to use
- iam_role:
- name: CfnStackSetAdminRole
- <<: *aws_connection_info
- assume_role_policy_document:
- Version: '2012-10-17'
- Statement:
- - Action: 'sts:AssumeRole'
- Effect: Allow
- Principal:
- Service: 'cloudformation.amazonaws.com'
- managed_policy:
- - arn:aws:iam::{{ whoami.account }}:policy/AssumeCfnStackSetExecRole
- #- arn:aws:iam::aws:policy/PowerUserAccess
-
- - name: Should fail without account/regions
- cloudformation_stack_set:
- <<: *aws_connection_info
- name: TestSetOne
- description: TestStack Prime
- tags:
- Some: Thing
- Type: Test
- wait: true
- template: test_bucket_stack.yml
- register: result
- ignore_errors: true
- - name: assert that running with no account fails
- assert:
- that:
- - result is failed
- - >
- "Can't create a stack set without choosing at least one account" in result.msg
- - name: Should fail without roles
- cloudformation_stack_set:
- <<: *aws_connection_info
- name: TestSetOne
- description: TestStack Prime
- tags:
- Some: Thing
- Type: Test
- wait: true
- regions:
- - '{{ aws_region }}'
- accounts:
- - '{{ whoami.account }}'
- template_body: '{{ lookup("file", "test_bucket_stack.yml") }}'
- register: result
- ignore_errors: true
- - name: assert that running without roles fails
- assert:
- that:
- - result is failed
-
- - name: Delete the execution role
- iam_role:
- name: CfnStackSetExecRole
- state: absent
- <<: *aws_connection_info
- assume_role_policy_document:
- Version: '2012-10-17'
- Statement:
- - Action: 'sts:AssumeRole'
- Effect: Allow
- Principal:
- AWS: arn:aws:iam::{{ whoami.account }}:root
- managed_policy:
- - arn:aws:iam::aws:policy/PowerUserAccess
-
- - name: Create stack with roles
- cloudformation_stack_set:
- <<: *aws_connection_info
- name: TestSetTwo
- description: TestStack Dos
- tags:
- Some: Thing
- Type: Test
- wait: true
- regions:
- - '{{ aws_region }}'
- accounts:
- - '{{ target_acct.account }}'
- exec_role_name: CfnStackSetExecRole
- admin_role_arn: arn:aws:iam::{{ whoami.account }}:role/CfnStackSetAdminRole
- template_body: '{{ lookup("file", "test_bucket_stack.yml") }}'
- register: result
-
- - name: Update stack with roles
- cloudformation_stack_set:
- <<: *aws_connection_info
- name: TestSetTwo
- description: TestStack Dos
- tags:
- Some: Thing
- Type: Test
- wait: true
- regions:
- - '{{ aws_region }}'
- accounts:
- - '{{ target_acct.account }}'
- exec_role_name: CfnStackSetExecRole
- admin_role_arn: arn:aws:iam::{{ whoami.account }}:role/CfnStackSetAdminRole
- template_body: '{{ lookup("file", "test_modded_bucket_stack.yml") }}'
- always:
- - name: Clean up stack one
- cloudformation_stack_set:
- <<: *aws_connection_info
- name: TestSetOne
- wait: true
- regions:
- - '{{ aws_region }}'
- accounts:
- - '{{ whoami.account }}'
- purge_stacks: true
- state: absent
- - name: Clean up stack two
- cloudformation_stack_set:
- <<: *aws_connection_info
- name: TestSetTwo
- description: TestStack Dos
- purge_stacks: true
- tags:
- Some: Thing
- Type: Test
- wait: true
- regions:
- - '{{ aws_region }}'
- accounts:
- - '{{ target_acct.account }}'
- template_body: '{{ lookup("file", "test_bucket_stack.yml") }}'
- state: absent
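The role pair created above is the self-managed StackSets permission model: CloudFormation assumes the administration role in the calling account, which in turn assumes the execution role that each target account trusts. The module call only has to wire the two names together; a sketch under the role names used above:

- cloudformation_stack_set:
    name: TestSetTwo
    admin_role_arn: "arn:aws:iam::{{ whoami.account }}:role/CfnStackSetAdminRole"
    exec_role_name: CfnStackSetExecRole
    regions:
      - "{{ aws_region }}"
    accounts:
      - "{{ target_acct.account }}"
    template_body: "{{ lookup('file', 'test_bucket_stack.yml') }}"
    state: present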
diff --git a/test/integration/targets/cloudfront_distribution/aliases b/test/integration/targets/cloudfront_distribution/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/cloudfront_distribution/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/cloudfront_distribution/defaults/main.yml b/test/integration/targets/cloudfront_distribution/defaults/main.yml
deleted file mode 100644
index b88dbc244f..0000000000
--- a/test/integration/targets/cloudfront_distribution/defaults/main.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-cloudfront_hostname: "{{ resource_prefix }}01"
-
-# Use a domain that has a wildcard DNS
-# Using an alias requires also having an SSL cert...
-#cloudfront_alias: "{{ cloudfront_hostname }}.github.io"
-#cloudfront_viewer_cert:
-# acm_certificate_arn: ...
-# certificate: ...
-# certificate_source: ...
-# minimum_protocol_version: ...
-# ssl_support_method: ...
-
-cloudfront_test_cache_behaviors:
- - path_pattern: /test/path
- forwarded_values:
- headers:
- - Host
- - X-HTTP-Forwarded-For
- - CloudFront-Forwarded-Proto
- - Origin
- - Referer
- allowed_methods:
- items:
- - GET
- - HEAD
- - POST
- - PATCH
- - PUT
- - OPTIONS
- - DELETE
- cached_methods:
- - GET
- - HEAD
- - path_pattern: /another/path
- forwarded_values:
- cookies:
- forward: whitelist
- whitelisted_names:
- - my_header
- query_string: yes
- query_string_cache_keys:
- - whatever
- allowed_methods:
- items:
- - GET
- - HEAD
- cached_methods:
- - GET
- - HEAD
diff --git a/test/integration/targets/cloudfront_distribution/meta/main.yml b/test/integration/targets/cloudfront_distribution/meta/main.yml
deleted file mode 100644
index 32cf5dda7e..0000000000
--- a/test/integration/targets/cloudfront_distribution/meta/main.yml
+++ /dev/null
@@ -1 +0,0 @@
-dependencies: []
diff --git a/test/integration/targets/cloudfront_distribution/tasks/main.yml b/test/integration/targets/cloudfront_distribution/tasks/main.yml
deleted file mode 100644
index 9411a37a6e..0000000000
--- a/test/integration/targets/cloudfront_distribution/tasks/main.yml
+++ /dev/null
@@ -1,422 +0,0 @@
-- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- cloudfront_distribution:
- alias: "{{ cloudfront_alias | default(omit) }}"
- viewer_certificate: "{{ cloudfront_viewer_cert | default(omit) }}"
- block:
-
- - name: create cloudfront distribution using defaults
- cloudfront_distribution:
- origins:
- - domain_name: "{{ cloudfront_hostname }}-origin.example.com"
- id: "{{ cloudfront_hostname }}-origin.example.com"
- default_cache_behavior:
- target_origin_id: "{{ cloudfront_hostname }}-origin.example.com"
- state: present
- purge_origins: yes
- register: cf_distribution
-
- - set_fact:
- distribution_id: '{{ cf_distribution.id }}'
-
- - name: re-run cloudfront distribution with same defaults
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ cloudfront_hostname }}-origin.example.com"
- state: present
- register: cf_dist_no_update
-
- - name: ensure distribution was not updated
- assert:
- that:
- - not cf_dist_no_update.changed
-
- - name: re-run cloudfront distribution using distribution id
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- purge_origins: no
- state: present
- register: cf_dist_with_id
-
- - name: ensure distribution was not updated when referenced by id
- assert:
- that:
- - not cf_dist_with_id.changed
-
- - name: update origin http port
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ cloudfront_hostname }}-origin.example.com"
- custom_origin_config:
- http_port: 8080
- state: present
- register: update_origin_http_port
-
- - name: ensure http port was updated
- assert:
- that:
- - update_origin_http_port.changed
-
- - name: update restrictions
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- restrictions:
- geo_restriction:
- restriction_type: "whitelist"
- items:
- - "US"
- state: present
- register: update_restrictions
-
- - name: ensure restrictions were updated
- assert:
- that:
- - update_restrictions.changed
-
- - name: set a random comment
- set_fact:
- comment: "{{'ABCDEFabcdef123456'|shuffle|join }}"
-
- - name: update comment
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- comment: "{{ comment }}"
- state: present
- register: cf_comment
-
- - name: ensure comment was updated
- assert:
- that:
- - cf_comment.changed
- - 'cf_comment.comment == comment'
-
- - name: create second origin
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}2.example.com"
- id: "{{ resource_prefix }}2.example.com"
- default_root_object: index.html
- state: present
- wait: yes
- register: cf_add_origin
-
- - name: ensure origin was added
- assert:
- that:
- - cf_add_origin.origins.quantity == 2
- - cf_add_origin.changed
- - "cf_add_origin.default_root_object == 'index.html'"
-
- - name: re-run second origin
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ cloudfront_hostname }}-origin.example.com"
- custom_origin_config:
- http_port: 8080
- - domain_name: "{{ resource_prefix }}2.example.com"
- default_root_object: index.html
- wait: yes
- state: present
- register: cf_rerun_second_origin
-
- - name: ensure nothing changed after re-run
- assert:
- that:
- - cf_rerun_second_origin.origins.quantity == 2
- - not cf_rerun_second_origin.changed
-
- - name: run with origins in reverse order
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}2.example.com"
- - domain_name: "{{ cloudfront_hostname }}-origin.example.com"
- custom_origin_config:
- http_port: 8080
- state: present
- register: cf_rerun_second_origin_reversed
-
- - name: ensure nothing changed after reversed re-run
- assert:
- that:
- - cf_rerun_second_origin_reversed.origins.quantity == 2
- - not cf_rerun_second_origin_reversed.changed
-
-
- - name: purge first origin
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}2.example.com"
- default_cache_behavior:
- target_origin_id: "{{ resource_prefix }}2.example.com"
- purge_origins: yes
- state: present
- register: cf_purge_origin
-
- - name: ensure origin was removed
- assert:
- that:
- - cf_purge_origin.origins.quantity == 1
- - cf_purge_origin.changed
-
- - name: update default_root_object of existing distribution
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}2.example.com"
- default_root_object: index.php
- state: present
- register: cf_update_default_root_object
-
- - name: ensure origin was updated
- assert:
- that:
- - "cf_update_default_root_object.default_root_object == 'index.php'"
- - cf_update_default_root_object.changed
-
- - name: add tags to existing distribution
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}2.example.com"
- tags:
- ATag: tag1
- Another: tag
- default_root_object: index.php
- state: present
- register: cf_add_tags
-
- - name: ensure tags were added
- assert:
- that:
- - cf_add_tags.changed
- - cf_add_tags.tags|length == 2
-
- - name: delete distribution
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- enabled: no
- wait: yes
- state: absent
-
- - name: create distribution with tags
- cloudfront_distribution:
- origins:
- - domain_name: "{{ resource_prefix }}2.example.com"
- id: "{{ resource_prefix }}2.example.com"
- tags:
- ATag: tag1
- Another: tag
- state: present
- register: cf_second_distribution
-
- - set_fact:
- distribution_id: '{{ cf_second_distribution.id }}'
-
- - name: ensure tags were set on creation
- assert:
- that:
- - cf_second_distribution.changed
- - cf_second_distribution.tags|length == 2
- - "'ATag' in cf_second_distribution.tags"
- - "'Another' in cf_second_distribution.tags"
-
- - name: re-run create distribution with same tags and purge_tags
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}2.example.com"
- id: "{{ resource_prefix }}2.example.com"
- tags:
- ATag: tag1
- Another: tag
- purge_tags: yes
- state: present
- register: rerun_with_purge_tags
-
- - name: ensure that re-running didn't change anything
- assert:
- that:
- - not rerun_with_purge_tags.changed
- - rerun_with_purge_tags.tags|length == 2
-
- - name: add new tag to distribution
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}2.example.com"
- tags:
- Third: thing
- purge_tags: no
- state: present
- register: update_with_new_tag
-
- - name: ensure tags are correct
- assert:
- that:
- - update_with_new_tag.changed
- - "'Third' in update_with_new_tag.tags"
- - "'Another' in update_with_new_tag.tags"
- - "'ATag' in update_with_new_tag.tags"
- - update_with_new_tag.tags|length == 3
-
- - name: create some cache behaviors
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}2.example.com"
- cache_behaviors: "{{ cloudfront_test_cache_behaviors }}"
- state: present
- register: add_cache_behaviors
-
- - name: reverse some cache behaviors
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}2.example.com"
- cache_behaviors: "{{ cloudfront_test_cache_behaviors|reverse|list }}"
- state: present
- register: reverse_cache_behaviors
-
- - name: check that reversing cache behaviors changes nothing when purge_cache_behaviors unset
- assert:
- that:
- - not reverse_cache_behaviors.changed
- - reverse_cache_behaviors.cache_behaviors|length == 2
-
- - name: reverse some cache behaviors properly
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}2.example.com"
- cache_behaviors: "{{ cloudfront_test_cache_behaviors|reverse|list }}"
- purge_cache_behaviors: yes
- state: present
- register: reverse_cache_behaviors_with_purge
-
- - name: check that reversing cache behaviors changes the order when purge_cache_behaviors is set
- assert:
- that:
- - reverse_cache_behaviors_with_purge.changed
- - reverse_cache_behaviors_with_purge.cache_behaviors|length == 2
-
- - name: update origin that changes target id (failure expected)
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}3.example.com"
- id: "{{ resource_prefix }}3.example.com"
- purge_origins: yes
- state: present
- register: remove_origin_in_use
- ignore_errors: yes
-
- - name: check that removing an in-use origin fails
- assert:
- that:
- - remove_origin_in_use.failed
-
- # FIXME: This currently fails due to AWS side problems
- # not clear whether to hope they fix or prevent this issue from happening
- #- name: update origin and update cache behavior to point to new origin
- # cloudfront_distribution:
- # origins:
- # - domain_name: "{{ resource_prefix }}3.example.com"
- # id: "{{ resource_prefix }}3.example.com"
- # cache_behaviors:
- # - path_pattern: /test/path
- # target_origin_id: "{{ resource_prefix }}3.example.com"
- # - path_pattern: /another/path
- # target_origin_id: "{{ resource_prefix }}3.example.com"
- # state: present
- # aws_access_key: "{{ aws_access_key|default(omit) }}"
- # aws_secret_key: "{{ aws_secret_key|default(omit) }}"
- # security_token: "{{ security_token|default(omit) }}"
- # profile: "{{ profile|default(omit) }}"
- # register: update_cache_behaviors_in_use
-
- - name: create an s3 bucket for next test
- # note that although public-read allows reads that we want to stop with origin_access_identity,
- # we also need to test without origin_access_identity and it's hard to change bucket perms later
- aws_s3:
- bucket: "{{ resource_prefix }}-bucket"
- mode: create
-
- - name: update origin to point to the s3 bucket
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com"
- id: "{{ resource_prefix }}3.example.com"
- s3_origin_access_identity_enabled: yes
- state: present
- register: update_origin_to_s3
-
- - name: check that s3 origin access is in result
- assert:
- that:
- - item.s3_origin_config.origin_access_identity.startswith('origin-access-identity/cloudfront/')
- when: "'s3_origin_config' in item"
- loop: "{{ update_origin_to_s3.origins['items'] }}"
-
- - name: update origin to remove s3 origin access identity
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com"
- id: "{{ resource_prefix }}3.example.com"
- s3_origin_access_identity_enabled: no
- state: present
- register: update_origin_to_s3_without_origin_access
-
- - name: check that s3 origin access is not in result
- assert:
- that:
- - not item.s3_origin_config.origin_access_identity
- when: "'s3_origin_config' in item"
- loop: "{{ update_origin_to_s3_without_origin_access.origins['items'] }}"
-
- - name: delete the s3 bucket
- aws_s3:
- bucket: "{{ resource_prefix }}-bucket"
- mode: delete
-
- - name: check that custom_origin_config can't be used with origin_access_identity enabled
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- origins:
- - domain_name: "{{ resource_prefix }}-bucket.s3.amazonaws.com"
- id: "{{ resource_prefix }}3.example.com"
- s3_origin_access_identity_enabled: yes
- custom_origin_config:
- origin_protocol_policy: 'http-only'
- state: present
- register: update_origin_to_s3_with_origin_access_and_with_custom_origin_config
- ignore_errors: True
-
- - name: check that custom origin with origin access identity fails
- assert:
- that:
- - update_origin_to_s3_with_origin_access_and_with_custom_origin_config.failed
-
- always:
- # TEARDOWN STARTS HERE
- - name: delete the s3 bucket
- aws_s3:
- bucket: "{{ resource_prefix }}-bucket"
- mode: delete
-
- - name: clean up cloudfront distribution
- cloudfront_distribution:
- distribution_id: "{{ distribution_id }}"
- enabled: no
- wait: yes
- state: absent
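Two credential-plumbing styles coexist in these deleted targets: newer ones (this file, cloudtrail below, cloudformation_exports_info above) set credentials once via module_defaults for group/aws, while older ones (the WAF tests) define a YAML anchor and splice it into every task with a merge key. The two are equivalent here; a side-by-side sketch using aws_caller_info, which the tests above already call:

# module_defaults style: credentials inherited by every module in group/aws
- module_defaults:
    group/aws:
      aws_access_key: "{{ aws_access_key }}"
      aws_secret_key: "{{ aws_secret_key }}"
      security_token: "{{ security_token | default(omit) }}"
  block:
    - name: no per-task credential arguments needed
      aws_caller_info: {}

# anchor style: define the mapping once, merge it into each task's arguments
- set_fact:
    aws_connection_info: &aws_connection_info
      aws_access_key: "{{ aws_access_key }}"
      aws_secret_key: "{{ aws_secret_key }}"
      security_token: "{{ security_token }}"
  no_log: yes

- name: the merge key splices the credentials into the module arguments
  aws_caller_info:
    <<: *aws_connection_info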
diff --git a/test/integration/targets/cloudtrail/aliases b/test/integration/targets/cloudtrail/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/cloudtrail/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/cloudtrail/defaults/main.yml b/test/integration/targets/cloudtrail/defaults/main.yml
deleted file mode 100644
index 7338e364da..0000000000
--- a/test/integration/targets/cloudtrail/defaults/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-cloudtrail_name: '{{ resource_prefix }}-cloudtrail'
-s3_bucket_name: '{{ resource_prefix }}-cloudtrail-bucket'
-kms_alias: '{{ resource_prefix }}-cloudtrail'
-sns_topic: '{{ resource_prefix }}-cloudtrail-notifications'
-cloudtrail_prefix: 'test-prefix'
-cloudwatch_log_group: '{{ resource_prefix }}-cloudtrail'
-cloudwatch_role: '{{ resource_prefix }}-cloudtrail'
diff --git a/test/integration/targets/cloudtrail/tasks/main.yml b/test/integration/targets/cloudtrail/tasks/main.yml
deleted file mode 100644
index 9806d9093f..0000000000
--- a/test/integration/targets/cloudtrail/tasks/main.yml
+++ /dev/null
@@ -1,1423 +0,0 @@
----
-# General Tests:
-# - s3_bucket_name required when state is 'present'
-# - Creation / Deletion
-# - Enable/Disable logging
-# - Enable/Disable log file validation option
-# - Manipulation of Global Event logging option
-# - Manipulation of Multi-Region logging option
-# - Manipulation of S3 bucket option
-# - Manipulation of Encryption option
-# - Manipulation of SNS options
-# - Manipulation of CloudWatch Log group options
-# - Manipulation of Tags
-#
-# Notes:
-# - results include the updates, even when check_mode is true
-# - Poor handling of disable global + enable multi-region
-# botocore.errorfactory.InvalidParameterCombinationException: An error
-# occurred (InvalidParameterCombinationException) when calling the
-# UpdateTrail operation: Multi-Region trail must include global service
-# events.
-# - Using blank string for KMS ID doesn't remove encryption
-# - Using blank string for SNS Topic doesn't remove it
-# - Using blank string for CloudWatch Log Group / Role doesn't remove them
-#
-# Possible Bugs:
-# - output.exists == false when creating
-# - Changed reports true when using a KMS alias
-# - Tag keys are being lower-cased
-
-- module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region }}'
- # Add this as a default because we (almost) always need it
- cloudtrail:
- s3_bucket_name: '{{ s3_bucket_name }}'
- block:
-
- # ============================================================
- # Argument Tests
- # ============================================================
- - name: 'S3 Bucket required when state is "present"'
- module_defaults: { cloudtrail: {} }
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- ignore_errors: yes
- - assert:
- that:
- - output is failed
- - '"s3_bucket_name" in output.msg'
-
- - name: 'CloudWatch cloudwatch_logs_log_group_arn required when cloudwatch_logs_role_arn passed'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- cloudwatch_logs_role_arn: 'SomeValue'
- register: output
- ignore_errors: yes
- - assert:
- that:
- - output is failed
- - '"parameters are required together" in output.msg'
- - '"cloudwatch_logs_log_group_arn" in output.msg'
-
- - name: 'CloudWatch cloudwatch_logs_role_arn required when cloudwatch_logs_log_group_arn passed'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- cloudwatch_logs_log_group_arn: 'SomeValue'
- register: output
- ignore_errors: yes
- - assert:
- that:
- - output is failed
- - '"parameters are required together" in output.msg'
- - '"cloudwatch_logs_role_arn" in output.msg'
-
- #- name: 'Global Logging must be enabled when enabling Multi-region'
- # cloudtrail:
- # state: present
- # name: '{{ cloudtrail_name }}'
- # include_global_events: no
- # is_multi_region_trail: yes
- # register: output
- # ignore_errors: yes
- #- assert:
- # that:
- # - output is failed
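-  # Disabled: the API rejects this combination outright with
-  # InvalidParameterCombinationException (see the note at the top of
-  # this file).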
-
- # ============================================================
- # Preparation
- # ============================================================
- - name: 'Retrieve caller facts'
- aws_caller_info: {}
- register: aws_caller_info
-
- - name: 'Create S3 bucket'
- vars:
- bucket_name: '{{ s3_bucket_name }}'
- s3_bucket:
- state: present
- name: '{{ bucket_name }}'
- policy: '{{ lookup("template", "s3-policy.j2") }}'
- - name: 'Create second S3 bucket'
- vars:
- bucket_name: '{{ s3_bucket_name }}-2'
- s3_bucket:
- state: present
- name: '{{ bucket_name }}'
- policy: '{{ lookup("template", "s3-policy.j2") }}'
-
- - name: 'Create SNS Topic'
- vars:
- sns_topic_name: '{{ sns_topic }}'
- sns_topic:
- state: present
- name: '{{ sns_topic_name }}'
- display_name: 'Used for testing SNS/CloudWatch integration'
- policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}"
- register: output_sns_topic
- - name: 'Create second SNS Topic'
- vars:
- sns_topic_name: '{{ sns_topic }}-2'
- sns_topic:
- state: present
- name: '{{ sns_topic_name }}'
- display_name: 'Used for testing SNS/CloudWatch integration'
- policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}"
-
- - name: 'Create KMS Key'
- aws_kms:
- state: present
- alias: '{{ kms_alias }}'
- enabled: yes
- policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}"
- register: kms_key
- - name: 'Create second KMS Key'
- aws_kms:
- state: present
- alias: '{{ kms_alias }}-2'
- enabled: yes
- policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}"
- register: kms_key2
-
- - name: 'Create CloudWatch IAM Role'
- iam_role:
- state: present
- name: '{{ cloudwatch_role }}'
- assume_role_policy_document: "{{ lookup('template', 'cloudwatch-assume-policy.j2') }}"
- register: output_cloudwatch_role
- - name: 'Create CloudWatch Log Group'
- cloudwatchlogs_log_group:
- state: present
- log_group_name: '{{ cloudwatch_log_group }}'
- retention: 1
- register: output_cloudwatch_log_group
- - name: 'Create second CloudWatch Log Group'
- cloudwatchlogs_log_group:
- state: present
- log_group_name: '{{ cloudwatch_log_group }}-2'
- retention: 1
- register: output_cloudwatch_log_group2
- - name: 'Add inline policy to CloudWatch Role'
- iam_policy:
- state: present
- iam_type: role
- iam_name: '{{ cloudwatch_role }}'
- policy_name: 'CloudWatch'
- policy_json: "{{ lookup('template', 'cloudwatch-policy.j2') | to_json }}"
-
- # ============================================================
- # Tests
- # ============================================================
-
- - name: 'Create a trail (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Create a trail'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output is changed
- # XXX This appears to be a bug...
- #- output.exists == True
- - output.trail.name == cloudtrail_name
-
- - name: 'No-op update to trail'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.exists == True
- # Check everything is what we expect before we start making changes
- - output.trail.name == cloudtrail_name
- - output.trail.home_region == aws_region
- - output.trail.include_global_service_events == True
- - output.trail.is_multi_region_trail == False
- - output.trail.is_logging == True
- - output.trail.log_file_validation_enabled == False
- - output.trail.s3_bucket_name == s3_bucket_name
- - output.trail.s3_key_prefix is none
- - output.trail.kms_key_id is none
- - output.trail.sns_topic_arn is none
- - output.trail.sns_topic_name is none
- - output.trail.tags | length == 0
-
- # ============================================================
-
- - name: 'Set S3 prefix (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_key_prefix: '{{ cloudtrail_prefix }}'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Set S3 prefix'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_key_prefix: '{{ cloudtrail_prefix }}'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.s3_key_prefix == cloudtrail_prefix
-
- - name: 'Set S3 prefix (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_key_prefix: '{{ cloudtrail_prefix }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.s3_key_prefix == cloudtrail_prefix
-
- - name: 'No-op update to trail'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.s3_key_prefix == cloudtrail_prefix
-
- - name: 'Update S3 prefix (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_key_prefix: '{{ cloudtrail_prefix }}-2'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Update S3 prefix'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_key_prefix: '{{ cloudtrail_prefix }}-2'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - 'output.trail.s3_key_prefix == "{{ cloudtrail_prefix }}-2"'
-
- - name: 'Update S3 prefix (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_key_prefix: '{{ cloudtrail_prefix }}-2'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - 'output.trail.s3_key_prefix == "{{ cloudtrail_prefix }}-2"'
-
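-  # The tasks below rely on s3_key_prefix: '/' clearing an existing prefix
-  # entirely, hence the "is none" assertions.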
- - name: 'Remove S3 prefix (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_key_prefix: '/'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Remove S3 prefix'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_key_prefix: '/'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.s3_key_prefix is none
-
- - name: 'Remove S3 prefix (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_key_prefix: '/'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.s3_key_prefix is none
-
- # ============================================================
-
- - name: 'Add Tag (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- tags:
- tag1: Value1
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Add Tag'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- tags:
- tag1: Value1
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.tags | length == 1
- - '("tag1" in output.trail.tags) and (output.trail.tags["tag1"] == "Value1")'
-
- - name: 'Add Tag (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- tags:
- tag1: Value1
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.tags | length == 1
- - '("tag1" in output.trail.tags) and (output.trail.tags["tag1"] == "Value1")'
-
- - name: 'Change tags (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- tags:
- tag2: Value2
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Change tags'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- tags:
- tag2: Value2
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.tags | length == 1
- - '("tag2" in output.trail.tags) and (output.trail.tags["tag2"] == "Value2")'
-
- - name: 'Change tags (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- tags:
- tag2: Value2
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.tags | length == 1
- - '("tag2" in output.trail.tags) and (output.trail.tags["tag2"] == "Value2")'
-
- - name: 'Change tags (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- tags:
- tag2: Value2
- Tag3: Value3
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Change tags'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- tags:
- tag2: Value2
- Tag3: Value3
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.tags | length == 2
- - '("tag2" in output.trail.tags) and (output.trail.tags["tag2"] == "Value2")'
- #- '("Tag3" in output.trail.tags) and (output.trail.tags["Tag3"] == "Value3")'
-
- - name: 'Change tags (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- tags:
- tag2: Value2
- Tag3: Value3
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.tags | length == 2
- - '("tag2" in output.trail.tags) and (output.trail.tags["tag2"] == "Value2")'
- #- '("Tag3" in output.trail.tags) and (output.trail.tags["Tag3"] == "Value3")'
-
- - name: 'Remove tags (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Remove tags'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.tags | length == 0
-
- - name: 'Remove tags (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.tags | length == 0
-
- # ============================================================
-
- - name: 'Set SNS Topic (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- sns_topic_name: '{{ sns_topic }}'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Set SNS Topic'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- sns_topic_name: '{{ sns_topic }}'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.sns_topic_name == sns_topic
-
- - name: 'Set SNS Topic (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- sns_topic_name: '{{ sns_topic }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.sns_topic_name == sns_topic
-
- - name: 'No-op update to trail'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.sns_topic_name == sns_topic
-
- - name: 'Update SNS Topic (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- sns_topic_name: '{{ sns_topic }}-2'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Update SNS Topic'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- sns_topic_name: '{{ sns_topic }}-2'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - 'output.trail.sns_topic_name == "{{ sns_topic }}-2"'
-
- - name: 'Update SNS Topic (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- sns_topic_name: '{{ sns_topic }}-2'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - 'output.trail.sns_topic_name == "{{ sns_topic }}-2"'
-
- #- name: 'Remove SNS Topic (CHECK MODE)'
- # cloudtrail:
- # state: present
- # name: '{{ cloudtrail_name }}'
- # sns_topic_name: ''
- # register: output
- # check_mode: yes
- #- assert:
- # that:
- # - output is changed
-
- #- name: 'Remove SNS Topic'
- # cloudtrail:
- # state: present
- # name: '{{ cloudtrail_name }}'
- # sns_topic_name: ''
- # register: output
- #- assert:
- # that:
- # - output is changed
- # - output.trail.name == cloudtrail_name
- # - output.trail.sns_topic_name is none
-
- #- name: 'Remove SNS Topic (no change)'
- # cloudtrail:
- # state: present
- # name: '{{ cloudtrail_name }}'
- # sns_topic_name: ''
- # register: output
- #- assert:
- # that:
- # - output is not changed
- # - output.trail.name == cloudtrail_name
- # - output.trail.sns_topic_name is none
-
-
- # ============================================================
-
- - name: 'Set CloudWatch Log Group (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
- cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Set CloudWatch Log Group'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
- cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
-
- - name: 'Set CloudWatch Log Group (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
- cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
-
- - name: 'No-op update to trail'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
-
- - name: 'Update CloudWatch Log Group (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}'
- cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
- - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
-
- - name: 'Update CloudWatch Log Group'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}'
- cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
-
- - name: 'Update CloudWatch Log Group (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}'
- cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn
- - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn
-
- #- name: 'Remove CloudWatch Log Group (CHECK MODE)'
- # cloudtrail:
- # state: present
- # name: '{{ cloudtrail_name }}'
- # cloudwatch_logs_log_group_arn: ''
- # cloudwatch_logs_role_arn: ''
- # register: output
- # check_mode: yes
- #- assert:
- # that:
- # - output is changed
- # - output.trail.name == cloudtrail_name
- # - output.trail.cloud_watch_logs_log_group_arn is none
- # - output.trail.cloud_watch_logs_role_arn is none
-
- #- name: 'Remove CloudWatch Log Group'
- # cloudtrail:
- # state: present
- # name: '{{ cloudtrail_name }}'
- # cloudwatch_logs_log_group_arn: ''
- # cloudwatch_logs_role_arn: ''
- # register: output
- #- assert:
- # that:
- # - output is changed
- # - output.trail.name == cloudtrail_name
- # - output.trail.cloud_watch_logs_log_group_arn is none
- # - output.trail.cloud_watch_logs_role_arn is none
-
- #- name: 'Remove CloudWatch Log Group (no change)'
- # cloudtrail:
- # state: present
- # name: '{{ cloudtrail_name }}'
- # cloudwatch_logs_log_group_arn: ''
- # cloudwatch_logs_role_arn: ''
- # register: output
- #- assert:
- # that:
- # - output is not changed
- # - output.trail.name == cloudtrail_name
- # - output.trail.cloud_watch_logs_log_group_arn is none
- # - output.trail.cloud_watch_logs_role_arn is none
-
- # ============================================================
-
- - name: 'Update S3 bucket (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_bucket_name: '{{ s3_bucket_name }}-2'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Update S3 bucket'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_bucket_name: '{{ s3_bucket_name }}-2'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - 'output.trail.s3_bucket_name == "{{ s3_bucket_name }}-2"'
-
- - name: 'Update S3 bucket (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_bucket_name: '{{ s3_bucket_name }}-2'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - 'output.trail.s3_bucket_name == "{{ s3_bucket_name }}-2"'
-
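-  # No explicit s3_bucket_name here: the block-level module_defaults supply
-  # the original bucket, so this reverts the change made above.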
- - name: 'Reset S3 bucket'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output.trail.name == cloudtrail_name
- - output.trail.s3_bucket_name == s3_bucket_name
-
- # ============================================================
-
- - name: 'Disable logging (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_logging: no
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Disable logging'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_logging: no
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.is_logging == False
-
- - name: 'Disable logging (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_logging: no
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.is_logging == False
-
- # Ansible Documentation lists logging as explicitly defaulting to enabled
-
- - name: 'Enable logging (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_logging: yes
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Enable logging'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_logging: yes
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.is_logging == True
-
- - name: 'Enable logging (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_logging: yes
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.is_logging == True
-
- # ============================================================
-
- - name: 'Disable global logging (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- include_global_events: no
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Disable global logging'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- include_global_events: no
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.include_global_service_events == False
-
- - name: 'Disable global logging (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- include_global_events: no
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.include_global_service_events == False
-
- # Ansible Documentation lists Global-logging as explicitly defaulting to enabled
-
- - name: 'Enable global logging (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- include_global_events: yes
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Enable global logging'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- include_global_events: yes
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.include_global_service_events == True
-
- - name: 'Enable global logging (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- include_global_events: yes
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.include_global_service_events == True
-
- # ============================================================
-
- - name: 'Enable multi-region logging (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- is_multi_region_trail: yes
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Enable multi-region logging'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- is_multi_region_trail: yes
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.is_multi_region_trail == True
-
- - name: 'Enable multi-region logging (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- is_multi_region_trail: yes
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.is_multi_region_trail == True
-
- # Ansible Documentation lists Multi-Region-logging as explicitly defaulting to disabled
-
- - name: 'Disable multi-region logging (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- is_multi_region_trail: no
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Disable multi-region logging'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- is_multi_region_trail: no
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.is_multi_region_trail == False
-
- - name: 'Disable multi-region logging (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- is_multi_region_trail: no
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.is_multi_region_trail == False
-
- # ============================================================
-
- - name: 'Enable logfile validation (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_log_file_validation: yes
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Enable logfile validation'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_log_file_validation: yes
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.log_file_validation_enabled == True
-
- - name: 'Enable logfile validation (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_log_file_validation: yes
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.log_file_validation_enabled == True
-
- - name: 'No-op update to trail'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.log_file_validation_enabled == True
-
- - name: 'Disable logfile validation (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_log_file_validation: no
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Disable logfile validation'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_log_file_validation: no
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.name == cloudtrail_name
- - output.trail.log_file_validation_enabled == False
-
- - name: 'Disable logfile validation (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- enable_log_file_validation: no
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.name == cloudtrail_name
- - output.trail.log_file_validation_enabled == False
-
- # ============================================================
-
- - name: 'Enable logging encryption (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- kms_key_id: '{{ kms_key.key_arn }}'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Enable logging encryption'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- kms_key_id: '{{ kms_key.key_arn }}'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.kms_key_id == kms_key.key_arn
-
- - name: 'Enable logging encryption (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- kms_key_id: '{{ kms_key.key_arn }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.kms_key_id == kms_key.key_arn
-
- - name: 'No-op update to trail'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.kms_key_id == kms_key.key_arn
-
- - name: 'Update logging encryption key (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- kms_key_id: '{{ kms_key2.key_arn }}'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Update logging encryption key'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- kms_key_id: '{{ kms_key2.key_arn }}'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.kms_key_id == kms_key2.key_arn
-
- - name: 'Update logging encryption key (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- kms_key_id: '{{ kms_key2.key_arn }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.trail.kms_key_id == kms_key2.key_arn
-
- - name: 'Update logging encryption to alias (CHECK MODE)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- kms_key_id: 'alias/{{ kms_alias }}'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Update logging encryption to alias'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- kms_key_id: 'alias/{{ kms_alias }}'
- register: output
- - assert:
- that:
- - output is changed
- - output.trail.kms_key_id == kms_key.key_arn
-
- - name: 'Update logging encryption to alias (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- kms_key_id: 'alias/{{ kms_alias }}'
- register: output
- - assert:
- that:
- # - output is not changed
- - output.trail.kms_key_id == kms_key.key_arn
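-      # The "is not changed" assertion above is disabled because changed is
-      # incorrectly reported when a KMS alias is used (see "Possible Bugs").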
-
- #- name: 'Disable logging encryption (CHECK MODE)'
- # cloudtrail:
- # state: present
- # name: '{{ cloudtrail_name }}'
- # kms_key_id: ''
- # register: output
- # check_mode: yes
- #- assert:
- # that:
- # - output is changed
-
- #- name: 'Disable logging encryption'
- # cloudtrail:
- # state: present
- # name: '{{ cloudtrail_name }}'
- # kms_key_id: ''
- # register: output
- #- assert:
- # that:
- # - output.trail.kms_key_id == None
- # - output is changed
-
- #- name: 'Disable logging encryption (no change)'
- # cloudtrail:
- # state: present
- # name: '{{ cloudtrail_name }}'
- # kms_key_id: ''
- # register: output
- #- assert:
- # that:
- # - output.kms_key_id == None
- # - output is not changed
-
- # ============================================================
-
- - name: 'Delete a trail without providing bucket_name (CHECK MODE)'
- module_defaults: { cloudtrail: {} }
- cloudtrail:
- state: absent
- name: '{{ cloudtrail_name }}'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Delete a trail while providing bucket_name (CHECK MODE)'
- cloudtrail:
- state: absent
- name: '{{ cloudtrail_name }}'
- register: output
- check_mode: yes
- - assert:
- that:
- - output is changed
-
- - name: 'Delete a trail'
- cloudtrail:
- state: absent
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output is changed
- - output.exists == False
-
- - name: 'Delete a non-existent trail'
- cloudtrail:
- state: absent
- name: '{{ cloudtrail_name }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.exists == False
-
- # ============================================================
-
- - name: 'Test creation of a complex Trail (all features)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_key_prefix: '{{ cloudtrail_prefix }}'
- sns_topic_name: '{{ sns_topic }}'
- cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
- cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
- is_multi_region_trail: yes
- include_global_events: yes
- enable_log_file_validation: yes
- kms_key_id: '{{ kms_key.key_arn }}'
- register: output
- - assert:
- that:
- - output is changed
- #- output.exists == True
- - output.trail.name == cloudtrail_name
- - output.trail.home_region == aws_region
- - output.trail.include_global_service_events == True
- - output.trail.is_multi_region_trail == True
- - output.trail.is_logging == True
- - output.trail.log_file_validation_enabled == True
- - output.trail.s3_bucket_name == s3_bucket_name
- - output.trail.s3_key_prefix == cloudtrail_prefix
- - output.trail.kms_key_id == kms_key.key_arn
- - output.trail.sns_topic_arn == output_sns_topic.sns_arn
- - output.trail.sns_topic_name == sns_topic
- - output.trail.tags | length == 0
-
- - name: 'Test creation of a complex Trail (no change)'
- cloudtrail:
- state: present
- name: '{{ cloudtrail_name }}'
- s3_key_prefix: '{{ cloudtrail_prefix }}'
- sns_topic_name: '{{ sns_topic }}'
- cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}'
- cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}'
- is_multi_region_trail: yes
- include_global_events: yes
- enable_log_file_validation: yes
- kms_key_id: '{{ kms_key.key_arn }}'
- register: output
- - assert:
- that:
- - output is not changed
- - output.exists == True
- - output.trail.name == cloudtrail_name
- - output.trail.home_region == aws_region
- - output.trail.include_global_service_events == True
- - output.trail.is_multi_region_trail == True
- - output.trail.is_logging == True
- - output.trail.log_file_validation_enabled == True
- - output.trail.s3_bucket_name == s3_bucket_name
- - output.trail.s3_key_prefix == cloudtrail_prefix
- - output.trail.kms_key_id == kms_key.key_arn
- - output.trail.sns_topic_arn == output_sns_topic.sns_arn
- - output.trail.sns_topic_name == sns_topic
- - output.trail.tags | length == 0
-
- always:
- # ============================================================
- # Cleanup
- # ============================================================
- - name: 'Delete test trail'
- cloudtrail:
- state: absent
- name: '{{ cloudtrail_name }}'
- ignore_errors: yes
- - name: 'Delete S3 bucket'
- s3_bucket:
- state: absent
- name: '{{ s3_bucket_name }}'
- force: yes
- ignore_errors: yes
- - name: 'Delete second S3 bucket'
- s3_bucket:
- state: absent
- name: '{{ s3_bucket_name }}-2'
- force: yes
- ignore_errors: yes
- - name: 'Delete KMS Key'
- aws_kms:
- state: absent
- alias: '{{ kms_alias }}'
- ignore_errors: yes
- - name: 'Delete second KMS Key'
- aws_kms:
- state: absent
- alias: '{{ kms_alias }}-2'
- ignore_errors: yes
- - name: 'Delete SNS Topic'
- sns_topic:
- state: absent
- name: '{{ sns_topic }}'
- ignore_errors: yes
- - name: 'Delete second SNS Topic'
- sns_topic:
- state: absent
- name: '{{ sns_topic }}-2'
- ignore_errors: yes
- - name: 'Delete CloudWatch Log Group'
- cloudwatchlogs_log_group:
- state: absent
- log_group_name: '{{ cloudwatch_log_group }}'
- ignore_errors: yes
- - name: 'Delete second CloudWatch Log Group'
- cloudwatchlogs_log_group:
- state: absent
- log_group_name: '{{ cloudwatch_log_group }}-2'
- ignore_errors: yes
-  - name: 'Remove inline policy from CloudWatch Role'
- iam_policy:
- state: absent
- iam_type: role
- iam_name: '{{ cloudwatch_role }}'
- policy_name: 'CloudWatch'
- ignore_errors: yes
- - name: 'Delete CloudWatch IAM Role'
- iam_role:
- state: absent
- name: '{{ cloudwatch_role }}'
- ignore_errors: yes
diff --git a/test/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2 b/test/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2
deleted file mode 100644
index 6d7fb7b889..0000000000
--- a/test/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "AssumeFromCloudTrails",
- "Effect": "Allow",
- "Principal": {
- "Service": "cloudtrail.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/cloudtrail/templates/cloudwatch-policy.j2 b/test/integration/targets/cloudtrail/templates/cloudwatch-policy.j2
deleted file mode 100644
index 8f354a7028..0000000000
--- a/test/integration/targets/cloudtrail/templates/cloudwatch-policy.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "CloudTrail2CloudWatch",
- "Effect": "Allow",
- "Action": [
- "logs:CreateLogStream",
- "logs:PutLogEvents"
- ],
- "Resource": [
- "arn:aws:logs:{{ aws_region }}:{{ aws_caller_info.account }}:log-group:{{ cloudwatch_log_group }}:log-stream:*",
- "arn:aws:logs:{{ aws_region }}:{{ aws_caller_info.account }}:log-group:{{ cloudwatch_log_group }}-2:log-stream:*"
- ]
- }
- ]
-}
diff --git a/test/integration/targets/cloudtrail/templates/kms-policy.j2 b/test/integration/targets/cloudtrail/templates/kms-policy.j2
deleted file mode 100644
index 35730f1d2f..0000000000
--- a/test/integration/targets/cloudtrail/templates/kms-policy.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-{
- "Version": "2012-10-17",
- "Id": "CloudTrailPolicy",
- "Statement": [
- {
- "Sid": "EncryptLogs",
- "Effect": "Allow",
- "Principal": { "Service": "cloudtrail.amazonaws.com" },
- "Action": "kms:GenerateDataKey*",
- "Resource": "*",
- "Condition": {
- "StringLike": {
- "kms:EncryptionContext:aws:cloudtrail:arn": [
- "arn:aws:cloudtrail:*:{{ aws_caller_info.account }}:trail/{{ resource_prefix }}*"
- ]
- }
- }
- },
- {
- "Sid": "DescribeKey",
- "Effect": "Allow",
- "Principal": { "Service": "cloudtrail.amazonaws.com" },
- "Action": "kms:DescribeKey",
- "Resource": "*"
- },
- {
- "Sid": "AnsibleTestManage",
- "Effect": "Allow",
- "Principal": { "AWS": "{{ aws_caller_info.arn }}" },
- "Action": "*",
- "Resource": "*"
- }
- ]
-}
diff --git a/test/integration/targets/cloudtrail/templates/s3-policy.j2 b/test/integration/targets/cloudtrail/templates/s3-policy.j2
deleted file mode 100644
index 78c056e30b..0000000000
--- a/test/integration/targets/cloudtrail/templates/s3-policy.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "CloudTrailCheckAcl",
- "Effect": "Allow",
- "Principal": { "Service": "cloudtrail.amazonaws.com" },
- "Action": "s3:GetBucketAcl",
- "Resource": "arn:aws:s3:::{{ bucket_name }}",
- },
- {
- "Sid": "CloudTrailWriteLogs",
- "Effect": "Allow",
- "Principal": { "Service": "cloudtrail.amazonaws.com" },
- "Action": "s3:PutObject",
- "Resource": [
- "arn:aws:s3:::{{ bucket_name }}/AWSLogs/{{ aws_caller_info.account }}/*",
- "arn:aws:s3:::{{ bucket_name }}/{{ cloudtrail_prefix }}*/AWSLogs/{{ aws_caller_info.account }}/*"
- ],
- "Condition": {
- "StringEquals": {
- "s3:x-amz-acl": "bucket-owner-full-control"
- }
- }
- },
- {
- "Sid": "AnsibleTestManage",
- "Effect": "Allow",
- "Principal": { "AWS": "{{ aws_caller_info.arn }}" },
- "Action": "*",
- "Resource": "arn:aws:s3:::{{ bucket_name }}"
- }
- ]
-}
diff --git a/test/integration/targets/cloudtrail/templates/sns-policy.j2 b/test/integration/targets/cloudtrail/templates/sns-policy.j2
deleted file mode 100644
index 3c267b8004..0000000000
--- a/test/integration/targets/cloudtrail/templates/sns-policy.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-{
- "Version": "2008-10-17",
- "Id": "AnsibleSNSTesting",
- "Statement": [
- {
- "Sid": "CloudTrailSNSPolicy",
- "Effect": "Allow",
- "Principal": {
- "Service": "cloudtrail.amazonaws.com"
- },
- "Action": "sns:Publish",
- "Resource": "arn:aws:sns:{{ aws_region }}:{{ aws_caller_info.account }}:{{ sns_topic_name }}"
- },
- {
- "Sid": "AnsibleTestManage",
- "Effect": "Allow",
- "Principal": {
- "AWS": "{{ aws_caller_info.arn }}"
- },
- "Action": [
- "sns:Subscribe",
- "sns:ListSubscriptionsByTopic",
- "sns:DeleteTopic",
- "sns:GetTopicAttributes",
- "sns:Publish",
- "sns:RemovePermission",
- "sns:AddPermission",
- "sns:Receive",
- "sns:SetTopicAttributes"
- ],
- "Resource": "arn:aws:sns:{{ aws_region }}:{{ aws_caller_info.account }}:{{ sns_topic_name }}"
- }
- ]
-}
diff --git a/test/integration/targets/cloudwatchlogs/aliases b/test/integration/targets/cloudwatchlogs/aliases
deleted file mode 100644
index 500ca9a8ac..0000000000
--- a/test/integration/targets/cloudwatchlogs/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/aws
-shippable/aws/group1
-cloudwatchlogs_log_group
-cloudwatchlogs_log_group_metric_filter
diff --git a/test/integration/targets/cloudwatchlogs/defaults/main.yml b/test/integration/targets/cloudwatchlogs/defaults/main.yml
deleted file mode 100644
index c6db709f32..0000000000
--- a/test/integration/targets/cloudwatchlogs/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-log_group_name: '{{ resource_prefix }}/integrationtest'
-filter_name: '{{ resource_prefix }}/AnsibleTest' \ No newline at end of file
diff --git a/test/integration/targets/cloudwatchlogs/tasks/main.yml b/test/integration/targets/cloudwatchlogs/tasks/main.yml
deleted file mode 100644
index a36c664342..0000000000
--- a/test/integration/targets/cloudwatchlogs/tasks/main.yml
+++ /dev/null
@@ -1,157 +0,0 @@
----
-
-- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
-
- block:
- - name: create cloudwatch log group for integration test
- cloudwatchlogs_log_group:
- state: present
- log_group_name: '{{ log_group_name }}'
- retention: 1
-
- - name: check_mode set metric filter on '{{ log_group_name }}'
- cloudwatchlogs_log_group_metric_filter:
- log_group_name: '{{ log_group_name }}'
- filter_name: '{{ filter_name }}'
- filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
- state: present
- metric_transformation:
- metric_name: box_free_space
- metric_namespace: fluentd_metrics
- metric_value: "$.value"
- check_mode: yes
- register: out
-
- - name: check_mode state must be changed
- assert:
- that:
- - out is changed
- - out.metric_filters | count == 1
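-        # Even in check mode the module returns the metric_filters it would
-        # create, which is why the count assertion holds here.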
-
- - name: set metric filter on '{{ log_group_name }}'
- cloudwatchlogs_log_group_metric_filter:
- log_group_name: '{{ log_group_name }}'
- filter_name: '{{ filter_name }}'
- filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
- state: present
- metric_transformation:
- metric_name: box_free_space
- metric_namespace: fluentd_metrics
- metric_value: "$.value"
- register: out
-
- - name: create metric filter
- assert:
- that:
- - out is changed
- - out.metric_filters | count == 1
-
- - name: re-set metric filter on '{{ log_group_name }}'
- cloudwatchlogs_log_group_metric_filter:
- log_group_name: '{{ log_group_name }}'
- filter_name: '{{ filter_name }}'
- filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
- state: present
- metric_transformation:
- metric_name: box_free_space
- metric_namespace: fluentd_metrics
- metric_value: "$.value"
- register: out
-
- - name: metric filter must not change
- assert:
- that:
- - out is not changed
-
- - name: update metric transformation on '{{ log_group_name }}'
- cloudwatchlogs_log_group_metric_filter:
- log_group_name: '{{ log_group_name }}'
- filter_name: '{{ filter_name }}'
- filter_pattern: '{ ($.value = *) && ($.hostname = "box")}'
- state: present
- metric_transformation:
- metric_name: box_free_space
- metric_namespace: made_with_ansible
- metric_value: "$.value"
- default_value: 3.1415
- register: out
-
- - name: update metric filter
- assert:
- that:
- - out is changed
- - out.metric_filters[0].metric_namespace == "made_with_ansible"
- - out.metric_filters[0].default_value == 3.1415
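-        # default_value is what CloudWatch publishes for log events that do
-        # not match the filter pattern.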
-
- - name: update filter_pattern on '{{ log_group_name }}'
- cloudwatchlogs_log_group_metric_filter:
- log_group_name: '{{ log_group_name }}'
- filter_name: '{{ filter_name }}'
- filter_pattern: '{ ($.value = *) && ($.hostname = "ansible")}'
- state: present
- metric_transformation:
- metric_name: box_free_space
- metric_namespace: made_with_ansible
- metric_value: "$.value"
- register: out
-
- - name: update metric filter
- assert:
- that:
- - out is changed
- - out.metric_filters[0].metric_namespace == "made_with_ansible"
-
-    - name: check_mode delete metric filter on '{{ log_group_name }}'
- cloudwatchlogs_log_group_metric_filter:
- log_group_name: '{{ log_group_name }}'
- filter_name: '{{ filter_name }}'
- state: absent
- check_mode: yes
- register: out
-
- - name: check_mode state must be changed
- assert:
- that:
- - out is changed
-
- - name: delete metric filter on '{{ log_group_name }}'
- cloudwatchlogs_log_group_metric_filter:
- log_group_name: '{{ log_group_name }}'
- filter_name: '{{ filter_name }}'
- state: absent
- register: out
-
- - name: delete metric filter
- assert:
- that:
- - out is changed
-
- - name: delete metric filter on '{{ log_group_name }}' which does not exist
- cloudwatchlogs_log_group_metric_filter:
- log_group_name: '{{ log_group_name }}'
- filter_name: '{{ filter_name }}'
- state: absent
- register: out
-
- - name: delete metric filter
- assert:
- that:
- - out is not changed
-
- always:
- - name: delete metric filter
- cloudwatchlogs_log_group_metric_filter:
- log_group_name: '{{ log_group_name }}'
- filter_name: '{{ filter_name }}'
- state: absent
-
- - name: delete cloudwatch log group for integration test
- cloudwatchlogs_log_group:
- state: absent
- log_group_name: '{{ log_group_name }}'
- ignore_errors: true
diff --git a/test/integration/targets/connection_aws_ssm/aliases b/test/integration/targets/connection_aws_ssm/aliases
deleted file mode 100644
index 0031909d6d..0000000000
--- a/test/integration/targets/connection_aws_ssm/aliases
+++ /dev/null
@@ -1,7 +0,0 @@
-cloud/aws
-destructive
-shippable/aws/group4
-non_local
-needs/root
-needs/target/connection
-disabled
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup.yml b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup.yml
deleted file mode 100644
index 7cd735b9a1..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-- hosts: localhost
- roles:
- - role: aws_ssm_integration_test_setup
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/README.md b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/README.md
deleted file mode 100644
index bc12a83e1d..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# AWS SSM Integration Test Setup
-
-## aws_ssm_integration_test_setup_teardown
-
-An Ansible role was created to perform integration tests against the aws_ssm connection plugin. The role performs the following actions.
-
-- Create AWS resources in a user-specified region.
-- Perform integration tests against the aws_ssm connection plugin.
-- Tear down/remove the AWS resources that were created for testing the plugin.
-
-### Prerequisites
-
-- Make sure the machine used for testing already has the Ansible repo with the ssm connection plugin.
-- An AWS CLI/IAM role configured on the machine, with permissions to spin up AWS resources.
-
-### Variables referred in Ansible Role
-
-The following table provides details about the variables referenced within the Ansible role.
-
-| Variable Name | Details |
-| ------ | ------ |
-| aws_region | Name of the AWS region |
-| iam_role_name | Name of the IAM role that will be attached to the newly created EC2 instance |
-| iam_policy_name | Name of the IAM policy that will be attached to the IAM role above |
-| instance_type | Instance type used for creating the EC2 instance |
-| instance_id | AWS EC2 instance ID (populated by the role) |
-| bucket_name | Name of the S3 bucket used by SSM (populated by the role) |
-
-### Example Playbook
-
-A sample playbook demonstrating usage of the role. (Make sure the respective variables are passed as parameters.)
-
-```yaml
- - hosts: localhost
- roles:
- - aws_ssm_integration_test_setup_teardown
-```
-
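-As noted in the variables table above, `instance_id` and `bucket_name` are
-populated by the role at runtime and do not need to be supplied by the caller.
-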
-#### Author's Information
-
-Krishna Nand Choudhary (krishnanandchoudhary)
-Nikhil Araga (araganik)
-Gaurav Ashtikar (gau1991)
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/defaults/main.yml b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/defaults/main.yml
deleted file mode 100644
index d6e025594f..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/defaults/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-instance_type: t2.micro
-linux_ami_name: amzn-ami-hvm-2018.03.0.20190611-x86_64-ebs
-windows_ami_name: Windows_Server-2019-English-Full-Base-2019.11.13
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/files/ec2-trust-policy.json b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/files/ec2-trust-policy.json
deleted file mode 100644
index 63d22eaecd..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/files/ec2-trust-policy.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "Version": "2008-10-17",
- "Statement": [
- {
- "Sid": "",
- "Effect": "Allow",
- "Principal": {
- "Service": "ec2.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/debian.yml b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/debian.yml
deleted file mode 100644
index b75f3ec30a..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/debian.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Download SSM plugin
- get_url:
- url: https://s3.amazonaws.com/session-manager-downloads/plugin/latest/ubuntu_64bit/session-manager-plugin.deb
- dest: /tmp/session-manager-plugin.deb
- mode: 0440
- tags: setup_infra
-
-# We are not installing the deb package here, as it ships a systemd service that fails during
-# installation in containerized environments. Since we only need the session-manager-plugin
-# executable, we extract the archive and copy the files out instead.
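-# A .deb package is an ar(1) archive containing debian-binary, control.tar.gz and
-# data.tar.gz; unpacking data.tar.gz at / lays down the plugin files directly.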
-- name: Extract SSM plugin Deb File
- shell: ar x session-manager-plugin.deb
- args:
- chdir: /tmp
- tags: setup_infra
-
-- name: Extract SSM Plugin Data Archive
- shell: tar -zxvf data.tar.gz -C /
- args:
- chdir: /tmp
- tags: setup_infra
-
-- name: Check the SSM Plugin
- shell: /usr/local/sessionmanagerplugin/bin/session-manager-plugin --version
- tags: setup_infra
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/main.yml b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/main.yml
deleted file mode 100644
index dae7e27747..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/main.yml
+++ /dev/null
@@ -1,156 +0,0 @@
----
-## Task file for setup/teardown AWS resources for aws_ssm integration testing
-- block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{aws_access_key}}"
- aws_secret_key: "{{aws_secret_key}}"
- security_token: "{{security_token}}"
- region: "{{aws_region}}"
- no_log: yes
-
- - name: AMI Lookup
- ec2_ami_info:
- owners: 'amazon'
- filters:
- name: '{{ item }}'
- <<: *aws_connection_info
- register: ec2_amis
- loop:
- - '{{ linux_ami_name }}'
- - '{{ windows_ami_name }}'
-
- - name: Set facts with latest AMIs
- vars:
- latest_linux_ami: '{{ ec2_amis.results[0].images | sort(attribute="creation_date") | last }}'
- latest_windows_ami: '{{ ec2_amis.results[1].images | sort(attribute="creation_date") | last }}'
- set_fact:
- linux_ami_id: '{{ latest_linux_ami.image_id }}'
- windows_ami_id: '{{ latest_windows_ami.image_id }}'
-
- - name: Install Session Manager Plugin for Debian/Ubuntu
- include_tasks: debian.yml
- when: ansible_distribution == "Ubuntu" or ansible_distribution == "Debian"
- register: install_plugin_debian
-
- - name: Install Session Manager Plugin for RedHat/Amazon
- include_tasks: redhat.yml
- when: ansible_distribution == "CentOS" or ansible_distribution == "RedHat" or ansible_distribution == "Amazon"
- register: install_plugin_redhat
-
- - name: Fail if the plugin was not installed
- fail:
-        msg: No Session Manager Plugin installation tasks matched this distribution
- when:
- - install_plugin_debian is skipped
- - install_plugin_redhat is skipped
-
- - name: Install Boto3
- pip:
- name: boto3
-
- - name: Install Boto
- pip:
- name: boto
-
- - name: Ensure IAM instance role exists
- iam_role:
- name: "ansible-test-{{resource_prefix}}-aws-ssm-role"
- assume_role_policy_document: "{{ lookup('file','ec2-trust-policy.json') }}"
- state: present
- create_instance_profile: yes
- managed_policy:
- - AmazonEC2RoleforSSM
- <<: *aws_connection_info
- register: role_output
-
- - name: Create S3 bucket
- s3_bucket:
- name: "{{resource_prefix}}-aws-ssm-s3"
- <<: *aws_connection_info
- register: s3_output
-
-    - name: Wait for the IAM Role to be created
- pause:
- seconds: 10
-
- - name: Create Linux EC2 instance
- ec2:
- instance_type: "{{instance_type}}"
- image: "{{linux_ami_id}}"
- wait: "yes"
- count: 1
- instance_profile_name: "{{role_output.iam_role.role_name}}"
- instance_tags:
- Name: "{{resource_prefix}}-integration-test-aws-ssm-linux"
- user_data: |
- #!/bin/sh
- sudo systemctl start amazon-ssm-agent
- state: present
- <<: *aws_connection_info
- register: linux_output
-
- - name: Create Windows EC2 instance
- ec2:
- instance_type: "{{instance_type}}"
- image: "{{windows_ami_id}}"
- wait: "yes"
- count: 1
- instance_profile_name: "{{role_output.iam_role.role_name}}"
- instance_tags:
- Name: "{{resource_prefix}}-integration-test-aws-ssm-windows"
- user_data: |
- <powershell>
- Invoke-WebRequest -Uri "https://amazon-ssm-us-east-1.s3.amazonaws.com/latest/windows_amd64/AmazonSSMAgentSetup.exe" -OutFile "C:\AmazonSSMAgentSetup.exe"
- Start-Process -FilePath C:\AmazonSSMAgentSetup.exe -ArgumentList "/S","/v","/qn" -Wait
- Restart-Service AmazonSSMAgent
- </powershell>
- state: present
- <<: *aws_connection_info
- register: windows_output
-
- - name: Wait for EC2 to be available
- wait_for_connection:
- delay: 300
-
- - name: Create Inventory file for Linux host
- template:
- dest: "{{playbook_dir}}/inventory-linux.aws_ssm"
- src: inventory-linux.aws_ssm.j2
-
- - name: Create Inventory file for Windows host
- template:
- dest: "{{playbook_dir}}/inventory-windows.aws_ssm"
- src: inventory-windows.aws_ssm.j2
-
-    - name: Create AWS Keys Environment
- template:
- dest: "{{playbook_dir}}/aws-env-vars.sh"
- src: aws-env-vars.j2
- no_log: yes
-
- always:
- - name: Create EC2 Linux vars_to_delete.yml
- template:
- dest: "{{playbook_dir}}/ec2_linux_vars_to_delete.yml"
- src: ec2_linux_vars_to_delete.yml.j2
- ignore_errors: yes
-
- - name: Create EC2 Windows vars_to_delete.yml
- template:
- dest: "{{playbook_dir}}/ec2_windows_vars_to_delete.yml"
- src: ec2_windows_vars_to_delete.yml.j2
- ignore_errors: yes
-
- - name: Create S3 vars_to_delete.yml
- template:
- dest: "{{playbook_dir}}/s3_vars_to_delete.yml"
- src: s3_vars_to_delete.yml.j2
- ignore_errors: yes
-
- - name: Create IAM Role vars_to_delete.yml
- template:
- dest: "{{playbook_dir}}/iam_role_vars_to_delete.yml"
- src: iam_role_vars_to_delete.yml.j2
- ignore_errors: yes
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/redhat.yml b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/redhat.yml
deleted file mode 100644
index 6bf73a02b7..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/tasks/redhat.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Download SSM plugin
- get_url:
- url: https://s3.amazonaws.com/session-manager-downloads/plugin/latest/linux_64bit/session-manager-plugin.rpm
- dest: /tmp/session-manager-plugin.rpm
- mode: 0440
- tags: setup_infra
-
-- name: Install SSM Plugin
- yum:
- name: /tmp/session-manager-plugin.rpm
- state: present
- tags: setup_infra
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/aws-env-vars.j2 b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/aws-env-vars.j2
deleted file mode 100644
index 1e3821ad84..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/aws-env-vars.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-export AWS_ACCESS_KEY_ID={{aws_access_key}}
-export AWS_SECRET_ACCESS_KEY={{aws_secret_key}}
-export AWS_SECURITY_TOKEN={{security_token}}
-export AWS_REGION={{aws_region}}
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_linux_vars_to_delete.yml.j2 b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_linux_vars_to_delete.yml.j2
deleted file mode 100644
index 8af1e3b514..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_linux_vars_to_delete.yml.j2
+++ /dev/null
@@ -1,2 +0,0 @@
----
-linux_instance_id: {{linux_output.instance_ids[0]}}
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_windows_vars_to_delete.yml.j2 b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_windows_vars_to_delete.yml.j2
deleted file mode 100644
index d216f37225..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/ec2_windows_vars_to_delete.yml.j2
+++ /dev/null
@@ -1,2 +0,0 @@
----
-windows_instance_id: {{windows_output.instance_ids[0]}}
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/iam_role_vars_to_delete.yml.j2 b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/iam_role_vars_to_delete.yml.j2
deleted file mode 100644
index 0d87d3ed6f..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/iam_role_vars_to_delete.yml.j2
+++ /dev/null
@@ -1,2 +0,0 @@
----
-iam_role_name: {{role_output.iam_role.role_name}}
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-linux.aws_ssm.j2 b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-linux.aws_ssm.j2
deleted file mode 100644
index 7e97e5f830..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-linux.aws_ssm.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-[aws_ssm]
-{{linux_output.instance_ids[0]}} ansible_aws_ssm_instance_id={{linux_output.instance_ids[0]}} ansible_aws_ssm_region={{aws_region}}
-
-[aws_ssm:vars]
-ansible_connection=aws_ssm
-ansible_aws_ssm_bucket_name={{s3_output.name}}
-ansible_aws_ssm_plugin=/usr/local/sessionmanagerplugin/bin/session-manager-plugin
-ansible_python_interpreter=/usr/bin/env python
-
-# support tests that target testhost
-[testhost:children]
-aws_ssm
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-windows.aws_ssm.j2 b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-windows.aws_ssm.j2
deleted file mode 100644
index 0b6a28c8a9..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/inventory-windows.aws_ssm.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-[aws_ssm]
-{{windows_output.instance_ids[0]}} ansible_aws_ssm_instance_id={{windows_output.instance_ids[0]}} ansible_aws_ssm_region={{aws_region}}
-
-[aws_ssm:vars]
-ansible_shell_type=powershell
-ansible_connection=aws_ssm
-ansible_aws_ssm_bucket_name={{s3_output.name}}
-ansible_aws_ssm_plugin=/usr/local/sessionmanagerplugin/bin/session-manager-plugin
-
-# support tests that target testhost
-[testhost:children]
-aws_ssm
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/s3_vars_to_delete.yml.j2 b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/s3_vars_to_delete.yml.j2
deleted file mode 100644
index 3839fb3c6e..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_setup/templates/s3_vars_to_delete.yml.j2
+++ /dev/null
@@ -1,2 +0,0 @@
----
-bucket_name: {{s3_output.name}}
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown.yml b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown.yml
deleted file mode 100644
index 13c62c1f90..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-- hosts: localhost
- roles:
- - role: aws_ssm_integration_test_teardown
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/README.md b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/README.md
deleted file mode 100644
index bc12a83e1d..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# AWS SSM Integration Test Setup and Teardown
-
-## aws_ssm_integration_test_setup_teardown
-
-An Ansible role was created to run integration tests against the aws_ssm connection plugin. The role performs the following actions.
-
-- Create AWS resources in a user-specified region.
-- Run integration tests against the aws_ssm connection plugin.
-- Tear down/remove the AWS resources that were created for testing the plugin.
-
-### Prerequisites
-
-- Make sure the machine used for testing already has the Ansible repo with the ssm connection plugin.
-- An AWS CLI profile or IAM role configured on the machine, with permissions to spin up AWS resources.
-
-### Variables referred in Ansible Role
-
-The following table provides details about the variables referenced within the Ansible role.
-
-| Variable Name | Details |
-| ------ | ------ |
-| aws_region | Name of the AWS region |
-| iam_role_name | Name of the IAM role which will be attached to the newly-created EC2 instance |
-| iam_policy_name | Name of the IAM policy which will be attached to the IAM role referred to above |
-| instance_type | Instance type used for creating the EC2 instance |
-| instance_id | AWS EC2 instance ID (this gets populated by the role) |
-| bucket_name | Name of the S3 bucket used by SSM (this gets populated by the role) |
-
-### Example Playbook
-
-A sample playbook demonstrating usage of the role. (Make sure the respective variables are passed as parameters; a fuller invocation sketch follows below.)
-
-```yaml
- - hosts: localhost
- roles:
- - aws_ssm_integration_test_setup_teardown
-```
-
-#### Authors' Information
-
-Krishna Nand Choudhary (krishnanandchoudhary)
-Nikhil Araga (araganik)
-Gaurav Ashtikar (gau1991)
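As a fuller illustration of the parameter-passing note above, a minimal sketch of invoking the role with the variables from the table (all values here are hypothetical placeholders, not defaults shipped with the role):

```yaml
- hosts: localhost
  roles:
    - role: aws_ssm_integration_test_setup_teardown
      vars:
        aws_region: us-east-1              # hypothetical region
        iam_role_name: ansible-ssm-test-role
        iam_policy_name: ansible-ssm-test-policy
        instance_type: t3.micro
        # instance_id and bucket_name are populated by the role itself
        # and do not need to be supplied.
```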
diff --git a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/tasks/main.yml b/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/tasks/main.yml
deleted file mode 100644
index 7993733bbd..0000000000
--- a/test/integration/targets/connection_aws_ssm/aws_ssm_integration_test_teardown/tasks/main.yml
+++ /dev/null
@@ -1,85 +0,0 @@
----
-- name: Set up AWS connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{aws_access_key}}"
- aws_secret_key: "{{aws_secret_key}}"
- region: "{{aws_region}}"
- security_token: "{{security_token}}"
- no_log: true
-
-- name: Check if ec2_linux_vars_to_delete.yml is present
- stat:
- path: "{{playbook_dir}}/ec2_linux_vars_to_delete.yml"
- register: ec2_linux_vars_file
-
-- name: Include variable file to delete EC2 Linux infra
- include_vars: "{{playbook_dir}}/ec2_linux_vars_to_delete.yml"
- when: ec2_linux_vars_file.stat.exists
-
-- name: Check if ec2_windows_vars_to_delete.yml is present
- stat:
- path: "{{playbook_dir}}/ec2_windows_vars_to_delete.yml"
- register: ec2_windows_vars_file
-
-- name: Include variable file to delete EC2 Windows infra
- include_vars: "{{playbook_dir}}/ec2_windows_vars_to_delete.yml"
- when: ec2_windows_vars_file.stat.exists
-
-- name: Check if s3_vars_to_delete.yml is present
- stat:
- path: "{{playbook_dir}}/s3_vars_to_delete.yml"
- register: s3_vars_file
-
-- name: Include variable file to delete S3 infra
- include_vars: "{{playbook_dir}}/s3_vars_to_delete.yml"
- when: s3_vars_file.stat.exists
-
-- name: Check if iam_role_vars_to_delete.yml is present
- stat:
- path: "{{playbook_dir}}/iam_role_vars_to_delete.yml"
- register: iam_role_vars_file
-
-- name: Include variable file to delete IAM Role infra
- include_vars: "{{playbook_dir}}/iam_role_vars_to_delete.yml"
- when: iam_role_vars_file.stat.exists
-
-- name: Terminate Windows EC2 instances that were previously launched
- ec2:
- instance_ids:
- - "{{windows_instance_id}}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
- when: ec2_windows_vars_file.stat.exists
-
-- name: Terminate Linux EC2 instances that were previously launched
- ec2:
- instance_ids:
- - "{{linux_instance_id}}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
- when: ec2_linux_vars_file.stat.exists
-
-- name: Delete S3 bucket
- aws_s3:
- bucket: "{{bucket_name}}"
- mode: delete
- <<: *aws_connection_info
- ignore_errors: yes
- when: s3_vars_file.stat.exists
-
-- name: Delete IAM role
- iam_role:
- name: "{{iam_role_name}}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
- when: iam_role_vars_file.stat.exists
-
-- name: Delete AWS keys environment file
- file:
- path: "{{playbook_dir}}/aws-env-vars.sh"
- state: absent
- ignore_errors: yes
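For readers unfamiliar with the `&aws_connection_info` / `<<: *aws_connection_info` pattern used throughout these tasks: the mapping stored by the first `set_fact` task is a YAML anchor, and the merge key splices it into each task's arguments at parse time. A minimal sketch of what one task above is equivalent to once the merge is expanded by hand (no new behaviour, just the inlined form):

```yaml
- name: Delete IAM role (merge key expanded by hand)
  iam_role:
    name: "{{ iam_role_name }}"
    state: absent
    # These four parameters are exactly what *aws_connection_info carries.
    aws_access_key: "{{ aws_access_key }}"
    aws_secret_key: "{{ aws_secret_key }}"
    region: "{{ aws_region }}"
    security_token: "{{ security_token }}"
  ignore_errors: yes
  when: iam_role_vars_file.stat.exists
```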
diff --git a/test/integration/targets/connection_aws_ssm/inventory.aws_ssm.template b/test/integration/targets/connection_aws_ssm/inventory.aws_ssm.template
deleted file mode 100644
index afbee1aeec..0000000000
--- a/test/integration/targets/connection_aws_ssm/inventory.aws_ssm.template
+++ /dev/null
@@ -1,10 +0,0 @@
-[aws_ssm]
-@NAME ansible_aws_ssm_instance_id=@HOST ansible_aws_ssm_region=@AWS_REGION
-
-[aws_ssm:vars]
-ansible_connection=aws_ssm
-ansible_aws_ssm_bucket_name=@S3_BUCKET
-
-# support tests that target testhost
-[testhost:children]
-aws_ssm
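For reference, a rendered copy of this template might look like the following; the host alias, instance ID, region, and bucket name are illustrative placeholders substituted for @NAME, @HOST, @AWS_REGION, and @S3_BUCKET:

```ini
[aws_ssm]
ssm-test-host ansible_aws_ssm_instance_id=i-0123456789abcdef0 ansible_aws_ssm_region=us-east-1

[aws_ssm:vars]
ansible_connection=aws_ssm
ansible_aws_ssm_bucket_name=example-ssm-test-bucket

# support tests that target testhost
[testhost:children]
aws_ssm
```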
diff --git a/test/integration/targets/connection_aws_ssm/runme.sh b/test/integration/targets/connection_aws_ssm/runme.sh
deleted file mode 100755
index 1d9b38733d..0000000000
--- a/test/integration/targets/connection_aws_ssm/runme.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-CMD_ARGS=("$@")
-
-# Destroy Environment
-cleanup() {
-
- cd ../connection_aws_ssm
-
- ansible-playbook -c local aws_ssm_integration_test_teardown.yml "${CMD_ARGS[@]}"
-
-}
-
-trap "cleanup" EXIT
-
-# Setup Environment
-ansible-playbook -c local aws_ssm_integration_test_setup.yml "$@"
-
-# Export the AWS Keys
-set +x
-. ./aws-env-vars.sh
-set -x
-
-cd ../connection
-
-# Execute Integration tests for Linux
-INVENTORY=../connection_aws_ssm/inventory-linux.aws_ssm ./test.sh \
- -e target_hosts=aws_ssm \
- -e local_tmp=/tmp/ansible-local \
- -e remote_tmp=/tmp/ansible-remote \
- -e action_prefix= \
- "$@"
-
-# Execute Integration tests for Windows
-INVENTORY=../connection_aws_ssm/inventory-windows.aws_ssm ./test.sh \
- -e target_hosts=aws_ssm \
- -e local_tmp=/tmp/ansible-local \
- -e remote_tmp=c:/windows/temp/ansible-remote \
- -e action_prefix=win_ \
- "$@"
diff --git a/test/integration/targets/dms_endpoint/aliases b/test/integration/targets/dms_endpoint/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/dms_endpoint/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/dms_endpoint/tasks/main.yml b/test/integration/targets/dms_endpoint/tasks/main.yml
deleted file mode 100644
index 8aee0fb829..0000000000
--- a/test/integration/targets/dms_endpoint/tasks/main.yml
+++ /dev/null
@@ -1,136 +0,0 @@
----
-
-- name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- region: "{{ aws_region }}"
- dms_identifier: "{{ resource_prefix }}-dms"
- no_log: yes
-
-- block:
- - name: create endpoints
- dms_endpoint:
- state: present
- endpointidentifier: "{{ dms_identifier }}"
- endpointtype: source
- enginename: aurora
- username: testing
- password: testint1234
- servername: "{{ resource_prefix }}.exampledomain.com"
- port: 3306
- databasename: 'testdb'
- sslmode: none
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is changed
- - result is not failed
-
- - name: create endpoints no change
- dms_endpoint:
- state: present
- endpointidentifier: "{{ dms_identifier }}"
- endpointtype: source
- enginename: aurora
- username: testing
- password: testint1234
- servername: "{{ resource_prefix }}.exampledomain.com"
- port: 3306
- databasename: 'testdb'
- sslmode: none
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is not changed
- - result is not failed
-
- - name: update endpoints
- dms_endpoint:
- state: present
- endpointidentifier: "{{ dms_identifier }}"
- endpointtype: source
- enginename: aurora
- username: testing
- password: testint1234
- servername: "{{ resource_prefix }}.exampledomain.com"
- port: 3306
- databasename: 'testdb2'
- sslmode: none
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is changed
- - result is not failed
-
- - name: update endpoints no change
- dms_endpoint:
- state: present
- endpointidentifier: "{{ dms_identifier }}"
- endpointtype: source
- enginename: aurora
- username: testing
- password: testint1234
- servername: "{{ resource_prefix }}.exampledomain.com"
- port: 3306
- databasename: 'testdb2'
- sslmode: none
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is not changed
- - result is not failed
-
- always:
- - name: delete endpoints
- dms_endpoint:
- state: absent
- endpointidentifier: "{{ dms_identifier }}"
- endpointtype: source
- enginename: aurora
- username: testing
- password: testint1234
- servername: "{{ resource_prefix }}.exampledomain.com"
- port: 3306
- databasename: 'testdb'
- sslmode: none
- wait: True
- timeout: 60
- retries: 10
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is changed
- - result is not failed
-
- - name: delete endpoints no change
- dms_endpoint:
- state: absent
- endpointidentifier: "{{ dms_identifier }}"
- endpointtype: source
- enginename: aurora
- username: testing
- password: testint1234
- servername: "{{ resource_prefix }}.exampledomain.com"
- port: 3306
- databasename: 'testdb'
- sslmode: none
- wait: False
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is not changed
- - result is not failed \ No newline at end of file
diff --git a/test/integration/targets/dms_replication_subnet_group/aliases b/test/integration/targets/dms_replication_subnet_group/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/dms_replication_subnet_group/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/dms_replication_subnet_group/defaults/main.yml b/test/integration/targets/dms_replication_subnet_group/defaults/main.yml
deleted file mode 100644
index feed0f4ccf..0000000000
--- a/test/integration/targets/dms_replication_subnet_group/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-resource_prefix: "test_dms_sg"
-dms_role_role_name: dms-vpc-role \ No newline at end of file
diff --git a/test/integration/targets/dms_replication_subnet_group/files/dmsAssumeRolePolicyDocument.json b/test/integration/targets/dms_replication_subnet_group/files/dmsAssumeRolePolicyDocument.json
deleted file mode 100644
index 69ee87eeab..0000000000
--- a/test/integration/targets/dms_replication_subnet_group/files/dmsAssumeRolePolicyDocument.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "Service": "dms.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-} \ No newline at end of file
diff --git a/test/integration/targets/dms_replication_subnet_group/tasks/main.yml b/test/integration/targets/dms_replication_subnet_group/tasks/main.yml
deleted file mode 100644
index 16c7ddf551..0000000000
--- a/test/integration/targets/dms_replication_subnet_group/tasks/main.yml
+++ /dev/null
@@ -1,175 +0,0 @@
----
-
-- name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- region: "{{ aws_region }}"
- dms_sg_identifier: "{{ resource_prefix }}-dms"
- no_log: yes
-
-- block:
-
- - name: ensure IAM role exists
- iam_role:
- <<: *aws_connection_info
- name: "{{ dms_role_role_name }}"
- assume_role_policy_document: "{{ lookup('file','dmsAssumeRolePolicyDocument.json') }}"
- state: present
- create_instance_profile: no
- managed_policy:
- - 'arn:aws:iam::aws:policy/service-role/AmazonDMSVPCManagementRole'
- register: iam_role_output
- ignore_errors: yes
-
- - name: Create VPC for use in testing
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.22.32.0/23
- tags:
- Name: Ansible ec2_instance Testing VPC
- tenancy: default
- <<: *aws_connection_info
- register: testing_vpc
-
- - name: create subnet1
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.32.16/28
- az: eu-west-1a
- <<: *aws_connection_info
- register: subnet1
-
- - name: create subnet2
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.32.32/28
- az: eu-west-1c
- <<: *aws_connection_info
- register: subnet2
-
- - name: create replication subnet group
- dms_replication_subnet_group:
- state: present
- identifier: "{{ dms_sg_identifier }}"
- description: "Development Subnet Group"
- subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is changed
- - result is not failed
-
- - name: create subnet group no change
- dms_replication_subnet_group:
- state: present
- identifier: "{{ dms_sg_identifier }}"
- description: "Development Subnet Group"
- subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is not changed
- - result is not failed
-
- - name: update subnet group
- dms_replication_subnet_group:
- state: present
- identifier: "{{ dms_sg_identifier }}"
- description: "Development Subnet Group updated"
- subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is changed
- - result is not failed
-
- - name: update subnet group no change
- dms_replication_subnet_group:
- state: present
- identifier: "{{ dms_sg_identifier }}"
- description: "Development Subnet Group updated"
- subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is not changed
- - result is not failed
-
- always:
- - name: delete subnet group
- dms_replication_subnet_group:
- state: absent
- identifier: "{{ dms_sg_identifier }}"
- description: "Development Subnet Group updated"
- subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is changed
- - result is not failed
-
- - name: delete subnet group no change
- dms_replication_subnet_group:
- state: absent
- identifier: "{{ dms_sg_identifier }}"
- description: "Development Subnet Group updated"
- subnet_ids: [ "{{ subnet1.subnet.id }}", "{{ subnet2.subnet.id }}"]
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result is not changed
- - result is not failed
-
- - name: delete subnet1
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.32.16/28
- az: eu-west-1a
- <<: *aws_connection_info
-
- - name: delete subnet2
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.32.32/28
- az: eu-west-1c
- <<: *aws_connection_info
-
- - name: delete VPC for use in testing
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.22.32.0/23
- tags:
- Name: Ansible ec2_instance Testing VPC
- tenancy: default
- state: absent
- <<: *aws_connection_info
-
- - name: delete dms-vpc role
- iam_role:
- <<: *aws_connection_info
- name: "{{ dms_role_role_name }}"
- assume_role_policy_document: "{{ lookup('file','dmsAssumeRolePolicyDocument.json') }}"
- state: absent
- create_instance_profile: no
- managed_policy:
- - 'arn:aws:iam::aws:policy/service-role/AmazonDMSVPCManagementRole'
- register: iam_role_output
- ignore_errors: yes \ No newline at end of file
diff --git a/test/integration/targets/ec2_asg/aliases b/test/integration/targets/ec2_asg/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/ec2_asg/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/ec2_asg/defaults/main.yml b/test/integration/targets/ec2_asg/defaults/main.yml
deleted file mode 100644
index 80bf25cd00..0000000000
--- a/test/integration/targets/ec2_asg/defaults/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# defaults file for ec2_asg
-# Amazon Linux 2 AMI 2019.06.12 (HVM), GP2 Volume Type
-ec2_ami_name: 'amzn2-ami-hvm-2.0.20190612-x86_64-gp2'
diff --git a/test/integration/targets/ec2_asg/tasks/main.yml b/test/integration/targets/ec2_asg/tasks/main.yml
deleted file mode 100644
index c921ca3c54..0000000000
--- a/test/integration/targets/ec2_asg/tasks/main.yml
+++ /dev/null
@@ -1,782 +0,0 @@
----
-# tasks file for test_ec2_asg
-
-- name: Test incomplete credentials with ec2_asg
-
- block:
-
- # ============================================================
-
- - name: test invalid profile
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- region: "{{ aws_region }}"
- profile: notavalidprofile
- ignore_errors: yes
- register: result
-
- name: assert invalid profile error is reported
- assert:
- that:
- - "'The config profile (notavalidprofile) could not be found' in result.msg"
-
- - name: test partial credentials
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- region: "{{ aws_region }}"
- aws_access_key: "{{ aws_access_key }}"
- ignore_errors: yes
- register: result
-
- name: assert partial credentials error is reported
- assert:
- that:
- - "'Partial credentials found in explicit, missing: aws_secret_access_key' in result.msg"
-
- - name: test without specifying region
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- ignore_errors: yes
- register: result
-
- name: assert missing region error is reported
- assert:
- that:
- - result.msg == 'The ec2_asg module requires a region and none was found in configuration, environment variables or module parameters'
-
- # ============================================================
-
-- name: Test incomplete arguments with ec2_asg
-
- block:
-
- # ============================================================
-
- - name: test without specifying required module options
- ec2_asg:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- ignore_errors: yes
- register: result
-
- - name: assert name is a required module option
- assert:
- that:
- - "result.msg == 'missing required arguments: name'"
-
-- name: Run ec2_asg integration tests.
-
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
-
- block:
-
- # ============================================================
-
- - name: Find AMI to use
- ec2_ami_info:
- owners: 'amazon'
- filters:
- name: '{{ ec2_ami_name }}'
- register: ec2_amis
- - set_fact:
- ec2_ami_image: '{{ ec2_amis.images[0].image_id }}'
-
- - name: load balancer name has to be less than 32 characters
- # the 8 digit identifier at the end of resource_prefix helps determine during which test something
- # was created
- set_fact:
- load_balancer_name: "{{ item }}-lb"
- loop: "{{ resource_prefix | regex_findall('.{8}$') }}"
-
- # Set up the testing dependencies: VPC, subnet, security group, and two launch configurations
-
- - name: Create VPC for use in testing
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.55.77.0/24
- tenancy: default
- register: testing_vpc
-
- - name: Create internet gateway for use in testing
- ec2_vpc_igw:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- state: present
- register: igw
-
- - name: Create subnet for use in testing
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.55.77.0/24
- az: "{{ aws_region }}a"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet"
- register: testing_subnet
-
- - name: create routing rules
- ec2_vpc_route_table:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- tags:
- created: "{{ resource_prefix }}-route"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- subnets:
- - "{{ testing_subnet.subnet.id }}"
-
- - name: create a security group with the vpc created in the ec2_setup
- ec2_group:
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ testing_vpc.vpc.id }}"
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- register: sg
-
- - name: ensure launch configs exist
- ec2_lc:
- name: "{{ item }}"
- assign_public_ip: true
- image_id: "{{ ec2_ami_image }}"
- user_data: |
- #cloud-config
- package_upgrade: true
- package_update: true
- packages:
- - httpd
- runcmd:
- - "service httpd start"
- security_groups: "{{ sg.group_id }}"
- instance_type: t3.micro
- loop:
- - "{{ resource_prefix }}-lc"
- - "{{ resource_prefix }}-lc-2"
-
- # ============================================================
-
- - name: launch asg and wait for instances to be deemed healthy (no ELB)
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- launch_config_name: "{{ resource_prefix }}-lc"
- desired_capacity: 1
- min_size: 1
- max_size: 1
- vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
- state: present
- wait_for_instances: yes
- register: output
-
- - assert:
- that:
- - "output.viable_instances == 1"
-
- - name: Tag asg
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- tags:
- - tag_a: 'value 1'
- propagate_at_launch: no
- - tag_b: 'value 2'
- propagate_at_launch: yes
- register: output
-
- - assert:
- that:
- - "output.tags | length == 2"
- - output is changed
-
- - name: Re-Tag asg (different order)
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- tags:
- - tag_b: 'value 2'
- propagate_at_launch: yes
- - tag_a: 'value 1'
- propagate_at_launch: no
- register: output
-
- - assert:
- that:
- - "output.tags | length == 2"
- - output is not changed
-
- - name: Re-Tag asg new tags
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- tags:
- - tag_c: 'value 3'
- propagate_at_launch: no
- register: output
-
- - assert:
- that:
- - "output.tags | length == 1"
- - output is changed
-
- - name: Re-Tag asg update propagate_at_launch
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- tags:
- - tag_c: 'value 3'
- propagate_at_launch: yes
- register: output
-
- - assert:
- that:
- - "output.tags | length == 1"
- - output is changed
-
- - name: Enable metrics collection
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- metrics_collection: yes
- register: output
-
- - assert:
- that:
- - output is changed
-
- - name: Enable metrics collection (check idempotency)
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- metrics_collection: yes
- register: output
-
- - assert:
- that:
- - output is not changed
-
- - name: Disable metrics collection
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- metrics_collection: no
- register: output
-
- - assert:
- that:
- - output is changed
-
- - name: Disable metrics collection (check idempotency)
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- metrics_collection: no
- register: output
-
- - assert:
- that:
- - output is not changed
-
- # - name: pause for a bit to make sure that the group can't be trivially deleted
- # pause: seconds=30
- - name: kill asg
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- state: absent
- wait_timeout: 800
- async: 400
-
- # ============================================================
-
- - name: launch asg and do not wait for instances to be deemed healthy (no ELB)
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- launch_config_name: "{{ resource_prefix }}-lc"
- desired_capacity: 1
- min_size: 1
- max_size: 1
- vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
- wait_for_instances: no
- state: present
- register: output
-
- - assert:
- that:
- - "output.viable_instances == 0"
-
- - name: kill asg
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- state: absent
- wait_timeout: 800
- register: output
- retries: 3
- until: output is succeeded
- delay: 10
- async: 400
-
- # ============================================================
-
- - name: create asg with asg metrics enabled
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- metrics_collection: true
- launch_config_name: "{{ resource_prefix }}-lc"
- desired_capacity: 0
- min_size: 0
- max_size: 0
- vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
- state: present
- register: output
-
- - assert:
- that:
- - "'Group' in output.metrics_collection.0.Metric"
-
- - name: kill asg
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- state: absent
- wait_timeout: 800
- async: 400
-
- # ============================================================
-
- - name: launch load balancer
- ec2_elb_lb:
- name: "{{ load_balancer_name }}"
- state: present
- security_group_ids:
- - "{{ sg.group_id }}"
- subnets: "{{ testing_subnet.subnet.id }}"
- connection_draining_timeout: 60
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- health_check:
- ping_protocol: tcp
- ping_port: 80
- ping_path: "/"
- response_timeout: 5
- interval: 10
- unhealthy_threshold: 4
- healthy_threshold: 2
- register: load_balancer
-
- - name: launch asg and wait for instances to be deemed healthy (ELB)
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- launch_config_name: "{{ resource_prefix }}-lc"
- health_check_type: ELB
- desired_capacity: 1
- min_size: 1
- max_size: 1
- health_check_period: 300
- vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
- load_balancers: "{{ load_balancer_name }}"
- wait_for_instances: yes
- wait_timeout: 900
- state: present
- register: output
-
- - assert:
- that:
- - "output.viable_instances == 1"
-
- # ============================================================
-
- # grow scaling group to 3
-
- - name: add 2 more instances wait for instances to be deemed healthy (ELB)
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- launch_config_name: "{{ resource_prefix }}-lc"
- health_check_type: ELB
- desired_capacity: 3
- min_size: 3
- max_size: 5
- health_check_period: 600
- vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
- load_balancers: "{{ load_balancer_name }}"
- wait_for_instances: yes
- wait_timeout: 1200
- state: present
- register: output
-
- - assert:
- that:
- - "output.viable_instances == 3"
-
- # ============================================================
-
- # Test max_instance_lifetime option
-
- - name: enable asg max_instance_lifetime
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- max_instance_lifetime: 604801
- register: output
-
- - name: ensure max_instance_lifetime is set
- assert:
- that:
- - output.max_instance_lifetime == 604801
-
- - name: run without max_instance_lifetime
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- launch_config_name: "{{ resource_prefix }}-lc"
- register: output
-
- - name: ensure max_instance_lifetime not affected by defaults
- assert:
- that:
- - output.max_instance_lifetime == 604801
-
- - name: disable asg max_instance_lifetime
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- launch_config_name: "{{ resource_prefix }}-lc"
- max_instance_lifetime: 0
- register: output
-
- - name: ensure max_instance_lifetime is not set
- assert:
- that:
- - not output.max_instance_lifetime
-
- # ============================================================
-
- # perform rolling replace with a different launch configuration
-
- - name: perform rolling update to new AMI
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- launch_config_name: "{{ resource_prefix }}-lc-2"
- health_check_type: ELB
- desired_capacity: 3
- min_size: 1
- max_size: 5
- health_check_period: 900
- load_balancers: "{{ load_balancer_name }}"
- vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
- wait_for_instances: yes
- replace_all_instances: yes
- wait_timeout: 1800
- state: present
- register: output
-
- # ensure that all instances have new launch config
- - assert:
- that:
- - "item.value.launch_config_name == '{{ resource_prefix }}-lc-2'"
- loop: "{{ output.instance_facts | dict2items }}"
-
- # assert they are all healthy and that the rolling update resulted in the appropriate number of instances
- - assert:
- that:
- - "output.viable_instances == 3"
-
- # ============================================================
-
- # perform rolling replace with the original launch configuration
-
- - name: perform rolling update to new AMI while removing the load balancer
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- launch_config_name: "{{ resource_prefix }}-lc"
- health_check_type: EC2
- desired_capacity: 3
- min_size: 1
- max_size: 5
- health_check_period: 900
- load_balancers: []
- vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
- wait_for_instances: yes
- replace_all_instances: yes
- wait_timeout: 1800
- state: present
- register: output
-
- # ensure that all instances have new launch config
- - assert:
- that:
- - "item.value.launch_config_name == '{{ resource_prefix }}-lc'"
- loop: "{{ output.instance_facts | dict2items }}"
-
- # assert they are all healthy and that the rolling update resulted in the appropriate number of instances
- # there should be the same number of instances as there were before the rolling update was performed
- - assert:
- that:
- - "output.viable_instances == 3"
-
- # ============================================================
-
- # perform rolling replace with new launch configuration and lc_check:false
-
- # Note - this is done async so we can query asg_facts during
- # the execution. Issues #28087 and #35993 produce the correct
- # end result, but spin up extraneous instances during execution.
- - name: "perform rolling update to new AMI with lc_check: false"
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- launch_config_name: "{{ resource_prefix }}-lc-2"
- health_check_type: EC2
- desired_capacity: 3
- min_size: 1
- max_size: 5
- health_check_period: 900
- load_balancers: []
- vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
- wait_for_instances: yes
- replace_all_instances: yes
- replace_batch_size: 3
- lc_check: false
- wait_timeout: 1800
- state: present
- async: 1800
- poll: 0
- register: asg_job
-
- - name: get ec2_asg info for 3 minutes
- ec2_asg_info:
- name: "{{ resource_prefix }}-asg"
- register: output
- loop_control:
- pause: 15
- loop: "{{ range(12) | list }}"
-
- # Since we started with 3 servers and replaced all of them,
- # we should see 6 servers in total.
- - assert:
- that:
- - output | json_query(inst_id_json_query) | unique | length == 6
- vars:
- inst_id_json_query: results[].results[].instances[].instance_id
-
- - name: Ensure ec2_asg task completes
- async_status: jid="{{ asg_job.ansible_job_id }}"
- register: status
- until: status is finished
- retries: 200
- delay: 15
-
- # ============================================================
-
- - name: kill asg
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- state: absent
- wait_timeout: 800
- async: 400
-
- # Create new asg with replace_all_instances and lc_check:false
-
- # Note - this is done async so we can query asg_facts during
- # the execution. Issue #28087 results in the correct
- # end result, but spins up extraneous instances during execution.
- - name: "new asg with lc_check: false"
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- launch_config_name: "{{ resource_prefix }}-lc"
- health_check_type: EC2
- desired_capacity: 3
- min_size: 1
- max_size: 5
- health_check_period: 900
- load_balancers: []
- vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
- wait_for_instances: yes
- replace_all_instances: yes
- replace_batch_size: 3
- lc_check: false
- wait_timeout: 1800
- state: present
- async: 1800
- poll: 0
- register: asg_job
-
- # Collect ec2_asg_info for 3 minutes
- - name: get ec2_asg information
- ec2_asg_info:
- name: "{{ resource_prefix }}-asg"
- register: output
- loop_control:
- pause: 15
- loop: "{{ range(12) | list }}"
-
- # Get all instance_ids we saw and assert we saw the expected number.
- # Should only see 3 (we don't replace instances we just created)
- - assert:
- that:
- - output | json_query(inst_id_json_query) | unique | length == 3
- vars:
- inst_id_json_query: results[].results[].instances[].instance_id
-
- - name: Ensure ec2_asg task completes
- async_status: jid="{{ asg_job.ansible_job_id }}"
- register: status
- until: status is finished
- retries: 200
- delay: 15
-
- # we need a launch template, otherwise we cannot test the mixed instance policy
- - name: create launch template for autoscaling group to test its mixed instance policy
- ec2_launch_template:
- template_name: "{{ resource_prefix }}-lt"
- image_id: "{{ ec2_ami_image }}"
- instance_type: t3.micro
- credit_specification:
- cpu_credits: standard
- network_interfaces:
- - associate_public_ip_address: yes
- delete_on_termination: yes
- device_index: 0
- groups:
- - "{{ sg.group_id }}"
-
- - name: update autoscaling group with mixed-instance policy
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- launch_template:
- launch_template_name: "{{ resource_prefix }}-lt"
- desired_capacity: 1
- min_size: 1
- max_size: 1
- vpc_zone_identifier: "{{ testing_subnet.subnet.id }}"
- state: present
- mixed_instances_policy:
- instance_types:
- - t3.micro
- - t3a.micro
- wait_for_instances: yes
- register: output
-
- - assert:
- that:
- - "output.mixed_instances_policy | length == 2"
- - "output.mixed_instances_policy[0] == 't3.micro'"
- - "output.mixed_instances_policy[1] == 't3a.micro'"
-
-# ============================================================
-
- always:
-
- - name: kill asg
- ec2_asg:
- name: "{{ resource_prefix }}-asg"
- state: absent
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- # Remove the testing dependencies
-
- - name: remove the load balancer
- ec2_elb_lb:
- name: "{{ load_balancer_name }}"
- state: absent
- security_group_ids:
- - "{{ sg.group_id }}"
- subnets: "{{ testing_subnet.subnet.id }}"
- wait: yes
- connection_draining_timeout: 60
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- health_check:
- ping_protocol: tcp
- ping_port: 80
- ping_path: "/"
- response_timeout: 5
- interval: 10
- unhealthy_threshold: 4
- healthy_threshold: 2
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: remove launch configs
- ec2_lc:
- name: "{{ resource_prefix }}-lc"
- state: absent
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
- loop:
- - "{{ resource_prefix }}-lc"
- - "{{ resource_prefix }}-lc-2"
-
- - name: delete launch template
- ec2_launch_template:
- name: "{{ resource_prefix }}-lt"
- state: absent
- register: del_lt
- retries: 10
- until: del_lt is not failed
- ignore_errors: true
-
- - name: remove the security group
- ec2_group:
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ testing_vpc.vpc.id }}"
- state: absent
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: remove routing rules
- ec2_vpc_route_table:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- tags:
- created: "{{ resource_prefix }}-route"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- subnets:
- - "{{ testing_subnet.subnet.id }}"
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: remove internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- state: absent
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: remove the subnet
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.55.77.0/24
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: remove the VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.55.77.0/24
- state: absent
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
diff --git a/test/integration/targets/ec2_asg/vars/main.yml b/test/integration/targets/ec2_asg/vars/main.yml
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/ec2_asg/vars/main.yml
+++ /dev/null
diff --git a/test/integration/targets/ec2_eip/aliases b/test/integration/targets/ec2_eip/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/ec2_eip/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/ec2_eip/defaults/main.yml b/test/integration/targets/ec2_eip/defaults/main.yml
deleted file mode 100644
index 8986714b6c..0000000000
--- a/test/integration/targets/ec2_eip/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-# VPCs are identified by the CIDR, so don't hard-code it: Shippable will
-# run multiple copies of the test concurrently (Python 2.x and Python 3.x).
-vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
-subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.42.0/24'
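The seeded `random` filter is what makes this safe: with a fixed seed the filter is deterministic, so every evaluation within one run (same resource_prefix) picks the same second octet, while concurrent runs with different prefixes land on different octets. A minimal sketch (a hypothetical debug task, not part of the original tests):

```yaml
- name: show the CIDRs derived for this test run
  debug:
    msg: "vpc={{ vpc_cidr }} subnet={{ subnet_cidr }}"
    # For a given resource_prefix these values never change between
    # evaluations, because random(seed=resource_prefix) is deterministic.
```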
diff --git a/test/integration/targets/ec2_eip/meta/main.yml b/test/integration/targets/ec2_eip/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_eip/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_eip/tasks/main.yml b/test/integration/targets/ec2_eip/tasks/main.yml
deleted file mode 100644
index 5acb839d17..0000000000
--- a/test/integration/targets/ec2_eip/tasks/main.yml
+++ /dev/null
@@ -1,767 +0,0 @@
----
-# Tests for Elastic IP allocation: ec2_eip and ec2_eip_info
-#
-# Tests ec2_eip:
-# - Basic allocation (no conditions)
-# - Allocation matching a Public IP
-# - Allocation matching a tag name
-# - Allocation matching a tag name + value
-# - Allocation from a specific pool
-# - Attaching an EIP to an ENI
-#
-# Tests ec2_eip_info:
-# - Listing all eips
-# - Searching for a specific eip by public IP
-# - Searching for a specific eip by allocation-id
-#
-# Possible Bugs:
-# - check_mode not honoured #62318
-#
-- name: Integration testing for ec2_eip
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- ec2_eip:
- in_vpc: yes
-
- block:
- - name: Get the current caller identity facts
- aws_caller_info:
- register: caller_info
-
- - name: list available AZs
- aws_az_info:
- register: region_azs
-
- - name: pick an AZ for testing
- set_fact:
- subnet_az: "{{ region_azs.availability_zones[0].zone_name }}"
-
- - name: create a VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: present
- cidr_block: "{{ vpc_cidr }}"
- tags:
- AnsibleEIPTest: "Pending"
- AnsibleEIPTestPrefix: "{{ resource_prefix }}"
- register: vpc_result
-
- - name: create subnet
- ec2_vpc_subnet:
- cidr: "{{ subnet_cidr }}"
- az: "{{ subnet_az }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: present
- register: vpc_subnet_create
-
- - ec2_vpc_igw:
- state: present
- vpc_id: "{{ vpc_result.vpc.id }}"
- register: vpc_igw
-
- # =================================================
- # A rough Lock using the VPC...
- #
- # Because we're testing the behaviour when dealing with objects that are
- # both tagged and untagged, we need to know that EIPs aren't being poached
- # underneath us. See specifically the behaviour around
- # I(reuse_existing_ip_allowed), I(tag_name) and I(tag_value)
- #
- # We also want to know that things like only 1 EIP was allocated / released.
- #
- # Because Python 2.x and Python 3.x tests are run concurrently there's a
- # high chance of the tests interfering with each other if we don't try to
- # perform some kind of locking here.
-
- - name: Look for signs of concurrent EIP tests. Pause if they are running or their prefix comes before ours.
- vars:
- running_query: "vpcs[?tags.AnsibleEIPTest=='Running']"
- pending_query: "vpcs[?tags.AnsibleEIPTest=='Pending'].tags.AnsibleEIPTestPrefix"
- ec2_vpc_net_info:
- filters:
- "tag:AnsibleEIPTest": ["Pending", "Running"]
- register: vpc_info
- retries: 120
- delay: 5
- until:
- # Anyone else running?
- - ( vpc_info | json_query(running_query) | length == 0 )
- # Are we first in the queue?
- - ( vpc_info | json_query(pending_query) | sort | first == resource_prefix )
-
- - name: Make a crude lock
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: present
- cidr_block: "{{ vpc_cidr }}"
- tags:
- AnsibleEIPTest: "Running"
- AnsibleEIPTestPrefix: "{{ resource_prefix }}"
-
- # =================================================
-
- - name: Get current state of EIPs
- ec2_eip_info:
- register: eip_info_start
-
- - name: Require that there are no free IPs when we start, otherwise we can't test things properly
- assert:
- that:
- - eip_info_start is defined
- - '"addresses" in eip_info_start'
- - ( eip_info_start.addresses | length ) == ( eip_info_start | json_query("addresses[].association_id") | length )
-
- #==================================================================
- # EIP Creation 'no conditions'
-
- # XXX check_mode not honoured
- #- name: Allocate a new eip (CHECK MODE)
- # ec2_eip:
- # state: present
- # register: eip
- # check_mode: yes
- #- ec2_eip_info:
- # register: eip_info
- #- assert:
- # that:
- # - eip is defined
- # - eip is changed
- # - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
-
- - name: Allocate a new eip (no conditions)
- ec2_eip:
- state: present
- register: eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip is defined
- - eip is changed
- - eip.public_ip is defined and ( eip.public_ip | ipaddr )
- - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
- - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
-
- # Get the info for our specific eip
- - ec2_eip_info:
- filters:
- public-ip: '{{ eip.public_ip }}'
- - assert:
- that:
- - '"addresses" in eip_info'
- - eip_info.addresses | length == 1
- - eip_info.addresses[0].allocation_id == eip.allocation_id
- - eip_info.addresses[0].domain == "vpc"
- - eip_info.addresses[0].public_ip == eip.public_ip
-
- # Get the info for our specific eip
- - ec2_eip_info:
- filters:
- allocation-id: '{{ eip.allocation_id }}'
- - assert:
- that:
- - '"addresses" in eip_info'
- - eip_info.addresses | length == 1
- - eip_info.addresses[0].allocation_id == eip.allocation_id
- - eip_info.addresses[0].domain == "vpc"
- - eip_info.addresses[0].public_ip == eip.public_ip
-
- # Clean up EIPs as we go to reduce the risk of hitting limits
- - name: Release eip
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- register: eip_release
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip_release is defined
- - eip_release is changed
- - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
-
- #==================================================================
- # EIP Creation: reuse allowed
-
- - name: Allocate a new eip - attempt reusing unallocated ones (none available)
- ec2_eip:
- state: present
- reuse_existing_ip_allowed: yes
- register: eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip is defined
- - eip is changed
- - eip.public_ip is defined and ( eip.public_ip | ipaddr )
- - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
- - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
-
- - name: Re-Allocate a new eip - attempt reusing unallocated ones (one available)
- ec2_eip:
- state: present
- reuse_existing_ip_allowed: yes
- register: reallocate_eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - reallocate_eip is defined
- - reallocate_eip is not changed
- - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ipaddr )
- - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
- - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
-
- - name: Release eip
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- register: eip_release
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
- - eip_release is defined
- - eip_release is changed
-
- #==================================================================
- # EIP Creation: Matching an existing IP
-
- - name: Allocate a new eip
- ec2_eip:
- state: present
- register: eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip is defined
- - eip is changed
- - eip.public_ip is defined and ( eip.public_ip | ipaddr )
- - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
- - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
-
- - name: Match an existing eip (changed == false)
- ec2_eip:
- state: present
- public_ip: "{{ eip.public_ip }}"
- register: reallocate_eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - reallocate_eip is defined
- - reallocate_eip is not changed
- - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ipaddr )
- - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
- - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
-
- - name: Release eip
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- register: eip_release
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip_release is defined
- - eip_release is changed
- - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
-
- #==================================================================
- # EIP Creation: Matching Tags
-
- - name: Allocate a new eip (no tags)
- ec2_eip:
- state: present
- register: eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip is defined
- - eip is changed
- - eip.public_ip is defined and ( eip.public_ip | ipaddr )
- - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
- - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
-
- - name: attempt reusing an existing eip with a tag (No match available)
- ec2_eip:
- state: present
- reuse_existing_ip_allowed: yes
- tag_name: Team
- register: no_tagged_eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - no_tagged_eip is defined
- - no_tagged_eip is changed
- - no_tagged_eip.public_ip is defined and ( no_tagged_eip.public_ip | ipaddr )
- - no_tagged_eip.allocation_id is defined and no_tagged_eip.allocation_id.startswith("eipalloc-")
- - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length )
-
- - name: tag eip so we can try matching it
- ec2_tag:
- state: present
- resource: '{{ eip.allocation_id }}'
- tags:
- Team: Frontend
-
- - name: attempt reusing an existing eip with a tag (Match available)
- ec2_eip:
- state: present
- reuse_existing_ip_allowed: yes
- tag_name: Team
- register: reallocate_eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - reallocate_eip is defined
- - reallocate_eip is not changed
- - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ipaddr )
- - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
- - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length )
-
- - name: attempt reusing an existing eip with a tag and its value (no match available)
- ec2_eip:
- state: present
- reuse_existing_ip_allowed: yes
- tag_name: Team
- tag_value: Backend
- register: backend_eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - backend_eip is defined
- - backend_eip is changed
- - backend_eip.public_ip is defined and ( backend_eip.public_ip | ipaddr )
- - backend_eip.allocation_id is defined and backend_eip.allocation_id.startswith("eipalloc-")
- - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length )
-
- - name: tag eip so we can try matching it
- ec2_tag:
- state: present
- resource: '{{ eip.allocation_id }}'
- tags:
- Team: Backend
-
- - name: attempt reusing an existing eip with a tag and its value (match available)
- ec2_eip:
- state: present
- reuse_existing_ip_allowed: yes
- tag_name: Team
- tag_value: Backend
- register: reallocate_eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - reallocate_eip is defined
- - reallocate_eip is not changed
- - reallocate_eip.public_ip is defined and reallocate_eip.public_ip != ""
- - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id != ""
- - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length )
-
- - name: Release backend_eip
- ec2_eip:
- state: absent
- public_ip: "{{ backend_eip.public_ip }}"
- register: eip_release
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip_release is defined
- - eip_release is changed
- - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length )
-
- - name: Release no_tagged_eip
- ec2_eip:
- state: absent
- public_ip: "{{ no_tagged_eip.public_ip }}"
- register: eip_release
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip_release is defined
- - eip_release is changed
- - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
-
- - name: Release eip
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- register: eip_release
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip_release is defined
- - eip_release is changed
- - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
-
- #==================================================================
- # Allocation from a pool
-
- - name: allocate a new eip from a pool
- ec2_eip:
- state: present
- public_ipv4_pool: amazon
- register: eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip is defined
- - eip is changed
- - eip.public_ip is defined and ( eip.public_ip | ipaddr )
- - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
- - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
-
- #==================================================================
- # Assigning EIP to an ENI
-
- - name: create ENI A
- ec2_eni:
- subnet_id: '{{ vpc_subnet_create.subnet.id }}'
- register: eni_create_a
-
- - name: create ENI B
- ec2_eni:
- subnet_id: '{{ vpc_subnet_create.subnet.id }}'
- register: eni_create_b
-
- # Test attaching EIP to ENI
- - name: Attach EIP to ENI A
- ec2_eip:
- public_ip: "{{ eip.public_ip }}"
- device_id: "{{ eni_create_a.interface.id }}"
- register: associate_eip
- - ec2_eip_info:
- filters:
- public-ip: '{{ eip.public_ip }}'
- register: eip_info
- - assert:
- that:
- - associate_eip is defined
- - associate_eip is changed
- - eip_info.addresses | length == 1
- - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
- - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
- - eip_info.addresses[0].allocation_id == eip.allocation_id
- - eip_info.addresses[0].domain == "vpc"
- - eip_info.addresses[0].public_ip == eip.public_ip
- - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
- - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
- - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ipaddr )
- - eip_info.addresses[0].network_interface_owner_id == caller_info.account
-
- - name: Re-Attach EIP to ENI A (no change)
- ec2_eip:
- public_ip: "{{ eip.public_ip }}"
- device_id: "{{ eni_create_a.interface.id }}"
- register: associate_eip
- - ec2_eip_info:
- filters:
- public-ip: '{{ eip.public_ip }}'
- register: eip_info
- - assert:
- that:
- - associate_eip is defined
- - associate_eip is not changed
- - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
- - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
- - eip_info.addresses | length == 1
- - eip_info.addresses[0].allocation_id == eip.allocation_id
- - eip_info.addresses[0].domain == "vpc"
- - eip_info.addresses[0].public_ip == eip.public_ip
- - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
- - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
- - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ipaddr )
-
- # Test attaching EIP to ENI B
- - name: Attach EIP to ENI B (should fail, already associated)
- ec2_eip:
- public_ip: "{{ eip.public_ip }}"
- device_id: "{{ eni_create_b.interface.id }}"
- register: associate_eip
- ignore_errors: yes
- - ec2_eip_info:
- filters:
- public-ip: '{{ eip.public_ip }}'
- register: eip_info
- - assert:
- that:
- - associate_eip is defined
- - associate_eip is failed
- - eip_info.addresses | length == 1
- - eip_info.addresses[0].allocation_id == eip.allocation_id
- - eip_info.addresses[0].domain == "vpc"
- - eip_info.addresses[0].public_ip == eip.public_ip
- - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
- - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
- - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ipaddr )
-
- - name: Attach EIP to ENI B
- ec2_eip:
- public_ip: "{{ eip.public_ip }}"
- device_id: "{{ eni_create_b.interface.id }}"
- allow_reassociation: yes
- register: associate_eip
- - ec2_eip_info:
- filters:
- public-ip: '{{ eip.public_ip }}'
- register: eip_info
- - assert:
- that:
- - associate_eip is defined
- - associate_eip is changed
- - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
- - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
- - eip_info.addresses | length == 1
- - eip_info.addresses[0].allocation_id == eip.allocation_id
- - eip_info.addresses[0].domain == "vpc"
- - eip_info.addresses[0].public_ip == eip.public_ip
- - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
- - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id
- - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ipaddr )
-
- - name: Detach EIP from ENI B, without enabling release on disassociation
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- device_id: "{{ eni_create_b.interface.id }}"
- register: disassociate_eip
- - ec2_eip_info:
- filters:
- public-ip: '{{ eip.public_ip }}'
- register: eip_info
- - assert:
- that:
- - disassociate_eip is defined
- - disassociate_eip is changed
- - eip_info.addresses | length == 1
-
- - name: Re-detach EIP from ENI B, without enabling release on disassociation
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- device_id: "{{ eni_create_b.interface.id }}"
- register: disassociate_eip
- - ec2_eip_info:
- filters:
- public-ip: '{{ eip.public_ip }}'
- register: eip_info
- - assert:
- that:
- - disassociate_eip is defined
- - disassociate_eip is not changed
- - eip_info.addresses | length == 1
-
- - name: Attach EIP to ENI A
- ec2_eip:
- public_ip: "{{ eip.public_ip }}"
- device_id: "{{ eni_create_a.interface.id }}"
- register: associate_eip
- - ec2_eip_info:
- filters:
- public-ip: '{{ eip.public_ip }}'
- register: eip_info
- - assert:
- that:
- - associate_eip is defined
- - associate_eip is changed
- - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
- - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
- - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
-
- - name: Detach EIP from ENI A, enabling release on disassociation
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- device_id: "{{ eni_create_a.interface.id }}"
- release_on_disassociation: yes
- register: disassociate_eip
- - ec2_eip_info:
- filters:
- public-ip: '{{ eip.public_ip }}'
- register: eip_info
- - assert:
- that:
- - disassociate_eip is defined
- - disassociate_eip is changed
- - eip_info.addresses | length == 0
-
- - name: Re-detach EIP from ENI A, enabling release on disassociation
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- device_id: "{{ eni_create_a.interface.id }}"
- release_on_disassociation: yes
- register: disassociate_eip
- - ec2_eip_info:
- filters:
- public-ip: '{{ eip.public_ip }}'
- register: eip_info
- - assert:
- that:
- - disassociate_eip is defined
- - disassociate_eip is not changed
- - eip_info.addresses | length == 0
-
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
-
- - name: Cleanup ENI B
- ec2_eni:
- state: absent
- eni_id: "{{ eni_create_b.interface.id }}"
-
- - name: Cleanup ENI A
- ec2_eni:
- state: absent
- eni_id: "{{ eni_create_a.interface.id }}"
-
- - name: Cleanup IGW
- ec2_vpc_igw:
- state: absent
- vpc_id: "{{ vpc_result.vpc.id }}"
- register: vpc_igw
-
- - name: Cleanup Subnet
- ec2_vpc_subnet:
- state: absent
- cidr: "{{ subnet_cidr }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
-
- - name: Release eip
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- register: eip_release
- ignore_errors: true
-
- #==================================================================
- # EIP Deletion
-
- - name: allocate a new eip
- ec2_eip:
- state: present
- register: eip
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip is defined
- - eip is changed
- - eip.public_ip is defined and ( eip.public_ip | ipaddr )
- - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
- - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
-
- - name: Release eip
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- register: eip_release
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip_release is defined
- - eip_release is changed
- - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
-
- - name: Re-release eip (no change)
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- register: eip_release
- - ec2_eip_info:
- register: eip_info
- - assert:
- that:
- - eip_release is defined
- - eip_release is not changed
- - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
-
- - name: Cleanup VPC
- ec2_vpc_net:
- state: absent
- name: "{{ resource_prefix }}-vpc"
- cidr_block: "{{ vpc_cidr }}"
-
- always:
-
- - name: Cleanup ENI A
- ec2_eni:
- state: absent
- eni_id: "{{ eni_create_a.interface.id }}"
- ignore_errors: yes
-
- - name: Cleanup ENI B
- ec2_eni:
- state: absent
- eni_id: "{{ eni_create_b.interface.id }}"
- ignore_errors: yes
-
- - name: Cleanup IGW
- ec2_vpc_igw:
- state: absent
- vpc_id: "{{ vpc_result.vpc.id }}"
- register: vpc_igw
-
- - name: Cleanup Subnet
- ec2_vpc_subnet:
- state: absent
- cidr: "{{ subnet_cidr }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- ignore_errors: yes
-
- - name: Cleanup eip
- ec2_eip:
- state: absent
- public_ip: "{{ eip.public_ip }}"
- when: eip is changed
- ignore_errors: yes
-
- - name: Cleanup reallocate_eip
- ec2_eip:
- state: absent
- public_ip: "{{ reallocate_eip.public_ip }}"
- when: reallocate_eip is changed
- ignore_errors: yes
-
- - name: Cleanup backend_eip
- ec2_eip:
- state: absent
- public_ip: "{{ backend_eip.public_ip }}"
- when: backend_eip is changed
- ignore_errors: yes
-
- - name: Cleanup no_tagged_eip
- ec2_eip:
- state: absent
- public_ip: "{{ no_tagged_eip.public_ip }}"
- when: no_tagged_eip is changed
- ignore_errors: yes
-
- - name: Cleanup VPC
- ec2_vpc_net:
- state: absent
- name: "{{ resource_prefix }}-vpc"
- cidr_block: "{{ vpc_cidr }}"
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_instance/aliases b/test/integration/targets/ec2_instance/aliases
deleted file mode 100644
index 62cb1d2c5b..0000000000
--- a/test/integration/targets/ec2_instance/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-shippable/aws/group3
-ec2_instance_info
diff --git a/test/integration/targets/ec2_instance/inventory b/test/integration/targets/ec2_instance/inventory
deleted file mode 100644
index 44b46ec88f..0000000000
--- a/test/integration/targets/ec2_instance/inventory
+++ /dev/null
@@ -1,17 +0,0 @@
-[tests]
-# Sorted fastest to slowest
-version_fail_wrapper
-ebs_optimized
-block_devices
-cpu_options
-default_vpc_tests
-external_resource_attach
-instance_no_wait
-iam_instance_role
-termination_protection
-tags_and_vpc_settings
-checkmode_tests
-
-[all:vars]
-ansible_connection=local
-ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/integration/targets/ec2_instance/main.yml b/test/integration/targets/ec2_instance/main.yml
deleted file mode 100644
index 7695f7bcb9..0000000000
--- a/test/integration/targets/ec2_instance/main.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-# Beware: most of our tests here are run in parallel.
-# To add new tests you'll need to add a new host to the inventory and a matching
-# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
-
-
-# Prepare the VPC and figure out which AMI to use
-- hosts: all
- gather_facts: no
- tasks:
- - module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- vars:
- # We can't just use "run_once" because the facts don't propagate when
- # running an 'include' that was run_once
- setup_run_once: yes
- block:
- - include_role:
- name: 'ec2_instance'
- tasks_from: find_ami.yml
- - include_role:
- name: 'ec2_instance'
- tasks_from: env_setup.yml
- rescue:
- - include_role:
- name: 'ec2_instance'
- tasks_from: env_cleanup.yml
- run_once: yes
- - fail:
- msg: 'Environment preparation failed'
- run_once: yes
-
-# VPC should get cleaned up once all hosts have run
-- hosts: all
- gather_facts: no
- strategy: free
- #serial: 10
- roles:
- - ec2_instance
diff --git a/test/integration/targets/ec2_instance/meta/main.yml b/test/integration/targets/ec2_instance/meta/main.yml
deleted file mode 100644
index 38b31be072..0000000000
--- a/test/integration/targets/ec2_instance/meta/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
- - setup_remote_tmp_dir
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/defaults/main.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/defaults/main.yml
deleted file mode 100644
index 8e70ab6933..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/defaults/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# defaults file for ec2_instance
-ec2_instance_owner: 'integration-run-{{ resource_prefix }}'
-ec2_instance_type: 't3.micro'
-ec2_instance_tag_TestId: '{{ resource_prefix }}-{{ inventory_hostname }}'
-ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'
-
-vpc_name: '{{ resource_prefix }}-vpc'
-vpc_seed: '{{ resource_prefix }}'
-vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
-subnet_a_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
-subnet_a_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.32.'
-subnet_b_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.33.0/24'
-subnet_b_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.33.'
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/files/assume-role-policy.json b/test/integration/targets/ec2_instance/roles/ec2_instance/files/assume-role-policy.json
deleted file mode 100644
index 72413abdd3..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/files/assume-role-policy.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "Version": "2008-10-17",
- "Statement": [
- {
- "Sid": "",
- "Effect": "Allow",
- "Principal": {
- "Service": "ec2.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/meta/main.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/block_devices.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/block_devices.yml
deleted file mode 100644
index 0a8ab63f08..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/block_devices.yml
+++ /dev/null
@@ -1,82 +0,0 @@
-- block:
- - name: "New instance with an extra block device"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-ebs-vols"
- image_id: "{{ ec2_ami_image }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- volumes:
- - device_name: /dev/sdb
- ebs:
- volume_size: 20
- delete_on_termination: true
- volume_type: standard
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- instance_type: "{{ ec2_instance_type }}"
- wait: true
- register: block_device_instances
-
- - name: "Gather instance info"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-ebs-vols"
- register: block_device_instances_info
-
- - assert:
- that:
- - block_device_instances is not failed
- - block_device_instances is changed
- - block_device_instances_info.instances[0].block_device_mappings[0]
- - block_device_instances_info.instances[0].block_device_mappings[1]
- - block_device_instances_info.instances[0].block_device_mappings[1].device_name == '/dev/sdb'
-
- - name: "New instance with an extra block device (check mode)"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-ebs-vols-checkmode"
- image_id: "{{ ec2_ami_image }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- volumes:
- - device_name: /dev/sdb
- ebs:
- volume_size: 20
- delete_on_termination: true
- volume_type: standard
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- instance_type: "{{ ec2_instance_type }}"
- check_mode: yes
-
- - name: "fact presented ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-ebs-vols"
- "instance-state-name": "running"
- register: presented_instance_fact
-
- - name: "fact checkmode ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-ebs-vols-checkmode"
- register: checkmode_instance_fact
-
- - name: "Confirm whether the check mode is working normally."
- assert:
- that:
- - "{{ presented_instance_fact.instances | length }} > 0"
- - "{{ checkmode_instance_fact.instances | length }} == 0"
-
- - name: "Terminate instances"
- ec2_instance:
- state: absent
- instance_ids: "{{ block_device_instances.instance_ids }}"
-
- always:
- - name: "Terminate block_devices instances"
- ec2_instance:
- state: absent
- filters:
- "tag:TestId": "{{ ec2_instance_tag_TestId }}"
- wait: yes
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml
deleted file mode 100644
index b161eca636..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/checkmode_tests.yml
+++ /dev/null
@@ -1,172 +0,0 @@
-- block:
- - name: "Make basic instance"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-checkmode-comparison"
- image_id: "{{ ec2_ami_image }}"
- security_groups: "{{ sg.group_id }}"
- instance_type: "{{ ec2_instance_type }}"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- wait: false
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- register: basic_instance
-
- - name: "Make basic instance (check mode)"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-checkmode-comparison-checkmode"
- image_id: "{{ ec2_ami_image }}"
- security_groups: "{{ sg.group_id }}"
- instance_type: "{{ ec2_instance_type }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- check_mode: yes
-
- - name: "fact presented ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
- register: presented_instance_fact
-
- - name: "fact checkmode ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-checkmode-comparison-checkmode"
- register: checkmode_instance_fact
-
- - name: "Confirm whether the check mode is working normally."
- assert:
- that:
- - "{{ presented_instance_fact.instances | length }} > 0"
- - "{{ checkmode_instance_fact.instances | length }} == 0"
-
- - name: "Stop instance (check mode)"
- ec2_instance:
- state: stopped
- name: "{{ resource_prefix }}-checkmode-comparison"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- check_mode: yes
-
- - name: "fact ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
- register: confirm_checkmode_stopinstance_fact
-
- - name: "Verify that it was not stopped."
- assert:
- that:
- - '"{{ confirm_checkmode_stopinstance_fact.instances[0].state.name }}" != "stopped"'
-
- - name: "Stop instance."
- ec2_instance:
- state: stopped
- name: "{{ resource_prefix }}-checkmode-comparison"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- register: instance_stop
- until: not instance_stop.failed
- retries: 10
-
- - name: "fact stopped ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
- register: confirm_stopinstance_fact
-
- - name: "Verify that it was stopped."
- assert:
- that:
- - '"{{ confirm_stopinstance_fact.instances[0].state.name }}" in ["stopped", "stopping"]'
-
- - name: "Running instance in check mode."
- ec2_instance:
- state: running
- name: "{{ resource_prefix }}-checkmode-comparison"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- check_mode: yes
-
- - name: "fact ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
- register: confirm_checkmode_runninginstance_fact
-
- - name: "Verify that it was not running."
- assert:
- that:
- - '"{{ confirm_checkmode_runninginstance_fact.instances[0].state.name }}" != "running"'
-
- - name: "Running instance."
- ec2_instance:
- state: running
- name: "{{ resource_prefix }}-checkmode-comparison"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
-
- - name: "fact ec2 instance."
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
- register: confirm_runninginstance_fact
-
- - name: "Verify that it was running."
- assert:
- that:
- - '"{{ confirm_runninginstance_fact.instances[0].state.name }}" == "running"'
-
- - name: "Terminate instance in check mode."
- ec2_instance:
- state: absent
- name: "{{ resource_prefix }}-checkmode-comparison"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- check_mode: yes
-
- - name: "fact ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
- register: confirm_checkmode_terminatedinstance_fact
-
- - name: "Verify that it was not terminated,"
- assert:
- that:
- - '"{{ confirm_checkmode_terminatedinstance_fact.instances[0].state.name }}" != "terminated"'
-
- - name: "Terminate instance."
- ec2_instance:
- state: absent
- name: "{{ resource_prefix }}-checkmode-comparison"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
-
- - name: "fact ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-checkmode-comparison"
- register: confirm_terminatedinstance_fact
-
- - name: "Verify that it was terminated,"
- assert:
- that:
- - '"{{ confirm_terminatedinstance_fact.instances[0].state.name }}" == "terminated"'
-
- always:
- - name: "Terminate checkmode instances"
- ec2_instance:
- state: absent
- filters:
- "tag:TestId": "{{ ec2_instance_tag_TestId }}"
- wait: yes
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/cpu_options.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/cpu_options.yml
deleted file mode 100644
index 947011f75e..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/cpu_options.yml
+++ /dev/null
@@ -1,86 +0,0 @@
-- block:
- - name: "create t3.nano instance with cpu_options"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- instance_type: t3.nano
- cpu_options:
- core_count: 1
- threads_per_core: 1
- wait: false
- register: instance_creation
-
- - name: "instance with cpu_options created with the right options"
- assert:
- that:
- - instance_creation is success
- - instance_creation is changed
-
- - name: "modify cpu_options on existing instance (warning displayed)"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- instance_type: t3.nano
- cpu_options:
- core_count: 1
- threads_per_core: 2
- wait: false
- register: cpu_options_update
- ignore_errors: yes
-
- - name: "fact presented ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core"
- register: presented_instance_fact
-
- - name: "modify cpu_options has no effect on existing instance"
- assert:
- that:
- - cpu_options_update is success
- - cpu_options_update is not changed
- - "{{ presented_instance_fact.instances | length }} > 0"
- - "'{{ presented_instance_fact.instances.0.state.name }}' in ['running','pending']"
- - "{{ presented_instance_fact.instances.0.cpu_options.core_count }} == 1"
- - "{{ presented_instance_fact.instances.0.cpu_options.threads_per_core }} == 1"
-
- - name: "create t3.nano instance with cpu_options(check mode)"
- ec2_instance:
- name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- instance_type: t3.nano
- cpu_options:
- core_count: 1
- threads_per_core: 1
- check_mode: yes
-
- - name: "fact checkmode ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode"
- register: checkmode_instance_fact
-
- - name: "Confirm existence of instance id."
- assert:
- that:
- - "{{ checkmode_instance_fact.instances | length }} == 0"
-
- always:
- - name: "Terminate cpu_options instances"
- ec2_instance:
- state: absent
- filters:
- "tag:TestId": "{{ ec2_instance_tag_TestId }}"
- wait: yes
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml
deleted file mode 100644
index a69dfe9f86..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/default_vpc_tests.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-- block:
- - name: "Make instance in a default subnet of the VPC"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-default-vpc"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- security_group: "default"
- instance_type: "{{ ec2_instance_type }}"
- wait: false
- register: in_default_vpc
-
- - name: "Make instance in a default subnet of the VPC(check mode)"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-default-vpc-checkmode"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- security_group: "default"
- instance_type: "{{ ec2_instance_type }}"
- check_mode: yes
-
- - name: "fact presented ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-default-vpc"
- register: presented_instance_fact
-
- - name: "fact checkmode ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-default-vpc-checkmode"
- register: checkmode_instance_fact
-
- - name: "Confirm whether the check mode is working normally."
- assert:
- that:
- - "{{ presented_instance_fact.instances | length }} > 0"
- - "{{ checkmode_instance_fact.instances | length }} == 0"
-
- - name: "Terminate instances"
- ec2_instance:
- state: absent
- instance_ids: "{{ in_default_vpc.instance_ids }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
-
- always:
- - name: "Terminate vpc_tests instances"
- ec2_instance:
- state: absent
- filters:
- "tag:TestId": "{{ ec2_instance_tag_TestId }}"
- wait: yes
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml
deleted file mode 100644
index 5bfdc086e7..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/ebs_optimized.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-- block:
- - name: "Make EBS optimized instance in the testing subnet of the test VPC"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- security_groups: "{{ sg.group_id }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- ebs_optimized: true
- instance_type: t3.nano
- wait: false
- register: ebs_opt_in_vpc
-
- - name: "Get ec2 instance info"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc"
- register: ebs_opt_instance_info
-
- - name: "Assert instance is ebs_optimized"
- assert:
- that:
- - "{{ ebs_opt_instance_info.instances.0.ebs_optimized }}"
-
- - name: "Terminate instances"
- ec2_instance:
- state: absent
- instance_ids: "{{ ebs_opt_in_vpc.instance_ids }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
-
- always:
- - name: "Terminate ebs_optimzed instances"
- ec2_instance:
- state: absent
- filters:
- "tag:TestId": "{{ ec2_instance_tag_TestId }}"
- wait: yes
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml
deleted file mode 100644
index 1b6c79e0d9..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_cleanup.yml
+++ /dev/null
@@ -1,93 +0,0 @@
-- name: "remove Instances"
- ec2_instance:
- state: absent
- filters:
- vpc-id: "{{ testing_vpc.vpc.id }}"
- wait: yes
- ignore_errors: yes
- retries: 10
-
-- name: "remove ENIs"
- ec2_eni_info:
- filters:
- vpc-id: "{{ testing_vpc.vpc.id }}"
- register: enis
-
-- name: "delete all ENIs"
- ec2_eni:
- state: absent
- eni_id: "{{ item.id }}"
- until: removed is not failed
- with_items: "{{ enis.network_interfaces }}"
- ignore_errors: yes
- retries: 10
-
-- name: "remove the security group"
- ec2_group:
- state: absent
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ testing_vpc.vpc.id }}"
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
-- name: "remove routing rules"
- ec2_vpc_route_table:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- tags:
- created: "{{ resource_prefix }}-route"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- subnets:
- - "{{ testing_subnet_a.subnet.id }}"
- - "{{ testing_subnet_b.subnet.id }}"
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
-- name: "remove internet gateway"
- ec2_vpc_igw:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
-- name: "remove subnet A"
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: "{{ subnet_a_cidr }}"
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
-- name: "remove subnet B"
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: "{{ subnet_b_cidr }}"
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
-- name: "remove the VPC"
- ec2_vpc_net:
- state: absent
- name: "{{ vpc_name }}"
- cidr_block: "{{ vpc_cidr }}"
- tags:
- Name: Ansible Testing VPC
- tenancy: default
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_setup.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_setup.yml
deleted file mode 100644
index 6c76b7bf79..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/env_setup.yml
+++ /dev/null
@@ -1,79 +0,0 @@
-- run_once: '{{ setup_run_once | default("no") | bool }}'
- block:
- - name: "fetch AZ availability"
- aws_az_info:
- register: az_info
- - name: "Assert that we have multiple AZs available to us"
- assert:
- that: az_info.availability_zones | length >= 2
-
- - name: "pick AZs"
- set_fact:
- subnet_a_az: '{{ az_info.availability_zones[0].zone_name }}'
- subnet_b_az: '{{ az_info.availability_zones[1].zone_name }}'
-
- - name: "Create VPC for use in testing"
- ec2_vpc_net:
- state: present
- name: "{{ vpc_name }}"
- cidr_block: "{{ vpc_cidr }}"
- tags:
- Name: Ansible ec2_instance Testing VPC
- tenancy: default
- register: testing_vpc
-
- - name: "Create internet gateway for use in testing"
- ec2_vpc_igw:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- register: igw
-
- - name: "Create default subnet in zone A"
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: "{{ subnet_a_cidr }}"
- az: "{{ subnet_a_az }}"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet-a"
- register: testing_subnet_a
-
- - name: "Create secondary subnet in zone B"
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: "{{ subnet_b_cidr }}"
- az: "{{ subnet_b_az }}"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet-b"
- register: testing_subnet_b
-
- - name: "create routing rules"
- ec2_vpc_route_table:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- tags:
- created: "{{ resource_prefix }}-route"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- subnets:
- - "{{ testing_subnet_a.subnet.id }}"
- - "{{ testing_subnet_b.subnet.id }}"
-
- - name: "create a security group with the vpc"
- ec2_group:
- state: present
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ testing_vpc.vpc.id }}"
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- register: sg
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml
deleted file mode 100644
index 2625977f41..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/external_resource_attach.yml
+++ /dev/null
@@ -1,129 +0,0 @@
-- block:
- # Make custom ENIs and attach via the `network` parameter
- - ec2_eni:
- state: present
- delete_on_termination: true
- subnet_id: "{{ testing_subnet_b.subnet.id }}"
- security_groups:
- - "{{ sg.group_id }}"
- register: eni_a
-
- - ec2_eni:
- state: present
- delete_on_termination: true
- subnet_id: "{{ testing_subnet_b.subnet.id }}"
- security_groups:
- - "{{ sg.group_id }}"
- register: eni_b
-
- - ec2_eni:
- state: present
- delete_on_termination: true
- subnet_id: "{{ testing_subnet_b.subnet.id }}"
- security_groups:
- - "{{ sg.group_id }}"
- register: eni_c
-
- - ec2_key:
- name: "{{ resource_prefix }}_test_key"
-
- - name: "Make instance in the testing subnet created in the test VPC"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-eni-vpc"
- key_name: "{{ resource_prefix }}_test_key"
- network:
- interfaces:
- - id: "{{ eni_a.interface.id }}"
- image_id: "{{ ec2_ami_image }}"
- availability_zone: '{{ subnet_b_az }}'
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- instance_type: "{{ ec2_instance_type }}"
- wait: false
- register: in_test_vpc
-
- - name: "Gather {{ resource_prefix }}-test-eni-vpc info"
- ec2_instance_info:
- filters:
- "tag:Name": '{{ resource_prefix }}-test-eni-vpc'
- register: in_test_vpc_instance
-
- - assert:
- that:
- - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"'
- - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1'
-
- - name: "Add a second interface"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-eni-vpc"
- network:
- interfaces:
- - id: "{{ eni_a.interface.id }}"
- - id: "{{ eni_b.interface.id }}"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- instance_type: "{{ ec2_instance_type }}"
- wait: false
- register: add_interface
- until: add_interface is not failed
- ignore_errors: yes
- retries: 10
-
- - name: "Make instance in the testing subnet created in the test VPC(check mode)"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-eni-vpc-checkmode"
- key_name: "{{ resource_prefix }}_test_key"
- network:
- interfaces:
- - id: "{{ eni_c.interface.id }}"
- image_id: "{{ ec2_ami_image }}"
- availability_zone: '{{ subnet_b_az }}'
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- instance_type: "{{ ec2_instance_type }}"
- check_mode: yes
-
- - name: "fact presented ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-eni-vpc"
- register: presented_instance_fact
-
- - name: "fact checkmode ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-eni-vpc-checkmode"
- register: checkmode_instance_fact
-
- - name: "Confirm existence of instance id."
- assert:
- that:
- - "{{ presented_instance_fact.instances | length }} > 0"
- - "{{ checkmode_instance_fact.instances | length }} == 0"
-
- always:
- - name: "Terminate external_resource_attach instances"
- ec2_instance:
- state: absent
- filters:
- "tag:TestId": "{{ ec2_instance_tag_TestId }}"
- wait: yes
- ignore_errors: yes
-
- - ec2_key:
- state: absent
- name: "{{ resource_prefix }}_test_key"
- ignore_errors: yes
-
- - ec2_eni:
- state: absent
- eni_id: '{{ item.interface.id }}'
- ignore_errors: yes
- with_items:
- - '{{ eni_a }}'
- - '{{ eni_b }}'
- - '{{ eni_c }}'
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/find_ami.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/find_ami.yml
deleted file mode 100644
index 5c0e61f84c..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/find_ami.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-- run_once: '{{ setup_run_once | default("no") | bool }}'
- block:
- - name: "Find AMI to use"
- run_once: yes
- ec2_ami_info:
- owners: 'amazon'
- filters:
- name: '{{ ec2_ami_name }}'
- register: ec2_amis
- - name: "Set fact with latest AMI"
- run_once: yes
- vars:
- latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
- set_fact:
- ec2_ami_image: '{{ latest_ami.image_id }}'
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml
deleted file mode 100644
index 6e29b74674..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/iam_instance_role.yml
+++ /dev/null
@@ -1,127 +0,0 @@
-- block:
- - name: "Create IAM role for test"
- iam_role:
- state: present
- name: "ansible-test-sts-{{ resource_prefix }}-test-policy"
- assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
- create_instance_profile: yes
- managed_policy:
- - AmazonEC2ContainerServiceRole
- register: iam_role
-
- - name: "Create second IAM role for test"
- iam_role:
- state: present
- name: "ansible-test-sts-{{ resource_prefix }}-test-policy-2"
- assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
- create_instance_profile: yes
- managed_policy:
- - AmazonEC2ContainerServiceRole
- register: iam_role_2
-
- - name: "wait 10 seconds for roles to become available"
- wait_for:
- timeout: 10
- delegate_to: localhost
-
- - name: "Make instance with an instance_role"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-instance-role"
- image_id: "{{ ec2_ami_image }}"
- security_groups: "{{ sg.group_id }}"
- instance_type: "{{ ec2_instance_type }}"
- instance_role: "ansible-test-sts-{{ resource_prefix }}-test-policy"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- register: instance_with_role
-
- - assert:
- that:
- - 'instance_with_role.instances[0].iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
-
- - name: "Make instance with an instance_role(check mode)"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-instance-role-checkmode"
- image_id: "{{ ec2_ami_image }}"
- security_groups: "{{ sg.group_id }}"
- instance_type: "{{ ec2_instance_type }}"
- instance_role: "{{ iam_role.arn.replace(':role/', ':instance-profile/') }}"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- check_mode: yes
-
- - name: "fact presented ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-instance-role"
- register: presented_instance_fact
-
- - name: "fact checkmode ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-instance-role-checkmode"
- register: checkmode_instance_fact
-
- - name: "Confirm whether the check mode is working normally."
- assert:
- that:
- - "{{ presented_instance_fact.instances | length }} > 0"
- - "{{ checkmode_instance_fact.instances | length }} == 0"
-
- - name: "Update instance with new instance_role"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-instance-role"
- image_id: "{{ ec2_ami_image }}"
- security_groups: "{{ sg.group_id }}"
- instance_type: "{{ ec2_instance_type }}"
- instance_role: "{{ iam_role_2.arn.replace(':role/', ':instance-profile/') }}"
- vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- register: instance_with_updated_role
-
- - name: "wait 10 seconds for role update to complete"
- wait_for:
- timeout: 10
- delegate_to: localhost
-
- - name: "fact checkmode ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-instance-role"
- register: updates_instance_info
-
- - assert:
- that:
- - 'updates_instance_info.instances[0].iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
- - 'updates_instance_info.instances[0].instance_id == instance_with_role.instances[0].instance_id'
-
- always:
- - name: "Terminate iam_instance_role instances"
- ec2_instance:
- state: absent
- filters:
- "tag:TestId": "{{ ec2_instance_tag_TestId }}"
- wait: yes
- ignore_errors: yes
-
- - name: "Delete IAM role for test"
- iam_role:
- state: absent
- name: "{{ item }}"
- assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
- create_instance_profile: yes
- managed_policy:
- - AmazonEC2ContainerServiceRole
- loop:
- - "ansible-test-sts-{{ resource_prefix }}-test-policy"
- - "ansible-test-sts-{{ resource_prefix }}-test-policy-2"
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml
deleted file mode 100644
index 418d7ef3e8..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/instance_no_wait.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-- block:
- - name: "New instance and don't wait for it to complete"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-no-wait"
- image_id: "{{ ec2_ami_image }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- wait: false
- instance_type: "{{ ec2_instance_type }}"
- register: in_test_vpc
-
- - assert:
- that:
- - in_test_vpc is not failed
- - in_test_vpc is changed
- - in_test_vpc.instances is not defined
- - in_test_vpc.instance_ids is defined
- - in_test_vpc.instance_ids | length > 0
-
- - name: "New instance and don't wait for it to complete ( check mode )"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-no-wait-checkmode"
- image_id: "{{ ec2_ami_image }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- wait: false
- instance_type: "{{ ec2_instance_type }}"
- check_mode: yes
-
- - name: "Facts for ec2 test instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-no-wait"
- register: real_instance_fact
- until: real_instance_fact.instances | length > 0
- retries: 10
-
- - name: "Facts for checkmode ec2 test instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-no-wait-checkmode"
- register: checkmode_instance_fact
-
- - name: "Confirm whether the check mode is working normally."
- assert:
- that:
- - "{{ real_instance_fact.instances | length }} > 0"
- - "{{ checkmode_instance_fact.instances | length }} == 0"
-
- - name: "Terminate instances"
- ec2_instance:
- state: absent
- instance_ids: "{{ in_test_vpc.instance_ids }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
-
- always:
- - name: "Terminate instance_no_wait instances"
- ec2_instance:
- state: absent
- filters:
- "tag:TestId": "{{ ec2_instance_tag_TestId }}"
- wait: yes
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/main.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/main.yml
deleted file mode 100644
index e10aebcefe..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/main.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-# Beware: most of our tests here are run in parallel.
-# To add new tests you'll need to add a new host to the inventory and a matching
-# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
-#
-# Please make sure you tag your instances with
-# tags:
-# "tag:TestId": "{{ ec2_instance_tag_TestId }}"
-# And delete them based on that tag at the end of your specific set of tests
-#
-# ###############################################################################
-#
-# A Note about ec2 environment variable name preference:
-# - EC2_URL -> AWS_URL
-# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
-# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
-# - EC2_REGION -> AWS_REGION
-#
-
-- name: "Wrap up all tests and setup AWS credentials"
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- - debug:
- msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}"
- - include_tasks: '{{ inventory_hostname }}.yml'
- - debug:
- msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}"
-
- always:
- - set_fact:
- _role_complete: True
- - vars:
- completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}'
- hosts_in_play: '{{ ansible_play_hosts_all | length }}'
- debug:
- msg: "{{ completed_hosts }} of {{ hosts_in_play }} complete"
- - include_tasks: env_cleanup.yml
- vars:
- completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}'
- hosts_in_play: '{{ ansible_play_hosts_all | length }}'
- when:
- - aws_cleanup
- - completed_hosts == hosts_in_play
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml
deleted file mode 100644
index d38b53f76f..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/tags_and_vpc_settings.yml
+++ /dev/null
@@ -1,158 +0,0 @@
-- block:
- - name: "Make instance in the testing subnet created in the test VPC"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-basic-vpc-create"
- image_id: "{{ ec2_ami_image }}"
- user_data: |
- #cloud-config
- package_upgrade: true
- package_update: true
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- Something: else
- security_groups: "{{ sg.group_id }}"
- network:
- source_dest_check: false
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- instance_type: "{{ ec2_instance_type }}"
- wait: false
- register: in_test_vpc
-
- - name: "Make instance in the testing subnet created in the test VPC(check mode)"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
- image_id: "{{ ec2_ami_image }}"
- user_data: |
- #cloud-config
- package_upgrade: true
- package_update: true
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- Something: else
- security_groups: "{{ sg.group_id }}"
- network:
- source_dest_check: false
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- instance_type: "{{ ec2_instance_type }}"
- check_mode: yes
-
- - name: "Try to re-make the instance, hopefully this shows changed=False"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-basic-vpc-create"
- image_id: "{{ ec2_ami_image }}"
- user_data: |
- #cloud-config
- package_upgrade: true
- package_update: true
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- Something: else
- security_groups: "{{ sg.group_id }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- instance_type: "{{ ec2_instance_type }}"
- register: remake_in_test_vpc
- - name: "Remaking the same instance resulted in no changes"
- assert:
- that: not remake_in_test_vpc.changed
- - name: "check that instance IDs match anyway"
- assert:
- that: 'remake_in_test_vpc.instance_ids[0] == in_test_vpc.instance_ids[0]'
- - name: "check that source_dest_check was set to false"
- assert:
- that: 'not remake_in_test_vpc.instances[0].source_dest_check'
-
- - name: "fact presented ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create"
- register: presented_instance_fact
-
- - name: "fact checkmode ec2 instance"
- ec2_instance_info:
- filters:
- "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create-checkmode"
- register: checkmode_instance_fact
-
- - name: "Confirm whether the check mode is working normally."
- assert:
- that:
- - "{{ presented_instance_fact.instances | length }} > 0"
- - "{{ checkmode_instance_fact.instances | length }} == 0"
-
- - name: "Alter it by adding tags"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-basic-vpc-create"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- Another: thing
- security_groups: "{{ sg.group_id }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- instance_type: "{{ ec2_instance_type }}"
- register: add_another_tag
-
- - ec2_instance_info:
- instance_ids: "{{ add_another_tag.instance_ids }}"
- register: check_tags
- - name: "Remaking the same instance resulted in no changes"
- assert:
- that:
- - check_tags.instances[0].tags.Another == 'thing'
- - check_tags.instances[0].tags.Something == 'else'
-
- - name: "Purge a tag"
- ec2_instance:
- state: present
- name: "{{ resource_prefix }}-test-basic-vpc-create"
- image_id: "{{ ec2_ami_image }}"
- purge_tags: true
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- Another: thing
- security_groups: "{{ sg.group_id }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- instance_type: "{{ ec2_instance_type }}"
-
- - ec2_instance_info:
- instance_ids: "{{ add_another_tag.instance_ids }}"
- register: check_tags
-
- - name: "Remaking the same instance resulted in no changes"
- assert:
- that:
- - "'Something' not in check_tags.instances[0].tags"
-
- - name: "check that subnet-default public IP rule was followed"
- assert:
- that:
- - check_tags.instances[0].public_dns_name == ""
- - check_tags.instances[0].private_ip_address.startswith(subnet_b_startswith)
- - check_tags.instances[0].subnet_id == testing_subnet_b.subnet.id
- - name: "check that tags were applied"
- assert:
- that:
- - check_tags.instances[0].tags.Name.startswith(resource_prefix)
- - "'{{ check_tags.instances[0].state.name }}' in ['pending', 'running']"
-
- - name: "Terminate instance"
- ec2_instance:
- state: absent
- filters:
- "tag:TestId": "{{ ec2_instance_tag_TestId }}"
- wait: false
- register: result
- - assert:
- that: result.changed
-
- always:
- - name: "Terminate tags_and_vpc_settings instances"
- ec2_instance:
- state: absent
- filters:
- "tag:TestId": "{{ ec2_instance_tag_TestId }}"
- wait: yes
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/termination_protection.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/termination_protection.yml
deleted file mode 100644
index 418e3c398d..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/termination_protection.yml
+++ /dev/null
@@ -1,184 +0,0 @@
-- block:
-
- - name: Create instance with termination protection (check mode)
- ec2_instance:
- name: "{{ resource_prefix }}-termination-protection"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ resource_prefix }}"
- security_groups: "{{ sg.group_id }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- termination_protection: true
- instance_type: "{{ ec2_instance_type }}"
- state: running
- wait: yes
- check_mode: yes
- register: create_instance_check_mode_results
-
- - name: Check the returned value for the earlier task
- assert:
- that:
- - "{{ create_instance_check_mode_results.changed }}"
- - "{{ create_instance_check_mode_results.spec.DisableApiTermination }}"
-
- - name: Create instance with termination protection
- ec2_instance:
- name: "{{ resource_prefix }}-termination-protection"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ resource_prefix }}"
- security_groups: "{{ sg.group_id }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- termination_protection: true
- instance_type: "{{ ec2_instance_type }}"
- state: running
- wait: yes
- register: create_instance_results
-
- - name: Check return values of the create instance task
- assert:
- that:
- - "{{ create_instance_results.instances | length }} > 0"
- - "'{{ create_instance_results.instances.0.state.name }}' == 'running'"
- - "'{{ create_instance_results.spec.DisableApiTermination }}'"
-
- - name: Create instance with termination protection (check mode) (idempotent)
- ec2_instance:
- name: "{{ resource_prefix }}-termination-protection"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ resource_prefix }}"
- security_groups: "{{ sg.group_id }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- termination_protection: true
- instance_type: "{{ ec2_instance_type }}"
- state: running
- wait: yes
- check_mode: yes
- register: create_instance_check_mode_results
-
- - name: Check the returned value for the earlier task
- assert:
- that:
- - "{{ not create_instance_check_mode_results.changed }}"
-
- - name: Create instance with termination protection (idempotent)
- ec2_instance:
- name: "{{ resource_prefix }}-termination-protection"
- image_id: "{{ ec2_ami_image }}"
- tags:
- TestId: "{{ resource_prefix }}"
- security_groups: "{{ sg.group_id }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- termination_protection: true
- instance_type: "{{ ec2_instance_type }}"
- state: running
- wait: yes
- register: create_instance_results
-
- - name: Check return values of the create instance task
- assert:
- that:
- - "{{ not create_instance_results.changed }}"
- - "{{ create_instance_results.instances | length }} > 0"
-
- - name: Try to terminate the instance (expected to fail)
- ec2_instance:
- filters:
- tag:Name: "{{ resource_prefix }}-termination-protection"
- state: absent
- failed_when: "'Unable to terminate instances' not in terminate_instance_results.msg"
- register: terminate_instance_results
-
- # https://github.com/ansible/ansible/issues/67716
-# Updates to termination protection in check mode have a bug (listed above)
-
- - name: Set termination protection to false
- ec2_instance:
- name: "{{ resource_prefix }}-termination-protection"
- image_id: "{{ ec2_ami_image }}"
- termination_protection: false
- instance_type: "{{ ec2_instance_type }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- register: set_termination_protection_results
-
- - name: Check return value
- assert:
- that:
- - "{{ set_termination_protection_results.changed }}"
- - "{{ not set_termination_protection_results.changes[0].DisableApiTermination.Value }}"
-
- - name: Set termination protection to false (idempotent)
- ec2_instance:
- name: "{{ resource_prefix }}-termination-protection"
- image_id: "{{ ec2_ami_image }}"
- termination_protection: false
- instance_type: "{{ ec2_instance_type }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- register: set_termination_protection_results
-
- - name: Check return value
- assert:
- that:
- - "{{ not set_termination_protection_results.changed }}"
-
- - name: Set termination protection to true
- ec2_instance:
- name: "{{ resource_prefix }}-termination-protection"
- image_id: "{{ ec2_ami_image }}"
- termination_protection: true
- instance_type: "{{ ec2_instance_type }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- register: set_termination_protection_results
-
- - name: Check return value
- assert:
- that:
- - "{{ set_termination_protection_results.changed }}"
- - "{{ set_termination_protection_results.changes[0].DisableApiTermination.Value }}"
-
- - name: Set termination protection to true (idempotent)
- ec2_instance:
- name: "{{ resource_prefix }}-termination-protection"
- image_id: "{{ ec2_ami_image }}"
- termination_protection: true
- instance_type: "{{ ec2_instance_type }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- register: set_termination_protection_results
-
- - name: Check return value
- assert:
- that:
- - "{{ not set_termination_protection_results.changed }}"
-
- - name: Set termination protection to false (so we can terminate instance)
- ec2_instance:
- name: "{{ resource_prefix }}-termination-protection"
- image_id: "{{ ec2_ami_image }}"
- termination_protection: false
- instance_type: "{{ ec2_instance_type }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- register: set_termination_protection_results
-
- - name: Terminate the instance
- ec2_instance:
- filters:
- tag:TestId: "{{ resource_prefix }}"
- state: absent
-
- always:
-
- - name: Set termination protection to false (so we can terminate instance) (cleanup)
- ec2_instance:
- filters:
- tag:TestId: "{{ resource_prefix }}"
- termination_protection: false
- ignore_errors: yes
-
- - name: Terminate instance
- ec2_instance:
- filters:
- tag:TestId: "{{ resource_prefix }}"
- state: absent
- wait: false
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail.yml
deleted file mode 100644
index 67370ebe37..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-- block:
- - name: "create t3.nano with cpu options (fails gracefully)"
- ec2_instance:
- state: present
- name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-ec2"
- image_id: "{{ ec2_ami_image }}"
- instance_type: "t3.nano"
- cpu_options:
- core_count: 1
- threads_per_core: 1
- tags:
- TestId: "{{ ec2_instance_tag_TestId }}"
- register: ec2_instance_cpu_options_creation
- ignore_errors: yes
-
- - name: "check that graceful error message is returned when creation with cpu_options and old botocore"
- assert:
- that:
- - ec2_instance_cpu_options_creation.failed
- - 'ec2_instance_cpu_options_creation.msg == "cpu_options is only supported with botocore >= 1.10.16"'
-
- always:
- - name: "Terminate version_fail instances"
- ec2_instance:
- state: absent
- filters:
- "tag:TestId": "{{ ec2_instance_tag_TestId }}"
- wait: yes
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml b/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml
deleted file mode 100644
index ae5bd78500..0000000000
--- a/test/integration/targets/ec2_instance/roles/ec2_instance/tasks/version_fail_wrapper.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- include_role:
- name: 'setup_remote_tmp_dir'
-
-- set_fact:
- virtualenv: "{{ remote_tmp_dir }}/virtualenv"
- virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
-
-- set_fact:
- virtualenv_interpreter: "{{ virtualenv }}/bin/python"
-
-- pip:
- name: "virtualenv"
-
-- pip:
- name:
- - 'botocore<1.10.16'
- - boto3
- - coverage
- virtualenv: "{{ virtualenv }}"
- virtualenv_command: "{{ virtualenv_command }}"
- virtualenv_site_packages: no
-
-- include_tasks: version_fail.yml
- vars:
- ansible_python_interpreter: "{{ virtualenv_interpreter }}"
-
-- file:
- state: absent
- path: "{{ virtualenv }}"
diff --git a/test/integration/targets/ec2_instance/runme.sh b/test/integration/targets/ec2_instance/runme.sh
deleted file mode 100755
index aa324772bb..0000000000
--- a/test/integration/targets/ec2_instance/runme.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-#
-# Beware: most of our tests here are run in parallel.
-# To add new tests you'll need to add a new host to the inventory and a matching
-# '{{ inventory_hostname }}'.yml file in roles/ec2_instance/tasks/
-
-
-set -eux
-
-export ANSIBLE_ROLES_PATH=../
-
-ansible-playbook main.yml -i inventory "$@"
diff --git a/test/integration/targets/ec2_launch_template/aliases b/test/integration/targets/ec2_launch_template/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/ec2_launch_template/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/ec2_launch_template/meta/main.yml b/test/integration/targets/ec2_launch_template/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_launch_template/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_launch_template/playbooks/full_test.yml b/test/integration/targets/ec2_launch_template/playbooks/full_test.yml
deleted file mode 100644
index ae375ac17b..0000000000
--- a/test/integration/targets/ec2_launch_template/playbooks/full_test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-- hosts: localhost
- connection: local
- environment: "{{ ansible_test.environment }}"
- roles:
- - ec2_launch_template
diff --git a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/defaults/main.yml b/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/defaults/main.yml
deleted file mode 100644
index 9651b91642..0000000000
--- a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/defaults/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-resource_prefix: ansible-test-default-group
-ec2_ami_image:
- # https://wiki.centos.org/Cloud/AWS collected 2018-01-10
- ap-northeast-1: ami-571e3c30
- ap-northeast-2: ami-97cb19f9
- ap-south-1: ami-11f0837e
- ap-southeast-1: ami-30318f53
- ap-southeast-2: ami-24959b47
- ca-central-1: ami-daeb57be
- eu-central-1: ami-7cbc6e13
- eu-west-1: ami-0d063c6b
- eu-west-2: ami-c22236a6
- sa-east-1: ami-864f2dea
- us-east-1: ami-ae7bfdb8
- us-east-2: ami-9cbf9bf9
- us-west-1: ami-7c280d1c
- us-west-2: ami-0c2aba6c
diff --git a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/files/assume-role-policy.json b/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/files/assume-role-policy.json
deleted file mode 100644
index 72413abdd3..0000000000
--- a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/files/assume-role-policy.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "Version": "2008-10-17",
- "Statement": [
- {
- "Sid": "",
- "Effect": "Allow",
- "Principal": {
- "Service": "ec2.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/meta/main.yml b/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/cpu_options.yml b/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/cpu_options.yml
deleted file mode 100644
index 8d610a2ea7..0000000000
--- a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/cpu_options.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-- block:
- - name: delete a non-existent template
- ec2_launch_template:
- name: "{{ resource_prefix }}-not-a-real-template"
- state: absent
- register: del_fake_lt
- ignore_errors: true
- - assert:
- that:
- - del_fake_lt is not failed
- - name: create c4.large instance with cpu_options
- ec2_launch_template:
- name: "{{ resource_prefix }}-c4large-1-threads-per-core"
- image_id: "{{ ec2_ami_image[aws_region] }}"
- tags:
- TestId: "{{ resource_prefix }}"
- instance_type: c4.large
- cpu_options:
- core_count: 1
- threads_per_core: 1
- register: lt
-
- - name: instance with cpu_options created with the right options
- assert:
- that:
- - lt is success
- - lt is changed
- - "lt.latest_template.launch_template_data.cpu_options.core_count == 1"
- - "lt.latest_template.launch_template_data.cpu_options.threads_per_core == 1"
- always:
- - name: delete the template
- ec2_launch_template:
- name: "{{ resource_prefix }}-c4large-1-threads-per-core"
- state: absent
- register: del_lt
- retries: 10
- until: del_lt is not failed
- ignore_errors: true
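
For context on the assertions above: EC2 reports vCPUs as core_count x threads_per_core, so this template requests 1 x 1 = 1 vCPU on a c4.large that would otherwise expose 2 (1 core, 2 threads). On a type with two physical cores, such as c4.xlarge, disabling hyperthreading while keeping both cores would look like this sketch:

    cpu_options:
      core_count: 2        # both physical cores
      threads_per_core: 1  # one thread per core, i.e. hyperthreading off
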
diff --git a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/iam_instance_role.yml b/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/iam_instance_role.yml
deleted file mode 100644
index 5e9b7f563d..0000000000
--- a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/iam_instance_role.yml
+++ /dev/null
@@ -1,104 +0,0 @@
-- block:
- - name: Create IAM role for test
- iam_role:
- name: "{{ resource_prefix }}-test-policy"
- assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
- state: present
- create_instance_profile: yes
- managed_policy:
- - AmazonS3ReadOnlyAccess
- register: iam_role
-
- - name: Create second IAM role for test
- iam_role:
- name: "{{ resource_prefix }}-test-policy-2"
- assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
- state: present
- create_instance_profile: yes
- managed_policy:
- - AmazonS3ReadOnlyAccess
- register: iam_role_2
-
- - name: Make instance with an instance_role
- ec2_launch_template:
- name: "{{ resource_prefix }}-test-instance-role"
- image_id: "{{ ec2_ami_image[aws_region] }}"
- instance_type: t2.micro
- iam_instance_profile: "{{ resource_prefix }}-test-policy"
- register: template_with_role
-
- - assert:
- that:
- - 'template_with_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
-
- - name: Create template again, with no change to instance_role
- ec2_launch_template:
- name: "{{ resource_prefix }}-test-instance-role"
- image_id: "{{ ec2_ami_image[aws_region] }}"
- instance_type: t2.micro
- iam_instance_profile: "{{ resource_prefix }}-test-policy"
- register: template_with_role
-
- - assert:
- that:
- - 'template_with_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")'
- - 'template_with_role is not changed'
-
- - name: Update instance with new instance_role
- ec2_launch_template:
- name: "{{ resource_prefix }}-test-instance-role"
- image_id: "{{ ec2_ami_image[aws_region] }}"
- instance_type: t2.micro
- iam_instance_profile: "{{ resource_prefix }}-test-policy-2"
- register: template_with_updated_role
-
- - assert:
- that:
- - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
- - 'template_with_role.default_template.version_number < template_with_updated_role.default_template.version_number'
- - 'template_with_updated_role is changed'
- - 'template_with_updated_role is not failed'
-
- - name: Re-set with same new instance_role
- ec2_launch_template:
- name: "{{ resource_prefix }}-test-instance-role"
- image_id: "{{ ec2_ami_image[aws_region] }}"
- instance_type: t2.micro
- iam_instance_profile: "{{ resource_prefix }}-test-policy-2"
- register: template_with_updated_role
-
- - assert:
- that:
- - 'template_with_updated_role is not changed'
- - 'template_with_updated_role.default_template.launch_template_data.iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")'
-
- always:
- - name: delete launch template
- ec2_launch_template:
- name: "{{ resource_prefix }}-test-instance-role"
- state: absent
- register: lt_removed
- until: lt_removed is not failed
- ignore_errors: yes
- retries: 10
- - name: Delete IAM role for test
- iam_role:
- name: "{{ resource_prefix }}-test-policy"
- assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
- state: absent
- create_instance_profile: yes
- register: iam_removed
- until: iam_removed is not failed
- ignore_errors: yes
- retries: 10
- - name: Delete IAM role for test
- iam_role:
- name: "{{ resource_prefix }}-test-policy-2"
- assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}"
- state: absent
- create_instance_profile: yes
- register: iam_2_removed
- until: iam_2_removed is not failed
- ignore_errors: yes
- retries: 10
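
The ARN rewrite used in the assertions above works because a role and the instance profile created alongside it share account and name, differing only in the resource-type segment (account id illustrative):

    arn:aws:iam::123456789012:role/example-test-policy
    arn:aws:iam::123456789012:instance-profile/example-test-policy

so iam_role.arn.replace(":role/", ":instance-profile/") reconstructs the profile ARN without a second API call.
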
diff --git a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/main.yml b/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/main.yml
deleted file mode 100644
index 4976da276e..0000000000
--- a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-# A Note about ec2 environment variable name preference:
-# - EC2_URL -> AWS_URL
-# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
-#   - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
-# - EC2_REGION -> AWS_REGION
-#
-
-# - include: ../../../../../setup_ec2/tasks/common.yml module_name: ec2_instance
-
-- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- - include_tasks: cpu_options.yml
- - include_tasks: iam_instance_role.yml
- - include_tasks: versions.yml
-
- always:
- - debug:
- msg: teardown goes here
diff --git a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/tags_and_vpc_settings.yml b/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/tags_and_vpc_settings.yml
deleted file mode 100644
index 7da7f770af..0000000000
--- a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/tags_and_vpc_settings.yml
+++ /dev/null
@@ -1,208 +0,0 @@
-- block:
- # ============================================================
- # set up VPC
- - name: Create VPC for use in testing
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.99.0.0/16
- tags:
- Name: Ansible ec2_instance Testing VPC
- tenancy: default
- register: testing_vpc
-
- - name: Create default subnet in zone A
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.99.0.0/24
- az: "{{ aws_region }}a"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet-a"
- register: testing_subnet_a
-
- - name: Create secondary subnet in zone B
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.99.1.0/24
- az: "{{ aws_region }}b"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet-b"
- register: testing_subnet_b
-
- - name: create a security group with the vpc
- ec2_group:
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ testing_vpc.vpc.id }}"
- rules:
- - proto: tcp
- ports: [22, 80]
- cidr_ip: 0.0.0.0/0
- register: sg
-  # TODO: switch these tests from ec2_instance to ec2_launch_template
- - assert:
- that:
- - 1 == 0
- # ============================================================
- # start subnet/sg testing
- - name: Make instance in the testing subnet created in the test VPC
- ec2_instance:
- name: "{{ resource_prefix }}-test-basic-vpc-create"
- image_id: "{{ ec2_ami_image[aws_region] }}"
- user_data: |
- #cloud-config
- package_upgrade: true
- package_update: true
- tags:
- TestId: "{{ resource_prefix }}"
- Something: else
- security_groups: "{{ sg.group_id }}"
- network:
- source_dest_check: false
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- instance_type: t2.micro
- volumes:
- - device_name: /dev/sda1
- ebs:
- delete_on_termination: true
- register: in_test_vpc
-
- - name: Try to re-make the instance, hopefully this shows changed=False
- ec2_instance:
- name: "{{ resource_prefix }}-test-basic-vpc-create"
- image_id: "{{ ec2_ami_image[aws_region] }}"
- user_data: |
- #cloud-config
- package_upgrade: true
- package_update: true
- tags:
- TestId: "{{ resource_prefix }}"
- Something: else
- security_groups: "{{ sg.group_id }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- instance_type: t2.micro
- register: remake_in_test_vpc
- - name: "Remaking the same instance resulted in no changes"
- assert:
- that: not remake_in_test_vpc.changed
- - name: check that instance IDs match anyway
- assert:
- that: 'remake_in_test_vpc.instance_ids[0] == in_test_vpc.instance_ids[0]'
- - name: check that source_dest_check was set to false
- assert:
- that: 'not remake_in_test_vpc.instances[0].source_dest_check'
-
- - name: Alter it by adding tags
- ec2_instance:
- name: "{{ resource_prefix }}-test-basic-vpc-create"
- image_id: "{{ ec2_ami_image[aws_region] }}"
- tags:
- TestId: "{{ resource_prefix }}"
- Another: thing
- security_groups: "{{ sg.group_id }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- instance_type: t2.micro
- register: add_another_tag
-
- - ec2_instance_info:
- instance_ids: "{{ add_another_tag.instance_ids }}"
- register: check_tags
- - name: "Remaking the same instance resulted in no changes"
- assert:
- that:
- - check_tags.instances[0].tags.Another == 'thing'
- - check_tags.instances[0].tags.Something == 'else'
-
- - name: Purge a tag
- ec2_instance:
- name: "{{ resource_prefix }}-test-basic-vpc-create"
- image_id: "{{ ec2_ami_image[aws_region] }}"
- purge_tags: true
- tags:
- TestId: "{{ resource_prefix }}"
- Another: thing
- security_groups: "{{ sg.group_id }}"
- vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}"
- instance_type: t2.micro
- - ec2_instance_info:
- instance_ids: "{{ add_another_tag.instance_ids }}"
- register: check_tags
- - name: "Remaking the same instance resulted in no changes"
- assert:
- that:
- - "'Something' not in check_tags.instances[0].tags"
-
- - name: Terminate instance
- ec2_instance:
- filters:
- tag:TestId: "{{ resource_prefix }}"
- state: absent
- register: result
- - assert:
- that: result.changed
-
- - name: Terminate instance
- ec2_instance:
- instance_ids: "{{ in_test_vpc.instance_ids }}"
- state: absent
- register: result
- - assert:
- that: not result.changed
-
- - name: check that subnet-default public IP rule was followed
- assert:
- that:
- - in_test_vpc.instances[0].public_dns_name == ""
-      - in_test_vpc.instances[0].private_ip_address.startswith("10.99.1")
- - in_test_vpc.instances[0].subnet_id == testing_subnet_b.subnet.id
- - name: check that tags were applied
- assert:
- that:
- - in_test_vpc.instances[0].tags.Name.startswith(resource_prefix)
- - in_test_vpc.instances[0].state.name == 'running'
-
- always:
- - name: remove the security group
- ec2_group:
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ testing_vpc.vpc.id }}"
- state: absent
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: remove subnet A
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.99.0.0/24
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: remove subnet B
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.99.1.0/24
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: remove the VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.99.0.0/16
- state: absent
- tags:
- Name: Ansible Testing VPC
- tenancy: default
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
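
The tag steps above exercise both tagging modes of ec2_instance: by default, supplied tags are merged into the existing set (which is why 'Something' survives the update that adds 'Another'), while purge_tags: true makes the supplied set authoritative and removes anything not listed. The contrast in miniature:

    # merge (default): existing tags are preserved
    tags:
      Another: thing

    # replace: any tag not listed here is deleted
    purge_tags: true
    tags:
      Another: thing
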
diff --git a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/versions.yml b/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/versions.yml
deleted file mode 100644
index 9035467a60..0000000000
--- a/test/integration/targets/ec2_launch_template/playbooks/roles/ec2_launch_template/tasks/versions.yml
+++ /dev/null
@@ -1,62 +0,0 @@
-- block:
- - name: create simple instance template
- ec2_launch_template:
- name: "{{ resource_prefix }}-simple"
- image_id: "{{ ec2_ami_image[aws_region] }}"
- tags:
- TestId: "{{ resource_prefix }}"
- instance_type: c4.large
- register: lt
-
-  - name: simple template created with version 1 as default and latest
- assert:
- that:
- - lt is success
- - lt is changed
- - lt.default_version == 1
- - lt.latest_version == 1
-
- - name: update simple instance template
- ec2_launch_template:
- name: "{{ resource_prefix }}-simple"
- default_version: 1
- image_id: "{{ ec2_ami_image[aws_region] }}"
- tags:
- TestId: "{{ resource_prefix }}"
- instance_type: m5.large
- register: lt
-
-  - name: template update keeps the pinned default at 1 and bumps latest to 2
- assert:
- that:
- - lt is success
- - lt is changed
- - lt.default_version == 1
- - lt.latest_version == 2
-
- - name: update simple instance template
- ec2_launch_template:
- name: "{{ resource_prefix }}-simple"
- image_id: "{{ ec2_ami_image[aws_region] }}"
- tags:
- TestId: "{{ resource_prefix }}"
- instance_type: t3.medium
- register: lt
-
-  - name: template update without default_version promotes version 3 to default
- assert:
- that:
- - lt is success
- - lt is changed
- - lt.default_version == 3
- - lt.latest_version == 3
-
- always:
- - name: delete the template
- ec2_launch_template:
- name: "{{ resource_prefix }}-simple"
- state: absent
- register: del_lt
- retries: 10
- until: del_lt is not failed
- ignore_errors: true
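
Taken together, these steps pin down the versioning contract: every change to the template data creates a new latest_version; passing default_version: 1 keeps the default pinned while latest advances to 2; omitting default_version on the next change promotes the new version to both default and latest (3 and 3). Re-pointing the default afterwards should then only need the version field, e.g. (a sketch, assuming the module accepts default_version without other data changes):

    - ec2_launch_template:
        name: "{{ resource_prefix }}-simple"
        default_version: 2
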
diff --git a/test/integration/targets/ec2_launch_template/playbooks/version_fail.yml b/test/integration/targets/ec2_launch_template/playbooks/version_fail.yml
deleted file mode 100644
index 02b87f4a29..0000000000
--- a/test/integration/targets/ec2_launch_template/playbooks/version_fail.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-- hosts: localhost
- connection: local
- environment: "{{ ansible_test.environment }}"
- vars:
- resource_prefix: 'ansible-testing'
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- tasks:
- - block:
-    - name: Include vars file in roles/ec2_launch_template/defaults/main.yml
- include_vars:
- file: 'roles/ec2_launch_template/defaults/main.yml'
-
- - name: create c4.large template (failure expected)
- ec2_launch_template:
- state: present
- name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-tpl"
- instance_type: c4.large
- register: ec2_lt
- ignore_errors: yes
-
-    - name: check that a graceful error message is returned when creating a template with an old boto3
- assert:
- that:
- - ec2_lt is failed
- - 'ec2_lt.msg == "ec2_launch_template requires boto3 >= 1.6.0"'
- always:
- - name: delete the c4.large template just in case it was created
- ec2_launch_template:
- state: absent
- name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-tpl"
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_launch_template/runme.sh b/test/integration/targets/ec2_launch_template/runme.sh
deleted file mode 100755
index 6247904467..0000000000
--- a/test/integration/targets/ec2_launch_template/runme.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-export ANSIBLE_ROLES_PATH=../
-
-# Test graceful failure for older versions of botocore
-source virtualenv.sh
-pip install 'boto3<1.6.0'
-ansible-playbook -i ../../inventory -v playbooks/version_fail.yml "$@"
-
-# Run full test suite
-source virtualenv.sh
-pip install 'boto3>1.6.0'
-ansible-playbook -i ../../inventory -v playbooks/full_test.yml "$@"
diff --git a/test/integration/targets/ec2_metric_alarm/aliases b/test/integration/targets/ec2_metric_alarm/aliases
deleted file mode 100644
index 72a9fb4f57..0000000000
--- a/test/integration/targets/ec2_metric_alarm/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group4
diff --git a/test/integration/targets/ec2_metric_alarm/defaults/main.yml b/test/integration/targets/ec2_metric_alarm/defaults/main.yml
deleted file mode 100644
index 4d80b5d6e0..0000000000
--- a/test/integration/targets/ec2_metric_alarm/defaults/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# defaults file for ec2_metric_alarm
-ec2_instance_name: '{{ resource_prefix }}-node'
-ec2_instance_owner: 'integration-run-{{ resource_prefix }}'
-ec2_ami_name: "amzn-ami-hvm*"
-alarm_prefix: "ansible-test"
diff --git a/test/integration/targets/ec2_metric_alarm/meta/main.yml b/test/integration/targets/ec2_metric_alarm/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_metric_alarm/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_metric_alarm/tasks/env_cleanup.yml b/test/integration/targets/ec2_metric_alarm/tasks/env_cleanup.yml
deleted file mode 100644
index e90ddc6450..0000000000
--- a/test/integration/targets/ec2_metric_alarm/tasks/env_cleanup.yml
+++ /dev/null
@@ -1,94 +0,0 @@
-- name: remove any instances in the test VPC
- ec2_instance:
- filters:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- state: absent
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
-- name: remove ENIs
- ec2_eni_info:
- filters:
- vpc-id: "{{ testing_vpc.vpc.id }}"
- register: enis
-
-- name: delete all ENIs
- ec2_eni:
- eni_id: "{{ item.id }}"
-    state: absent
-  register: removed
-  until: removed is not failed
- with_items: "{{ enis.network_interfaces }}"
- ignore_errors: yes
- retries: 10
-
-- name: remove the security group
- ec2_group:
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ testing_vpc.vpc.id }}"
- state: absent
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
-- name: remove routing rules
- ec2_vpc_route_table:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- tags:
- created: "{{ resource_prefix }}-route"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- subnets:
- - "{{ testing_subnet_a.subnet.id }}"
- - "{{ testing_subnet_b.subnet.id }}"
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
-- name: remove internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- state: absent
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
-- name: remove subnet A
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.32.0/24
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
-- name: remove subnet B
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.33.0/24
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
-- name: remove the VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.22.32.0/23
- state: absent
- tags:
- Name: Ansible Testing VPC
- tenancy: default
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
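
Every task in this cleanup file repeats the same defensive triple (register the result, retry with until, and ignore_errors) because EC2 deletions are eventually consistent: dependency-violation errors are expected while instances shut down, and should be retried rather than fail teardown. The reusable shape, shown with one of the modules above:

    - name: remove a dependent resource, retrying until EC2 lets go of it
      ec2_vpc_subnet:
        state: absent
        vpc_id: "{{ testing_vpc.vpc.id }}"
        cidr: 10.22.32.0/24
      register: removed
      until: removed is not failed
      ignore_errors: yes
      retries: 10
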
diff --git a/test/integration/targets/ec2_metric_alarm/tasks/env_setup.yml b/test/integration/targets/ec2_metric_alarm/tasks/env_setup.yml
deleted file mode 100644
index 80b49dbcf7..0000000000
--- a/test/integration/targets/ec2_metric_alarm/tasks/env_setup.yml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: Create VPC for use in testing
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.22.32.0/23
- tags:
- Name: Ansible ec2_instance Testing VPC
- tenancy: default
- register: testing_vpc
-
-- name: Create internet gateway for use in testing
- ec2_vpc_igw:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- state: present
- register: igw
-
-- name: Create default subnet in zone A
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.32.0/24
- az: "{{ aws_region }}a"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet-a"
- register: testing_subnet_a
-
-- name: Create secondary subnet in zone B
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.33.0/24
- az: "{{ aws_region }}b"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet-b"
- register: testing_subnet_b
-
-- name: create routing rules
- ec2_vpc_route_table:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- tags:
- created: "{{ resource_prefix }}-route"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- subnets:
- - "{{ testing_subnet_a.subnet.id }}"
- - "{{ testing_subnet_b.subnet.id }}"
-
-- name: create a security group with the vpc
- ec2_group:
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ testing_vpc.vpc.id }}"
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- register: sg
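
The security group above spells out one from_port/to_port pair per rule; the ports shorthand used in the launch-template VPC setup earlier in this diff is the equivalent compact form:

    rules:
      - proto: tcp
        ports: [22, 80]      # expands to one ingress rule per listed port
        cidr_ip: 0.0.0.0/0
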
diff --git a/test/integration/targets/ec2_metric_alarm/tasks/main.yml b/test/integration/targets/ec2_metric_alarm/tasks/main.yml
deleted file mode 100644
index f3f645cb2a..0000000000
--- a/test/integration/targets/ec2_metric_alarm/tasks/main.yml
+++ /dev/null
@@ -1,228 +0,0 @@
-- name: run ec2_metric_alarm tests
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- - set_fact:
- alarm_full_name: "{{ alarm_prefix }}-{{ resource_prefix }}-cpu-low"
-
- # until there's a module to get info about alarms, awscli is needed
- - name: install awscli
- pip:
- state: present
- name: awscli
-
- - name: set up environment for testing.
- include_tasks: env_setup.yml
-
- - name: get info on alarms
- command: aws cloudwatch describe-alarms --alarm-names {{ alarm_full_name }}
- environment:
- AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
- AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
- AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
- AWS_DEFAULT_REGION: "{{ aws_region }}"
- register: alarm_info_query
-
- - name: Find AMI to use
- ec2_ami_info:
- owners: 'amazon'
- filters:
- name: '{{ ec2_ami_name }}'
- register: ec2_amis
- - set_fact:
- ec2_ami_image: '{{ ec2_amis.images[0].image_id }}'
-
- - name: Make instance in a default subnet of the VPC
- ec2_instance:
- name: "{{ resource_prefix }}-test-default-vpc"
- image_id: "{{ec2_ami_image }}"
- tags:
- TestId: "{{ resource_prefix }}"
- security_groups: "{{ sg.group_id }}"
- instance_type: t2.micro
- wait: true
- register: ec2_instance_results
-
- - name: create ec2 metric alarm on ec2 instance
- ec2_metric_alarm:
- dimensions:
- InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}"
- state: present
- name: "{{ alarm_full_name }}"
- metric: "CPUUtilization"
- namespace: "AWS/EC2"
- treat_missing_data: missing
- statistic: Average
- comparison: "<="
- threshold: 5.0
- period: 300
- evaluation_periods: 3
- unit: "Percent"
- description: "This will alarm when an instance's cpu usage average is lower than 5% for 15 minutes "
- register: ec2_instance_metric_alarm
-
- - name: get info on alarms
- command: aws cloudwatch describe-alarms --alarm-names {{ alarm_full_name }}
- environment:
- AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
- AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
- AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
- AWS_DEFAULT_REGION: "{{ aws_region }}"
- register: alarm_info_query
-
- - name: convert it to an object
- set_fact:
- alarm_info: "{{ alarm_info_query.stdout |from_json }}"
-
- - name: "verify that an alarm was created"
- assert:
- that:
- - 'ec2_instance_metric_alarm.changed'
- - 'ec2_instance_metric_alarm.alarm_arn'
- - 'ec2_instance_metric_alarm.statistic == alarm_info["MetricAlarms"][0].Statistic'
- - 'ec2_instance_metric_alarm.name == alarm_info["MetricAlarms"][0].AlarmName'
-        - 'ec2_instance_metric_alarm.metric == alarm_info["MetricAlarms"][0].MetricName'
-        - 'ec2_instance_metric_alarm.namespace == alarm_info["MetricAlarms"][0].Namespace'
-        - 'ec2_instance_metric_alarm.comparison == alarm_info["MetricAlarms"][0].ComparisonOperator'
- - 'ec2_instance_metric_alarm.threshold == alarm_info["MetricAlarms"][0].Threshold'
- - 'ec2_instance_metric_alarm.period == alarm_info["MetricAlarms"][0].Period'
- - 'ec2_instance_metric_alarm.unit == alarm_info["MetricAlarms"][0].Unit'
- - 'ec2_instance_metric_alarm.evaluation_periods == alarm_info["MetricAlarms"][0].EvaluationPeriods'
- - 'ec2_instance_metric_alarm.description == alarm_info["MetricAlarms"][0].AlarmDescription'
- - 'ec2_instance_metric_alarm.treat_missing_data == alarm_info["MetricAlarms"][0].TreatMissingData'
-
- - name: create ec2 metric alarm on ec2 instance (idempotent)
- ec2_metric_alarm:
- dimensions:
- InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}"
- state: present
- name: "{{ alarm_full_name }}"
- metric: "CPUUtilization"
- namespace: "AWS/EC2"
- treat_missing_data: missing
- statistic: Average
- comparison: "<="
- threshold: 5.0
- period: 300
- evaluation_periods: 3
- unit: "Percent"
- description: "This will alarm when an instance's cpu usage average is lower than 5% for 15 minutes "
- register: ec2_instance_metric_alarm_idempotent
-
- - name: get info on alarms
- command: aws cloudwatch describe-alarms --alarm-names {{ alarm_full_name }}
- environment:
- AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
- AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
- AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
- AWS_DEFAULT_REGION: "{{ aws_region }}"
- register: alarm_info_query_idempotent
-
- - name: convert it to an object
- set_fact:
- alarm_info_idempotent: "{{ alarm_info_query_idempotent.stdout |from_json }}"
-
- - name: "Verify alarm does not register as changed after update"
- assert:
- that:
- - not ec2_instance_metric_alarm_idempotent.changed
-
- - name: "Verify alarm did not change after updating"
- assert:
- that:
- - "alarm_info['MetricAlarms'][0]['{{item}}'] == alarm_info_idempotent['MetricAlarms'][0]['{{ item }}']"
- with_items:
- - AlarmArn
- - Statistic
- - AlarmName
- - MetricName
- - Namespace
- - ComparisonOperator
- - Threshold
- - Period
- - Unit
- - EvaluationPeriods
- - AlarmDescription
- - TreatMissingData
-
- - name: update alarm
- ec2_metric_alarm:
- dimensions:
- InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}"
- state: present
- name: "{{ alarm_full_name }}"
- metric: "CPUUtilization"
- namespace: "AWS/EC2"
- statistic: Average
- comparison: "<="
- threshold: 5.0
- period: 60
- evaluation_periods: 3
- unit: "Percent"
- description: "This will alarm when an instance's cpu usage average is lower than 5% for 3 minutes "
- register: ec2_instance_metric_alarm_update
-
- - name: "verify that alarm registers as updated"
- assert:
- that:
-        - 'ec2_instance_metric_alarm_update.changed'
-
- - name: "verify that properties were changed"
- assert:
- that:
- - 'ec2_instance_metric_alarm_update.changed'
- - 'ec2_instance_metric_alarm_update.period == 60' #Period should be 60, not matching old value
- - 'ec2_instance_metric_alarm_update.alarm_arn == ec2_instance_metric_alarm.alarm_arn'
-        - 'ec2_instance_metric_alarm_update.statistic == alarm_info["MetricAlarms"][0].Statistic'
-        - 'ec2_instance_metric_alarm_update.name == alarm_info["MetricAlarms"][0].AlarmName'
-        - 'ec2_instance_metric_alarm_update.metric == alarm_info["MetricAlarms"][0].MetricName'
-        - 'ec2_instance_metric_alarm_update.namespace == alarm_info["MetricAlarms"][0].Namespace'
- - 'ec2_instance_metric_alarm_update.comparison == alarm_info["MetricAlarms"][0].ComparisonOperator'
- - 'ec2_instance_metric_alarm_update.threshold == alarm_info["MetricAlarms"][0].Threshold'
- - 'ec2_instance_metric_alarm_update.unit == alarm_info["MetricAlarms"][0].Unit'
- - 'ec2_instance_metric_alarm_update.evaluation_periods == alarm_info["MetricAlarms"][0].EvaluationPeriods'
- - 'ec2_instance_metric_alarm_update.treat_missing_data == alarm_info["MetricAlarms"][0].TreatMissingData'
-
- - name: try to remove the alarm
- ec2_metric_alarm:
- state: absent
- name: "{{ alarm_full_name }}"
-
- register: ec2_instance_metric_alarm_deletion
-
- - name: Verify that the alarm reports deleted/changed
- assert:
- that:
- - 'ec2_instance_metric_alarm_deletion.changed'
-
- - name: get info on alarms
- command: aws cloudwatch describe-alarms --alarm-names {{ alarm_full_name }}
- environment:
- AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
- AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
- AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
- AWS_DEFAULT_REGION: "{{ aws_region }}"
- register: alarm_info_query
-
- - name: convert it to an object
- set_fact:
- alarm_info: "{{ alarm_info_query.stdout |from_json }}"
-
- - name: Verify that the alarm was deleted using cli
- assert:
- that:
- - 'alarm_info["MetricAlarms"]|length == 0'
- always:
- - name: try to stop the ec2 instance
- ec2_instance:
- instance_ids: "{{ ec2_instance_results.instances[0].instance_id }}"
- state: terminated
- ignore_errors: yes
-
- - include_tasks: env_cleanup.yml
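
One consistency rule worth noting in the alarm tasks above: the evaluation window is period x evaluation_periods, so the created alarm watches 300 s x 3 = 900 s (the "15 minutes" in its description) and the updated one 60 s x 3 = 180 s ("3 minutes"). When tuning either field, the human-readable description has to be kept in step, e.g.:

    period: 120
    evaluation_periods: 3
    description: "alarm when average CPU is <= 5% for 6 minutes"  # 120 s x 3
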
diff --git a/test/integration/targets/ec2_metric_alarm/vars/main.yml b/test/integration/targets/ec2_metric_alarm/vars/main.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/test/integration/targets/ec2_metric_alarm/vars/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/test/integration/targets/ec2_transit_gateway/aliases b/test/integration/targets/ec2_transit_gateway/aliases
deleted file mode 100644
index ce6c077119..0000000000
--- a/test/integration/targets/ec2_transit_gateway/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-shippable/aws/group2
-ec2_transit_gateway_info
diff --git a/test/integration/targets/ec2_transit_gateway/tasks/main.yml b/test/integration/targets/ec2_transit_gateway/tasks/main.yml
deleted file mode 100644
index b70db39302..0000000000
--- a/test/integration/targets/ec2_transit_gateway/tasks/main.yml
+++ /dev/null
@@ -1,175 +0,0 @@
----
-# tasks file for test_ec2_transit_gateway
-
-- name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
-- name: generate unique value for testing
- set_fact:
- tgw_description: "{{ resource_prefix }}-tgw"
-
-- block:
- - name: test create transit gateway without permissions
- ec2_transit_gateway:
- description: "{{ tgw_description }}"
- region: "{{ aws_region }}"
- register: result
- ignore_errors: yes
-
- - name: assert nice message returned
- assert:
- that:
- - result is failed
- - "result.msg != 'MODULE FAILURE'"
-
- - name: test create transit gateway without region
- ec2_transit_gateway:
- description: "{{ tgw_description }}"
- register: result
- ignore_errors: yes
-
- - name: assert failure when called with minimal parameters but no region
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("The ec2_transit_gateway module requires a region")'
-
- - name: test create transit gateway without tags
- ec2_transit_gateway:
- description: "{{ tgw_description }}"
- <<: *aws_connection_info
- register: create_result
- - name: assert changed is True
- assert:
- that:
- - create_result.changed == True
-
- - name: test update transit gateway with tags by description
- ec2_transit_gateway:
- description: "{{ tgw_description }}"
- tags:
- Name: Ansible Test TGW
- <<: *aws_connection_info
- register: result
- - name: assert changed is True
- assert:
- that:
- - result.changed == True
- - result.transit_gateway.tags | length == 1
- - "'Name' in result.transit_gateway.tags"
-
- - name: test update transit gateway with new tag and purge_tags false
- ec2_transit_gateway:
- transit_gateway_id: '{{ create_result.transit_gateway.transit_gateway_id }}'
- purge_tags: False
- tags:
- status: ok to delete
- <<: *aws_connection_info
- register: result
- - name: assert changed is True and have 2 tags
- assert:
- that:
- - result.changed == True
- - result.transit_gateway.tags | length == 2
- - "'Name' in result.transit_gateway.tags"
-
- - name: test update transit gateway with purge_tags true
- ec2_transit_gateway:
- transit_gateway_id: '{{ create_result.transit_gateway.transit_gateway_id }}'
- purge_tags: True
- tags:
- status: ok to delete
- <<: *aws_connection_info
- register: result
- - name: assert changed is True and TGW tag is absent
- assert:
- that:
- - result.changed == True
- - result.transit_gateway.tags | length == 1
- - "'Name' not in result.transit_gateway.tags"
-
- - name: test idempotence
- ec2_transit_gateway:
- description: "{{ tgw_description }}"
- purge_tags: True
- tags:
- status: ok to delete
- <<: *aws_connection_info
- register: result
- - name: assert changed is False
- assert:
- that:
- - result.changed == False
-
- # ==== Combine ec2_transit_gateway_info ======================
- - name: test success with no parameters
- ec2_transit_gateway_info:
- <<: *aws_connection_info
- register: result
- - name: assert success with no parameters
- assert:
- that:
- - 'result.changed == false'
- - 'result.transit_gateways != []'
-
- - name: test success with single filter
- ec2_transit_gateway_info:
- filters:
- transit-gateway-id: "{{ create_result.transit_gateway.transit_gateway_id }}"
- <<: *aws_connection_info
- register: result
- - name: assert success with transit_gateway_id filter
- assert:
- that:
- - 'result.changed == false'
- - 'result.transit_gateways != []'
-
- - name: test empty result set for non-existent tgw id via filter
- ec2_transit_gateway_info:
- filters:
- transit-gateway-id: tgw-00000011111111122
- <<: *aws_connection_info
- register: result
-  - name: assert empty result set for non-existent transit gateway id
- assert:
- that:
- - 'result.changed == false'
- - 'result.transit_gateways == []'
-
- - name: test NotFound exception caught and returned empty result set
- ec2_transit_gateway_info:
- transit_gateway_id: tgw-00000011111111122
- <<: *aws_connection_info
- register: result
-  - name: assert NotFound is caught and an empty result set returned
- assert:
- that:
- - 'result.changed == false'
- - 'result.transit_gateways == []'
-
- - name: test success with multiple filters
- ec2_transit_gateway_info:
- filters:
- options.dns-support: enable
- options.vpn-ecmp-support: enable
- <<: *aws_connection_info
- register: result
-  - name: assert success with multiple filters
- assert:
- that:
- - 'result.changed == false'
- - 'result.transit_gateways != []'
- always:
- ###### TEARDOWN STARTS HERE ######
- - name: delete transit gateway
- ec2_transit_gateway:
- description: "{{ tgw_description }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
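
Unlike the module_defaults style used elsewhere in this diff, this file threads credentials through a YAML anchor: set_fact stores them once as &aws_connection_info and every task merges them back in with <<: *aws_connection_info. The two idioms are interchangeable; the module_defaults form drops the per-task merge key:

    - module_defaults:
        group/aws:
          aws_access_key: "{{ aws_access_key }}"
          aws_secret_key: "{{ aws_secret_key }}"
          security_token: "{{ security_token | default(omit) }}"
          region: "{{ aws_region }}"
      block:
        - ec2_transit_gateway:
            description: "{{ tgw_description }}"
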
diff --git a/test/integration/targets/ec2_vpc_egress_igw/aliases b/test/integration/targets/ec2_vpc_egress_igw/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/ec2_vpc_egress_igw/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/ec2_vpc_egress_igw/tasks/main.yml b/test/integration/targets/ec2_vpc_egress_igw/tasks/main.yml
deleted file mode 100644
index ff47baf331..0000000000
--- a/test/integration/targets/ec2_vpc_egress_igw/tasks/main.yml
+++ /dev/null
@@ -1,112 +0,0 @@
----
-- block:
-
- # ============================================================
- - name: test failure with no parameters
- ec2_vpc_egress_igw:
- register: result
- ignore_errors: true
-
- - name: assert failure with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "missing required arguments: vpc_id"'
-
- # ============================================================
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- # ============================================================
- - name: test failure with non-existent VPC ID
- ec2_vpc_egress_igw:
- state: present
- vpc_id: vpc-012345678
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: assert failure with non-existent VPC ID
- assert:
- that:
- - 'result.failed'
- - 'result.error.code == "InvalidVpcID.NotFound"'
- - '"invalid vpc ID" in result.msg'
-
- # ============================================================
- - name: create a VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: present
- cidr_block: "10.232.232.128/26"
- <<: *aws_connection_info
- tags:
- Name: "{{ resource_prefix }}-vpc"
- Description: "Created by ansible-test"
- register: vpc_result
-
- # ============================================================
- - name: create egress-only internet gateway (expected changed=true)
- ec2_vpc_egress_igw:
- state: present
- vpc_id: "{{ vpc_result.vpc.id }}"
- <<: *aws_connection_info
- register: vpc_eigw_create
-
- - name: assert creation happened (expected changed=true)
- assert:
- that:
-      - 'vpc_eigw_create.changed'
- - 'vpc_eigw_create.gateway_id.startswith("eigw-")'
- - 'vpc_eigw_create.vpc_id == vpc_result.vpc.id'
-
- # ============================================================
- - name: attempt to recreate egress-only internet gateway on VPC (expected changed=false)
- ec2_vpc_egress_igw:
- state: present
- vpc_id: "{{ vpc_result.vpc.id }}"
- <<: *aws_connection_info
- register: vpc_eigw_recreate
-
- - name: assert recreation did nothing (expected changed=false)
- assert:
- that:
- - 'vpc_eigw_recreate.changed == False'
- - 'vpc_eigw_recreate.gateway_id == vpc_eigw_create.gateway_id'
- - 'vpc_eigw_recreate.vpc_id == vpc_eigw_create.vpc_id'
-
- # ============================================================
- - name: test state=absent (expected changed=true)
- ec2_vpc_egress_igw:
- state: absent
- vpc_id: "{{ vpc_result.vpc.id }}"
- <<: *aws_connection_info
- register: vpc_eigw_delete
-
- - name: assert state=absent (expected changed=true)
- assert:
- that:
- - 'vpc_eigw_delete.changed'
-
- always:
- # ============================================================
- - name: tidy up EIGW
- ec2_vpc_egress_igw:
- state: absent
- vpc_id: "{{ vpc_result.vpc.id }}"
- <<: *aws_connection_info
- ignore_errors: true
-
- - name: tidy up VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: absent
- cidr_block: "10.232.232.128/26"
- <<: *aws_connection_info
- ignore_errors: true
diff --git a/test/integration/targets/ec2_vpc_igw/aliases b/test/integration/targets/ec2_vpc_igw/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/ec2_vpc_igw/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/ec2_vpc_igw/tasks/main.yml b/test/integration/targets/ec2_vpc_igw/tasks/main.yml
deleted file mode 100644
index 4802a88af5..0000000000
--- a/test/integration/targets/ec2_vpc_igw/tasks/main.yml
+++ /dev/null
@@ -1,84 +0,0 @@
----
-- block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- # ============================================================
- - name: create a VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: present
- cidr_block: "10.232.232.128/26"
- <<: *aws_connection_info
- tags:
- Name: "{{ resource_prefix }}-vpc"
- Description: "Created by ansible-test"
- register: vpc_result
-
- # ============================================================
- - name: create internet gateway (expected changed=true)
- ec2_vpc_igw:
- state: present
- vpc_id: "{{ vpc_result.vpc.id }}"
- <<: *aws_connection_info
- register: vpc_igw_create
-
- - name: assert creation happened (expected changed=true)
- assert:
- that:
-      - 'vpc_igw_create.changed'
- - 'vpc_igw_create.gateway_id.startswith("igw-")'
- - 'vpc_igw_create.vpc_id == vpc_result.vpc.id'
- - '"tags" in vpc_igw_create'
- - '"gateway_id" in vpc_igw_create'
-
- # ============================================================
- - name: attempt to recreate internet gateway on VPC (expected changed=false)
- ec2_vpc_igw:
- state: present
- vpc_id: "{{ vpc_result.vpc.id }}"
- <<: *aws_connection_info
- register: vpc_igw_recreate
-
- - name: assert recreation did nothing (expected changed=false)
- assert:
- that:
- - 'vpc_igw_recreate.changed == False'
- - 'vpc_igw_recreate.gateway_id == vpc_igw_create.gateway_id'
- - 'vpc_igw_recreate.vpc_id == vpc_igw_create.vpc_id'
-
- # ============================================================
- - name: test state=absent (expected changed=true)
- ec2_vpc_igw:
- state: absent
- vpc_id: "{{ vpc_result.vpc.id }}"
- <<: *aws_connection_info
- register: vpc_igw_delete
-
- - name: assert state=absent (expected changed=true)
- assert:
- that:
- - 'vpc_igw_delete.changed'
-
- always:
- # ============================================================
- - name: tidy up IGW
- ec2_vpc_igw:
- state: absent
- vpc_id: "{{ vpc_result.vpc.id }}"
- <<: *aws_connection_info
- ignore_errors: true
-
- - name: tidy up VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: absent
- cidr_block: "10.232.232.128/26"
- <<: *aws_connection_info
- ignore_errors: true
diff --git a/test/integration/targets/ec2_vpc_nacl/aliases b/test/integration/targets/ec2_vpc_nacl/aliases
deleted file mode 100644
index 074f2ab60c..0000000000
--- a/test/integration/targets/ec2_vpc_nacl/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-ec2_vpc_nacl_info
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/ec2_vpc_nacl/meta/main.yml b/test/integration/targets/ec2_vpc_nacl/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_vpc_nacl/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_vpc_nacl/tasks/ingress_and_egress.yml b/test/integration/targets/ec2_vpc_nacl/tasks/ingress_and_egress.yml
deleted file mode 100644
index 4eb6079129..0000000000
--- a/test/integration/targets/ec2_vpc_nacl/tasks/ingress_and_egress.yml
+++ /dev/null
@@ -1,162 +0,0 @@
-# ============================================================
-
-- name: create ingress and egress rules using subnet IDs
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets: "{{ subnet_ids }}"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- state: 'present'
- register: nacl
-
-- name: assert the network acl was created
- assert:
- that:
- - nacl.changed
- - nacl.nacl_id.startswith('acl-')
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl.nacl_id }}"
- register: nacl_facts
-
-- name: assert the nacl has the correct attributes
- assert:
- that:
- - nacl_facts.nacls | length == 1
- - nacl_facts.nacls[0].ingress | length == 3
- - nacl_facts.nacls[0].egress | length == 1
-
-# ============================================================
-
-- name: remove an ingress rule
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets: "{{ subnet_ids }}"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- state: 'present'
- register: nacl
-
-- name: assert the network acl changed
- assert:
- that:
- - nacl.changed
- - nacl.nacl_id.startswith('acl-')
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl.nacl_id }}"
- register: nacl_facts
-
-- name: assert the nacl has the correct attributes
- assert:
- that:
- - nacl_facts.nacls | length == 1
- - nacl_facts.nacls[0].ingress | length == 2
- - nacl_facts.nacls[0].egress | length == 1
-
-# ============================================================
-
-- name: remove the egress rule
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets: "{{ subnet_ids }}"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- egress: []
- state: 'present'
- register: nacl
-
-- name: assert the network acl changed
- assert:
- that:
- - nacl.changed
- - nacl.nacl_id.startswith('acl-')
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl.nacl_id }}"
- register: nacl_facts
-
-- name: assert the nacl has the correct attributes
- assert:
- that:
- - nacl_facts.nacls | length == 1
- - nacl_facts.nacls[0].ingress | length == 2
- - nacl_facts.nacls[0].egress | length == 0
-
-# ============================================================
-
-- name: add egress rules
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets: "{{ subnet_ids }}"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- egress:
- - [100, 'tcp', 'allow', '10.0.0.0/24', null, null, 22, 22]
- - [200, 'udp', 'allow', '10.0.0.0/24', null, null, 22, 22]
- state: 'present'
- register: nacl
-
-- name: assert the network acl changed
- assert:
- that:
- - nacl.changed
- - nacl.nacl_id.startswith('acl-')
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl.nacl_id }}"
- register: nacl_facts
-
-- name: assert the nacl has the correct attributes
- assert:
- that:
- - nacl_facts.nacls | length == 1
- - nacl_facts.nacls[0].ingress | length == 2
- - nacl_facts.nacls[0].egress | length == 2
-
-# ============================================================
-
-- name: remove the network ACL
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- state: absent
- register: nacl
- until: nacl is success
- ignore_errors: yes
- retries: 5
- delay: 5
-
-- name: assert nacl was removed
- assert:
- that:
- - nacl.changed
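
The bare lists in the ingress and egress options above are positional: each rule reads [rule_number, protocol, allow/deny, cidr_block, icmp_type, icmp_code, port_from, port_to], with null filling the slots a protocol does not use. The same rules, annotated:

    ingress:
      #  num  proto  action   cidr         icmp_t icmp_c from to
      - [100, 'tcp', 'allow', '0.0.0.0/0', null,  null,  22,  22]
      - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]   # only the icmp slots apply
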
diff --git a/test/integration/targets/ec2_vpc_nacl/tasks/ipv6.yml b/test/integration/targets/ec2_vpc_nacl/tasks/ipv6.yml
deleted file mode 100644
index 16b3a5aaaf..0000000000
--- a/test/integration/targets/ec2_vpc_nacl/tasks/ipv6.yml
+++ /dev/null
@@ -1,178 +0,0 @@
-- block:
- - name: create a VPC
- ec2_vpc_net:
- cidr_block: 10.230.231.0/24
- name: "{{ resource_prefix }}-ipv6"
- state: present
- ipv6_cidr: yes
- register: vpc_result
-
- - set_fact:
- vpc_ipv6_cidr: "{{ vpc_result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block }}"
-
- # ============================================================
- - name: create subnet with IPv6 (expected changed=true)
- ec2_vpc_subnet:
- cidr: 10.230.231.0/26
- vpc_id: "{{ vpc_result.vpc.id }}"
- ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}"
- state: present
- tags:
- Name: "{{ resource_prefix }}-ipv6-subnet-1"
- register: vpc_subnet_ipv6
-
- - name: assert creation with IPv6 happened (expected changed=true)
- assert:
- that:
- - "vpc_subnet_ipv6.subnet.ipv6_cidr_block == '{{ vpc_ipv6_cidr | regex_replace('::/56', '::/64') }}'"
-
- # ============================================================
-
- - name: create ingress and egress rules using subnet names
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_result.vpc.id }}"
- name: "{{ resource_prefix }}-acl"
- subnets:
- - "{{ resource_prefix }}-ipv6-subnet-1"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- state: 'present'
- register: nacl
- - assert:
- that:
- - nacl.nacl_id
-
- - set_fact:
- nacl_id: "{{ nacl.nacl_id }}"
-
- - name: add ipv6 entries
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_result.vpc.id }}"
- name: "{{ resource_prefix }}-acl"
- subnets:
- - "{{ resource_prefix }}-ipv6-subnet-1"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [205, 'ipv6-tcp', 'allow', '::/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- - [305, 'ipv6-icmp', 'allow', '::/0', 0, 8]
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- - [105, 'all', 'allow', '::/0', null, null, null, null]
- state: 'present'
- register: nacl
- # FIXME: Currently IPv6 rules are not supported - uncomment assertion when
- # fixed (and add some nacl_info tests)
- ignore_errors: yes
- - name: get network ACL facts (test that it works with ipv6 entries)
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl_id }}"
- register: nacl_facts
-
-
- #- assert:
- # that:
- # - nacl.changed
- # - nacl.nacl_id == nacl_id
-
- - name: purge ingress entries
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_result.vpc.id }}"
- name: "{{ resource_prefix }}-acl"
- subnets:
- - "{{ resource_prefix }}-ipv6-subnet-1"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress: []
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- - [105, 'all', 'allow', '::/0', null, null, null, null]
- state: 'present'
- register: nacl
- # FIXME: Currently IPv6 rules are not supported - uncomment assertion when
- # fixed (and add some nacl_info tests)
- ignore_errors: yes
-
- #- assert:
- # that:
- # - nacl.changed
- # - nacl.nacl_id == nacl_id
-
- - name: purge egress entries
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_result.vpc.id }}"
- name: "{{ resource_prefix }}-acl"
- subnets:
- - "{{ resource_prefix }}-ipv6-subnet-1"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress: []
- egress: []
- state: 'present'
- register: nacl
-
- - assert:
- that:
- - nacl.changed
-
- # ============================================================
- - name: remove subnet ipv6 cidr (expected changed=true)
- ec2_vpc_subnet:
- cidr: 10.230.231.0/26
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: absent
- register: vpc_remove_ipv6_cidr
-
- - name: assert subnet ipv6 cidr removed (expected changed=true)
- assert:
- that:
- - 'vpc_remove_ipv6_cidr.changed'
-
- always:
-
- ################################################
- # TEARDOWN STARTS HERE
- ################################################
-
- - name: remove network ACL
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_result.vpc.id }}"
- name: "{{ resource_prefix }}-acl"
- state: absent
- register: removed_acl
- until: removed_acl is success
- retries: 5
- delay: 5
- ignore_errors: yes
-
- - name: tidy up subnet
- ec2_vpc_subnet:
- cidr: 10.230.231.0/26
- vpc_id: "{{ vpc_result.vpc.id }}"
- state: absent
- register: removed_subnet
- until: removed_subnet is success
- retries: 5
- delay: 5
- ignore_errors: yes
-
- - name: tidy up VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-ipv6"
- state: absent
- cidr_block: 10.230.231.0/24
- register: removed_vpc
- until: removed_vpc is success
- retries: 5
- delay: 5
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_vpc_nacl/tasks/main.yml b/test/integration/targets/ec2_vpc_nacl/tasks/main.yml
deleted file mode 100644
index ad72530e29..0000000000
--- a/test/integration/targets/ec2_vpc_nacl/tasks/main.yml
+++ /dev/null
@@ -1,170 +0,0 @@
----
-- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
-
- # ============================================================
-
- - name: test without any parameters
- ec2_vpc_nacl:
- register: result
- ignore_errors: yes
-
- - name: assert required parameters
- assert:
- that:
- - result.failed
- - "result.msg == 'one of the following is required: name, nacl_id'"
-
- - name: get network ACL info without any parameters
- ec2_vpc_nacl_info:
- register: nacl_facts
-
- - name: assert we don't error
- assert:
- that:
- - nacl_facts is succeeded
-
- - name: get network ACL info with invalid ID
- ec2_vpc_nacl_info:
- nacl_ids:
- - 'acl-000000000000'
- register: nacl_facts
- ignore_errors: yes
-
- - name: assert message mentions missing ACLs
- assert:
- that:
- - nacl_facts is failed
- - '"does not exist" in nacl_facts.msg'
-
- # ============================================================
-
- - name: fetch AZ availability
- aws_az_info:
- register: az_info
-
- - name: Assert that we have multiple AZs available to us
- assert:
- that: az_info.availability_zones | length >= 2
-
- - name: pick AZs
- set_fact:
- az_one: '{{ az_info.availability_zones[0].zone_name }}'
- az_two: '{{ az_info.availability_zones[1].zone_name }}'
-
- # ============================================================
-
- - name: create a VPC
- ec2_vpc_net:
- cidr_block: 10.230.230.0/24
- name: "{{ resource_prefix }}"
- state: present
- register: vpc
-
- - name: create subnets
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- az: "{{ item.az }}"
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- tags:
- Name: "{{ item.name }}"
- with_items:
- - cidr: 10.230.230.0/26
- az: "{{ az_one }}"
- name: "{{ resource_prefix }}-subnet-1"
- - cidr: 10.230.230.64/26
- az: "{{ az_two }}"
- name: "{{ resource_prefix }}-subnet-2"
- - cidr: 10.230.230.128/26
- az: "{{ az_one }}"
- name: "{{ resource_prefix }}-subnet-3"
- - cidr: 10.230.230.192/26
- az: "{{ az_two }}"
- name: "{{ resource_prefix }}-subnet-4"
- register: subnets
-
- # ============================================================
-
- - include_tasks: tasks/subnet_ids.yml
- vars:
- vpc_id: "{{ vpc.vpc.id }}"
- subnet_ids: "{{ subnets | json_query('results[*].subnet.id') }}"
-
- - include_tasks: tasks/subnet_names.yml
- vars:
- vpc_id: "{{ vpc.vpc.id }}"
- subnet_names: "{{ subnets | json_query('results[*].subnet.tags.Name') }}"
-
- - include_tasks: tasks/tags.yml
- vars:
- vpc_id: "{{ vpc.vpc.id }}"
- subnet_ids: "{{ subnets | json_query('results[*].subnet.id') }}"
-
- - include_tasks: tasks/ingress_and_egress.yml
- vars:
- vpc_id: "{{ vpc.vpc.id }}"
- subnet_ids: "{{ subnets | json_query('results[*].subnet.id') }}"
-
- - include_tasks: tasks/ipv6.yml
-
- # ============================================================
-
- always:
-
- - name: remove network ACL
- ec2_vpc_nacl:
- vpc_id: "{{ vpc.vpc.id }}"
- name: "{{ resource_prefix }}-acl"
- state: absent
- register: removed_acl
- until: removed_acl is success
- retries: 5
- delay: 5
- ignore_errors: yes
-
- - name: remove subnets
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- az: "{{ aws_region }}{{ item.az }}"
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- tags:
- Public: "{{ item.public | string }}"
- Name: "{{ item.public | ternary('public', 'private') }}-{{ item.az }}"
- with_items:
- - cidr: 10.230.230.0/26
- az: "a"
- public: "True"
- - cidr: 10.230.230.64/26
- az: "b"
- public: "True"
- - cidr: 10.230.230.128/26
- az: "a"
- public: "False"
- - cidr: 10.230.230.192/26
- az: "b"
- public: "False"
- ignore_errors: yes
- register: removed_subnets
- until: removed_subnets is success
- retries: 5
- delay: 5
-
- - name: remove the VPC
- ec2_vpc_net:
- cidr_block: 10.230.230.0/24
- name: "{{ resource_prefix }}"
- state: absent
- ignore_errors: yes
- register: removed_vpc
- until: removed_vpc is success
- retries: 5
- delay: 5
-
- # ============================================================
diff --git a/test/integration/targets/ec2_vpc_nacl/tasks/subnet_ids.yml b/test/integration/targets/ec2_vpc_nacl/tasks/subnet_ids.yml
deleted file mode 100644
index de371d629a..0000000000
--- a/test/integration/targets/ec2_vpc_nacl/tasks/subnet_ids.yml
+++ /dev/null
@@ -1,174 +0,0 @@
-# ============================================================
-
-- name: create ingress and egress rules using subnet IDs
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets: "{{ subnet_ids }}"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- state: 'present'
- register: nacl
-
-- set_fact:
- nacl_id: "{{ nacl.nacl_id }}"
-
-- name: assert the network acl was created
- assert:
- that:
- - nacl.changed
- - nacl.nacl_id.startswith('acl-')
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl_id }}"
- register: nacl_facts
-
-- name: assert the nacl has the correct attributes
- assert:
- that:
- - nacl_facts.nacls | length == 1
- - nacl_facts.nacls[0].nacl_id == nacl_id
- - nacl_facts.nacls[0].subnets | length == 4
- - nacl_facts.nacls[0].subnets | sort == subnet_ids | sort
- - nacl_facts.nacls[0].ingress | length == 3
- - nacl_facts.nacls[0].egress | length == 1
- - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
-
-# ============================================================
-
-- name: test idempotence
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets: "{{ subnet_ids }}"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- state: 'present'
- register: nacl
-
-- name: assert the network acl already existed
- assert:
- that:
- - not nacl.changed
- - nacl.nacl_id == nacl_id
- - nacl.nacl_id.startswith('acl-')
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl.nacl_id }}"
- register: nacl_facts_idem
-
-- name: assert the facts are the same as before
- assert:
- that:
- - nacl_facts_idem == nacl_facts
-
-# ============================================================
-
-- name: remove a subnet from the network ACL
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets:
- - "{{ subnet_ids[0] }}"
- - "{{ subnet_ids[1] }}"
- - "{{ subnet_ids[2] }}"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- state: 'present'
- register: nacl
-
-- name: assert the network ACL changed
- assert:
- that:
- - nacl.changed
- - nacl.nacl_id.startswith('acl-')
- - nacl.nacl_id == nacl_id
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl.nacl_id }}"
- register: nacl_facts
-
-- name: assert the nacl has the correct attributes
- assert:
- that:
- - nacl_facts.nacls | length == 1
- - nacl_facts.nacls[0].nacl_id == nacl_id
- - nacl_facts.nacls[0].subnets | length == 3
- - subnet_ids[3] not in nacl_facts.nacls[0].subnets
- - nacl_facts.nacls[0].ingress | length == 3
- - nacl_facts.nacls[0].egress | length == 1
- - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
-
-# ============================================================
-
-- name: remove the network ACL
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- state: absent
- register: nacl
- until: nacl is success
- ignore_errors: yes
- retries: 5
- delay: 5
-
-- name: assert nacl was removed
- assert:
- that:
- - nacl.changed
-
-- name: re-remove the network ACL by name (test idempotency)
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- state: absent
- register: nacl
- until: nacl is success
- ignore_errors: yes
- retries: 5
- delay: 5
-
-- name: assert re-removal by name changed nothing (idempotency)
- assert:
- that:
- - nacl is not changed
-
-- name: re-remove the network ACL by id (test idempotency)
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- nacl_id: "{{ nacl_id }}"
- state: absent
- register: nacl
- until: nacl is success
- ignore_errors: yes
- retries: 5
- delay: 5
-
-- name: assert re-removal by id changed nothing (idempotency)
- assert:
- that:
- - nacl is not changed
diff --git a/test/integration/targets/ec2_vpc_nacl/tasks/subnet_names.yml b/test/integration/targets/ec2_vpc_nacl/tasks/subnet_names.yml
deleted file mode 100644
index 5a4db04df9..0000000000
--- a/test/integration/targets/ec2_vpc_nacl/tasks/subnet_names.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-# ============================================================
-
-- name: create ingress and egress rules using subnet names
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets: "{{ subnet_names }}"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- state: 'present'
- register: nacl
-
-- set_fact:
- nacl_id: "{{ nacl.nacl_id }}"
-
-- name: assert the network acl was created
- assert:
- that:
- - nacl.changed
- - nacl.nacl_id.startswith('acl-')
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl_id }}"
- register: nacl_facts
-
-- name: assert the nacl has the correct attributes
- assert:
- that:
- - nacl_facts.nacls | length == 1
- - nacl_facts.nacls[0].nacl_id == nacl_id
- - nacl_facts.nacls[0].subnets | length == 4
- - nacl_facts.nacls[0].ingress | length == 3
- - nacl_facts.nacls[0].egress | length == 1
- - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
-
-# ============================================================
-
-- name: test idempotence
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets: "{{ subnet_names }}"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- state: 'present'
- register: nacl
-
-- name: assert the network acl already existed
- assert:
- that:
- - not nacl.changed
- - nacl.nacl_id == nacl_id
- - nacl.nacl_id.startswith('acl-')
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl.nacl_id }}"
- register: nacl_facts_idem
-
-- name: assert the facts are the same as before
- assert:
- that:
- - nacl_facts_idem == nacl_facts
-
-# ============================================================
-
-- name: remove a subnet from the network ACL
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets:
- - "{{ subnet_names[0] }}"
- - "{{ subnet_names[1] }}"
- - "{{ subnet_names[2] }}"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- ingress:
- - [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- - [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- - [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- egress:
- - [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- state: 'present'
- register: nacl
-
-- name: assert the network ACL changed
- assert:
- that:
- - nacl.changed
- - nacl.nacl_id == nacl_id
- - nacl.nacl_id.startswith('acl-')
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl.nacl_id }}"
- register: nacl_facts
-
-- name: assert the nacl has the correct attributes
- assert:
- that:
- - nacl_facts.nacls | length == 1
- - nacl_facts.nacls[0].nacl_id == nacl_id
- - nacl_facts.nacls[0].subnets | length == 3
- - nacl_facts.nacls[0].ingress | length == 3
- - nacl_facts.nacls[0].egress | length == 1
- - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
-
-# ============================================================
-
-- name: remove the network ACL
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- state: absent
- register: nacl
- until: nacl is success
- ignore_errors: yes
- retries: 5
- delay: 5
-
-- name: assert nacl was removed
- assert:
- that:
- - nacl.changed
diff --git a/test/integration/targets/ec2_vpc_nacl/tasks/tags.yml b/test/integration/targets/ec2_vpc_nacl/tasks/tags.yml
deleted file mode 100644
index f7847850a5..0000000000
--- a/test/integration/targets/ec2_vpc_nacl/tasks/tags.yml
+++ /dev/null
@@ -1,117 +0,0 @@
-# ============================================================
-
-- name: create a network ACL using subnet IDs
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets: "{{ subnet_ids }}"
- state: 'present'
- register: nacl
-
-- name: assert the network acl was created
- assert:
- that:
- - nacl.changed
- - nacl.nacl_id.startswith('acl-')
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl.nacl_id }}"
- register: nacl_facts
-
-- name: assert the nacl has the correct attributes
- assert:
- that:
- - nacl_facts.nacls[0].tags | length == 1
- - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
-
-# ============================================================
-
-- name: add a tag
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets: "{{ subnet_ids }}"
- tags:
- Created_by: "Ansible test {{ resource_prefix }}"
- state: 'present'
- register: nacl
-
-- name: assert the network acl changed
- assert:
- that:
- - nacl.changed
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl.nacl_id }}"
- register: nacl_facts
-
-- name: assert the facts are the same as before
- assert:
- that:
- - nacl_facts.nacls[0].tags | length == 2
- - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
- - "'{{ nacl_facts.nacls[0].tags.Created_by }}' == 'Ansible test {{ resource_prefix }}'"
-
-- name: get network ACL facts by filter
- ec2_vpc_nacl_info:
- filters:
- "tag:Created_by": "Ansible test {{ resource_prefix }}"
- register: nacl_facts
-
-- name: assert the facts are the same as before
- assert:
- that:
- - nacl_facts.nacls | length == 1
- - nacl_facts.nacls[0].tags | length == 2
- - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
- - "'{{ nacl_facts.nacls[0].tags.Created_by }}' == 'Ansible test {{ resource_prefix }}'"
-
-# ============================================================
-
-- name: remove a tag
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- subnets: "{{ subnet_ids }}"
- state: 'present'
- register: nacl
-
-- name: assert the network acl was created
- assert:
- that:
- - nacl.changed
- - nacl.nacl_id.startswith('acl-')
-
-- name: get network ACL facts
- ec2_vpc_nacl_info:
- nacl_ids:
- - "{{ nacl.nacl_id }}"
- register: nacl_facts
-
-- name: assert the nacl has the correct attributes
- assert:
- that:
- - nacl_facts.nacls[0].tags | length == 1
- - "'{{ nacl_facts.nacls[0].tags.Name }}' == '{{ resource_prefix }}-acl'"
-
-# ============================================================
-
-- name: remove the network ACL
- ec2_vpc_nacl:
- vpc_id: "{{ vpc_id }}"
- name: "{{ resource_prefix }}-acl"
- state: absent
- register: nacl
- until: nacl is success
- ignore_errors: yes
- retries: 5
- delay: 5
-
-- name: assert nacl was removed
- assert:
- that:
- - nacl.changed
diff --git a/test/integration/targets/ec2_vpc_nat_gateway/aliases b/test/integration/targets/ec2_vpc_nat_gateway/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/ec2_vpc_nat_gateway/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml b/test/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml
deleted file mode 100644
index 7cb7e986e0..0000000000
--- a/test/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml
+++ /dev/null
@@ -1,82 +0,0 @@
-# The tests for this module are incomplete.
-# The tests below were migrated from unit tests.
-# They take advantage of hard-coded results within the module to trigger both changed and unchanged responses.
-# They were migrated to maintain test coverage while removing unit tests that depended on use of TaskQueueManager.
-
-- name: Create new nat gateway with eip allocation-id
- ec2_vpc_nat_gateway:
- subnet_id: subnet-12345678
- allocation_id: eipalloc-12345678
- wait: yes
- region: us-west-2
- register: nat_gateway
- check_mode: yes
-
-- assert:
- that:
- - nat_gateway.changed
-
-- name: Create new nat gateway with eip allocation-id (expected changed=false)
- ec2_vpc_nat_gateway:
- subnet_id: subnet-123456789
- allocation_id: eipalloc-1234567
- wait: yes
- region: us-west-2
- register: nat_gateway
- check_mode: yes
-
-- assert:
- that:
- - not nat_gateway.changed
-
-- name: Create new nat gateway with eip address
- ec2_vpc_nat_gateway:
- subnet_id: subnet-12345678
- eip_address: 55.55.55.55
- wait: yes
- region: us-west-2
- register: nat_gateway
- check_mode: yes
-
-- assert:
- that:
- - nat_gateway.changed
-
-- name: Create new nat gateway with eip address (expected changed=false)
- ec2_vpc_nat_gateway:
- subnet_id: subnet-123456789
- eip_address: 55.55.55.55
- wait: yes
- region: us-west-2
- register: nat_gateway
- check_mode: yes
-
-- assert:
- that:
- - not nat_gateway.changed
-
-- name: Create new nat gateway only if one does not exist already
- ec2_vpc_nat_gateway:
- if_exist_do_not_create: yes
- subnet_id: subnet-123456789
- wait: yes
- region: us-west-2
- register: nat_gateway
- check_mode: yes
-
-- assert:
- that:
- - not nat_gateway.changed
-
-- name: Delete Nat Gateway
- ec2_vpc_nat_gateway:
- nat_gateway_id: nat-123456789
- state: absent
- wait: yes
- region: us-west-2
- register: nat_gateway
- check_mode: yes
-
-- assert:
- that:
- - nat_gateway.changed
diff --git a/test/integration/targets/ec2_vpc_route_table/aliases b/test/integration/targets/ec2_vpc_route_table/aliases
deleted file mode 100644
index e4da78b0eb..0000000000
--- a/test/integration/targets/ec2_vpc_route_table/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/aws
-shippable/aws/group2
-unstable
-ec2_vpc_route_table_info
diff --git a/test/integration/targets/ec2_vpc_route_table/meta/main.yml b/test/integration/targets/ec2_vpc_route_table/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ec2_vpc_route_table/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ec2_vpc_route_table/tasks/main.yml b/test/integration/targets/ec2_vpc_route_table/tasks/main.yml
deleted file mode 100644
index 8dee7c154a..0000000000
--- a/test/integration/targets/ec2_vpc_route_table/tasks/main.yml
+++ /dev/null
@@ -1,757 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create VPC
- ec2_vpc_net:
- cidr_block: 10.228.228.0/22
- name: "{{ resource_prefix }}_vpc"
- state: present
- <<: *aws_connection_info
- register: vpc
-
- - name: create public subnet
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- az: "{{ aws_region }}{{ item.az }}"
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- tags:
- Public: "{{ item.public|string }}"
- Name: "{{ (item.public|bool)|ternary('public', 'private') }}-{{ item.az }}"
- <<: *aws_connection_info
- with_items:
- - cidr: 10.228.228.0/24
- az: "a"
- public: "True"
- - cidr: 10.228.229.0/24
- az: "b"
- public: "True"
- - cidr: 10.228.230.0/24
- az: "a"
- public: "False"
- - cidr: 10.228.231.0/24
- az: "b"
- public: "False"
- register: subnets
-
- - ec2_vpc_subnet_info:
- filters:
- vpc-id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
- register: vpc_subnets
-
- - name: create IGW
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
-
- - name: create NAT GW
- ec2_vpc_nat_gateway:
- if_exist_do_not_create: yes
- wait: yes
- subnet_id: "{{ subnets.results[0].subnet.id }}"
- <<: *aws_connection_info
- register: nat_gateway
-
- - name: CHECK MODE - route table should be created
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- <<: *aws_connection_info
- check_mode: true
- register: check_mode_results
-
- - name: assert that the public route table would be created
- assert:
- that:
- - check_mode_results.changed
-
- - name: create public route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- <<: *aws_connection_info
- register: create_public_table
-
- - name: assert that public route table has an id
- assert:
- that:
- # - create_public_table.changed
- - "create_public_table.route_table.id.startswith('rtb-')"
- - "'Public' in create_public_table.route_table.tags and create_public_table.route_table.tags['Public'] == 'true'"
- - create_public_table.route_table.routes|length == 1
- - create_public_table.route_table.associations|length == 0
-
- - name: CHECK MODE - route table should already exist
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- <<: *aws_connection_info
- check_mode: True
- register: check_mode_results
-
- - name: assert the table already exists
- assert:
- that:
- - not check_mode_results.changed
-
- - name: recreate public route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- <<: *aws_connection_info
- register: recreate_public_route_table
-
- - name: assert that public route table did not change
- assert:
- that:
- - not recreate_public_route_table.changed
-
- - name: CHECK MODE - add route to public route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- <<: *aws_connection_info
- check_mode: True
- register: check_mode_results
-
- - name: assert a route would be added
- assert:
- that:
- - check_mode_results.changed
-
- - name: add a route to public route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- <<: *aws_connection_info
- register: add_routes
-
- - name: assert route table contains new route
- assert:
- that:
- - add_routes.changed
- - add_routes.route_table.routes|length == 2
-
- - name: CHECK MODE - add subnets to public route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `True`].id') }}"
- <<: *aws_connection_info
- check_mode: True
- register: check_mode_results
-
- - name: assert the subnets would be added to the route table
- assert:
- that:
- - check_mode_results.changed
-
- - name: add subnets to public route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `True`].id') }}"
- <<: *aws_connection_info
- register: add_subnets
-
- - name: assert route table contains subnets
- assert:
- that:
- - add_subnets.changed
- - add_subnets.route_table.associations|length == 2
-
- - name: add a route to public route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- <<: *aws_connection_info
- register: add_routes
-
- - name: CHECK MODE - no routes but purge_routes set to false
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- purge_routes: no
- subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `True`].id') }}"
- <<: *aws_connection_info
- check_mode: True
- register: check_mode_results
-
- - name: assert no routes would be removed
- assert:
- that:
- - not check_mode_results.changed
-
- - name: rerun with purge_routes set to false
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- purge_routes: no
- subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `True`].id') }}"
- <<: *aws_connection_info
- register: no_purge_routes
-
- - name: assert route table still has routes
- assert:
- that:
- - not no_purge_routes.changed
- - no_purge_routes.route_table.routes|length == 2
- - no_purge_routes.route_table.associations|length == 2
-
- - name: rerun with purge_subnets set to false
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- purge_subnets: no
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- <<: *aws_connection_info
- register: no_purge_subnets
-
- - name: assert route table still has subnets
- assert:
- that:
- - not no_purge_subnets.changed
- - no_purge_subnets.route_table.routes|length == 2
- - no_purge_subnets.route_table.associations|length == 2
-
- - name: rerun with purge_tags not set (implicitly false)
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- lookup: id
- route_table_id: "{{ create_public_table.route_table.id }}"
- subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `True`].id') }}"
- <<: *aws_connection_info
- register: no_purge_tags
-
- - name: assert route table still has tags
- assert:
- that:
- - not no_purge_tags.changed
- - "'Public' in no_purge_tags.route_table.tags and no_purge_tags.route_table.tags['Public'] == 'true'"
-
- - name: CHECK MODE - purge subnets
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- subnets: []
- tags:
- Public: "true"
- Name: "Public route table"
- <<: *aws_connection_info
- check_mode: True
- register: check_mode_results
-
- - name: assert subnets would be removed
- assert:
- that:
- - check_mode_results.changed
-
- - name: purge subnets
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- subnets: []
- tags:
- Public: "true"
- Name: "Public route table"
- <<: *aws_connection_info
- register: purge_subnets
-
- - name: assert purge subnets worked
- assert:
- that:
- - purge_subnets.changed
- - purge_subnets.route_table.associations|length == 0
- - purge_subnets.route_table.id == create_public_table.route_table.id
-
- - name: CHECK MODE - purge routes
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- <<: *aws_connection_info
- routes: []
- check_mode: True
- register: check_mode_results
-
- - name: assert routes would be removed
- assert:
- that:
- - check_mode_results.changed
-
- - name: add subnets by cidr to public route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `True`].cidr_block') }}"
- lookup: id
- route_table_id: "{{ create_public_table.route_table.id }}"
- <<: *aws_connection_info
- register: add_subnets_cidr
-
- - name: assert route table contains subnets added by cidr
- assert:
- that:
- - add_subnets_cidr.changed
- - add_subnets_cidr.route_table.associations|length == 2
-
- - name: purge subnets added by cidr
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- subnets: []
- lookup: id
- route_table_id: "{{ create_public_table.route_table.id }}"
- <<: *aws_connection_info
- register: purge_subnets_cidr
-
- - name: assert purge subnets added by cidr worked
- assert:
- that:
- - purge_subnets_cidr.changed
- - purge_subnets_cidr.route_table.associations|length == 0
-
- - name: add subnets by name to public route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `True`].tags.Name') }}"
- lookup: id
- route_table_id: "{{ create_public_table.route_table.id }}"
- <<: *aws_connection_info
- register: add_subnets_name
-
- - name: assert route table contains subnets added by name
- assert:
- that:
- - add_subnets_name.changed
- - add_subnets_name.route_table.associations|length == 2
-
- - name: purge subnets added by name
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: igw
- subnets: []
- lookup: id
- route_table_id: "{{ create_public_table.route_table.id }}"
- <<: *aws_connection_info
- register: purge_subnets_name
-
- - name: assert purge subnets added by name worked
- assert:
- that:
- - purge_subnets_name.changed
- - purge_subnets_name.route_table.associations|length == 0
-
- - name: purge routes
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "true"
- Name: "Public route table"
- <<: *aws_connection_info
- routes: []
- register: purge_routes
-
- - name: assert purge routes worked
- assert:
- that:
- - purge_routes.changed
- - purge_routes.route_table.routes|length == 1
- - purge_routes.route_table.id == create_public_table.route_table.id
-
- - name: CHECK MODE - update tags
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- route_table_id: "{{ create_public_table.route_table.id }}"
- lookup: id
- purge_tags: yes
- tags:
- Name: Public route table
- Updated: new_tag
- <<: *aws_connection_info
- check_mode: True
- register: check_mode_results
-
- - name: assert tags would be changed
- assert:
- that:
- - check_mode_results.changed
-
- - name: update tags
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- route_table_id: "{{ create_public_table.route_table.id }}"
- lookup: id
- purge_tags: yes
- tags:
- Name: Public route table
- Updated: new_tag
- <<: *aws_connection_info
- register: update_tags
-
- - name: assert update tags worked
- assert:
- that:
- - update_tags.changed
- - "'Updated' in update_tags.route_table.tags and update_tags.route_table.tags['Updated'] == 'new_tag'"
- - "'Public' not in update_tags.route_table.tags"
-
- - name: create NAT GW
- ec2_vpc_nat_gateway:
- if_exist_do_not_create: yes
- wait: yes
- subnet_id: "{{ subnets.results[0].subnet.id }}"
- <<: *aws_connection_info
- register: nat_gateway
-
- - name: CHECK MODE - create private route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "false"
- Name: "Private route table"
- routes:
- - gateway_id: "{{ nat_gateway.nat_gateway_id }}"
- dest: 0.0.0.0/0
- subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `False`].id') }}"
- <<: *aws_connection_info
- check_mode: True
- register: check_mode_results
-
- - name: assert the route table would be created
- assert:
- that:
- - check_mode_results.changed
-
- - name: create private route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "false"
- Name: "Private route table"
- routes:
- - gateway_id: "{{ nat_gateway.nat_gateway_id }}"
- dest: 0.0.0.0/0
- subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `False`].id') }}"
- <<: *aws_connection_info
- register: create_private_table
-
- - name: assert creating private route table worked
- assert:
- that:
- - create_private_table.changed
- - create_private_table.route_table.id != create_public_table.route_table.id
- - "'Public' in create_private_table.route_table.tags"
-
- - name: CHECK MODE - destroy public route table by tags
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- tags:
- Updated: new_tag
- Name: Public route table
- <<: *aws_connection_info
- check_mode: True
- register: check_mode_results
-
- - name: assert the route table would be deleted
- assert:
- that:
- - check_mode_results.changed
-
- - name: destroy public route table by tags
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- tags:
- Updated: new_tag
- Name: Public route table
- <<: *aws_connection_info
- register: destroy_table
-
- - name: assert destroy table worked
- assert:
- that:
- - destroy_table.changed
-
- - name: CHECK MODE - redestroy public route table
- ec2_vpc_route_table:
- route_table_id: "{{ create_public_table.route_table.id }}"
- lookup: id
- state: absent
- <<: *aws_connection_info
- check_mode: True
- register: check_mode_results
-
- - name: assert the public route table does not exist
- assert:
- that:
- - not check_mode_results.changed
-
- - name: redestroy public route table
- ec2_vpc_route_table:
- route_table_id: "{{ create_public_table.route_table.id }}"
- lookup: id
- state: absent
- <<: *aws_connection_info
- register: redestroy_table
-
- - name: assert redestroy table worked
- assert:
- that:
- - not redestroy_table.changed
-
- - name: destroy NAT GW
- ec2_vpc_nat_gateway:
- state: absent
- wait: yes
- release_eip: yes
- subnet_id: "{{ subnets.results[0].subnet.id }}"
- nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}"
- <<: *aws_connection_info
- register: nat_gateway
-
- - name: show route table info, get table using route-table-id
- ec2_vpc_route_table_info:
- filters:
- route-table-id: "{{ create_private_table.route_table.id }}"
- <<: *aws_connection_info
- register: route_table_info
-
- - name: assert route_table_info has correct attributes
- assert:
- that:
- - '"route_tables" in route_table_info'
- - 'route_table_info.route_tables | length == 1'
- - '"id" in route_table_info.route_tables[0]'
- - '"routes" in route_table_info.route_tables[0]'
- - '"associations" in route_table_info.route_tables[0]'
- - '"tags" in route_table_info.route_tables[0]'
- - '"vpc_id" in route_table_info.route_tables[0]'
- - 'route_table_info.route_tables[0].id == create_private_table.route_table.id'
-
- - name: show route table info, get table using tags
- ec2_vpc_route_table_info:
- filters:
- "tag:Public": "false"
- "tag:Name": "Private route table"
- vpc-id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
- register: route_table_info
-
- - name: assert route_table_info has correct tags
- assert:
- that:
- - 'route_table_info.route_tables | length == 1'
- - '"tags" in route_table_info.route_tables[0]'
- - '"Public" in route_table_info.route_tables[0].tags and route_table_info.route_tables[0].tags["Public"] == "false"'
- - '"Name" in route_table_info.route_tables[0].tags and route_table_info.route_tables[0].tags["Name"] == "Private route table"'
-
- - name: create NAT GW
- ec2_vpc_nat_gateway:
- if_exist_do_not_create: yes
- wait: yes
- subnet_id: "{{ subnets.results[0].subnet.id }}"
- <<: *aws_connection_info
- register: nat_gateway
-
- - name: show route table info
- ec2_vpc_route_table_info:
- filters:
- route-table-id: "{{ create_private_table.route_table.id }}"
- <<: *aws_connection_info
-
- - name: recreate private route table with new NAT GW
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "false"
- Name: "Private route table"
- routes:
- - nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}"
- dest: 0.0.0.0/0
- subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `False`].id') }}"
- <<: *aws_connection_info
- register: recreate_private_table
-
- - name: assert creating private route table worked
- assert:
- that:
- - recreate_private_table.changed
- - recreate_private_table.route_table.id != create_public_table.route_table.id
-
- - name: create a VPC endpoint to test ec2_vpc_route_table ignores it
- ec2_vpc_endpoint:
- state: present
- vpc_id: "{{ vpc.vpc.id }}"
- service: "com.amazonaws.{{ aws_region }}.s3"
- route_table_ids:
- - "{{ recreate_private_table.route_table.route_table_id }}"
- <<: *aws_connection_info
- register: vpc_endpoint
-
- - name: purge routes
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Public: "false"
- Name: "Private route table"
- routes:
- - nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}"
- dest: 0.0.0.0/0
- subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `False`].id') }}"
- purge_routes: true
- <<: *aws_connection_info
- register: result
-
- - name: Get endpoint info to verify that it wasn't purged from the route table
- ec2_vpc_endpoint_info:
- query: endpoints
- vpc_endpoint_ids:
- - "{{ vpc_endpoint.result.vpc_endpoint_id }}"
- <<: *aws_connection_info
- register: endpoint_details
-
- - name: assert the route table is associated with the VPC endpoint
- assert:
- that:
- - endpoint_details.vpc_endpoints[0].route_table_ids[0] == recreate_private_table.route_table.route_table_id
-
- always:
- #############################################################################
- # TEAR DOWN STARTS HERE
- #############################################################################
- - name: remove the VPC endpoint
- ec2_vpc_endpoint:
- state: absent
- vpc_endpoint_id: "{{ vpc_endpoint.result.vpc_endpoint_id }}"
- <<: *aws_connection_info
- when: vpc_endpoint is defined
- ignore_errors: yes
-
- - name: destroy route tables
- ec2_vpc_route_table:
- route_table_id: "{{ item.route_table.id }}"
- lookup: id
- state: absent
- <<: *aws_connection_info
- with_items:
- - "{{ create_public_table|default() }}"
- - "{{ create_private_table|default() }}"
- when: item and not item.failed
- ignore_errors: yes
-
- - name: destroy NAT GW
- ec2_vpc_nat_gateway:
- state: absent
- wait: yes
- release_eip: yes
- subnet_id: "{{ subnets.results[0].subnet.id }}"
- nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}"
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: destroy IGW
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: destroy subnets
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- <<: *aws_connection_info
- with_items:
- - cidr: 10.228.228.0/24
- - cidr: 10.228.229.0/24
- - cidr: 10.228.230.0/24
- - cidr: 10.228.231.0/24
- ignore_errors: yes
-
- # FIXME: ec2_vpc_nat_gateway should take care of this, but clearly doesn't always
- - name: ensure EIP is actually released
- ec2_eip:
- state: absent
- device_id: "{{ item.network_interface_id }}"
- in_vpc: yes
- <<: *aws_connection_info
- with_items: "{{ nat_gateway.nat_gateway_addresses }}"
- ignore_errors: yes
-
- - name: destroy VPC
- ec2_vpc_net:
- cidr_block: 10.228.228.0/22
- name: "{{ resource_prefix }}_vpc"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/ec2_vpc_vgw/aliases b/test/integration/targets/ec2_vpc_vgw/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/ec2_vpc_vgw/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/ec2_vpc_vgw/tasks/main.yml b/test/integration/targets/ec2_vpc_vgw/tasks/main.yml
deleted file mode 100644
index 13365146e1..0000000000
--- a/test/integration/targets/ec2_vpc_vgw/tasks/main.yml
+++ /dev/null
@@ -1,171 +0,0 @@
----
-- block:
-
- # ============================================================
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- # ============================================================
- - debug: msg="Setting up test dependencies"
-
- - name: create a VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc-{{ item }}"
- state: present
- cidr_block: "10.0.0.0/26"
- <<: *aws_connection_info
- tags:
- Name: "{{ resource_prefix }}-vpc-{{ item }}"
- Description: "Created by ansible-test"
- register: vpc_result
- loop: [1, 2]
-
- - name: use set fact for vpc ids
- set_fact:
- vpc_id_1: '{{ vpc_result.results.0.vpc.id }}'
- vpc_id_2: '{{ vpc_result.results.1.vpc.id }}'
-
- # ============================================================
- - debug: msg="Running tests"
-
- - name: create vpn gateway and attach it to vpc
- ec2_vpc_vgw:
- state: present
- vpc_id: '{{ vpc_id_1 }}'
- name: "{{ resource_prefix }}-vgw"
- <<: *aws_connection_info
- register: vgw
-
- - assert:
- that:
- - vgw.changed
- - "{{ vgw.vgw.vpc_id == vpc_id_1 }}"
- - '"{{ vgw.vgw.tags.Name }}" == "{{ resource_prefix }}-vgw"'
-
- - name: test idempotence
- ec2_vpc_vgw:
- state: present
- vpc_id: '{{ vpc_id_1 }}'
- name: "{{ resource_prefix }}-vgw"
- <<: *aws_connection_info
- register: vgw
-
- - assert:
- that:
- - not vgw.changed
-
- # ============================================================
- - name: attach vpn gateway to the other VPC
- ec2_vpc_vgw:
- state: present
- vpc_id: '{{ vpc_id_2 }}'
- name: "{{ resource_prefix }}-vgw"
- <<: *aws_connection_info
- register: vgw
-
- - assert:
- that:
- - vgw.changed
- - "{{ vgw.vgw.vpc_id == vpc_id_2 }}"
-
- # ============================================================
- - name: add tags to the VGW
- ec2_vpc_vgw:
- state: present
- vpc_id: '{{ vpc_id_2 }}'
- name: "{{ resource_prefix }}-vgw"
- tags:
- created_by: ec2_vpc_vgw integration tests
- <<: *aws_connection_info
- register: vgw
-
- - assert:
- that:
- - vgw.changed
- - vgw.vgw.tags | length == 2
- - "'created_by' in vgw.vgw.tags"
-
- - name: test idempotence
- ec2_vpc_vgw:
- state: present
- vpc_id: '{{ vpc_id_2 }}'
- name: "{{ resource_prefix }}-vgw"
- tags:
- created_by: ec2_vpc_vgw integration tests
- <<: *aws_connection_info
- register: vgw
-
- - assert:
- that:
- - not vgw.changed
-
- # ============================================================
- - name: remove tags from the VGW
- ec2_vpc_vgw:
- state: present
- vpc_id: '{{ vpc_id_2 }}'
- name: "{{ resource_prefix }}-vgw"
- <<: *aws_connection_info
- register: vgw
-
- - assert:
- that:
- - vgw.changed
- - vgw.vgw.tags | length == 1
- - '"{{ vgw.vgw.tags.Name }}" == "{{ resource_prefix }}-vgw"'
-
- # ============================================================
- - name: detach vpn gateway
- ec2_vpc_vgw:
- state: present
- name: "{{ resource_prefix }}-vgw"
- <<: *aws_connection_info
- register: vgw
-
- - assert:
- that:
- - vgw.changed
- - not vgw.vgw.vpc_id
-
- - name: test idempotence
- ec2_vpc_vgw:
- state: present
- name: "{{ resource_prefix }}-vgw"
- <<: *aws_connection_info
- register: vgw
-
- - assert:
- that:
- - not vgw.changed
-
- # ============================================================
-
- always:
-
- - debug: msg="Removing test dependencies"
-
- - name: delete vpn gateway
- ec2_vpc_vgw:
- state: absent
- vpn_gateway_id: '{{ vgw.vgw.id }}'
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: delete vpc
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc-{{ item }}"
- state: absent
- cidr_block: "10.0.0.0/26"
- <<: *aws_connection_info
- loop: [1, 2]
- register: result
- retries: 10
- delay: 5
- until: result is not failed
- ignore_errors: true
diff --git a/test/integration/targets/ec2_vpc_vpn_info/aliases b/test/integration/targets/ec2_vpc_vpn_info/aliases
deleted file mode 100644
index 157ce0c9d4..0000000000
--- a/test/integration/targets/ec2_vpc_vpn_info/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group3
diff --git a/test/integration/targets/ec2_vpc_vpn_info/tasks/main.yml b/test/integration/targets/ec2_vpc_vpn_info/tasks/main.yml
deleted file mode 100644
index e7e802c9e3..0000000000
--- a/test/integration/targets/ec2_vpc_vpn_info/tasks/main.yml
+++ /dev/null
@@ -1,124 +0,0 @@
----
-- block:
-
- # ============================================================
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- # ============================================================
- - name: create a VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: present
- cidr_block: "10.0.0.0/26"
- <<: *aws_connection_info
- tags:
- Name: "{{ resource_prefix }}-vpc"
- Description: "Created by ansible-test"
- register: vpc_result
-
- - name: create vpn gateway and attach it to vpc
- ec2_vpc_vgw:
- state: present
- vpc_id: '{{ vpc_result.vpc.id }}'
- name: "{{ resource_prefix }}-vgw"
- <<: *aws_connection_info
- register: vgw
-
- - name: create customer gateway
- ec2_customer_gateway:
- bgp_asn: 12345
- ip_address: 1.2.3.4
- name: testcgw
- <<: *aws_connection_info
- register: cgw
-
- - name: create vpn connection, with customer gateway
- ec2_vpc_vpn:
- customer_gateway_id: '{{ cgw.gateway.customer_gateway.customer_gateway_id }}'
- vpn_gateway_id: '{{ vgw.vgw.id }}'
- state: present
- <<: *aws_connection_info
- register: vpn
-
- # ============================================================
- - name: test success with no parameters
- ec2_vpc_vpn_info:
- <<: *aws_connection_info
- register: result
-
- - name: assert success with no parameters
- assert:
- that:
- - 'result.changed == false'
- - 'result.vpn_connections != []'
-
- - name: test success with customer gateway id as a filter
- ec2_vpc_vpn_info:
- filters:
- customer-gateway-id: '{{ cgw.gateway.customer_gateway.customer_gateway_id }}'
- vpn-connection-id: '{{ vpn.vpn_connection_id }}'
- <<: *aws_connection_info
- register: result
-
- - name: assert success with customer gateway id as filter
- assert:
- that:
- - 'result.changed == false'
- - 'result.vpn_connections != []'
-
- # ============================================================
- always:
-
- - name: delete vpn connection
- ec2_vpc_vpn:
- state: absent
- vpn_connection_id: '{{ vpn.vpn_connection_id }}'
- <<: *aws_connection_info
- register: result
- retries: 10
- delay: 3
- until: result is not failed
- ignore_errors: true
-
- - name: delete customer gateway
- ec2_customer_gateway:
- state: absent
- ip_address: 1.2.3.4
- name: testcgw
- bgp_asn: 12345
- <<: *aws_connection_info
- register: result
- retries: 10
- delay: 3
- until: result is not failed
- ignore_errors: true
-
- - name: delete vpn gateway
- ec2_vpc_vgw:
- state: absent
- vpn_gateway_id: '{{ vgw.vgw.id }}'
- <<: *aws_connection_info
- register: result
- retries: 10
- delay: 3
- until: result is not failed
- ignore_errors: true
-
- - name: delete vpc
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: absent
- cidr_block: "10.0.0.0/26"
- <<: *aws_connection_info
- register: result
- retries: 10
- delay: 3
- until: result is not failed
- ignore_errors: true
diff --git a/test/integration/targets/ecs_cluster/aliases b/test/integration/targets/ecs_cluster/aliases
deleted file mode 100644
index 4b1bea7a18..0000000000
--- a/test/integration/targets/ecs_cluster/aliases
+++ /dev/null
@@ -1,6 +0,0 @@
-cloud/aws
-ecs_service_info
-ecs_task
-ecs_taskdefinition
-ecs_taskdefinition_info
-unsupported
diff --git a/test/integration/targets/ecs_cluster/defaults/main.yml b/test/integration/targets/ecs_cluster/defaults/main.yml
deleted file mode 100644
index 20e010e366..0000000000
--- a/test/integration/targets/ecs_cluster/defaults/main.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-ecs_cluster_name: "{{ resource_prefix }}"
-user_data: |
- #!/bin/bash
- echo ECS_CLUSTER={{ ecs_cluster_name }} >> /etc/ecs/ecs.config
-
-ecs_service_name: "{{ resource_prefix }}-service"
-ecs_task_image_path: nginx
-ecs_task_name: "{{ resource_prefix }}-task"
-ecs_task_memory: 128
-ecs_task_containers:
-- name: "{{ ecs_task_name }}"
- image: "{{ ecs_task_image_path }}"
- essential: true
- memory: "{{ ecs_task_memory }}"
- portMappings:
- - containerPort: "{{ ecs_task_container_port }}"
- hostPort: "{{ ecs_task_host_port|default(0) }}"
- mountPoints: "{{ ecs_task_mount_points|default([]) }}"
-ecs_service_deployment_configuration:
- minimum_healthy_percent: 0
- maximum_percent: 100
-ecs_service_placement_strategy:
- - type: binpack
- field: memory
- - type: spread
- field: attribute:ecs.availability-zone
-ecs_task_container_port: 8080
-ecs_target_group_name: "{{ resource_prefix[:28] }}-tg"
-ecs_load_balancer_name: "{{ resource_prefix[:29] }}-lb"
-ecs_service_health_check_grace_period: 60
-ecs_fargate_task_containers:
-- name: "{{ ecs_task_name }}"
- image: "{{ ecs_task_image_path }}"
- essential: true
- portMappings:
- - containerPort: "{{ ecs_task_container_port }}"
- hostPort: "{{ ecs_task_host_port|default(0) }}"
- #mountPoints: "{{ ecs_task_mount_points|default([]) }}"
diff --git a/test/integration/targets/ecs_cluster/files/ec2-trust-policy.json b/test/integration/targets/ecs_cluster/files/ec2-trust-policy.json
deleted file mode 100644
index 72413abdd3..0000000000
--- a/test/integration/targets/ecs_cluster/files/ec2-trust-policy.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "Version": "2008-10-17",
- "Statement": [
- {
- "Sid": "",
- "Effect": "Allow",
- "Principal": {
- "Service": "ec2.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/ecs_cluster/files/ecs-trust-policy.json b/test/integration/targets/ecs_cluster/files/ecs-trust-policy.json
deleted file mode 100644
index f871b34d91..0000000000
--- a/test/integration/targets/ecs_cluster/files/ecs-trust-policy.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "Version": "2008-10-17",
- "Statement": [
- {
- "Sid": "",
- "Effect": "Allow",
- "Principal": {
- "Service": [
- "ecs.amazonaws.com",
- "ecs-tasks.amazonaws.com"
- ]
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/ecs_cluster/meta/main.yml b/test/integration/targets/ecs_cluster/meta/main.yml
deleted file mode 100644
index 1810d4bec9..0000000000
--- a/test/integration/targets/ecs_cluster/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_remote_tmp_dir
diff --git a/test/integration/targets/ecs_cluster/tasks/full_test.yml b/test/integration/targets/ecs_cluster/tasks/full_test.yml
deleted file mode 100644
index 40813b8720..0000000000
--- a/test/integration/targets/ecs_cluster/tasks/full_test.yml
+++ /dev/null
@@ -1,1169 +0,0 @@
----
-# tasks file for ecs_cluster
-
-- block:
- # ============================================================
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: ensure IAM instance role exists
- iam_role:
- name: ecsInstanceRole
- assume_role_policy_document: "{{ lookup('file','ec2-trust-policy.json') }}"
- state: present
- create_instance_profile: yes
- managed_policy:
- - AmazonEC2ContainerServiceforEC2Role
- <<: *aws_connection_info
-
- - name: ensure IAM service role exists
- iam_role:
- name: ecsServiceRole
- assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}"
- state: present
- create_instance_profile: no
- managed_policy:
- - AmazonEC2ContainerServiceRole
- <<: *aws_connection_info
-
- - name: ensure AWSServiceRoleForECS role exists
- iam_role_info:
- name: AWSServiceRoleForECS
- <<: *aws_connection_info
- register: iam_role_result
-
- # FIXME: come up with a way to automate this
- - name: fail if AWSServiceRoleForECS role does not exist
- fail:
- msg: >
- Run `aws iam create-service-linked-role --aws-service-name=ecs.amazonaws.com` to create
- a linked role for AWS VPC load balancer management
- when: not iam_role_result.iam_roles
-
- - name: create an ECS cluster
- ecs_cluster:
- name: "{{ ecs_cluster_name }}"
- state: present
- <<: *aws_connection_info
- register: ecs_cluster
-
- - name: check that ecs_cluster changed
- assert:
- that:
- - ecs_cluster.changed
-
- - name: create same ECS cluster (should do nothing)
- ecs_cluster:
- name: "{{ ecs_cluster_name }}"
- state: present
- <<: *aws_connection_info
- register: ecs_cluster_again
-
- - name: check that ecs_cluster did not change
- assert:
- that:
- - not ecs_cluster_again.changed
-
- - name: create a VPC to work in
- ec2_vpc_net:
- cidr_block: 10.0.0.0/16
- state: present
- name: '{{ resource_prefix }}_ecs_cluster'
- resource_tags:
- Name: '{{ resource_prefix }}_ecs_cluster'
- <<: *aws_connection_info
- register: setup_vpc
-
- - name: create a key pair to use for creating an ec2 instance
- ec2_key:
- name: '{{ resource_prefix }}_ecs_cluster'
- state: present
- <<: *aws_connection_info
- when: ec2_keypair is not defined # allow override in cloud-config-aws.ini
- register: setup_key
-
- - name: create subnets
- ec2_vpc_subnet:
- az: '{{ ec2_region }}{{ item.zone }}'
- tags:
- Name: '{{ resource_prefix }}_ecs_cluster-subnet-{{ item.zone }}'
- vpc_id: '{{ setup_vpc.vpc.id }}'
- cidr: "{{ item.cidr }}"
- state: present
- <<: *aws_connection_info
- register: setup_subnet
- with_items:
- - zone: a
- cidr: 10.0.1.0/24
- - zone: b
- cidr: 10.0.2.0/24
-
- - name: create an internet gateway so that ECS agents can talk to ECS
- ec2_vpc_igw:
- vpc_id: '{{ setup_vpc.vpc.id }}'
- state: present
- <<: *aws_connection_info
- register: igw
-
- - name: create a security group to use for creating an ec2 instance
- ec2_group:
- name: '{{ resource_prefix }}_ecs_cluster-sg'
- description: 'created by Ansible integration tests'
- state: present
- vpc_id: '{{ setup_vpc.vpc.id }}'
- rules: # allow all ssh traffic but nothing else
- - ports: 22
- cidr: 0.0.0.0/0
- <<: *aws_connection_info
- register: setup_sg
-
- - name: find a suitable AMI
- ec2_ami_info:
- owner: amazon
- filters:
- description: "Amazon Linux AMI* ECS *"
- <<: *aws_connection_info
- register: ec2_ami_info
-
- - name: set image id fact
- set_fact:
- ecs_image_id: "{{ (ec2_ami_info.images|first).image_id }}"
-
- - name: provision ec2 instance to create an image
- ec2_instance:
- key_name: '{{ ec2_keypair|default(setup_key.key.name) }}'
- instance_type: t2.micro
- state: present
- image_id: '{{ ecs_image_id }}'
- wait: yes
- user_data: "{{ user_data }}"
- instance_role: ecsInstanceRole
- tags:
- Name: '{{ resource_prefix }}_ecs_agent'
- security_group: '{{ setup_sg.group_id }}'
- vpc_subnet_id: '{{ setup_subnet.results[0].subnet.id }}'
- <<: *aws_connection_info
- register: setup_instance
-
- - name: create target group
- elb_target_group:
- name: "{{ ecs_target_group_name }}1"
- state: present
- protocol: HTTP
- port: 8080
- modify_targets: no
- vpc_id: '{{ setup_vpc.vpc.id }}'
- target_type: instance
- <<: *aws_connection_info
- register: elb_target_group_instance
-
- - name: create second target group to use ip target_type
- elb_target_group:
- name: "{{ ecs_target_group_name }}2"
- state: present
- protocol: HTTP
- port: 8080
- modify_targets: no
- vpc_id: '{{ setup_vpc.vpc.id }}'
- target_type: ip
- <<: *aws_connection_info
- register: elb_target_group_ip
-
- - name: create load balancer
- elb_application_lb:
- name: "{{ ecs_load_balancer_name }}"
- state: present
- scheme: internal
- security_groups: '{{ setup_sg.group_id }}'
- subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ ecs_target_group_name }}1"
- - Protocol: HTTP
- Port: 81
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ ecs_target_group_name }}2"
- <<: *aws_connection_info
-
- - name: create task definition
- ecs_taskdefinition:
- containers: "{{ ecs_task_containers }}"
- family: "{{ ecs_task_name }}"
- state: present
- <<: *aws_connection_info
- register: ecs_task_definition
-
- - name: recreate task definition
- ecs_taskdefinition:
- containers: "{{ ecs_task_containers }}"
- family: "{{ ecs_task_name }}"
- state: present
- <<: *aws_connection_info
- register: ecs_task_definition_again
-
- - name: check that task definition does not change
- assert:
- that:
- - not ecs_task_definition_again.changed
- # FIXME: task definition should not change, will need #26752 or equivalent
- ignore_errors: yes
-
- - name: obtain ECS task definition facts
- ecs_taskdefinition_info:
- task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
- <<: *aws_connection_info
-
- - name: create ECS service definition
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
- desired_count: 1
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- placement_strategy: "{{ ecs_service_placement_strategy }}"
- health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}"
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- role: "ecsServiceRole"
- <<: *aws_connection_info
- register: ecs_service
-
- - name: check that ECS service creation changed
- assert:
- that:
- - ecs_service.changed
-
- - name: create same ECS service definition (should not change)
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
- desired_count: 1
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- placement_strategy: "{{ ecs_service_placement_strategy }}"
- health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}"
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- role: "ecsServiceRole"
- <<: *aws_connection_info
- register: ecs_service_again
-
- - name: check that ECS service recreation changed nothing
- assert:
- that:
- - not ecs_service_again.changed
- # FIXME: service should not change, needs fixing
- ignore_errors: yes
-
- # FIXME: attempt to update service load balancer
- - name: update ECS service definition (expected to fail)
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
- desired_count: 1
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- placement_strategy: "{{ ecs_service_placement_strategy }}"
- health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}"
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port|int + 1 }}"
- role: "ecsServiceRole"
- <<: *aws_connection_info
- register: update_ecs_service
- ignore_errors: yes
-
- - name: assert that updating ECS load balancer failed with helpful message
- assert:
- that:
- - update_ecs_service is failed
- - "'error' not in update_ecs_service"
- - "'msg' in update_ecs_service"
-
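- # A graceful failure surfaces a human-readable 'msg' via fail_json rather
- # than a raw 'error' traceback, which is what the assert above verifies.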
-
- - name: attempt to use ECS network configuration on task definition without awsvpc network_mode
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}3"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
- desired_count: 1
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- placement_strategy: "{{ ecs_service_placement_strategy }}"
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- network_configuration:
- subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
- security_groups:
- - '{{ setup_sg.group_id }}'
- <<: *aws_connection_info
- register: ecs_service_network_without_awsvpc_task
- ignore_errors: yes
-
- - name: assert that using ECS network configuration with a non-awsvpc task definition fails
- assert:
- that:
- - ecs_service_network_without_awsvpc_task is failed
-
- - name: scale down ECS service
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
- desired_count: 0
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- placement_strategy: "{{ ecs_service_placement_strategy }}"
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- role: "ecsServiceRole"
- <<: *aws_connection_info
- register: ecs_service_scale_down
-
- - name: pause to allow service to scale down
- pause:
- seconds: 60
-
- - name: delete ECS service definition
- ecs_service:
- state: absent
- name: "{{ ecs_service_name }}"
- cluster: "{{ ecs_cluster_name }}"
- <<: *aws_connection_info
- register: delete_ecs_service
-
- - name: assert that deleting ECS service worked
- assert:
- that:
- - delete_ecs_service.changed
-
- - name: create VPC-networked task definition with host port set to 0 (expected to fail)
- ecs_taskdefinition:
- containers: "{{ ecs_task_containers }}"
- family: "{{ ecs_task_name }}-vpc"
- state: present
- network_mode: awsvpc
- <<: *aws_connection_info
- register: ecs_task_definition_vpc_no_host_port
- ignore_errors: yes
-
- - name: check that awsvpc task definition with host port 0 fails gracefully
- assert:
- that:
- - ecs_task_definition_vpc_no_host_port is failed
- - "'error' not in ecs_task_definition_vpc_no_host_port"
-
- - name: create VPC-networked task definition with host port set to 8080
- ecs_taskdefinition:
- containers: "{{ ecs_task_containers }}"
- family: "{{ ecs_task_name }}-vpc"
- network_mode: awsvpc
- state: present
- <<: *aws_connection_info
- vars:
- ecs_task_host_port: 8080
- register: ecs_task_definition_vpc_with_host_port
-
- - name: obtain ECS task definition facts
- ecs_taskdefinition_info:
- task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
- <<: *aws_connection_info
- register: ecs_taskdefinition_info
-
- - name: assert that network mode is awsvpc
- assert:
- that:
- - "ecs_taskdefinition_info.network_mode == 'awsvpc'"
-
- - name: pause to allow service to scale down
- pause:
- seconds: 60
-
- - name: delete ECS service definition
- ecs_service:
- state: absent
- name: "{{ ecs_service_name }}4"
- cluster: "{{ ecs_cluster_name }}"
- <<: *aws_connection_info
- register: delete_ecs_service
-
- - name: create ECS service definition with network configuration
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}2"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
- desired_count: 1
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- placement_strategy: "{{ ecs_service_placement_strategy }}"
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- network_configuration:
- subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
- security_groups:
- - '{{ setup_sg.group_id }}'
- <<: *aws_connection_info
- register: create_ecs_service_with_vpc
-
- - name: assert that network configuration is correct
- assert:
- that:
- - "'networkConfiguration' in create_ecs_service_with_vpc.service"
- - "'awsvpcConfiguration' in create_ecs_service_with_vpc.service.networkConfiguration"
- - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.subnets|length == 2"
- - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1"
-
- - name: create dummy group to update ECS service with
- ec2_group:
- name: "{{ resource_prefix }}-ecs-vpc-test-sg"
- description: "Test security group for ECS with VPC"
- vpc_id: '{{ setup_vpc.vpc.id }}'
- state: present
- <<: *aws_connection_info
-
- - name: update ECS service definition with new network configuration
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}2"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
- desired_count: 1
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- placement_strategy: "{{ ecs_service_placement_strategy }}"
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- network_configuration:
- subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
- security_groups:
- - "{{ resource_prefix }}-ecs-vpc-test-sg"
- <<: *aws_connection_info
- register: update_ecs_service_with_vpc
-
- - name: check that ECS service changed
- assert:
- that:
- - update_ecs_service_with_vpc.changed
- - "'networkConfiguration' in update_ecs_service_with_vpc.service"
- - "'awsvpcConfiguration' in update_ecs_service_with_vpc.service.networkConfiguration"
- - "update_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.subnets|length == 2"
- - "update_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1"
-
- - name: create ecs_service using health_check_grace_period_seconds
- ecs_service:
- name: "{{ ecs_service_name }}-mft"
- cluster: "{{ ecs_cluster_name }}"
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
- scheduling_strategy: "REPLICA"
- health_check_grace_period_seconds: 10
- desired_count: 1
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation_hcgp
-
-
- - name: health_check_grace_period_seconds sets healthCheckGracePeriodSeconds
- assert:
- that:
- - ecs_service_creation_hcgp.changed
- - "{{ecs_service_creation_hcgp.service.healthCheckGracePeriodSeconds}} == 10"
-
- - name: update ecs_service using health_check_grace_period_seconds
- ecs_service:
- name: "{{ ecs_service_name }}-mft"
- cluster: "{{ ecs_cluster_name }}"
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
- desired_count: 1
- health_check_grace_period_seconds: 30
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation_hcgp2
- ignore_errors: no
-
- - name: check that module returns success
- assert:
- that:
- - ecs_service_creation_hcgp2.changed
- - "{{ecs_service_creation_hcgp2.service.healthCheckGracePeriodSeconds}} == 30"
-
-# until ansible supports service registries, this test can't run.
-# - name: update ecs_service using service_registries
-# ecs_service:
-# name: "{{ ecs_service_name }}-service-registries"
-# cluster: "{{ ecs_cluster_name }}"
-# load_balancers:
-# - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
-# containerName: "{{ ecs_task_name }}"
-# containerPort: "{{ ecs_task_container_port }}"
-# service_registries:
-# - containerName: "{{ ecs_task_name }}"
-# containerPort: "{{ ecs_task_container_port }}"
-# ### TODO: Figure out how to get a service registry ARN without a service registry module.
-# registryArn: "{{ ecs_task_service_registry_arn }}"
-# task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
-# desired_count: 1
-# state: present
-# <<: *aws_connection_info
-# register: ecs_service_creation_sr
-# ignore_errors: yes
-
-# - name: dump sr output
-# debug: var=ecs_service_creation_sr
-
-# - name: check that module returns success
-# assert:
-# that:
-# - ecs_service_creation_sr.changed
-
- - name: create ecs_service using REPLICA scheduling_strategy
- ecs_service:
- name: "{{ ecs_service_name }}-replica"
- cluster: "{{ ecs_cluster_name }}"
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- scheduling_strategy: "REPLICA"
- task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
- desired_count: 1
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation_replica
-
- - name: obtain facts for all ECS services in the cluster
- ecs_service_info:
- cluster: "{{ ecs_cluster_name }}"
- details: yes
- events: no
- <<: *aws_connection_info
- register: ecs_service_info
-
- - name: assert that facts are useful
- assert:
- that:
- - "'services' in ecs_service_info"
- - ecs_service_info.services | length > 0
- - "'events' not in ecs_service_info.services[0]"
-
- - name: obtain facts for existing service in the cluster
- ecs_service_info:
- cluster: "{{ ecs_cluster_name }}"
- service: "{{ ecs_service_name }}"
- details: yes
- events: no
- <<: *aws_connection_info
- register: ecs_service_info
-
- - name: assert that existing service is available and running
- assert:
- that:
- - "ecs_service_info.services|length == 1"
- - "ecs_service_info.services_not_running|length == 0"
-
- - name: obtain facts for non-existent service in the cluster
- ecs_service_info:
- cluster: "{{ ecs_cluster_name }}"
- service: madeup
- details: yes
- events: no
- <<: *aws_connection_info
- register: ecs_service_info
-
- - name: assert that non-existent service is missing
- assert:
- that:
- - "ecs_service_info.services_not_running[0].reason == 'MISSING'"
-
- - name: obtain specific ECS service facts
- ecs_service_info:
- service: "{{ ecs_service_name }}2"
- cluster: "{{ ecs_cluster_name }}"
- details: yes
- <<: *aws_connection_info
- register: ecs_service_info
-
- - name: check that facts contain network configuration
- assert:
- that:
- - "'networkConfiguration' in ecs_service_info.services[0]"
-
- - name: attempt to get facts from missing task definition
- ecs_taskdefinition_info:
- task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition.taskdefinition.revision + 1}}"
- <<: *aws_connection_info
-
- # ============================================================
- # Begin tests for Fargate
-
- - name: ensure ECS task execution role exists
- iam_role:
- name: ecsTaskExecutionRole
- assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}"
- description: "Allows ECS containers to make calls to ECR"
- state: present
- create_instance_profile: no
- managed_policy:
- - AmazonEC2ContainerServiceRole
- <<: *aws_connection_info
- register: iam_execution_role
-
- - name: create Fargate VPC-networked task definition with host port set to 8080 and unsupported network mode (expected to fail)
- ecs_taskdefinition:
- containers: "{{ ecs_fargate_task_containers }}"
- family: "{{ ecs_task_name }}-vpc"
- network_mode: bridge
- launch_type: FARGATE
- cpu: 512
- memory: 1024
- state: present
- <<: *aws_connection_info
- vars:
- ecs_task_host_port: 8080
- ignore_errors: yes
- register: ecs_fargate_task_definition_bridged_with_host_port
-
- - name: check that fargate task definition with bridged networking fails gracefully
- assert:
- that:
- - ecs_fargate_task_definition_bridged_with_host_port is failed
- - 'ecs_fargate_task_definition_bridged_with_host_port.msg == "To use FARGATE launch type, network_mode must be awsvpc"'
-
- - name: create Fargate VPC-networked task definition without CPU or memory (expected to fail)
- ecs_taskdefinition:
- containers: "{{ ecs_fargate_task_containers }}"
- family: "{{ ecs_task_name }}-vpc"
- network_mode: awsvpc
- launch_type: FARGATE
- state: present
- <<: *aws_connection_info
- ignore_errors: yes
- register: ecs_fargate_task_definition_vpc_no_mem
-
- - name: check that fargate task definition without memory or cpu fails gracefully
- assert:
- that:
- - ecs_fargate_task_definition_vpc_no_mem is failed
- - 'ecs_fargate_task_definition_vpc_no_mem.msg == "launch_type is FARGATE but all of the following are missing: cpu, memory"'
-
- - name: create Fargate VPC-networked task definition with CPU, memory, and an execution role
- ecs_taskdefinition:
- containers: "{{ ecs_fargate_task_containers }}"
- family: "{{ ecs_task_name }}-vpc"
- network_mode: awsvpc
- launch_type: FARGATE
- cpu: 512
- memory: 1024
- execution_role_arn: "{{ iam_execution_role.arn }}"
- state: present
- <<: *aws_connection_info
- vars:
- ecs_task_host_port: 8080
- register: ecs_fargate_task_definition
-
- - name: obtain ECS task definition facts
- ecs_taskdefinition_info:
- task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
- <<: *aws_connection_info
-
- - name: create fargate ECS service without network config (expected to fail)
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}4"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
- desired_count: 1
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- launch_type: FARGATE
- <<: *aws_connection_info
- register: ecs_fargate_service_network_without_awsvpc
- ignore_errors: yes
-
- - name: assert that creating a Fargate ECS service without network config fails
- assert:
- that:
- - ecs_fargate_service_network_without_awsvpc is failed
-
- - name: create fargate ECS service with network config
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}4"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
- desired_count: 1
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- launch_type: FARGATE
- network_configuration:
- subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
- security_groups:
- - '{{ setup_sg.group_id }}'
- assign_public_ip: true
- <<: *aws_connection_info
- register: ecs_fargate_service_network_with_awsvpc
-
- - name: assert that public IP assignment is enabled
- assert:
- that:
- - 'ecs_fargate_service_network_with_awsvpc.service.networkConfiguration.awsvpcConfiguration.assignPublicIp == "ENABLED"'
-
- - name: create fargate ECS task with run task
- ecs_task:
- operation: run
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}-vpc"
- launch_type: FARGATE
- count: 1
- network_configuration:
- subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
- security_groups:
- - '{{ setup_sg.group_id }}'
- assign_public_ip: true
- started_by: ansible_user
- <<: *aws_connection_info
- register: fargate_run_task_output
-
- # The AWS CLI is not installed in the test container; make sure it is present.
- - name: install awscli
- pip:
- state: present
- name: awscli
-
- - name: disable taskLongArnFormat
- command: aws ecs put-account-setting --name taskLongArnFormat --value disabled
- environment:
- AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
- AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
- AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
- AWS_DEFAULT_REGION: "{{ aws_region }}"
-
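- # Tagging ECS tasks requires the long ARN format, so with taskLongArnFormat
- # disabled the tagged run below is expected to fail. If needed, the current
- # account setting can be inspected with something like:
- #   aws ecs list-account-settings --name taskLongArnFormat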
- - name: create fargate ECS task with run task and tags (long ARN format disabled, should fail)
- ecs_task:
- operation: run
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}-vpc"
- launch_type: FARGATE
- count: 1
- tags:
- tag_key: tag_value
- tag_key2: tag_value2
- network_configuration:
- subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
- security_groups:
- - '{{ setup_sg.group_id }}'
- assign_public_ip: true
- started_by: ansible_user
- <<: *aws_connection_info
- register: fargate_run_task_output_with_tags_fail
- ignore_errors: yes
-
- - name: enable taskLongArnFormat
- command: aws ecs put-account-setting --name taskLongArnFormat --value enabled
- environment:
- AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
- AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
- AWS_SESSION_TOKEN: "{{ security_token | default('') }}"
- AWS_DEFAULT_REGION: "{{ aws_region }}"
-
- - name: create fargate ECS task with run task and tags
- ecs_task:
- operation: run
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}-vpc"
- launch_type: FARGATE
- count: 1
- tags:
- tag_key: tag_value
- tag_key2: tag_value2
- network_configuration:
- subnets: "{{ setup_subnet.results | json_query('[].subnet.id') }}"
- security_groups:
- - '{{ setup_sg.group_id }}'
- assign_public_ip: true
- started_by: ansible_user
- <<: *aws_connection_info
- register: fargate_run_task_output_with_tags
-
-
- # ============================================================
- # End tests for Fargate
-
- - name: create task definition for absent with arn regression test
- ecs_taskdefinition:
- containers: "{{ ecs_task_containers }}"
- family: "{{ ecs_task_name }}-absent"
- state: present
- <<: *aws_connection_info
- register: ecs_task_definition_absent_with_arn
-
- - name: absent task definition by arn
- ecs_taskdefinition:
- arn: "{{ ecs_task_definition_absent_with_arn.taskdefinition.taskDefinitionArn }}"
- state: absent
- <<: *aws_connection_info
-
- always:
- # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc
- - name: Announce teardown start
- debug:
- msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"
-
- - name: obtain ECS service facts
- ecs_service_info:
- service: "{{ ecs_service_name }}"
- cluster: "{{ ecs_cluster_name }}"
- details: yes
- <<: *aws_connection_info
- register: ecs_service_info
-
- - name: scale down ECS service
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_service_info.services[0].taskDefinition }}"
- desired_count: 0
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- placement_strategy: "{{ ecs_service_placement_strategy }}"
- load_balancers:
- - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- <<: *aws_connection_info
- ignore_errors: yes
- register: ecs_service_scale_down
-
- - name: obtain second ECS service facts
- ecs_service_info:
- service: "{{ ecs_service_name }}2"
- cluster: "{{ ecs_cluster_name }}"
- details: yes
- <<: *aws_connection_info
- ignore_errors: yes
- register: ecs_service_info
-
- - name: scale down second ECS service
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}2"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_service_info.services[0].taskDefinition }}"
- desired_count: 0
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- placement_strategy: "{{ ecs_service_placement_strategy }}"
- load_balancers:
- - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- <<: *aws_connection_info
- ignore_errors: yes
- register: ecs_service_scale_down
-
- - name: scale down multifunction-test service
- ecs_service:
- name: "{{ ecs_service_name }}-mft"
- cluster: "{{ ecs_cluster_name }}"
- state: present
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
- desired_count: 0
- <<: *aws_connection_info
- ignore_errors: yes
- register: ecs_service_scale_down
-
- - name: scale down scheduling_strategy service
- ecs_service:
- name: "{{ ecs_service_name }}-replica"
- cluster: "{{ ecs_cluster_name }}"
- state: present
- load_balancers:
- - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
- containerName: "{{ ecs_task_name }}"
- containerPort: "{{ ecs_task_container_port }}"
- task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
- desired_count: 0
- <<: *aws_connection_info
- ignore_errors: yes
- register: ecs_service_scale_down
-
-
-# until ansible supports service registries, the test for it can't run and this
-# scale down is not needed
-# - name: scale down service_registries service
-# ecs_service:
-# name: "{{ ecs_service_name }}-service-registries"
-# cluster: "{{ ecs_cluster_name }}"
-# state: present
-# load_balancers:
-# - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
-# containerName: "{{ ecs_task_name }}"
-# containerPort: "{{ ecs_task_container_port }}"
-# task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
-# desired_count: 0
-# <<: *aws_connection_info
-# ignore_errors: yes
-# register: ecs_service_scale_down
-
- - name: scale down Fargate ECS service
- ecs_service:
- state: present
- name: "{{ ecs_service_name }}4"
- cluster: "{{ ecs_cluster_name }}"
- task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
- desired_count: 0
- deployment_configuration: "{{ ecs_service_deployment_configuration }}"
- <<: *aws_connection_info
- ignore_errors: yes
- register: ecs_service_scale_down
-
- - name: stop Fargate ECS task
- ecs_task:
- task: "{{ fargate_run_task_output.task[0].taskArn }}"
- task_definition: "{{ ecs_task_name }}-vpc"
- operation: stop
- cluster: "{{ ecs_cluster_name }}"
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: stop tagged Fargate ECS task
- ecs_task:
- task: "{{ fargate_run_task_output_with_tags.task[0].taskArn }}"
- task_definition: "{{ ecs_task_name }}-vpc"
- operation: stop
- cluster: "{{ ecs_cluster_name }}"
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: pause to allow services to scale down
- pause:
- seconds: 60
- when: ecs_service_scale_down is not failed
-
- - name: remove ecs service
- ecs_service:
- state: absent
- cluster: "{{ ecs_cluster_name }}"
- name: "{{ ecs_service_name }}"
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove second ecs service
- ecs_service:
- state: absent
- cluster: "{{ ecs_cluster_name }}"
- name: "{{ ecs_service_name }}2"
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove mft ecs service
- ecs_service:
- state: absent
- cluster: "{{ ecs_cluster_name }}"
- name: "{{ ecs_service_name }}-mft"
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove scheduling_strategy ecs service
- ecs_service:
- state: absent
- cluster: "{{ ecs_cluster_name }}"
- name: "{{ ecs_service_name }}-replica"
- <<: *aws_connection_info
- ignore_errors: yes
-
-# until ansible supports service registries, the test for it can't run and this
-# removal is not needed
-# - name: remove service_registries ecs service
-# ecs_service:
-# state: absent
-# cluster: "{{ ecs_cluster_name }}"
-# name: "{{ ecs_service_name }}-service-registries"
-# <<: *aws_connection_info
-# ignore_errors: yes
-
- - name: remove fargate ECS service
- ecs_service:
- state: absent
- name: "{{ ecs_service_name }}4"
- cluster: "{{ ecs_cluster_name }}"
- <<: *aws_connection_info
- ignore_errors: yes
- register: remove_fargate_service
-
- - name: remove ecs task definition
- ecs_taskdefinition:
- containers: "{{ ecs_task_containers }}"
- family: "{{ ecs_task_name }}"
- revision: "{{ ecs_task_definition.taskdefinition.revision }}"
- state: absent
- <<: *aws_connection_info
- vars:
- ecs_task_host_port: 8080
- ignore_errors: yes
-
- - name: remove ecs task definition again
- ecs_taskdefinition:
- containers: "{{ ecs_task_containers }}"
- family: "{{ ecs_task_name }}"
- revision: "{{ ecs_task_definition_again.taskdefinition.revision }}"
- state: absent
- <<: *aws_connection_info
- vars:
- ecs_task_host_port: 8080
- ignore_errors: yes
-
- - name: remove second ecs task definition
- ecs_taskdefinition:
- containers: "{{ ecs_task_containers }}"
- family: "{{ ecs_task_name }}-vpc"
- revision: "{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
- state: absent
- <<: *aws_connection_info
- vars:
- ecs_task_host_port: 8080
- ignore_errors: yes
-
- - name: remove fargate ecs task definition
- ecs_taskdefinition:
- containers: "{{ ecs_fargate_task_containers }}"
- family: "{{ ecs_task_name }}-vpc"
- revision: "{{ ecs_fargate_task_definition.taskdefinition.revision }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ecs task definition for absent with arn
- ecs_taskdefinition:
- containers: "{{ ecs_task_containers }}"
- family: "{{ ecs_task_name }}-absent"
- revision: "{{ ecs_task_definition_absent_with_arn.taskdefinition.revision }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove load balancer
- elb_application_lb:
- name: "{{ ecs_load_balancer_name }}"
- state: absent
- wait: yes
- <<: *aws_connection_info
- ignore_errors: yes
- register: elb_application_lb_remove
-
- - name: pause to allow target group to be disassociated
- pause:
- seconds: 30
- when: not elb_application_lb_remove is failed
-
- - name: remove target groups
- elb_target_group:
- name: "{{ item }}"
- state: absent
- <<: *aws_connection_info
- with_items:
- - "{{ ecs_target_group_name }}1"
- - "{{ ecs_target_group_name }}2"
- ignore_errors: yes
-
- - name: remove setup ec2 instance
- ec2_instance:
- instance_ids: '{{ setup_instance.instance_ids }}'
- state: absent
- wait: yes
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove setup keypair
- ec2_key:
- name: '{{ resource_prefix }}_ecs_cluster'
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove security groups
- ec2_group:
- name: '{{ item }}'
- description: 'created by Ansible integration tests'
- state: absent
- vpc_id: '{{ setup_vpc.vpc.id }}'
- <<: *aws_connection_info
- with_items:
- - "{{ resource_prefix }}-ecs-vpc-test-sg"
- - '{{ resource_prefix }}_ecs_cluster-sg'
- ignore_errors: yes
-
- - name: remove IGW
- ec2_vpc_igw:
- state: absent
- vpc_id: '{{ setup_vpc.vpc.id }}'
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove setup subnet
- ec2_vpc_subnet:
- az: '{{ aws_region }}{{ item.zone }}'
- vpc_id: '{{ setup_vpc.vpc.id }}'
- cidr: "{{ item.cidr}}"
- state: absent
- <<: *aws_connection_info
- with_items:
- - zone: a
- cidr: 10.0.1.0/24
- - zone: b
- cidr: 10.0.2.0/24
- ignore_errors: yes
-
- - name: remove setup VPC
- ec2_vpc_net:
- cidr_block: 10.0.0.0/16
- state: absent
- name: '{{ resource_prefix }}_ecs_cluster'
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ECS cluster
- ecs_cluster:
- name: "{{ ecs_cluster_name }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/ecs_cluster/tasks/main.yml b/test/integration/targets/ecs_cluster/tasks/main.yml
deleted file mode 100644
index 83ee3f3489..0000000000
--- a/test/integration/targets/ecs_cluster/tasks/main.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-- set_fact:
- virtualenv: "{{ remote_tmp_dir }}/virtualenv"
- virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
-
-- set_fact:
- virtualenv_interpreter: "{{ virtualenv }}/bin/python"
-
-- pip:
- name: virtualenv
-
-- pip:
- name:
- - 'botocore<1.8.4'
- - boto3
- - coverage
- virtualenv: "{{ virtualenv }}"
- virtualenv_command: "{{ virtualenv_command }}"
- virtualenv_site_packages: no
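-
-# botocore<1.8.4 predates assign_public_ip and force_new_deployment support,
-# so the includes below exercise the modules' graceful-failure paths.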
-
-- include_tasks: network_assign_public_ip_fail.yml
- vars:
- ansible_python_interpreter: "{{ virtualenv_interpreter }}"
-
-- include_tasks: network_force_new_deployment_fail.yml
- vars:
- ansible_python_interpreter: "{{ virtualenv_interpreter }}"
-
-- file:
- path: "{{ virtualenv }}"
- state: absent
-
-# Re-run with a newer botocore (>=1.12.60) that supports force_new_deployment
-
-- pip:
- name:
- - 'botocore>=1.12.60'
- - boto3
- - coverage
- virtualenv: "{{ virtualenv }}"
- virtualenv_command: "{{ virtualenv_command }}"
- virtualenv_site_packages: no
-
-- include_tasks: network_force_new_deployment.yml
- vars:
- ansible_python_interpreter: "{{ virtualenv_interpreter }}"
-
-- include_tasks: full_test.yml
- vars:
- ansible_python_interpreter: "{{ virtualenv_interpreter }}"
-
-- file:
- path: "{{ virtualenv }}"
- state: absent
diff --git a/test/integration/targets/ecs_cluster/tasks/network_assign_public_ip_fail.yml b/test/integration/targets/ecs_cluster/tasks/network_assign_public_ip_fail.yml
deleted file mode 100644
index b4b7e53121..0000000000
--- a/test/integration/targets/ecs_cluster/tasks/network_assign_public_ip_fail.yml
+++ /dev/null
@@ -1,123 +0,0 @@
-- block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: True
-
- - name: create ecs cluster
- ecs_cluster:
- name: "{{ resource_prefix }}"
- state: present
- <<: *aws_connection_info
-
- - name: create ecs_taskdefinition with bridged network
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}"
- state: present
- network_mode: bridge
- <<: *aws_connection_info
- register: ecs_taskdefinition_creation
-
- - name: create ecs_taskdefinition with awsvpc network
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}-vpc"
- state: present
- network_mode: awsvpc
- <<: *aws_connection_info
- register: ecs_taskdefinition_creation_vpc
-
- - name: ecs_taskdefinition works fine even when older botocore is used
- assert:
- that:
- - ecs_taskdefinition_creation_vpc.changed
-
- - name: create ecs_service using awsvpc network_configuration
- ecs_service:
- name: "{{ resource_prefix }}-vpc"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}-vpc"
- desired_count: 1
- network_configuration:
- subnets:
- - subnet-abcd1234
- security_groups:
- - sg-abcd1234
- assign_public_ip: true
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation_vpc
- ignore_errors: yes
-
- - name: check that graceful failure message is returned from ecs_service
- assert:
- that:
- - ecs_service_creation_vpc.failed
- - 'ecs_service_creation_vpc.msg == "botocore needs to be version 1.8.4 or higher to use assign_public_ip in network_configuration"'
-
- always:
- - name: scale down ecs service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 0
- state: present
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: pause to wait for scale down
- pause:
- seconds: 30
-
- - name: remove ecs service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 1
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ecs task definition
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}"
- revision: "{{ ecs_taskdefinition_creation.taskdefinition.revision }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ecs task definition vpc
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}-vpc"
- revision: "{{ ecs_taskdefinition_creation_vpc.taskdefinition.revision }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ecs cluster
- ecs_cluster:
- name: "{{ resource_prefix }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/ecs_cluster/tasks/network_fail.yml b/test/integration/targets/ecs_cluster/tasks/network_fail.yml
deleted file mode 100644
index 4c05083720..0000000000
--- a/test/integration/targets/ecs_cluster/tasks/network_fail.yml
+++ /dev/null
@@ -1,216 +0,0 @@
-- block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: True
-
- - name: create ecs cluster
- ecs_cluster:
- name: "{{ resource_prefix }}"
- state: present
- <<: *aws_connection_info
-
- - name: create ecs_taskdefinition with bridged network
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}"
- state: present
- network_mode: bridge
- <<: *aws_connection_info
- register: ecs_taskdefinition_creation
-
- - name: create ecs_taskdefinition with awsvpc network
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}-vpc"
- state: present
- network_mode: awsvpc
- <<: *aws_connection_info
- register: ecs_taskdefinition_creation_vpc
-
- - name: create ecs_taskdefinition and execution_role_arn (expected to fail)
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}-vpc"
- execution_role_arn: not_a_real_arn
- state: present
- network_mode: awsvpc
- <<: *aws_connection_info
- ignore_errors: yes
- register: ecs_taskdefinition_arn
-
- - name: check that graceful failure message is returned from ecs_taskdefinition_arn
- assert:
- that:
- - ecs_taskdefinition_arn.failed
- - 'ecs_taskdefinition_arn.msg == "botocore needs to be version 1.10.44 or higher to use execution_role_arn"'
-
- - name: ecs_taskdefinition works fine even when older botocore is used
- assert:
- that:
- - ecs_taskdefinition_creation_vpc.changed
-
- - name: create ecs_service using bridged network
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 1
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation
-
- - name: create ecs_service using awsvpc network_configuration
- ecs_service:
- name: "{{ resource_prefix }}-vpc"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}-vpc"
- desired_count: 1
- network_configuration:
- subnets:
- - subnet-abcd1234
- security_groups:
- - sg-abcd1234
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation_vpc
- ignore_errors: yes
-
- - name: check that graceful failure message is returned from ecs_service
- assert:
- that:
- - ecs_service_creation_vpc.failed
- - 'ecs_service_creation_vpc.msg == "botocore needs to be version 1.7.44 or higher to use network configuration"'
-
- - name: create ecs_service using awsvpc network_configuration and launch_type
- ecs_service:
- name: "{{ resource_prefix }}-vpc"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}-vpc"
- desired_count: 1
- network_configuration:
- subnets:
- - subnet-abcd1234
- security_groups:
- - sg-abcd1234
- launch_type: FARGATE
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation_vpc_launchtype
- ignore_errors: yes
-
- - name: check that graceful failure message is returned from ecs_service
- assert:
- that:
- - ecs_service_creation_vpc_launchtype.failed
- - 'ecs_service_creation_vpc_launchtype.msg == "botocore needs to be version 1.7.44 or higher to use network configuration"'
-
- - name: create ecs_service with launchtype and missing network_configuration
- ecs_service:
- name: "{{ resource_prefix }}-vpc"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}-vpc"
- desired_count: 1
- launch_type: FARGATE
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation_vpc_launchtype_nonet
- ignore_errors: yes
-
- - name: check that graceful failure message is returned from ecs_service
- assert:
- that:
- - ecs_service_creation_vpc_launchtype_nonet.failed
- - 'ecs_service_creation_vpc_launchtype_nonet.msg == "launch_type is FARGATE but all of the following are missing: network_configuration"'
-
- - name: create ecs_task using awsvpc network_configuration
- ecs_task:
- cluster: "{{ resource_prefix }}-vpc"
- task_definition: "{{ resource_prefix }}"
- operation: run
- count: 1
- started_by: me
- network_configuration:
- subnets:
- - subnet-abcd1234
- security_groups:
- - sg-abcd1234
- <<: *aws_connection_info
- register: ecs_task_creation_vpc
- ignore_errors: yes
-
- - name: check that graceful failure message is returned from ecs_task
- assert:
- that:
- - ecs_task_creation_vpc.failed
- - 'ecs_task_creation_vpc.msg == "botocore needs to be version 1.7.44 or higher to use network configuration"'
-
-
- always:
- - name: scale down ecs service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 0
- state: present
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: pause to wait for scale down
- pause:
- seconds: 30
-
- - name: remove ecs service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 1
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ecs task definition
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}"
- revision: "{{ ecs_taskdefinition_creation.taskdefinition.revision }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ecs task definition vpc
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}-vpc"
- revision: "{{ ecs_taskdefinition_creation_vpc.taskdefinition.revision }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ecs cluster
- ecs_cluster:
- name: "{{ resource_prefix }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml b/test/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml
deleted file mode 100644
index c86e7222b2..0000000000
--- a/test/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml
+++ /dev/null
@@ -1,124 +0,0 @@
-- block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: True
-
- - name: create ecs cluster
- ecs_cluster:
- name: "{{ resource_prefix }}"
- state: present
- <<: *aws_connection_info
-
- - name: create ecs_taskdefinition
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}"
- state: present
- <<: *aws_connection_info
- register: ecs_taskdefinition_creation
-
- # Even after deleting the cluster and recreating it with a different name,
- # a previous service in a draining state can prevent the new service from
- # starting. Check the service info and sleep if the service does not report
- # as inactive.
-
- - name: check if service is still running from a previous task
- ecs_service_info:
- service: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- details: yes
- <<: *aws_connection_info
- register: ecs_service_info_results
-
- - name: show service status from the previous run
- debug: var=ecs_service_info_results
-
- - name: delay if the service was not inactive
- pause:
- seconds: 30
- when:
- - ecs_service_info_results.services | length > 0
- - ecs_service_info_results.services[0]['status'] != 'INACTIVE'
-
- - name: create ecs_service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 1
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation
-
- - name: ecs_service works fine even when older botocore is used
- assert:
- that:
- - ecs_service_creation.changed
-
- - name: create ecs_service using force_new_deployment
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 1
- force_new_deployment: true
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation_force_new_deploy
- ignore_errors: yes
-
- - name: check that module returns success
- assert:
- that:
- - ecs_service_creation_force_new_deploy.changed
-
- always:
- - name: scale down ecs service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 0
- state: present
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: pause to wait for scale down
- pause:
- seconds: 30
-
- - name: remove ecs service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 1
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ecs task definition
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}"
- revision: "{{ ecs_taskdefinition_creation.taskdefinition.revision }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ecs cluster
- ecs_cluster:
- name: "{{ resource_prefix }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/ecs_cluster/tasks/network_force_new_deployment_fail.yml b/test/integration/targets/ecs_cluster/tasks/network_force_new_deployment_fail.yml
deleted file mode 100644
index 95e8c576de..0000000000
--- a/test/integration/targets/ecs_cluster/tasks/network_force_new_deployment_fail.yml
+++ /dev/null
@@ -1,125 +0,0 @@
-- block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: True
-
- - name: create ecs cluster
- ecs_cluster:
- name: "{{ resource_prefix }}"
- state: present
- <<: *aws_connection_info
-
- - name: create ecs_taskdefinition
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}"
- state: present
- <<: *aws_connection_info
- register: ecs_taskdefinition_creation
-
- # Even after deleting the cluster and recreating it with a different name,
- # a previous service in a draining state can prevent the new service from
- # starting. Check the service info and sleep if the service does not report
- # as inactive.
-
- - name: check if service is still running from a previous task
- ecs_service_info:
- service: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- details: yes
- <<: *aws_connection_info
- register: ecs_service_info_results
-
- - name: show service status from the previous run
- debug: var=ecs_service_info_results
-
- - name: delay if the service was not inactive
- pause:
- seconds: 30
- when:
- - ecs_service_info_results.services | length > 0
- - ecs_service_info_results.services[0]['status'] != 'INACTIVE'
-
- - name: create ecs_service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 1
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation
-
- - name: ecs_service works fine even when older botocore is used
- assert:
- that:
- - ecs_service_creation.changed
-
- - name: create ecs_service using force_new_deployment
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 1
- force_new_deployment: true
- state: present
- <<: *aws_connection_info
- register: ecs_service_creation_force_new_deploy
- ignore_errors: yes
-
- - name: check that graceful failure message is returned from ecs_service
- assert:
- that:
- - ecs_service_creation_force_new_deploy.failed
- - 'ecs_service_creation_force_new_deploy.msg == "botocore needs to be version 1.8.4 or higher to use force_new_deployment"'
-
- always:
- - name: scale down ecs service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 0
- state: present
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: pause to wait for scale down
- pause:
- seconds: 30
-
- - name: remove ecs service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 1
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ecs task definition
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}"
- revision: "{{ ecs_taskdefinition_creation.taskdefinition.revision }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: remove ecs cluster
- ecs_cluster:
- name: "{{ resource_prefix }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/ecs_ecr/aliases b/test/integration/targets/ecs_ecr/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/ecs_ecr/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/ecs_ecr/defaults/main.yml b/test/integration/targets/ecs_ecr/defaults/main.yml
deleted file mode 100644
index 4a9127942f..0000000000
--- a/test/integration/targets/ecs_ecr/defaults/main.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-policy:
- Version: '2008-10-17'
- Statement:
- - Sid: new statement
- Effect: Allow
- Principal: "*"
- Action:
- - ecr:GetDownloadUrlForLayer
- - ecr:BatchGetImage
- - ecr:BatchCheckLayerAvailability
-
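-# Expires untagged images 365 days after they were pushed.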
-lifecycle_policy:
- rules:
- - rulePriority: 1
- description: new policy
- selection:
- tagStatus: untagged
- countType: sinceImagePushed
- countUnit: days
- countNumber: 365
- action:
- type: expire
diff --git a/test/integration/targets/ecs_ecr/meta/main.yml b/test/integration/targets/ecs_ecr/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/ecs_ecr/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/ecs_ecr/tasks/main.yml b/test/integration/targets/ecs_ecr/tasks/main.yml
deleted file mode 100644
index 362cd8175d..0000000000
--- a/test/integration/targets/ecs_ecr/tasks/main.yml
+++ /dev/null
@@ -1,543 +0,0 @@
----
-- set_fact:
- ecr_name: '{{ resource_prefix }}-ecr'
-
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: When creating with check mode
- ecs_ecr:
- name: '{{ ecr_name }}'
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - name: it should skip, change and create
- assert:
- that:
- - result is skipped
- - result is changed
- - result.created
-
-
- - name: When specifying a registry that is inaccessible
- ecs_ecr:
- registry_id: 999999999999
- name: '{{ ecr_name }}'
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: it should fail with an AccessDeniedException
- assert:
- that:
- - result is failed
- - '"AccessDeniedException" in result.msg'
-
-
- - name: When creating a repository
- ecs_ecr:
- name: '{{ ecr_name }}'
- <<: *aws_connection_info
- register: result
-
- - name: it should change and create
- assert:
- that:
- - result is changed
- - result.created
-
- - name: it should have been configured as mutable by default
- assert:
- that:
- - result.repository.imageTagMutability == "MUTABLE"
-
-
- - name: When creating a repository that already exists in check mode
- ecs_ecr:
- name: '{{ ecr_name }}'
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - name: it should not skip, should not change
- assert:
- that:
- - result is not skipped
- - result is not changed
-
-
- - name: When creating a repository that already exists
- ecs_ecr:
- name: '{{ ecr_name }}'
- <<: *aws_connection_info
- register: result
-
- - name: it should not change
- assert:
- that:
- - result is not changed
-
-
- - name: When in check mode, and deleting a policy that does not exist
- ecs_ecr:
- name: '{{ ecr_name }}'
- purge_policy: yes
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - name: it should not skip and not change
- assert:
- that:
- - result is not skipped
- - result is not changed
-
-
- - name: When in check mode, setting policy on a repository that has no policy
- ecs_ecr:
- name: '{{ ecr_name }}'
- policy: '{{ policy }}'
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - name: it should skip, change and not create
- assert:
- that:
- - result is skipped
- - result is changed
- - not result.created
-
-
- - name: When setting policy on a repository that has no policy
- ecs_ecr:
- name: '{{ ecr_name }}'
- policy: '{{ policy }}'
- <<: *aws_connection_info
- register: result
-
- - name: it should change and not create
- assert:
- that:
- - result is changed
- - not result.created
-
-
- - name: When in check mode, and deleting a policy that exists
- ecs_ecr:
- name: '{{ ecr_name }}'
- delete_policy: yes
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - name: it should skip, change but not create, and report deprecations
- assert:
- that:
- - result is skipped
- - result is changed
- - not result.created
- - result.deprecations
-
-
- - name: When in check mode, and purging a policy that exists
- ecs_ecr:
- name: '{{ ecr_name }}'
- purge_policy: yes
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - name: it should skip, change but not create, with no deprecations
- assert:
- that:
- - result is skipped
- - result is changed
- - not result.created
- - result.deprecations is not defined
-
-
- - name: When purging a policy that exists
- ecs_ecr:
- name: '{{ ecr_name }}'
- purge_policy: yes
- <<: *aws_connection_info
- register: result
-
- - name: it should change and not create
- assert:
- that:
- - result is changed
- - not result.created
-
-
- - name: When setting a policy as a string
- ecs_ecr:
- name: '{{ ecr_name }}'
- policy: '{{ policy | to_json }}'
- <<: *aws_connection_info
- register: result
-
- - name: it should change and not create
- assert:
- that:
- - result is changed
- - not result.created
-
-
- - name: When setting a policy to its current value
- ecs_ecr:
- name: '{{ ecr_name }}'
- policy: '{{ policy }}'
- <<: *aws_connection_info
- register: result
-
- - name: it should not change
- assert:
- that:
- - result is not changed
-
- - name: When omitting policy on a repository that has a policy
- ecs_ecr:
- name: '{{ ecr_name }}'
- <<: *aws_connection_info
- register: result
-
- - name: it should not change
- assert:
- that:
- - result is not changed
-
- - name: When specifying both policy and purge_policy
- ecs_ecr:
- name: '{{ ecr_name }}'
- policy: '{{ policy }}'
- purge_policy: yes
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: it should fail
- assert:
- that:
- - result is failed
-
-
- - name: When specifying invalid JSON for policy
- ecs_ecr:
- name: '{{ ecr_name }}'
- policy: "Ceci n'est pas une JSON"
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: it should fail
- assert:
- that:
- - result is failed
-
-
- - name: When in check mode, and purging a lifecycle policy that does not exist
- ecs_ecr:
- name: '{{ ecr_name }}'
- purge_lifecycle_policy: yes
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - name: it should not skip and not change
- assert:
- that:
- - not result is skipped
- - not result is changed
-
-
- - name: When in check mode, setting lifecycle policy on a repository that has no policy
- ecs_ecr:
- name: '{{ ecr_name }}'
- lifecycle_policy: '{{ lifecycle_policy }}'
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - name: it should skip, change and not create
- assert:
- that:
- - result is skipped
- - result is changed
- - not result.created
-
-
- - name: When setting lifecycle policy on a repository that has no policy
- ecs_ecr:
- name: '{{ ecr_name }}'
- lifecycle_policy: '{{ lifecycle_policy }}'
- <<: *aws_connection_info
- register: result
-
- - name: it should change and not create
- assert:
- that:
- - result is changed
- - not result.created
- - result.lifecycle_policy is defined
- - result.lifecycle_policy.rules|length == 1
-
-
- - name: When in check mode, and purging a lifecycle policy that exists
- ecs_ecr:
- name: '{{ ecr_name }}'
- purge_lifecycle_policy: yes
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - name: it should skip, change but not create
- assert:
- that:
- - result is skipped
- - result is changed
- - not result.created
-
-
- - name: When purging a lifecycle policy that exists
- ecs_ecr:
- name: '{{ ecr_name }}'
- purge_lifecycle_policy: yes
- <<: *aws_connection_info
- register: result
-
- - name: it should change and not create
- assert:
- that:
- - result is changed
- - not result.created
-
-
- - name: When setting a lifecycle policy as a string
- ecs_ecr:
- name: '{{ ecr_name }}'
- lifecycle_policy: '{{ lifecycle_policy | to_json }}'
- <<: *aws_connection_info
- register: result
-
- - name: it should change and not create
- assert:
- that:
- - result is changed
- - not result.created
-
-
- - name: When setting a lifecycle policy to its current value
- ecs_ecr:
- name: '{{ ecr_name }}'
- lifecycle_policy: '{{ lifecycle_policy }}'
- <<: *aws_connection_info
- register: result
-
- - name: it should not change
- assert:
- that:
- - not result is changed
-
-
- - name: When omitting lifecycle policy on a repository that has a policy
- ecs_ecr:
- name: '{{ ecr_name }}'
- <<: *aws_connection_info
- register: result
-
- - name: it should not change
- assert:
- that:
- - not result is changed
-
-
- - name: When specifying both lifecycle_policy and purge_lifecycle_policy
- ecs_ecr:
- name: '{{ ecr_name }}'
- lifecycle_policy: '{{ lifecycle_policy }}'
- purge_lifecycle_policy: yes
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: it should fail
- assert:
- that:
- - result is failed
-
-
- - name: When specifying invalid JSON for lifecycle policy
- ecs_ecr:
- name: '{{ ecr_name }}'
- lifecycle_policy: "Ceci n'est pas une JSON"
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: it should fail
- assert:
- that:
- - result is failed
-
-
- - name: When specifying an invalid document for lifecycle policy
- ecs_ecr:
- name: '{{ ecr_name }}'
- lifecycle_policy:
- rules:
- - invalid: "Ceci n'est pas une rule"
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: it should fail
- assert:
- that:
- - result is failed
-
-
- - name: When in check mode, deleting a repository that exists
- ecs_ecr:
- name: '{{ ecr_name }}'
- state: absent
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - name: it should skip, change and not create
- assert:
- that:
- - result is skipped
- - result is changed
- - not result.created
-
-
- - name: When deleting a repository that exists
- ecs_ecr:
- name: '{{ ecr_name }}'
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: it should change
- assert:
- that:
- - result is changed
-
-
- - name: When in check mode, deleting a repository that does not exist
- ecs_ecr:
- name: '{{ ecr_name }}'
- state: absent
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - name: it should not skip and not change
- assert:
- that:
- - result is not skipped
- - result is not changed
-
-
- - name: When deleting a repository that does not exist
- ecs_ecr:
- name: '{{ ecr_name }}'
- state: absent
- <<: *aws_connection_info
- register: result
-
- - name: it should not change
- assert:
- that:
- - result is not changed
-
- - name: When creating an immutable repository
- ecs_ecr:
- name: '{{ ecr_name }}'
- region: '{{ ec2_region }}'
- ec2_access_key: '{{ec2_access_key}}'
- ec2_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- image_tag_mutability: immutable
- register: result
-
- - name: it should change and create
- assert:
- that:
- - result is changed
- - result.created
-
- - name: it should have been configured as immutable
- assert:
- that:
- - result.repository.imageTagMutability == "IMMUTABLE"
-
-
- - name: When configuring an existing immutable repository to be mutable in check mode
- ecs_ecr:
- name: '{{ ecr_name }}'
- region: '{{ ec2_region }}'
- ec2_access_key: '{{ec2_access_key}}'
- ec2_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- image_tag_mutability: mutable
- register: result
- check_mode: yes
-
- - name: it should skip, change and be configured as mutable
- assert:
- that:
- - result is skipped
- - result is changed
- - result.repository.imageTagMutability == "MUTABLE"
-
- - name: When configuring an existing immutable repository to be mutable
- ecs_ecr:
- name: '{{ ecr_name }}'
- region: '{{ ec2_region }}'
- ec2_access_key: '{{ec2_access_key}}'
- ec2_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- image_tag_mutability: mutable
- register: result
-
- - name: it should change and be configured as mutable
- assert:
- that:
- - result is changed
- - result.repository.imageTagMutability == "MUTABLE"
-
- - name: When configuring an already mutable repository to be mutable
- ecs_ecr:
- name: '{{ ecr_name }}'
- region: '{{ ec2_region }}'
- ec2_access_key: '{{ec2_access_key}}'
- ec2_secret_key: '{{ec2_secret_key}}'
- security_token: '{{security_token}}'
- image_tag_mutability: mutable
- register: result
-
- - name: it should not change
- assert:
- that:
- - result is not changed
-
- always:
-
- - name: Delete lingering ECR repository
- ecs_ecr:
- name: '{{ ecr_name }}'
- state: absent
- <<: *aws_connection_info
diff --git a/test/integration/targets/ecs_tag/aliases b/test/integration/targets/ecs_tag/aliases
deleted file mode 100644
index fe51f28bd2..0000000000
--- a/test/integration/targets/ecs_tag/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-ecs_tag
-unsupported
diff --git a/test/integration/targets/ecs_tag/tasks/main.yml b/test/integration/targets/ecs_tag/tasks/main.yml
deleted file mode 100644
index 9f7ef83ae5..0000000000
--- a/test/integration/targets/ecs_tag/tasks/main.yml
+++ /dev/null
@@ -1,320 +0,0 @@
-- module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key | default(omit) }}'
- aws_secret_key: '{{ aws_secret_key | default(omit) }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region | default(omit) }}'
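- # module_defaults applies these connection parameters to every module in the
- # group/aws action group, so the tasks below can omit them; default(omit)
- # drops any parameter whose variable is undefined instead of passing an
- # empty value.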
- block:
- - name: create ecs cluster
- ecs_cluster:
- name: "{{ resource_prefix }}"
- state: present
- register: cluster_info
-
- - name: create ecs_taskdefinition
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}"
- state: present
- register: ecs_taskdefinition_creation
-
- # Even after deleting the cluster and recreating it with a different name,
- # the previous service can prevent the current service from starting while
- # it is in a draining state. Check the service info and sleep if the
- # service does not report as INACTIVE.
-
- - name: check if the service is still running from a previous test run
- ecs_service_info:
- service: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- details: yes
- register: ecs_service_info_results
-
- - name: delay if the service was not inactive
- pause:
- seconds: 30
- when:
- - ecs_service_info_results.services|length > 0
- - ecs_service_info_results.services[0]['status'] != 'INACTIVE'
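-
- # A more defensive variant (a sketch, not part of the original suite) would
- # poll with until/retries instead of a single fixed pause:
- #
- # - name: wait for the old service to report INACTIVE
- #   ecs_service_info:
- #     service: "{{ resource_prefix }}"
- #     cluster: "{{ resource_prefix }}"
- #     details: yes
- #   register: ecs_service_info_results
- #   retries: 10
- #   delay: 15
- #   until: >-
- #     ecs_service_info_results.services|length == 0 or
- #     ecs_service_info_results.services[0].status == 'INACTIVE'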
-
- - name: create ecs_service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 1
- state: present
- register: ecs_service_creation
-
- - name: ecs_service up
- assert:
- that:
- - ecs_service_creation.changed
-
- # Test tagging cluster resource
-
- - name: cluster tags - Add tags to cluster
- ecs_tag:
- cluster_name: "{{resource_prefix}}"
- resource: "{{resource_prefix}}"
- resource_type: cluster
- state: present
- tags:
- Name: "{{ resource_prefix }}"
- another: foobar
- register: taglist
-
- - name: cluster tags - tags should be there
- assert:
- that:
- - taglist.changed == true
- - taglist.added_tags.Name == "{{ resource_prefix }}"
- - taglist.added_tags.another == "foobar"
-
- - name: cluster tags - Add tags to cluster again
- ecs_tag:
- cluster_name: "{{resource_prefix}}"
- resource: "{{resource_prefix}}"
- resource_type: cluster
- state: present
- tags:
- Name: "{{ resource_prefix }}"
- another: foobar
- register: taglist
-
- - name: cluster tags - No change after adding again
- assert:
- that:
- - taglist.changed == false
-
- - name: cluster tags - List tags
- ecs_tag:
- cluster_name: "{{ resource_prefix}}"
- resource: "{{ resource_prefix}}"
- resource_type: cluster
- state: list
- register: taglist
-
- - name: cluster tags - should have 2 tags
- assert:
- that:
- - taglist.tags|list|length == 2
- - taglist.failed == false
- - taglist.changed == false
-
- - name: cluster tags - remove tag another
- ecs_tag:
- cluster_name: "{{resource_prefix}}"
- resource: "{{resource_prefix}}"
- resource_type: cluster
- state: absent
- tags:
- another:
- register: taglist
-
- - name: cluster tags - tag another should be gone
- assert:
- that:
- - taglist.changed == true
- - '"another" not in taglist.tags'
-
- - name: cluster tags - remove tag when not present
- ecs_tag:
- cluster_name: "{{resource_prefix}}"
- resource: "{{resource_prefix}}"
- resource_type: cluster
- state: absent
- tags:
- temp:
- temp_two:
- register: taglist
- ignore_errors: yes
-
- - name: cluster tags - check that the task did not fail and reported no change
- assert:
- that:
- - taglist.failed == false
- - taglist.changed == false
-
-
- - name: cluster tags - invalid cluster name
- ecs_tag:
- cluster_name: "{{resource_prefix}}-foo"
- resource: "{{resource_prefix}}-foo"
- resource_type: cluster
- state: absent
- tags:
- temp:
- temp_two:
- register: taglist
- ignore_errors: yes
-
- - name: cluster tags - Make sure an invalid cluster name is handled
- assert:
- that:
- - taglist.failed == true
- - taglist.changed == false
- - 'taglist.msg is regex("Failed to find cluster ansible-test-.*-foo")'
-
- # Test tagging service resource
-
- - name: services tags - Add name tag
- ecs_tag:
- cluster_name: "{{resource_prefix}}"
- resource: "{{ecs_service_creation.service.serviceName}}"
- resource_type: service
- state: present
- tags:
- Name: "service-{{resource_prefix}}"
- register: taglist
-
- - name: service tags - Name tag should be there
- assert:
- that:
- - taglist.changed == true
- - taglist.added_tags.Name == "service-{{ resource_prefix }}"
- - taglist.tags.Name == "service-{{ resource_prefix }}"
-
- - name: services tags - Add name tag again - see no change
- ecs_tag:
- cluster_name: "{{resource_prefix}}"
- resource: "{{ecs_service_creation.service.serviceName}}"
- resource_type: service
- state: present
- tags:
- Name: "service-{{resource_prefix}}"
- register: taglist
-
- - name: service tags - test that adding the tag twice has no effect
- assert:
- that:
- - taglist.changed == false
- - taglist.tags.Name == "service-{{ resource_prefix }}"
-
- - name: service tags - remove service tags
- ecs_tag:
- cluster_name: "{{resource_prefix}}"
- resource: "{{ecs_service_creation.service.serviceName}}"
- resource_type: service
- state: absent
- tags:
- Name:
- register: taglist
-
- - name: service tags - all tags gone
- assert:
- that:
- - taglist.tags|list|length == 0
- - taglist.changed == true
- - '"Name" not in taglist.tags'
-
-
- # Test tagging task_definition resource
-
- - name: task_definition tags - Add name tag
- ecs_tag:
- cluster_name: "{{resource_prefix}}"
- resource: "{{ecs_taskdefinition_creation.taskdefinition.family}}"
- resource_type: task_definition
- state: present
- tags:
- Name: "task_definition-{{resource_prefix}}"
- register: taglist
-
- - name: task_definition tags - Name tag should be there
- assert:
- that:
- - taglist.changed == true
- - taglist.added_tags.Name == "task_definition-{{ resource_prefix }}"
- - taglist.tags.Name == "task_definition-{{ resource_prefix }}"
-
- - name: task_definition tags - Add name tag again - see no change
- ecs_tag:
- cluster_name: "{{resource_prefix}}"
- resource: "{{ecs_taskdefinition_creation.taskdefinition.family}}"
- resource_type: task_definition
- state: present
- tags:
- Name: "task_definition-{{resource_prefix}}"
- register: taglist
-
- - name: task_definition tags - test that adding the tag twice has no effect
- assert:
- that:
- - taglist.changed == false
- - taglist.tags.Name == "task_definition-{{ resource_prefix }}"
-
- - name: task_definition tags - retrieve all tags on a task_definition
- ecs_tag:
- cluster_name: "{{resource_prefix}}"
- resource: "{{ecs_taskdefinition_creation.taskdefinition.family}}"
- resource_type: task_definition
- state: list
- register: taglist
-
- - name: task_definition tags - should have 1 tag
- assert:
- that:
- - taglist.tags|list|length == 1
- - taglist.failed == false
- - taglist.changed == false
-
- - name: task_definition tags - remove task_definition tags
- ecs_tag:
- cluster_name: "{{resource_prefix}}"
- resource: "{{ecs_taskdefinition_creation.taskdefinition.family}}"
- resource_type: task_definition
- state: absent
- tags:
- Name:
- register: taglist
-
- - name: task_definition tags - all tags gone
- assert:
- that:
- - taglist.tags|list|length == 0
- - taglist.changed == true
- - '"Name" not in taglist.tags'
-
- always:
- - name: scale down ecs service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 0
- state: present
- ignore_errors: yes
-
- - name: pause to wait for scale down
- pause:
- seconds: 30
-
- - name: remove ecs service
- ecs_service:
- name: "{{ resource_prefix }}"
- cluster: "{{ resource_prefix }}"
- task_definition: "{{ resource_prefix }}"
- desired_count: 1
- state: absent
- ignore_errors: yes
-
- - name: remove ecs task definition
- ecs_taskdefinition:
- containers:
- - name: my_container
- image: ubuntu
- memory: 128
- family: "{{ resource_prefix }}"
- revision: "{{ ecs_taskdefinition_creation.taskdefinition.revision }}"
- state: absent
- ignore_errors: yes
-
- - name: remove ecs cluster
- ecs_cluster:
- name: "{{ resource_prefix }}"
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/efs/aliases b/test/integration/targets/efs/aliases
deleted file mode 100644
index 3f9eda9949..0000000000
--- a/test/integration/targets/efs/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-unsupported
-efs_info
diff --git a/test/integration/targets/efs/playbooks/full_test.yml b/test/integration/targets/efs/playbooks/full_test.yml
deleted file mode 100644
index 6581151358..0000000000
--- a/test/integration/targets/efs/playbooks/full_test.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-- hosts: localhost
- connection: local
- environment: "{{ ansible_test.environment }}"
-
- vars:
- resource_prefix: 'ansible-testing'
-
- roles:
- - efs
diff --git a/test/integration/targets/efs/playbooks/roles/efs/tasks/main.yml b/test/integration/targets/efs/playbooks/roles/efs/tasks/main.yml
deleted file mode 100644
index a1a8465966..0000000000
--- a/test/integration/targets/efs/playbooks/roles/efs/tasks/main.yml
+++ /dev/null
@@ -1,327 +0,0 @@
----
-- block:
-
- # ============================================================
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: true
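-
- # The &aws_connection_info anchor is standard YAML; later tasks merge these
- # keys with "<<: *aws_connection_info" instead of repeating the credentials.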
-
- - name: Create VPC for testing
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.22.32.0/23
- tags:
- Name: Ansible ec2_instance Testing VPC
- tenancy: default
- <<: *aws_connection_info
- register: testing_vpc
-
- - name: Create subnet in zone A for testing
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.32.0/24
- az: "{{ aws_region }}a"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet-a"
- <<: *aws_connection_info
- register: testing_subnet_a
-
- - name: Create subnet in zone B for testing
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.33.0/24
- az: "{{ aws_region }}b"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet-b"
- <<: *aws_connection_info
- register: testing_subnet_b
-
- - name: Get default security group id for vpc
- ec2_group_info:
- <<: *aws_connection_info
- filters:
- vpc-id: "{{ testing_vpc.vpc.id }}"
- register: sg_facts
-
- - set_fact:
- vpc_default_sg_id: "{{sg_facts.security_groups[0].group_id}}"
-
-
- # ============================================================
- - name: Create EFS for testing
- efs:
- <<: *aws_connection_info
- state: present
- name: "{{ resource_prefix }}-test-efs"
- tags:
- Name: "{{ resource_prefix }}-test-tag"
- Purpose: file-storage
- targets:
- - subnet_id: "{{testing_subnet_a.subnet.id}}"
- - subnet_id: "{{testing_subnet_b.subnet.id}}"
- throughput_mode: 'bursting'
- register: created_efs
-
- # ============================================================
- - name: Get all EFS Facts
- efs_info:
- <<: *aws_connection_info
- register: efs_result
-
- - assert:
- that:
- - (efs_result.efs | length) >= 1
-
- # ============================================================
- - name: Get EFS by creation token
- efs_info:
- name: "{{ resource_prefix }}-test-efs"
- <<: *aws_connection_info
- register: efs_result
-
- - set_fact:
- efs_result_assertions:
- - efs_result is not changed
- - (efs_result.efs | length) == 1
- - efs_result.efs[0].creation_token == "{{ resource_prefix }}-test-efs"
- - efs_result.efs[0].file_system_id == created_efs.efs.file_system_id
- - efs_result.efs[0].number_of_mount_targets == 2
- - (efs_result.efs[0].mount_targets | length) == 2
- - efs_result.efs[0].name == "{{ resource_prefix }}-test-tag"
- - efs_result.efs[0].tags.Name == "{{ resource_prefix }}-test-tag"
- - efs_result.efs[0].tags.Purpose == "file-storage"
- - efs_result.efs[0].encrypted == false
- - efs_result.efs[0].life_cycle_state == "available"
- - efs_result.efs[0].performance_mode == "generalPurpose"
- - efs_result.efs[0].throughput_mode == "bursting"
- - efs_result.efs[0].mount_targets[0].security_groups[0] == vpc_default_sg_id
- - efs_result.efs[0].mount_targets[1].security_groups[0] == vpc_default_sg_id
-
- - assert:
- that: "{{efs_result_assertions}}"
-
- # ============================================================
- - name: Get EFS by id
- efs_info:
- id: "{{created_efs.efs.file_system_id}}"
- <<: *aws_connection_info
- register: efs_result
-
- - assert:
- that: "{{efs_result_assertions}}"
-
- # ============================================================
- - name: Get EFS by tag
- efs_info:
- tags:
- Name: "{{ resource_prefix }}-test-tag"
- <<: *aws_connection_info
- register: efs_result
-
- - assert:
- that: "{{efs_result_assertions}}"
-
- # ============================================================
- - name: Get EFS by target (subnet_id)
- efs_info:
- targets:
- - "{{testing_subnet_a.subnet.id}}"
- <<: *aws_connection_info
- register: efs_result
-
- - assert:
- that: "{{efs_result_assertions}}"
-
- # ============================================================
- - name: Get EFS by target (security_group_id)
- efs_info:
- targets:
- - "{{vpc_default_sg_id}}"
- <<: *aws_connection_info
- register: efs_result
-
- - assert:
- that: "{{efs_result_assertions}}"
-
- # ============================================================
- - name: Get EFS by tag and target
- efs_info:
- tags:
- Name: "{{ resource_prefix }}-test-tag"
- targets:
- - "{{testing_subnet_a.subnet.id}}"
- <<: *aws_connection_info
- register: efs_result
-
- - assert:
- that: "{{efs_result_assertions}}"
-
- # ============================================================
- # Not checking efs_result.efs["throughput_mode"] here because an EFS whose
- # life_cycle_state is "updating" might still return the previous values
- - name: Update EFS to use provisioned throughput_mode
- efs:
- <<: *aws_connection_info
- state: present
- name: "{{ resource_prefix }}-test-efs"
- tags:
- Name: "{{ resource_prefix }}-test-tag"
- Purpose: file-storage
- targets:
- - subnet_id: "{{testing_subnet_a.subnet.id}}"
- - subnet_id: "{{testing_subnet_b.subnet.id}}"
- throughput_mode: 'provisioned'
- provisioned_throughput_in_mibps: 5.0
- register: efs_result
-
- - assert:
- that:
- - efs_result is changed
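-
- # A sketch (not in the original suite) of letting the update settle before
- # asserting throughput values, assuming efs_info keeps reporting
- # life_cycle_state as above:
- #
- # - efs_info:
- #     name: "{{ resource_prefix }}-test-efs"
- #     <<: *aws_connection_info
- #   register: efs_wait
- #   retries: 10
- #   delay: 30
- #   until: efs_wait.efs[0].life_cycle_state == "available"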
-
- # ============================================================
- - name: Re-apply the same value for provisioned_throughput_in_mibps
- efs:
- <<: *aws_connection_info
- state: present
- name: "{{ resource_prefix }}-test-efs"
- tags:
- Name: "{{ resource_prefix }}-test-tag"
- Purpose: file-storage
- targets:
- - subnet_id: "{{testing_subnet_a.subnet.id}}"
- - subnet_id: "{{testing_subnet_b.subnet.id}}"
- throughput_mode: 'provisioned'
- provisioned_throughput_in_mibps: 5.0
- register: efs_result
-
- - assert:
- that:
- - efs_result is not changed
- - efs_result.efs["throughput_mode"] == "provisioned"
- - efs_result.efs["provisioned_throughput_in_mibps"] == 5.0
-
- # ============================================================
- - name: Set a new value for provisioned_throughput_in_mibps
- efs:
- <<: *aws_connection_info
- state: present
- name: "{{ resource_prefix }}-test-efs"
- tags:
- Name: "{{ resource_prefix }}-test-tag"
- Purpose: file-storage
- targets:
- - subnet_id: "{{testing_subnet_a.subnet.id}}"
- - subnet_id: "{{testing_subnet_b.subnet.id}}"
- throughput_mode: 'provisioned'
- provisioned_throughput_in_mibps: 8.0
- register: efs_result
-
- - assert:
- that:
- - efs_result is changed
- - efs_result.efs["provisioned_throughput_in_mibps"] == 8.0
-
- # ============================================================
- - name: Check new facts with provisioned mode
- efs_info:
- name: "{{ resource_prefix }}-test-efs"
- <<: *aws_connection_info
- register: efs_result
-
- - set_fact:
- efs_result_assertions:
- - efs_result is not changed
- - efs_result.efs[0].throughput_mode == "provisioned"
- - efs_result.efs[0].provisioned_throughput_in_mibps == 8.0
- - (efs_result.efs | length) == 1
- - efs_result.efs[0].creation_token == "{{ resource_prefix }}-test-efs"
- - efs_result.efs[0].file_system_id == created_efs.efs.file_system_id
-
- - assert:
- that: "{{efs_result_assertions}}"
-
- # ============================================================
- - name: Query unknown EFS by tag
- efs_info:
- tags:
- Name: "{{ resource_prefix }}-unknown"
- <<: *aws_connection_info
- register: efs_result
-
- - assert:
- that:
- - efs_result is not changed
- - (efs_result.efs | length) == 0
-
- - name: Query unknown EFS by target
- efs_info:
- targets:
- - sg-00000000000
- <<: *aws_connection_info
- register: efs_result
-
- - assert:
- that:
- - efs_result is not changed
- - (efs_result.efs | length) == 0
-
- # ============================================================
- always:
- - name: Delete EFS used for tests
- efs:
- <<: *aws_connection_info
- state: absent
- name: "{{ resource_prefix }}-test-efs"
- tags:
- Name: "{{ resource_prefix }}-test-tag"
- Purpose: file-storage
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: Remove test subnet in zone A
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.32.0/24
- az: "{{ aws_region }}a"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet-a"
- <<: *aws_connection_info
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: Remove test subnet in zone B
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ testing_vpc.vpc.id }}"
- cidr: 10.22.33.0/24
- az: "{{ aws_region }}b"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet-b"
- <<: *aws_connection_info
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: remove the VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.22.32.0/23
- state: absent
- <<: *aws_connection_info
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
diff --git a/test/integration/targets/efs/playbooks/version_fail.yml b/test/integration/targets/efs/playbooks/version_fail.yml
deleted file mode 100644
index 49c94ae35c..0000000000
--- a/test/integration/targets/efs/playbooks/version_fail.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-- hosts: localhost
- connection: local
- environment: "{{ ansible_test.environment }}"
- vars:
- resource_prefix: 'ansible-testing'
-
- tasks:
- - block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: True
-
- - name: create efs with provisioned_throughput options (fails gracefully)
- efs:
- state: present
- name: "{{ resource_prefix }}-efs"
- throughput_mode: 'provisioned'
- provisioned_throughput_in_mibps: 8.0
- <<: *aws_connection_info
- register: efs_provisioned_throughput_creation
- ignore_errors: yes
-
- - name: check that a graceful error message is returned when creating with throughput_mode on an old botocore
- assert:
- that:
- - efs_provisioned_throughput_creation.failed
- - 'efs_provisioned_throughput_creation.msg == "throughput_mode parameter requires botocore >= 1.10.57"'
diff --git a/test/integration/targets/efs/runme.sh b/test/integration/targets/efs/runme.sh
deleted file mode 100755
index e4f214b8e8..0000000000
--- a/test/integration/targets/efs/runme.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-export ANSIBLE_ROLES_PATH=../
-
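-# virtualenv.sh is an ansible-test helper which, as used here, creates and
-# activates a fresh virtualenv each time it is sourced, so each botocore pin
-# below runs in a clean environment.
-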
-# Test graceful failure for older versions of botocore
-source virtualenv.sh
-pip install 'botocore<1.10.57' boto3
-ansible-playbook -i ../../inventory -v playbooks/version_fail.yml "$@"
-
-# Run full test suite
-source virtualenv.sh
-pip install 'botocore>=1.10.57' boto3
-ansible-playbook -i ../../inventory -v playbooks/full_test.yml "$@"
diff --git a/test/integration/targets/elb_application_lb/aliases b/test/integration/targets/elb_application_lb/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/elb_application_lb/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/elb_application_lb/defaults/main.yml b/test/integration/targets/elb_application_lb/defaults/main.yml
deleted file mode 100644
index 8100bd55ed..0000000000
--- a/test/integration/targets/elb_application_lb/defaults/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# load balancer and target group names must be no longer than 32 characters;
-# the 8-digit identifier at the end of resource_prefix helps determine during
-# which test something was created and allows tests to be run in parallel
-alb_name: "my-alb-{{ resource_prefix | regex_search('([0-9]+)$') }}"
-tg_name: "my-tg-{{ resource_prefix | regex_search('([0-9]+)$') }}"
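-# For example, a hypothetical resource_prefix of "ansible-test-12345678" would
-# yield alb_name "my-alb-12345678" and tg_name "my-tg-12345678".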
diff --git a/test/integration/targets/elb_application_lb/meta/main.yml b/test/integration/targets/elb_application_lb/meta/main.yml
deleted file mode 100644
index 1810d4bec9..0000000000
--- a/test/integration/targets/elb_application_lb/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_remote_tmp_dir
diff --git a/test/integration/targets/elb_application_lb/tasks/full_test.yml b/test/integration/targets/elb_application_lb/tasks/full_test.yml
deleted file mode 100644
index ebb2a9ad0e..0000000000
--- a/test/integration/targets/elb_application_lb/tasks/full_test.yml
+++ /dev/null
@@ -1,259 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- awscli_connection_info: &awscli_connection_info
- AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
- AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
- AWS_SESSION_TOKEN: "{{ security_token }}"
- AWS_DEFAULT_REGION: "{{ aws_region }}"
- no_log: yes
-
- - name: create VPC
- ec2_vpc_net:
- cidr_block: 10.228.228.0/22
- name: "{{ resource_prefix }}_vpc"
- state: present
- <<: *aws_connection_info
- register: vpc
-
- - name: create internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- tags:
- Name: "{{ resource_prefix }}"
- <<: *aws_connection_info
- register: igw
-
- - name: create public subnet
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- az: "{{ aws_region}}{{ item.az }}"
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- tags:
- Public: "{{ item.public|string }}"
- Name: "{{ item.public|ternary('public', 'private') }}-{{ item.az }}"
- <<: *aws_connection_info
- with_items:
- - cidr: 10.228.228.0/24
- az: "a"
- public: "True"
- - cidr: 10.228.229.0/24
- az: "b"
- public: "True"
- - cidr: 10.228.230.0/24
- az: "a"
- public: "False"
- - cidr: 10.228.231.0/24
- az: "b"
- public: "False"
- register: subnets
-
- - ec2_vpc_subnet_info:
- filters:
- vpc-id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
- register: vpc_subnets
-
- - name: create list of subnet ids
- set_fact:
- alb_subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `True`].id') }}"
- private_subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public != `True`].id') }}"
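-
- # json_query evaluates a JMESPath expression; the backtick-quoted `True` is
- # a JMESPath literal, so this selects the ids of subnets whose Public tag
- # equals the string "True".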
-
- - name: create a route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
- tags:
- Name: igw-route
- Created: "{{ resource_prefix }}"
- subnets: "{{ alb_subnets + private_subnets }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- register: route_table
-
- - ec2_group:
- name: "{{ resource_prefix }}"
- description: "security group for Ansible ALB integration tests"
- state: present
- vpc_id: "{{ vpc.vpc.id }}"
- rules:
- - proto: tcp
- from_port: 1
- to_port: 65535
- cidr_ip: 0.0.0.0/0
- <<: *aws_connection_info
- register: sec_group
-
- - name: create a target group for testing
- elb_target_group:
- name: "{{ tg_name }}"
- protocol: http
- port: 80
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- <<: *aws_connection_info
- register: tg
-
- - name: create privatekey for testing
- openssl_privatekey:
- path: ./ansible_alb_test.pem
- size: 2048
-
- - name: create csr for cert
- openssl_csr:
- path: ./ansible_alb_test.csr
- privatekey_path: ./ansible_alb_test.pem
- C: US
- ST: AnyPrincipality
- L: AnyTown
- O: AnsibleIntegrationTest
- OU: Test
- CN: ansible-alb-test.example.com
-
- - name: create certificate
- openssl_certificate:
- path: ./ansible_alb_test.crt
- privatekey_path: ./ansible_alb_test.pem
- csr_path: ./ansible_alb_test.csr
- provider: selfsigned
-
- # This really should be an ACM Cert, but there is no acm_cert resource module
- - name: upload server cert to iam
- iam_cert:
- name: "{{ alb_name }}"
- state: present
- cert: ./ansible_alb_test.crt
- key: ./ansible_alb_test.pem
- <<: *aws_connection_info
- register: cert_upload
-
- - name: register certificate arn to acm_arn fact
- set_fact:
- cert_arn: "{{ cert_upload.arn }}"
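-
- # A possible ACM-based alternative (a sketch; parameter names assumed from
- # the aws_acm module rather than taken from this test suite):
- #
- # - name: upload server cert to ACM
- #   aws_acm:
- #     name_tag: "{{ alb_name }}"
- #     certificate: "{{ lookup('file', 'ansible_alb_test.crt') }}"
- #     private_key: "{{ lookup('file', 'ansible_alb_test.pem') }}"
- #     state: present
- #     <<: *aws_connection_info
- #   register: cert_upload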
-
- - include_tasks: test_alb_bad_listener_options.yml
- - include_tasks: test_alb_tags.yml
- - include_tasks: test_creating_alb.yml
- - include_tasks: test_alb_with_asg.yml
- - include_tasks: test_modifying_alb_listeners.yml
- - include_tasks: test_deleting_alb.yml
- - include_tasks: test_multiple_actions.yml
-
- always:
- #############################################################################
- # TEAR DOWN STARTS HERE
- #############################################################################
- - name: destroy ALB
- elb_application_lb:
- name: "{{ alb_name }}"
- state: absent
- wait: yes
- wait_timeout: 600
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: destroy target group if it was created
- elb_target_group:
- name: "{{ tg_name }}"
- protocol: http
- port: 80
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- wait: yes
- wait_timeout: 600
- <<: *aws_connection_info
- register: remove_tg
- retries: 5
- delay: 3
- until: remove_tg is success
- when: tg is defined
- ignore_errors: yes
-
- - name: destroy acm certificate
- iam_cert:
- name: "{{ alb_name }}"
- state: absent
- <<: *aws_connection_info
- register: remove_cert
- retries: 5
- delay: 3
- until: remove_cert is success
- when: cert_arn is defined
- ignore_errors: yes
-
- - name: destroy sec group
- ec2_group:
- name: "{{ sec_group.group_name }}"
- description: "security group for Ansible ALB integration tests"
- state: absent
- vpc_id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
- register: remove_sg
- retries: 10
- delay: 5
- until: remove_sg is success
- ignore_errors: yes
-
- - name: remove route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- route_table_id: "{{ route_table.route_table.route_table_id }}"
- lookup: id
- state: absent
- <<: *aws_connection_info
- register: remove_rt
- retries: 10
- delay: 5
- until: remove_rt is success
- ignore_errors: yes
-
- - name: destroy subnets
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- <<: *aws_connection_info
- register: remove_subnet
- retries: 10
- delay: 5
- until: remove_subnet is success
- with_items:
- - cidr: 10.228.228.0/24
- - cidr: 10.228.229.0/24
- - cidr: 10.228.230.0/24
- - cidr: 10.228.231.0/24
- ignore_errors: yes
-
- - name: destroy internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Name: "{{ resource_prefix }}"
- state: absent
- <<: *aws_connection_info
- register: remove_igw
- retries: 10
- delay: 5
- until: remove_igw is success
- ignore_errors: yes
-
- - name: destroy VPC
- ec2_vpc_net:
- cidr_block: 10.228.228.0/22
- name: "{{ resource_prefix }}_vpc"
- state: absent
- <<: *aws_connection_info
- register: remove_vpc
- retries: 10
- delay: 5
- until: remove_vpc is success
- ignore_errors: yes
diff --git a/test/integration/targets/elb_application_lb/tasks/main.yml b/test/integration/targets/elb_application_lb/tasks/main.yml
deleted file mode 100644
index 037d7fd4af..0000000000
--- a/test/integration/targets/elb_application_lb/tasks/main.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-- set_fact:
- virtualenv: "{{ remote_tmp_dir }}/virtualenv"
- virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv"
-
-- set_fact:
- virtualenv_interpreter: "{{ virtualenv }}/bin/python"
-
-- pip:
- name: virtualenv
-
-- pip:
- name:
- - 'botocore<1.10.30'
- - boto3
- - boto
- - coverage
- - cryptography
- virtualenv: "{{ virtualenv }}"
- virtualenv_command: "{{ virtualenv_command }}"
- virtualenv_site_packages: no
-
-- include_tasks: multiple_actions_fail.yml
- vars:
- ansible_python_interpreter: "{{ virtualenv_interpreter }}"
-
-
-- pip:
- name:
- - 'botocore>=1.10.30'
- - boto3
- - boto
- - coverage
- - cryptography
- virtualenv: "{{ virtualenv }}"
- virtualenv_command: "{{ virtualenv_command }}"
- virtualenv_site_packages: no
-
-- include_tasks: full_test.yml
- vars:
- ansible_python_interpreter: "{{ virtualenv_interpreter }}"
-
-- file:
- path: "{{ virtualenv }}"
- state: absent
diff --git a/test/integration/targets/elb_application_lb/tasks/multiple_actions_fail.yml b/test/integration/targets/elb_application_lb/tasks/multiple_actions_fail.yml
deleted file mode 100644
index 9c66ba2c68..0000000000
--- a/test/integration/targets/elb_application_lb/tasks/multiple_actions_fail.yml
+++ /dev/null
@@ -1,253 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- awscli_connection_info: &awscli_connection_info
- AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
- AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
- AWS_SESSION_TOKEN: "{{ security_token }}"
- AWS_DEFAULT_REGION: "{{ aws_region }}"
- no_log: yes
-
- - name: create VPC
- ec2_vpc_net:
- cidr_block: 10.228.228.0/22
- name: "{{ resource_prefix }}_vpc"
- state: present
- <<: *aws_connection_info
- register: vpc
-
- - name: create internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- tags:
- Name: "{{ resource_prefix }}"
- <<: *aws_connection_info
- register: igw
-
- - name: create public subnet
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- az: "{{ aws_region}}{{ item.az }}"
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- tags:
- Public: "{{ item.public|string }}"
- Name: "{{ item.public|ternary('public', 'private') }}-{{ item.az }}"
- <<: *aws_connection_info
- with_items:
- - cidr: 10.228.228.0/24
- az: "a"
- public: "True"
- - cidr: 10.228.229.0/24
- az: "b"
- public: "True"
- - cidr: 10.228.230.0/24
- az: "a"
- public: "False"
- - cidr: 10.228.231.0/24
- az: "b"
- public: "False"
- register: subnets
-
- - ec2_vpc_subnet_facts:
- filters:
- vpc-id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
- register: vpc_subnets
-
- - name: create list of subnet ids
- set_fact:
- alb_subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `True`].id') }}"
- private_subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public != `True`].id') }}"
-
- - name: create a route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
- tags:
- Name: igw-route
- Created: "{{ resource_prefix }}"
- subnets: "{{ alb_subnets + private_subnets }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- register: route_table
-
- - ec2_group:
- name: "{{ resource_prefix }}"
- description: "security group for Ansible ALB integration tests"
- state: present
- vpc_id: "{{ vpc.vpc.id }}"
- rules:
- - proto: tcp
- from_port: 1
- to_port: 65535
- cidr_ip: 0.0.0.0/0
- <<: *aws_connection_info
- register: sec_group
-
- - name: create a target group for testing
- elb_target_group:
- name: "{{ tg_name }}"
- protocol: http
- port: 80
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- <<: *aws_connection_info
- register: tg
-
- - name: create privatekey for testing
- openssl_privatekey:
- path: ./ansible_alb_test.pem
- size: 2048
-
- - name: create csr for cert
- openssl_csr:
- path: ./ansible_alb_test.csr
- privatekey_path: ./ansible_alb_test.pem
- C: US
- ST: AnyPrincipality
- L: AnyTown
- O: AnsibleIntegrationTest
- OU: Test
- CN: ansible-alb-test.example.com
-
- - name: create certificate
- openssl_certificate:
- path: ./ansible_alb_test.crt
- privatekey_path: ./ansible_alb_test.pem
- csr_path: ./ansible_alb_test.csr
- provider: selfsigned
-
- # This really should be an ACM Cert, but there is no acm_cert resource module
- - name: upload server cert to iam
- iam_cert:
- name: "{{ alb_name }}"
- state: present
- cert: ./ansible_alb_test.crt
- key: ./ansible_alb_test.pem
- <<: *aws_connection_info
- register: cert_upload
-
- - name: register certificate arn to acm_arn fact
- set_fact:
- cert_arn: "{{ cert_upload.arn }}"
-
- - include_tasks: test_multiple_actions_fail.yml
-
- always:
- #############################################################################
- # TEAR DOWN STARTS HERE
- #############################################################################
- - name: destroy ALB
- elb_application_lb:
- name: "{{ alb_name }}"
- state: absent
- wait: yes
- wait_timeout: 600
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: destroy target group if it was created
- elb_target_group:
- name: "{{ tg_name }}"
- protocol: http
- port: 80
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- wait: yes
- wait_timeout: 600
- <<: *aws_connection_info
- register: remove_tg
- retries: 10
- delay: 5
- until: remove_tg is success
- when: tg is defined
- ignore_errors: yes
-
- - name: destroy acm certificate
- iam_cert:
- name: "{{ alb_name }}"
- state: absent
- <<: *aws_connection_info
- register: remove_cert
- retries: 10
- delay: 5
- until: remove_cert is success
- when: cert_arn is defined
- ignore_errors: yes
-
- - name: destroy sec group
- ec2_group:
- name: "{{ sec_group.group_name }}"
- description: "security group for Ansible ALB integration tests"
- state: absent
- vpc_id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
- register: remove_sg
- retries: 10
- delay: 5
- until: remove_sg is success
- ignore_errors: yes
-
- - name: remove route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- route_table_id: "{{ route_table.route_table.route_table_id }}"
- lookup: id
- state: absent
- <<: *aws_connection_info
- register: remove_rt
- retries: 10
- delay: 5
- until: remove_rt is success
- ignore_errors: yes
-
- - name: destroy subnets
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- <<: *aws_connection_info
- register: remove_subnet
- retries: 10
- delay: 5
- until: remove_subnet is success
- with_items:
- - cidr: 10.228.228.0/24
- - cidr: 10.228.229.0/24
- - cidr: 10.228.230.0/24
- - cidr: 10.228.231.0/24
- ignore_errors: yes
-
- - name: destroy internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Name: "{{ resource_prefix }}"
- state: absent
- <<: *aws_connection_info
- register: remove_igw
- retries: 10
- delay: 5
- until: remove_igw is success
- ignore_errors: yes
-
- - name: destroy VPC
- ec2_vpc_net:
- cidr_block: 10.228.228.0/22
- name: "{{ resource_prefix }}_vpc"
- state: absent
- <<: *aws_connection_info
- register: remove_vpc
- retries: 10
- delay: 5
- until: remove_vpc is success
- ignore_errors: yes
diff --git a/test/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml b/test/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml
deleted file mode 100644
index 821ad36d76..0000000000
--- a/test/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: test creating an ALB with invalid listener options
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTPS
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- ignore_errors: yes
- register: alb
-
- - assert:
- that:
- - alb is failed
- - alb.msg.startswith("'SslPolicy' is a required listener dict key when Protocol = HTTPS")
-
- - name: test creating an ALB without providing required listener options
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Port: 80
- <<: *aws_connection_info
- ignore_errors: yes
- register: alb
-
- - assert:
- that:
- - alb is failed
- - '"missing required arguments" in alb.msg'
- - '"Protocol" in alb.msg'
- - '"DefaultActions" in alb.msg'
-
- - name: test creating an ALB providing an invalid listener option type
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTP
- Port: "bad type"
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- ignore_errors: yes
- register: alb
-
- - assert:
- that:
- - alb is failed
- - "'unable to convert to int' in alb.msg"
diff --git a/test/integration/targets/elb_application_lb/tasks/test_alb_tags.yml b/test/integration/targets/elb_application_lb/tasks/test_alb_tags.yml
deleted file mode 100644
index b7942fa736..0000000000
--- a/test/integration/targets/elb_application_lb/tasks/test_alb_tags.yml
+++ /dev/null
@@ -1,93 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create ALB with no listeners
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
-
- - name: re-create ALB with no listeners
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - not alb.changed
-
- - name: add tags to ALB
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- tags:
- created_by: "ALB test {{ resource_prefix }}"
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}"}'
-
- - name: remove tags from ALB
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- tags: {}
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - not alb.tags
-
- - name: test idempotence
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- tags: {}
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - not alb.changed
- - not alb.tags
-
- - name: destroy ALB with no listeners
- elb_application_lb:
- name: "{{ alb_name }}"
- state: absent
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
diff --git a/test/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml b/test/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml
deleted file mode 100644
index de97d5bdc9..0000000000
--- a/test/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml
+++ /dev/null
@@ -1,89 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - ec2_ami_info:
- <<: *aws_connection_info
- filters:
- architecture: x86_64
- virtualization-type: hvm
- root-device-type: ebs
- name: "amzn-ami-hvm*"
- owner-alias: "amazon"
- register: amis
-
- - set_fact:
- latest_amazon_linux: "{{ amis.images | sort(attribute='creation_date') | last }}"
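-
- # Jinja's sort() orders ascending by creation_date, so "last" picks the most
- # recently published AMI matching the amzn-ami-hvm* filters.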
-
- - ec2_asg:
- <<: *aws_connection_info
- state: absent
- name: "{{ resource_prefix }}-webservers"
- wait_timeout: 900
-
- - ec2_lc:
- <<: *aws_connection_info
- name: "{{ resource_prefix }}-web-lcfg"
- state: absent
-
- - name: Create launch config for testing
- ec2_lc:
- <<: *aws_connection_info
- name: "{{ resource_prefix }}-web-lcfg"
- assign_public_ip: true
- image_id: "{{ latest_amazon_linux.image_id }}"
- security_groups: "{{ sec_group.group_id }}"
- instance_type: t2.medium
- user_data: |
- #!/bin/bash
- set -x
- yum update -y --nogpgcheck
- yum install -y --nogpgcheck httpd
- echo "Hello Ansiblings!" >> /var/www/html/index.html
- service httpd start
- volumes:
- - device_name: /dev/xvda
- volume_size: 10
- volume_type: gp2
- delete_on_termination: true
-
- - name: Create autoscaling group for app server fleet
- ec2_asg:
- <<: *aws_connection_info
- name: "{{ resource_prefix }}-webservers"
- vpc_zone_identifier: "{{ alb_subnets }}"
- launch_config_name: "{{ resource_prefix }}-web-lcfg"
- termination_policies:
- - OldestLaunchConfiguration
- - Default
- health_check_period: 600
- health_check_type: EC2
- replace_all_instances: true
- min_size: 0
- max_size: 2
- desired_capacity: 1
- wait_for_instances: true
- target_group_arns:
- - "{{ tg.target_group_arn }}"
-
- always:
-
- - ec2_asg:
- <<: *aws_connection_info
- state: absent
- name: "{{ resource_prefix }}-webservers"
- wait_timeout: 900
- ignore_errors: yes
-
- - ec2_lc:
- <<: *aws_connection_info
- name: "{{ resource_prefix }}-web-lcfg"
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/elb_application_lb/tasks/test_creating_alb.yml b/test/integration/targets/elb_application_lb/tasks/test_creating_alb.yml
deleted file mode 100644
index ee932d4ede..0000000000
--- a/test/integration/targets/elb_application_lb/tasks/test_creating_alb.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create ALB with a listener
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - alb.listeners|length == 1
- - alb.listeners[0].rules|length == 1
-
- - name: test idempotence creating ALB with a listener
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - not alb.changed
- - alb.listeners|length == 1
- - alb.listeners[0].rules|length == 1
diff --git a/test/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml b/test/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml
deleted file mode 100644
index 34e278cb9f..0000000000
--- a/test/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: destroy ALB with listener
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: absent
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- wait: yes
- wait_timeout: 300
- register: alb
-
- - assert:
- that:
- - alb.changed
-
- - name: test idempotence
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: absent
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- wait: yes
- wait_timeout: 300
- register: alb
-
- - assert:
- that:
- - not alb.changed
diff --git a/test/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml b/test/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml
deleted file mode 100644
index 943d766340..0000000000
--- a/test/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml
+++ /dev/null
@@ -1,240 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: add a rule to the listener
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- Rules:
- - Conditions:
- - Field: path-pattern
- Values:
- - '/test'
- Priority: '1'
- Actions:
- - TargetGroupName: "{{ tg_name }}"
- Type: forward
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - alb.listeners[0].rules|length == 2
-
- - name: test replacing the rule with one with the same priority
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- purge_listeners: true
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- Rules:
- - Conditions:
- - Field: path-pattern
- Values:
- - '/new'
- Priority: '1'
- Actions:
- - TargetGroupName: "{{ tg_name }}"
- Type: forward
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - alb.listeners[0].rules|length == 2
-
- - name: test the rule will not be removed without purge_listeners
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - not alb.changed
- - alb.listeners[0].rules|length == 2
-
- - name: test that a rule can be added and other rules are not removed when purge_rules is no
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- purge_rules: no
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- Rules:
- - Conditions:
- - Field: path-pattern
- Values:
- - '/new'
- Priority: '2'
- Actions:
- - TargetGroupName: "{{ tg_name }}"
- Type: forward
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - alb.listeners[0].rules|length == 3
-
- - name: add a rule that uses the host header condition to the listener
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- purge_rules: no
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- Rules:
- - Conditions:
- - Field: host-header
- Values:
- - 'local.mydomain.com'
- Priority: '3'
- Actions:
- - TargetGroupName: "{{ tg_name }}"
- Type: forward
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - alb.listeners[0].rules|length == 4
- - '{{ alb|json_query("listeners[].rules[].conditions[].host_header_config.values[]")|length == 1 }}'
-
- - name: test replacing the rule that uses the host header condition with multiple host header conditions
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- purge_rules: no
- state: present
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- Rules:
- - Conditions:
- - Field: host-header
- Values:
- - 'local.mydomain.com'
- - 'alternate.mydomain.com'
- Priority: '3'
- Actions:
- - TargetGroupName: "{{ tg_name }}"
- Type: forward
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - alb.listeners[0].rules|length == 4
- - '{{ alb|json_query("listeners[].rules[].conditions[].host_header_config.values[]")|length == 2 }}'
-
- - name: remove the rule
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- purge_listeners: true
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- Rules: []
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - alb.listeners[0].rules|length == 1
-
- - name: remove listener from ALB
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners: []
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - not alb.listeners
-
- - name: add the listener to the ALB
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - alb.listeners|length == 1
- - alb.availability_zones|length == 2
diff --git a/test/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml b/test/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml
deleted file mode 100644
index 6223270c3d..0000000000
--- a/test/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml
+++ /dev/null
@@ -1,467 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: register dummy OIDC config
- set_fact:
- AuthenticateOidcActionConfig:
- AuthorizationEndpoint: "https://www.example.com/auth"
- ClientId: "eeeeeeeeeeeeeeeeeeeeeeeeee"
- ClientSecret: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
- Issuer: "https://www.example.com/issuer"
- OnUnauthenticatedRequest: "authenticate"
- Scope: "openid"
- SessionCookieName: "AWSELBAuthSessionCookie"
- SessionTimeout: 604800
- TokenEndpoint: "https://www.example.com/token"
- UserInfoEndpoint: "https://www.example.com/userinfo"
- UseExistingClientSecret: true
-
- - name: register fixed response action
- set_fact:
- FixedResponseActionConfig:
- ContentType: "text/plain"
- MessageBody: "This is the page you're looking for"
- StatusCode: "200"
-
- - name: register redirect action
- set_fact:
- RedirectActionConfig:
- Host: "#{host}"
- Path: "/example/redir" # or /#{path}
- Port: "#{port}"
- Protocol: "#{protocol}"
- Query: "#{query}"
- StatusCode: "HTTP_302" # or HTTP_301
-
- - name: delete existing ALB to avoid target group association issues
- elb_application_lb:
- name: "{{ alb_name }}"
- state: absent
- <<: *aws_connection_info
- wait: yes
- wait_timeout: 600
-
- - name: cleanup tg to avoid target group association issues
- elb_target_group:
- name: "{{ tg_name }}"
- protocol: http
- port: 80
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- wait: yes
- wait_timeout: 600
- <<: *aws_connection_info
- register: cleanup_tg
- retries: 5
- delay: 3
- until: cleanup_tg is success
-
- - name: recreate a target group
- elb_target_group:
- name: "{{ tg_name }}"
- protocol: http
- port: 80
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- <<: *aws_connection_info
- register: tg
-
- - name: create ALB with redirect DefaultAction
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTPS
- Port: 443
- DefaultActions:
- - Type: redirect
- RedirectConfig: "{{ RedirectActionConfig }}"
- Certificates:
- - CertificateArn: "{{ cert_arn }}"
- SslPolicy: ELBSecurityPolicy-2016-08
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - alb.listeners|length == 1
- - alb.listeners[0].rules[0].actions|length == 1
- - alb.listeners[0].rules[0].actions[0].type == "redirect"
-
- - name: test idempotence with redirect DefaultAction
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTPS
- Port: 443
- DefaultActions:
- - Type: redirect
- RedirectConfig: "{{ RedirectActionConfig }}"
- Certificates:
- - CertificateArn: "{{ cert_arn }}"
- SslPolicy: ELBSecurityPolicy-2016-08
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - not alb.changed
- - alb.listeners|length == 1
- - alb.listeners[0].rules[0].actions|length == 1
- - alb.listeners[0].rules[0].actions[0].type == "redirect"
-
- - name: update ALB with fixed-response DefaultAction
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTPS
- Port: 443
- DefaultActions:
- - Type: fixed-response
- FixedResponseConfig: "{{ FixedResponseActionConfig }}"
- Certificates:
- - CertificateArn: "{{ cert_arn }}"
- SslPolicy: ELBSecurityPolicy-2016-08
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - alb.listeners|length == 1
- - alb.listeners[0].rules[0].actions|length == 1
- - alb.listeners[0].rules[0].actions[0].type == "fixed-response"
-
- - name: test idempotence with fixed-response DefaultAction
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTPS
- Port: 443
- DefaultActions:
- - Type: fixed-response
- FixedResponseConfig: "{{ FixedResponseActionConfig }}"
- Certificates:
- - CertificateArn: "{{ cert_arn }}"
- SslPolicy: ELBSecurityPolicy-2016-08
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - not alb.changed
- - alb.listeners|length == 1
- - alb.listeners[0].rules[0].actions|length == 1
- - alb.listeners[0].rules[0].actions[0].type == "fixed-response"
-
- - name: test multiple non-default rules
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTPS
- Port: 443
- DefaultActions:
- - Type: fixed-response
- FixedResponseConfig: "{{ FixedResponseActionConfig }}"
- Certificates:
- - CertificateArn: "{{ cert_arn }}"
- SslPolicy: ELBSecurityPolicy-2016-08
- Rules:
- - Conditions:
- - Field: path-pattern
- Values:
- - "/forward-path/*"
- Priority: 1
- Actions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- - Conditions:
- - Field: path-pattern
- Values:
- - "/redirect-path/*"
- Priority: 2
- Actions:
- - Type: redirect
- RedirectConfig: "{{ RedirectActionConfig }}"
- - Conditions:
- - Field: path-pattern
- Values:
- - "/fixed-response-path/"
- Priority: 3
- Actions:
- - Type: fixed-response
- FixedResponseConfig: "{{ FixedResponseActionConfig }}"
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - alb.changed
- - alb.listeners|length == 1
- - alb.listeners[0].rules|length == 4 ## the DefaultActions entry is included as a rule
- - alb.listeners[0].rules[0].actions|length == 1
- - alb.listeners[0].rules[0].actions[0].type == "forward"
- - alb.listeners[0].rules[1].actions|length == 1
- - alb.listeners[0].rules[1].actions[0].type == "redirect"
- - alb.listeners[0].rules[2].actions|length == 1
- - alb.listeners[0].rules[2].actions[0].type == "fixed-response"
-
- - name: test idempotence multiple non-default rules
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTPS
- Port: 443
- DefaultActions:
- - Type: fixed-response
- FixedResponseConfig: "{{ FixedResponseActionConfig }}"
- Certificates:
- - CertificateArn: "{{ cert_arn }}"
- SslPolicy: ELBSecurityPolicy-2016-08
- Rules:
- - Conditions:
- - Field: path-pattern
- Values:
- - "/forward-path/*"
- Priority: 1
- Actions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- - Conditions:
- - Field: path-pattern
- Values:
- - "/redirect-path/*"
- Priority: 2
- Actions:
- - Type: redirect
- RedirectConfig: "{{ RedirectActionConfig }}"
- - Conditions:
- - Field: path-pattern
- Values:
- - "/fixed-response-path/"
- Priority: 3
- Actions:
- - Type: fixed-response
- FixedResponseConfig: "{{ FixedResponseActionConfig }}"
- <<: *aws_connection_info
- register: alb
-
- - assert:
- that:
- - not alb.changed
- - alb.listeners|length == 1
- - alb.listeners[0].rules|length == 4 ## the DefaultActions entry is included as a rule
- - alb.listeners[0].rules[0].actions|length == 1
- - alb.listeners[0].rules[0].actions[0].type == "forward"
- - alb.listeners[0].rules[1].actions|length == 1
- - alb.listeners[0].rules[1].actions[0].type == "redirect"
- - alb.listeners[0].rules[2].actions|length == 1
- - alb.listeners[0].rules[2].actions[0].type == "fixed-response"
-
-
-# - name: test creating ALB with a default listener with multiple actions
-# elb_application_lb:
-# name: "{{ alb_name }}"
-# subnets: "{{ alb_subnets }}"
-# security_groups: "{{ sec_group.group_id }}"
-# state: present
-# listeners:
-# - Protocol: HTTPS
-# Port: 443
-# DefaultActions:
-# - Type: forward
-# TargetGroupName: "{{ tg_name }}"
-# Order: 2
-# - Type: authenticate-oidc
-# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
-# Order: 1
-# Certificates:
-# - CertificateArn: "{{ cert_arn }}"
-# SslPolicy: ELBSecurityPolicy-2016-08
-# <<: *aws_connection_info
-# register: alb
-#
-# - assert:
-# that:
-# - alb.listeners|length == 1
-# - alb.listeners[0].rules[0].actions|length == 2
-#
-# - name: test changing order of actions
-# elb_application_lb:
-# name: "{{ alb_name }}"
-# subnets: "{{ alb_subnets }}"
-# security_groups: "{{ sec_group.group_id }}"
-# state: present
-# listeners:
-# - Protocol: HTTPS
-# Port: 443
-# DefaultActions:
-# - Type: authenticate-oidc
-# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
-# Order: 1
-# - Type: forward
-# TargetGroupName: "{{ tg_name }}"
-# Order: 2
-# Certificates:
-# - CertificateArn: "{{ cert_arn }}"
-# SslPolicy: ELBSecurityPolicy-2016-08
-# <<: *aws_connection_info
-# register: alb
-#
-# - assert:
-# that:
-# - not alb.changed
-# - alb.listeners|length == 1
-# - alb.listeners[0].rules[0].actions|length == 2
-#
-# - name: test non-default rule with multiple actions
-# elb_application_lb:
-# name: "{{ alb_name }}"
-# subnets: "{{ alb_subnets }}"
-# security_groups: "{{ sec_group.group_id }}"
-# state: present
-# listeners:
-# - Protocol: HTTPS
-# Port: 443
-# DefaultActions:
-# - Type: authenticate-oidc
-# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
-# Order: 1
-# - Type: forward
-# TargetGroupName: "{{ tg_name }}"
-# Order: 2
-# Certificates:
-# - CertificateArn: "{{ cert_arn }}"
-# SslPolicy: ELBSecurityPolicy-2016-08
-# Rules:
-# - Conditions:
-# - Field: path-pattern
-# Values:
-# - "*"
-# Priority: 1
-# Actions:
-# - Type: forward
-# TargetGroupName: "{{ tg_name }}"
-# Order: 2
-# - Type: authenticate-oidc
-# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
-# Order: 1
-# <<: *aws_connection_info
-# register: alb
-#
-# - assert:
-# that:
-# - alb.changed
-# - alb.listeners|length == 1
-# - alb.listeners[0].rules[0].actions|length == 2
-# - alb.listeners[0].rules[1].actions|length == 2
-#
-# - name: test idempotency non-default rule with multiple actions
-# elb_application_lb:
-# name: "{{ alb_name }}"
-# subnets: "{{ alb_subnets }}"
-# security_groups: "{{ sec_group.group_id }}"
-# state: present
-# listeners:
-# - Protocol: HTTPS
-# Port: 443
-# DefaultActions:
-# - Type: authenticate-oidc
-# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
-# Order: 1
-# - Type: forward
-# TargetGroupName: "{{ tg_name }}"
-# Order: 2
-# Certificates:
-# - CertificateArn: "{{ cert_arn }}"
-# SslPolicy: ELBSecurityPolicy-2016-08
-# Rules:
-# - Conditions:
-# - Field: path-pattern
-# Values:
-# - "*"
-# Priority: 1
-# Actions:
-# - Type: forward
-# TargetGroupName: "{{ tg_name }}"
-# Order: 2
-# - Type: authenticate-oidc
-# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
-# Order: 1
-# <<: *aws_connection_info
-# register: alb
-#
-# - assert:
-# that:
-# - not alb.changed
-# - alb.listeners|length == 1
-# - alb.listeners[0].rules[0].actions|length == 2
-# - alb.listeners[0].rules[1].actions|length == 2
-#
-# - name: test non-default rule action order change
-# elb_application_lb:
-# name: "{{ alb_name }}"
-# subnets: "{{ alb_subnets }}"
-# security_groups: "{{ sec_group.group_id }}"
-# state: present
-# listeners:
-# - Protocol: HTTPS
-# Port: 443
-# DefaultActions:
-# - Type: authenticate-oidc
-# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
-# Order: 1
-# - Type: forward
-# TargetGroupName: "{{ tg_name }}"
-# Order: 2
-# Certificates:
-# - CertificateArn: "{{ cert_arn }}"
-# SslPolicy: ELBSecurityPolicy-2016-08
-# Rules:
-# - Conditions:
-# - Field: path-pattern
-# Values:
-# - "*"
-# Priority: 1
-# Actions:
-# - Type: authenticate-oidc
-# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
-# Order: 1
-# - Type: forward
-# TargetGroupName: "{{ tg_name }}"
-# Order: 2
-# <<: *aws_connection_info
-# register: alb
-#
-# - assert:
-# that:
-# - not alb.changed
-# - alb.listeners|length == 1
-# - alb.listeners[0].rules[0].actions|length == 2
-# - alb.listeners[0].rules[1].actions|length == 2
diff --git a/test/integration/targets/elb_application_lb/tasks/test_multiple_actions_fail.yml b/test/integration/targets/elb_application_lb/tasks/test_multiple_actions_fail.yml
deleted file mode 100644
index 722002f259..0000000000
--- a/test/integration/targets/elb_application_lb/tasks/test_multiple_actions_fail.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: register dummy OIDC config
- set_fact:
- AuthenticateOidcActionConfig:
- AuthorizationEndpoint: "https://www.example.com/auth"
- ClientId: "eeeeeeeeeeeeeeeeeeeeeeeeee"
- ClientSecret: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
- Issuer: "https://www.example.com/issuer"
- OnUnauthenticatedRequest: "authenticate"
- Scope: "openid"
- SessionCookieName: "AWSELBAuthSessionCookie"
- SessionTimeout: 604800
- TokenEndpoint: "https://www.example.com/token"
- UserInfoEndpoint: "https://www.example.com/userinfo"
-
- - name: create ALB with multiple DefaultActions
- elb_application_lb:
- name: "{{ alb_name }}"
- subnets: "{{ alb_subnets }}"
- security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: HTTPS
- Port: 443
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- Order: 2
- - Type: authenticate-oidc
- AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}"
- Order: 1
- Certificates:
- - CertificateArn: "{{ cert_arn }}"
- SslPolicy: ELBSecurityPolicy-2016-08
- <<: *aws_connection_info
- register: alb
- ignore_errors: yes
-
- - name: check for a graceful failure message
- assert:
- that:
- - alb.failed
- - 'alb.msg == "installed version of botocore does not support multiple actions, please upgrade botocore to version 1.10.30 or higher"'
diff --git a/test/integration/targets/elb_classic_lb/aliases b/test/integration/targets/elb_classic_lb/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/elb_classic_lb/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/elb_classic_lb/defaults/main.yml b/test/integration/targets/elb_classic_lb/defaults/main.yml
deleted file mode 100644
index 76164523d4..0000000000
--- a/test/integration/targets/elb_classic_lb/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# defaults file for elb_classic_lb
-tag_prefix: '{{resource_prefix}}'
diff --git a/test/integration/targets/elb_classic_lb/meta/main.yml b/test/integration/targets/elb_classic_lb/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/elb_classic_lb/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/elb_classic_lb/tasks/main.yml b/test/integration/targets/elb_classic_lb/tasks/main.yml
deleted file mode 100644
index 2b368c6f9e..0000000000
--- a/test/integration/targets/elb_classic_lb/tasks/main.yml
+++ /dev/null
@@ -1,425 +0,0 @@
----
-# __Test Info__
-# Create a self-signed cert and upload it to AWS
-# http://www.akadia.com/services/ssh_test_certificate.html
-# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/ssl-server-cert.html
-
-# __Test Outline__
-#
-# __elb_classic_lb__
-# create test elb with listeners and certificate
-# change AZs
-# change listeners
-# remove listeners
-# remove elb
-
-# __ec2-common__
-# test environment variable EC2_REGION
-# test with no parameters
-# test with only instance_id
-# test invalid region parameter
-# test valid region parameter
-# test invalid ec2_url parameter
-# test valid ec2_url parameter
-# test credentials from environment
-# test credential parameters
-
-- block:
-
- # ============================================================
- # create test elb with listeners, certificate, and health check
-
- - name: Create ELB
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- - protocol: http
- load_balancer_port: 8080
- instance_port: 8080
- health_check:
- ping_protocol: http
- ping_port: 80
- ping_path: "/index.html"
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 10
- register: info
-
- - assert:
- that:
- - 'info.changed'
- - 'info.elb.status == "created"'
- - '"{{ ec2_region }}a" in info.elb.zones'
- - '"{{ ec2_region }}b" in info.elb.zones'
- - 'info.elb.health_check.healthy_threshold == 10'
- - 'info.elb.health_check.interval == 30'
- - 'info.elb.health_check.target == "HTTP:80/index.html"'
- - 'info.elb.health_check.timeout == 5'
- - 'info.elb.health_check.unhealthy_threshold == 2'
- - '[80, 80, "HTTP", "HTTP"] in info.elb.listeners'
- - '[8080, 8080, "HTTP", "HTTP"] in info.elb.listeners'
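- # listeners are reported as [load_balancer_port, instance_port, protocol,
- # instance_protocol] tuples, hence the list membership checks above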
-
- # ============================================================
-
- # checking the ports would be cool, but we are at the mercy of AWS
- # to start things in a timely manner, so these checks remain disabled
-
- #- name: check to make sure 80 is listening
- # wait_for: host={{ info.elb.dns_name }} port=80 timeout=600
- # register: result
-
- #- name: assert can connect to port 80
- # assert: 'result.state == "started"'
-
- #- name: check to make sure 443 is listening
- # wait_for: host={{ info.elb.dns_name }} port=443 timeout=600
- # register: result
-
- #- name: assert can connect to port 443
- # assert: 'result.state == "started"'
-
- # ============================================================
-
- # Change AZs
-
- - name: Change AZs
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- state: present
- zones:
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- purge_zones: yes
- health_check:
- ping_protocol: http
- ping_port: 80
- ping_path: "/index.html"
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 10
- register: info
-
-
-
- - assert:
- that:
- - 'info.elb.status == "ok"'
- - 'info.changed'
- - 'info.elb.zones[0] == "{{ ec2_region }}c"'
-
- # ============================================================
-
- # Update AZs
-
- - name: Update AZs
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- purge_zones: yes
- register: info
-
- - assert:
- that:
- - 'info.changed'
- - 'info.elb.status == "ok"'
- - '"{{ ec2_region }}a" in info.elb.zones'
- - '"{{ ec2_region }}b" in info.elb.zones'
- - '"{{ ec2_region }}c" in info.elb.zones'
-
-
- # ============================================================
-
- # Purge Listeners
-
- - name: Purge Listeners
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 81
- purge_listeners: yes
- register: info
-
- - assert:
- that:
- - 'info.elb.status == "ok"'
- - 'info.changed'
- - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
- - 'info.elb.listeners|length == 1'
-
-
-
- # ============================================================
-
- # add Listeners
-
- - name: Add Listeners
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 8081
- instance_port: 8081
- purge_listeners: no
- register: info
-
- - assert:
- that:
- - 'info.elb.status == "ok"'
- - 'info.changed'
- - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
- - '[8081, 8081, "HTTP", "HTTP"] in info.elb.listeners'
- - 'info.elb.listeners|length == 2'
-
-
- # ============================================================
-
- - name: test with no parameters
- elb_classic_lb:
- register: result
- ignore_errors: true
-
- - name: assert failure when called with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("missing required arguments: ")'
-
-
-
- # ============================================================
- - name: test with only name
- elb_classic_lb:
- name="{{ tag_prefix }}"
- register: result
- ignore_errors: true
-
- - name: assert failure when called with only name
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "missing required arguments: state"'
-
-
- # ============================================================
- - name: test invalid region parameter
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: 'asdf querty 1234'
- state: present
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- register: result
- ignore_errors: true
-
- - name: assert invalid region parameter
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("Region asdf querty 1234 does not seem to be available ")'
-
-
- # ============================================================
- - name: test valid region parameter
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
-
- register: result
- ignore_errors: true
-
- - name: assert valid region parameter
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("No handler was ready to authenticate.")'
-
-
- # ============================================================
-
- - name: test invalid ec2_url parameter
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- environment:
- EC2_URL: bogus.example.com
- register: result
- ignore_errors: true
-
- - name: assert invalid ec2_url parameter
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("No handler was ready to authenticate.")'
-
-
- # ============================================================
- - name: test valid ec2_url parameter
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- environment:
- EC2_URL: '{{ec2_url}}'
- register: result
- ignore_errors: true
-
- - name: assert valid ec2_url parameter
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("No handler was ready to authenticate.")'
-
-
- # ============================================================
- - name: test credentials from environment
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- environment:
- EC2_ACCESS_KEY: bogus_access_key
- EC2_SECRET_KEY: bogus_secret_key
- register: result
- ignore_errors: true
-
- - name: assert credentials from environment
- assert:
- that:
- - 'result.failed'
- - '"InvalidClientTokenId" in result.exception'
-
-
- # ============================================================
- - name: test credential parameters
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: present
- zones:
- - "{{ ec2_region }}a"
- - "{{ ec2_region }}b"
- - "{{ ec2_region }}c"
- listeners:
- - protocol: http
- load_balancer_port: 80
- instance_port: 80
- register: result
- ignore_errors: true
-
- - name: assert credential parameters
- assert:
- that:
- - 'result.failed'
- - '"No handler was ready to authenticate. 1 handlers were checked." in result.msg'
-
- always:
-
- # ============================================================
- - name: remove the test load balancer completely
- elb_classic_lb:
- name: "{{ tag_prefix }}"
- region: "{{ ec2_region }}"
- state: absent
- ec2_access_key: "{{ ec2_access_key }}"
- ec2_secret_key: "{{ ec2_secret_key }}"
- security_token: "{{ security_token }}"
- register: result
-
- - name: assert the load balancer was removed
- assert:
- that:
- - 'result.changed'
- - 'result.elb.name == "{{tag_prefix}}"'
- - 'result.elb.status == "deleted"'
diff --git a/test/integration/targets/elb_classic_lb/vars/main.yml b/test/integration/targets/elb_classic_lb/vars/main.yml
deleted file mode 100644
index 79194af1ef..0000000000
--- a/test/integration/targets/elb_classic_lb/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for elb_classic_lb
diff --git a/test/integration/targets/elb_network_lb/aliases b/test/integration/targets/elb_network_lb/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/elb_network_lb/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/elb_network_lb/defaults/main.yml b/test/integration/targets/elb_network_lb/defaults/main.yml
deleted file mode 100644
index 5ee3fa4537..0000000000
--- a/test/integration/targets/elb_network_lb/defaults/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# load balancer and target group names have to be less than 32 characters
-# the 8-digit identifier at the end of resource_prefix identifies which test run
-# created a resource and allows tests to be run in parallel
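-# e.g. a resource_prefix ending in "12345678" yields nlb_name "my-nlb-12345678",
-# comfortably under the 32-character limit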
-nlb_name: "my-nlb-{{ resource_prefix | regex_search('([0-9]+)$') }}"
-tg_name: "my-tg-{{ resource_prefix | regex_search('([0-9]+)$') }}"
-tg_tcpudp_name: "my-tg-tcpudp-{{ resource_prefix | regex_search('([0-9]+)$') }}"
diff --git a/test/integration/targets/elb_network_lb/files/cert.pem b/test/integration/targets/elb_network_lb/files/cert.pem
deleted file mode 100644
index 81df3a6fa1..0000000000
--- a/test/integration/targets/elb_network_lb/files/cert.pem
+++ /dev/null
@@ -1,32 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIFiTCCA3GgAwIBAgIJAKE+XLUKdWtPMA0GCSqGSIb3DQEBCwUAMFsxCzAJBgNV
-BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
-aWRnaXRzIFB0eSBMdGQxFDASBgNVBAMMC2V4YW1wbGUuY29tMB4XDTE5MDYxNDEx
-MzIzM1oXDTIwMDYxMzExMzIzM1owWzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNv
-bWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEUMBIG
-A1UEAwwLZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
-AQDOFNh5eQ6+9tYvtzjrqvFDzPoXmZuOFeqFS7iBH4gLorvmQQIQcVEAH7O+tkhW
-Z8+6PgvXXd43GFtEbY8jZoBQwupl7lIdzywFRoyZYkREXodmDixkwxlPvUWdrb3r
-ZDRON6qIbX8LrzTPD1+JL4Rtkgr1RTlLrHT3ABEqEV1fQODOdbRd7rq6fmqwPlbl
-zS5kN3RPFuJVDZrnCPcEMOA3QftQgDTzyOlZJYWDZsJxel7H/O9qZjPBTitNJxg1
-ierPaIXT6u6CdWA0A7t3Knyn2+vcyvemjsbQg9v/U5zKR3h+6F0slqgOT/ZnrEos
-AzxdeaA5POJFy6xCHZiVgsE7OVaPB9imWrrAYbKsHVLP2rdlhnGZQnnebmTYCll5
-SvXWCIr5vp4i1qxIa95QBU/xmEY6kTy9GjAOSmYXj7UnwnBZwgEop0yUdBMb4s9G
-x8S6Yxaj1DZVyiyrzInBri9lqabkPLPQNaK7wTKN5zl7r5pSCsF8rl4R+mvcxyyY
-dS+cqseGjn98ubdd/vyQWqLbQtr5Njk4ROs5Rv6/2z/RUFdwsqB5aXztxOs3J7aJ
-5ScTgmoK+wkQY+tej6H5pgT02vKuXLwe4wHKKAYepgH7Azkm7XoFlHhBEUy+uUsI
-PMBm2Meo1JzOc8E5QqLX2YO/MDiZhI+NYOMJF0/huWqM7wIDAQABo1AwTjAdBgNV
-HQ4EFgQU3cPtQEUQYkj4lTM5fbdkd8FSVVIwHwYDVR0jBBgwFoAU3cPtQEUQYkj4
-lTM5fbdkd8FSVVIwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEACWQX
-CJK6f+/Zci1e7Gvw0gd+1xV8jbz2lH2zOGSpoQ6UCRVrANEX+p0nZZRpnTTrJfGa
-Yx+TjIJ4SSoaCud+N2IGXWxzuMan+NskyycTaHSA/i6OwZ8jKki/iVvE5XQN+gHw
-h5lWnEh5PiGG91lYi+FShUN0DX7Su776UK3mloLlq+ANCubWxDycWc0xBNH8iD2U
-xBV7MfuCX9rSei+2NU+hnOPrzh0OKNGOTSPHY1N1KU3grxTLTpF8PTMHC55sEy2E
-EMokRb3V/lFyrfX755KT5cQG6uQKQorI95BoxoNjeek54tuBUjenLfOH6dMO/6Xd
-WaouLXiDM73fwFI0ByQ5CTJizC0ehoZtv2XB7yjZJEuI7zz4wXBBwgNlBV2j4aJJ
-0wNG9210ZC7NxNH7MRfZfzLQpsOMTm9UP85TXsB1ieaN6OD2CnHP9O6umy633Aie
-dsQt5yaC9+N86NBceb44IlkqlTv720BQjq7Dz5UCthhNg0VYpICzymD9kAKPfx9Z
-1ug2gB4i2r6eHqFIexwSfa29DxW+KEPlL7pP9P9FVsYNyZYOvX8OgVERkA/9L53i
-MpzRPPzTyjW/sJcDWVfrQU0NhPj5lOxdMcbvvU+KD3afraUXPiN4N4+FFWENOFZ/
-HEKjPj+As/3OHNyUXrCciYjq6gCLZ6SV945l2h8=
------END CERTIFICATE-----
diff --git a/test/integration/targets/elb_network_lb/files/key.pem b/test/integration/targets/elb_network_lb/files/key.pem
deleted file mode 100644
index 3b4da6feda..0000000000
--- a/test/integration/targets/elb_network_lb/files/key.pem
+++ /dev/null
@@ -1,52 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDOFNh5eQ6+9tYv
-tzjrqvFDzPoXmZuOFeqFS7iBH4gLorvmQQIQcVEAH7O+tkhWZ8+6PgvXXd43GFtE
-bY8jZoBQwupl7lIdzywFRoyZYkREXodmDixkwxlPvUWdrb3rZDRON6qIbX8LrzTP
-D1+JL4Rtkgr1RTlLrHT3ABEqEV1fQODOdbRd7rq6fmqwPlblzS5kN3RPFuJVDZrn
-CPcEMOA3QftQgDTzyOlZJYWDZsJxel7H/O9qZjPBTitNJxg1ierPaIXT6u6CdWA0
-A7t3Knyn2+vcyvemjsbQg9v/U5zKR3h+6F0slqgOT/ZnrEosAzxdeaA5POJFy6xC
-HZiVgsE7OVaPB9imWrrAYbKsHVLP2rdlhnGZQnnebmTYCll5SvXWCIr5vp4i1qxI
-a95QBU/xmEY6kTy9GjAOSmYXj7UnwnBZwgEop0yUdBMb4s9Gx8S6Yxaj1DZVyiyr
-zInBri9lqabkPLPQNaK7wTKN5zl7r5pSCsF8rl4R+mvcxyyYdS+cqseGjn98ubdd
-/vyQWqLbQtr5Njk4ROs5Rv6/2z/RUFdwsqB5aXztxOs3J7aJ5ScTgmoK+wkQY+te
-j6H5pgT02vKuXLwe4wHKKAYepgH7Azkm7XoFlHhBEUy+uUsIPMBm2Meo1JzOc8E5
-QqLX2YO/MDiZhI+NYOMJF0/huWqM7wIDAQABAoICAB3iqAH1rE3FPgptZ7PFdnd+
-okYJ4KUaSIhMEtWm3PPTBay+gK4hwL1j240sohDlvRolJVJ2KmOTBKlHuhpIIxOT
-MKrXhNEN2jRpproXpg7EJp6GL6ntIR6PNClJqOEaBvvQ1soyFtp67g2ZDSG34lyB
-cVVgVI7E07F+MP8IxaGqpu9J4n48wJeK/a3RXIi22KNv504Q44GyF2SpyCizbdCV
-oPxrm0I/QJfM+S+1Fz2doWEfLRkg+SBvVZg6sygQeBzb64xv5WbF3s2sPONrJeix
-2+KJDKD605ophR3h4jrzYmYFDH4K2xQ4RGOEeL0pOvfTS4kBa07z2mc8I4SLEbpi
-VzQblmftRvwye2eKk74GVhJho7Il6ssTL29TJxIyzEljVFrprILkmAVEV8SOn544
-pgSj6i7gDcav4OdhxldT6dk7PNqMq49p3acYzLtXCknlLkHOODEFH3BWP1oAWN6e
-m34kwPGFviKEIYkurWV0LGV9h/zLL3kxjdbgFyLY24jVbvWuJ9VeJkcHVgL3Rs1A
-5irHFpW9TSKYg+R8zLM50S5HRcnL0wV+hl02TcJbkjyVToFh5FeDdyIxN+sQnh+a
-b+g/IA+um2RbqjEUoaVxCdIo7/oPzzj0u8Pw5FvAedNM1a8sZiUJ/1CW7In8yRPC
-Nb5rONsL/eEHAJU9EWIBAoIBAQDnzEl7NUZ50wjqBTna69u9luxb6ixJM3o9asnY
-BXU90Bp7kl14WbHUmTPYGKoGIEKDmmKpA/LhGwD1vykjtHWkUcgGYYucXIBuzRb7
-hEcAa9qkce6Ik4HS8khRmmikNDu/t5zJU2tkgNuGVQlHvsjpJz/rdX857G5Cv8v7
-GSBL4aNxhp9OoMav3/XEUpRSoccR7WEAdfeTfiR4EgaIy864w4twxr2nLroB6SzN
-dYSPZ4hMkTS34ixzjO233QioAGiEnG22XyBA7DTB41EoRFIBcbPrCMqDONkNHbeO
-j25g4okNjK+7ihmIHZBP0awN+mlfNHnDXuJ6L2LCrxWHQQtHAoIBAQDjmS6h51/H
-gcBDUxot16M/7PPJZUbJ8y+qqPbaqu3ORADyvguE/Ww80we+8xeaOa5tVpVE6diZ
-tg5BfBGwltyCEwKDAG1g9/D3IVw3yE1AuxyG0+1q0/rTcdZl01PgGVwi+28YCLyO
-VxgyIvpGFwgQ9WV22l16JatyhsZLRIWFk78ECJO3v5X8KuCJLnKfcd9nkem9DXdS
-iKqiylOXzvIKGUe5HxeDd/itI8V8b8OTQQxM0jEwCOZQg1o1BNN0uEJo4dENkuYa
-dZyJFYe0ZsM5ZRm5HmcIYMlPejcYaINRX30TZHRNE/X+fCfrIwg0LmJxFVieFcc3
-Dc3ZU1K5T3UZAoIBAQDCAK3ji+RPY/xK+VLB0rOYSy/JggXMwmPl8XG79l14/aqc
-kBTg/JkkqZDFpWbwN6HTyawXb3GhS9uS0oqZEpl/jN8P0CZsGNN5aPd6TOysApj9
-F0ogTuzSY+t5OPWpsPns7L0xlzsD5AFXveZFgP1gfnQ2GqIAFcz22tXbc90fBVup
-UZYV1eRVIOnuhXsUachWTcno+9LZRDM7t0J2zbVX2HnlSsFCoo4VuVXBJEFtUKa4
-BrQLzyWLFIGFaF6tVaIkk1QT1iwFimxhdmLfg8bq5IJEDZiJGVQ4cQ3HKG6mchNp
-Hr2aBex2it/UnlUVYec9QotCpDCDORO8g5NOH3dTAoIBAQCJH9htqVYWgIESzvW9
-2ua7dAqpM0CEGeWFH8mik0s1c9lSZtfP51OYDdjj3dhBjjEeJQVIANAERCCwqYyK
-5UmzgLqJrO85dgiTDGFgJmkM7+7O+M5ZB6BeVn7C2KD3KrBNT0lupIzeAXFNe69o
-HSY5+W+JPSUGm72BAdxkqsL7aLm0N2qwUViPFlIztG1QzS25W7sEsSFL85VDAT1+
-ACvpk7OXwDjNd7G2tw+b2kZt6Mn9WsJR23rP1WO6/85ay00PncXzNKc4F9YY7YTW
-VveWE+h8lOmkrZN8M/kP1qAPncVgsqwzaCxUh/8Q9wlRTwT2dtLuY9ajv8hfAImd
-pla5AoIBAQCduiA3aZBDqGrUTH9bcMRtyU8RTlc2Y5+jFSBP4KfcS/ORoILVdvZz
-v+o0jw01keGAtW77Mq70ZCpc8HjOm8Ju8GYtwUGmPRS+hQaZwT8/QseF6q2Q+Bi5
-Wc0Lqa4YA0cI7XViJRhHIPfdV8YEEAW8rIAUqFSoAT6G7z/o0K4zlSa+2RbG0l1v
-zLWmJtF8OJfM8IboIyER0PHrWjNFzxKCJssu2WE7WT6/Rupus04XVXRR+Fb6DAGb
-yw2MpB3kLvjugQpolx4YbXE4n+F1mkqm9fHjo4fbfSwjmeFnPsRvRmiRTomHxq/s
-DUZ6eZM8TIlGhUrx/Y1TP0GQjKxDN6ZQ
------END PRIVATE KEY-----
diff --git a/test/integration/targets/elb_network_lb/meta/main.yml b/test/integration/targets/elb_network_lb/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/elb_network_lb/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/elb_network_lb/tasks/main.yml b/test/integration/targets/elb_network_lb/tasks/main.yml
deleted file mode 100644
index f2d77a67bb..0000000000
--- a/test/integration/targets/elb_network_lb/tasks/main.yml
+++ /dev/null
@@ -1,248 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create certificate
- iam_cert:
- name: test_cert
- state: present
- cert: "{{ lookup('file', 'cert.pem') }}"
- key: "{{ lookup('file', 'key.pem') }}"
- <<: *aws_connection_info
- register: cert
-
- - name: create VPC
- ec2_vpc_net:
- cidr_block: 10.228.228.0/22
- name: "{{ resource_prefix }}_vpc"
- state: present
- <<: *aws_connection_info
- register: vpc
-
- - name: create internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- tags:
- Name: "{{ resource_prefix }}"
- <<: *aws_connection_info
- register: igw
-
- - name: create subnets
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- az: "{{ aws_region}}{{ item.az }}"
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- tags:
- Created_By: "{{ resource_prefix }}"
- Public: "{{ item.public }}"
- <<: *aws_connection_info
- with_items:
- - cidr: 10.228.228.0/24
- az: "a"
- public: True
- - cidr: 10.228.229.0/24
- az: "b"
- public: True
- - cidr: 10.228.230.0/24
- az: "a"
- public: False
- - cidr: 10.228.231.0/24
- az: "b"
- public: False
- register: subnets
-
- - ec2_vpc_subnet_info:
- filters:
- vpc-id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
- register: vpc_subnets
-
- - name: create list of subnet ids
- set_fact:
- nlb_subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public == `True`].id') }}"
- private_subnets: "{{ vpc_subnets|json_query('subnets[?tags.Public != `True`].id') }}"
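- # json_query evaluates a JMESPath expression; the backticks quote the
- # literal string "True", so this splits subnet ids by their Public tag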
-
- - name: create a route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
- tags:
- Name: igw-route
- Created: "{{ resource_prefix }}"
- subnets: "{{ nlb_subnets + private_subnets }}"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- register: route_table
-
- - ec2_group:
- name: "{{ resource_prefix }}"
- description: "security group for Ansible NLB integration tests"
- state: present
- vpc_id: "{{ vpc.vpc.id }}"
- rules:
- - proto: tcp
- from_port: 1
- to_port: 65535
- cidr_ip: 0.0.0.0/0
- - proto: all
- ports: 80
- cidr_ip: 10.228.228.0/22
- <<: *aws_connection_info
- register: sec_group
-
- - name: create a target group for testing
- elb_target_group:
- name: "{{ tg_name }}"
- protocol: tcp
- port: 80
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- <<: *aws_connection_info
- register: tg
-
- - name: create a target group for testing tcp_udp protocols
- elb_target_group:
- name: "{{ tg_tcpudp_name }}"
- protocol: tcp_udp
- port: 80
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- <<: *aws_connection_info
- register: tg_tcpudp
-
- - include_tasks: test_nlb_bad_listener_options.yml
- - include_tasks: test_nlb_tags.yml
- - include_tasks: test_creating_nlb.yml
- - include_tasks: test_nlb_with_asg.yml
- - include_tasks: test_modifying_nlb_listeners.yml
- - include_tasks: test_deleting_nlb.yml
-
- always:
-
- - name: destroy NLB
- elb_network_lb:
- name: "{{ nlb_name }}"
- state: absent
- wait: yes
- wait_timeout: 600
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: destroy target group if it was created
- elb_target_group:
- name: "{{ tg_name }}"
- protocol: tcp
- port: 80
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- wait: yes
- wait_timeout: 600
- <<: *aws_connection_info
- register: remove_tg
- retries: 5
- delay: 3
- until: remove_tg is success
- when: tg is defined
- ignore_errors: yes
-
- - name: destroy tcp_udp target group if it was created
- elb_target_group:
- name: "{{ tg_tcpudp_name }}"
- protocol: tcp_udp
- port: 80
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- wait: yes
- wait_timeout: 600
- <<: *aws_connection_info
- register: remove_tg
- retries: 5
- delay: 3
- until: remove_tg is success
- when: tg_tcpudp is defined
- ignore_errors: yes
-
- - name: destroy sec group
- ec2_group:
- name: "{{ sec_group.group_name }}"
- description: "security group for Ansible NLB integration tests"
- state: absent
- vpc_id: "{{ vpc.vpc.id }}"
- <<: *aws_connection_info
- register: remove_sg
- retries: 10
- delay: 5
- until: remove_sg is success
- ignore_errors: yes
-
- - name: remove route table
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- route_table_id: "{{ route_table.route_table.route_table_id }}"
- lookup: id
- state: absent
- <<: *aws_connection_info
- register: remove_rt
- retries: 10
- delay: 5
- until: remove_rt is success
- ignore_errors: yes
-
- - name: destroy subnets
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- <<: *aws_connection_info
- register: remove_subnet
- retries: 10
- delay: 5
- until: remove_subnet is success
- with_items:
- - cidr: 10.228.228.0/24
- - cidr: 10.228.229.0/24
- - cidr: 10.228.230.0/24
- - cidr: 10.228.231.0/24
- ignore_errors: yes
-
- - name: destroy internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- Name: "{{ resource_prefix }}"
- state: absent
- <<: *aws_connection_info
- register: remove_igw
- retries: 10
- delay: 5
- until: remove_igw is success
- ignore_errors: yes
-
- - name: destroy VPC
- ec2_vpc_net:
- cidr_block: 10.228.228.0/22
- name: "{{ resource_prefix }}_vpc"
- state: absent
- <<: *aws_connection_info
- register: remove_vpc
- retries: 10
- delay: 5
- until: remove_vpc is success
- ignore_errors: yes
-
- - name: destroy certificate
- iam_cert:
- name: test_cert
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/elb_network_lb/tasks/test_creating_nlb.yml b/test/integration/targets/elb_network_lb/tasks/test_creating_nlb.yml
deleted file mode 100644
index b99af17b38..0000000000
--- a/test/integration/targets/elb_network_lb/tasks/test_creating_nlb.yml
+++ /dev/null
@@ -1,82 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create NLB with listeners
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- listeners:
- - Protocol: TCP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- - Protocol: TLS
- Port: 443
- Certificates:
- - CertificateArn: "{{ cert.arn }}"
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- - Protocol: UDP
- Port: 13
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_tcpudp_name }}"
- - Protocol: TCP_UDP
- Port: 17
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_tcpudp_name }}"
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - nlb.changed
- - nlb.listeners|length == 4
-
- - name: test idempotence creating NLB with listeners
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- listeners:
- - Protocol: TCP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- - Protocol: TLS
- Port: 443
- Certificates:
- - CertificateArn: "{{ cert.arn }}"
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- - Protocol: UDP
- Port: 13
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_tcpudp_name }}"
- - Protocol: TCP_UDP
- Port: 17
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_tcpudp_name }}"
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - not nlb.changed
- - nlb.listeners|length == 4
diff --git a/test/integration/targets/elb_network_lb/tasks/test_deleting_nlb.yml b/test/integration/targets/elb_network_lb/tasks/test_deleting_nlb.yml
deleted file mode 100644
index 23d1d53588..0000000000
--- a/test/integration/targets/elb_network_lb/tasks/test_deleting_nlb.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: destroy NLB with listener
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: absent
- listeners:
- - Protocol: TCP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- wait: yes
- wait_timeout: 300
- register: nlb
-
- - assert:
- that:
- - nlb.changed
-
- - name: test idempotence
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: absent
- listeners:
- - Protocol: TCP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- wait: yes
- wait_timeout: 300
- register: nlb
-
- - assert:
- that:
- - not nlb.changed
diff --git a/test/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml b/test/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml
deleted file mode 100644
index 67ab99e870..0000000000
--- a/test/integration/targets/elb_network_lb/tasks/test_modifying_nlb_listeners.yml
+++ /dev/null
@@ -1,88 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: add a listener
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- listeners:
- - Protocol: TCP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- - Protocol: TCP
- Port: 443
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - nlb.changed
- - nlb.listeners|length == 2
-
- - name: test an omitted listener will not be removed without purge_listeners
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- purge_listeners: false
- listeners:
- - Protocol: TCP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - not nlb.changed
- - nlb.listeners|length == 2
-
- - name: remove the extra listener with purge_listeners
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- purge_listeners: true
- listeners:
- - Protocol: TCP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - nlb.changed
- - nlb.listeners|length == 1
-
- - name: remove listener from NLB
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- listeners: []
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - nlb.changed
- - not nlb.listeners
diff --git a/test/integration/targets/elb_network_lb/tasks/test_nlb_bad_listener_options.yml b/test/integration/targets/elb_network_lb/tasks/test_nlb_bad_listener_options.yml
deleted file mode 100644
index 5372cae37c..0000000000
--- a/test/integration/targets/elb_network_lb/tasks/test_nlb_bad_listener_options.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: test creating an NLB with invalid listener options
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- #security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: TCP
- Port: 80
- Certificates: {'CertificateArn': 'test', 'IsDefault': 'True'}
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- ignore_errors: yes
- register: nlb
-
- - assert:
- that:
- - nlb is failed
- - "'unable to convert to list' in nlb.msg"
-
- - name: test creating an NLB without providing required listener options
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- #security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Port: 80
- <<: *aws_connection_info
- ignore_errors: yes
- register: nlb
-
- - assert:
- that:
- - nlb is failed
- - '"missing required arguments" in nlb.msg'
- - '"Protocol" in nlb.msg'
- - '"DefaultActions" in nlb.msg'
-
- - name: test creating an NLB providing an invalid listener option type
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- #security_groups: "{{ sec_group.group_id }}"
- state: present
- listeners:
- - Protocol: TCP
- Port: "bad type"
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}"
- <<: *aws_connection_info
- ignore_errors: yes
- register: nlb
-
- - assert:
- that:
- - nlb is failed
- - "'unable to convert to int' in nlb.msg"
diff --git a/test/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml b/test/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml
deleted file mode 100644
index 6b81e90c53..0000000000
--- a/test/integration/targets/elb_network_lb/tasks/test_nlb_tags.yml
+++ /dev/null
@@ -1,101 +0,0 @@
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create NLB with no listeners
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - nlb.changed
-
- - name: re-create NLB with no listeners
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - not nlb.changed
-
- - name: add tags to NLB
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- tags:
- created_by: "NLB test {{ resource_prefix }}"
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - nlb.changed
- - 'nlb.tags.created_by == "NLB test {{ resource_prefix }}"'
-
- - name: test tags are not removed if unspecified
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - not nlb.changed
- - 'nlb.tags.created_by == "NLB test {{ resource_prefix }}"'
-
- - name: remove tags from NLB
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- tags: {}
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - nlb.changed
- - not nlb.tags
-
- - name: test idempotence
- elb_network_lb:
- name: "{{ nlb_name }}"
- subnets: "{{ nlb_subnets }}"
- state: present
- tags: {}
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - not nlb.changed
- - not nlb.tags
-
- - name: destroy NLB with no listeners
- elb_network_lb:
- name: "{{ nlb_name }}"
- state: absent
- <<: *aws_connection_info
- register: nlb
-
- - assert:
- that:
- - nlb.changed
diff --git a/test/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml b/test/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml
deleted file mode 100644
index f5005df6ea..0000000000
--- a/test/integration/targets/elb_network_lb/tasks/test_nlb_with_asg.yml
+++ /dev/null
@@ -1,90 +0,0 @@
-- block:
-
- # create instances
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - ec2_ami_info:
- <<: *aws_connection_info
- filters:
- architecture: x86_64
- virtualization-type: hvm
- root-device-type: ebs
- name: "amzn-ami-hvm*"
- register: amis
-
- - set_fact:
- latest_amazon_linux: "{{ amis.images | sort(attribute='creation_date') | last }}"
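- # sorting by creation_date and taking the last element selects the most
- # recently published AMI matching the name filter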
-
- - ec2_asg:
- <<: *aws_connection_info
- state: absent
- name: "{{ resource_prefix }}-webservers"
- wait_timeout: 900
-
- - ec2_lc:
- <<: *aws_connection_info
- name: "{{ resource_prefix }}-web-lcfg"
- state: absent
-
- - name: Create launch config for testing
- ec2_lc:
- <<: *aws_connection_info
- name: "{{ resource_prefix }}-web-lcfg"
- assign_public_ip: true
- image_id: "{{ latest_amazon_linux.image_id }}"
- security_groups: "{{ sec_group.group_id }}"
- instance_type: t2.micro
- user_data: |
- #!/bin/bash
- set -x
- yum update -y --nogpgcheck
- yum install -y --nogpgcheck httpd
- echo "Hello Ansiblings!" >> /var/www/html/index.html
- service httpd start
- volumes:
- - device_name: /dev/xvda
- volume_size: 10
- volume_type: gp2
- delete_on_termination: true
-
- - name: Create autoscaling group for app server fleet
- ec2_asg:
- <<: *aws_connection_info
- name: "{{ resource_prefix }}-webservers"
- vpc_zone_identifier: "{{ nlb_subnets }}"
- launch_config_name: "{{ resource_prefix }}-web-lcfg"
- termination_policies:
- - OldestLaunchConfiguration
- - Default
- health_check_period: 600
- health_check_type: EC2
- replace_all_instances: true
- min_size: 0
- max_size: 2
- desired_capacity: 1
- wait_for_instances: true
- target_group_arns:
- - "{{ tg.target_group_arn }}"
-
- always:
-
- - ec2_asg:
- <<: *aws_connection_info
- state: absent
- name: "{{ resource_prefix }}-webservers"
- wait_timeout: 900
- ignore_errors: yes
-
- - ec2_lc:
- <<: *aws_connection_info
- name: "{{ resource_prefix }}-web-lcfg"
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/elb_target/aliases b/test/integration/targets/elb_target/aliases
deleted file mode 100644
index db6a9b06d8..0000000000
--- a/test/integration/targets/elb_target/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/aws
-elb_target_group
-shippable/aws/group4
-unstable
diff --git a/test/integration/targets/elb_target/playbooks/full_test.yml b/test/integration/targets/elb_target/playbooks/full_test.yml
deleted file mode 100644
index 18657f8f27..0000000000
--- a/test/integration/targets/elb_target/playbooks/full_test.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-- hosts: localhost
- connection: local
- environment: "{{ ansible_test.environment }}"
-
- roles:
- - elb_lambda_target
- - elb_target
diff --git a/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/defaults/main.yml b/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/defaults/main.yml
deleted file mode 100644
index a28253eb35..0000000000
--- a/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-resource_shortprefix: 'ansible-test-{{ resource_prefix | regex_search("([0-9]+)$") }}'
-lambda_role_name: '{{ resource_shortprefix }}-elb-target-lambda'
-#lambda_role_name: '{{ resource_prefix }}-elb-target-lambda'
-lambda_name: '{{ resource_prefix }}-elb-target-lambda'
-elb_target_group_name: '{{ resource_shortprefix }}-elb-tg'
diff --git a/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/ansible_lambda_target.py b/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/ansible_lambda_target.py
deleted file mode 100644
index 3ea22472e9..0000000000
--- a/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/ansible_lambda_target.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-import json
-
-
-def lambda_handler(event, context):
- return {
- 'statusCode': 200,
- 'body': json.dumps('Hello from Lambda!')
- }
diff --git a/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/assume-role.json b/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/assume-role.json
deleted file mode 100644
index 06456f7996..0000000000
--- a/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/files/assume-role.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": {
- "Effect": "Allow",
- "Principal": { "Service": "lambda.amazonaws.com" },
- "Action": "sts:AssumeRole"
- }
-}
diff --git a/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/tasks/main.yml b/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/tasks/main.yml
deleted file mode 100644
index 4fc3d5e2d8..0000000000
--- a/test/integration/targets/elb_target/playbooks/roles/elb_lambda_target/tasks/main.yml
+++ /dev/null
@@ -1,126 +0,0 @@
----
-- name: set up lambda as elb_target
-
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
-
- block:
- - name: create zip to deploy lambda code
- archive:
- path: "{{ role_path }}/files/ansible_lambda_target.py"
- dest: /tmp/lambda.zip
- format: zip
-
- - name: "create or update service-role for lambda"
- iam_role:
- name: '{{ lambda_role_name }}'
- assume_role_policy_document: "{{ lookup('file', role_path + '/files/assume-role.json') }}"
- managed_policy:
- - arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
- register: ROLE_ARN
-
- - name: pause to let the newly created role become usable
- pause:
- seconds: 10
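- # IAM is eventually consistent; a just-created role can take several
- # seconds before Lambda is allowed to assume it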
-
- - name: deploy lambda.zip to ansible_lambda_target function
- lambda:
- name: "{{ lambda_name }}"
- state: present
- zip_file: "/tmp/lambda.zip"
- runtime: "python3.7"
- role: "{{ ROLE_ARN.arn }}"
- handler: "ansible_lambda_target.lambda_handler"
- timeout: 30
- register: lambda_function
- retries: 3
- delay: 15
- until: lambda_function.changed
-
- - name: create empty target group
- elb_target_group:
- name: '{{ elb_target_group_name }}'
- target_type: lambda
- state: present
- modify_targets: False
- register: elb_target_group
-
- - name: tg is created, state must be changed
- assert:
- that:
- - elb_target_group.changed
-
- - name: allow elb to invoke the lambda function
- lambda_policy:
- state: present
- function_name: "{{ lambda_name }}"
- version: "{{ lambda_function.configuration.version }}"
- statement_id: elb1
- action: lambda:InvokeFunction
- principal: elasticloadbalancing.amazonaws.com
- source_arn: "{{ elb_target_group.target_group_arn }}"
-
- - name: add lambda to elb target
- elb_target_group:
- name: "{{ elb_target_group_name }}"
- target_type: lambda
- state: present
- targets:
- - Id: "{{ lambda_function.configuration.function_arn }}"
- register: elb_target_group
-
- - name: target is updated, state must be changed
- assert:
- that:
- - elb_target_group.changed
-
- - name: re-add lambda to elb target (idempotency)
- elb_target_group:
- name: "{{ elb_target_group_name }}"
- target_type: lambda
- state: present
- targets:
- - Id: "{{ lambda_function.configuration.function_arn }}"
- register: elb_target_group
-
- - name: target is still the same, state must not be changed (idempotency)
- assert:
- that:
- - not elb_target_group.changed
-
- - name: remove lambda target from target group
- elb_target_group:
- name: "{{ elb_target_group_name }}"
- target_type: lambda
- state: absent
- targets: []
- register: elb_target_group
-
- - name: target is removed, state must be changed
- assert:
- that:
- - elb_target_group.changed
-
- always:
- - name: remove elb target group
- elb_target_group:
- name: "{{ elb_target_group_name }}"
- target_type: lambda
- state: absent
- ignore_errors: yes
-
- - name: remove lambda function
- lambda:
- name: "{{ lambda_name }}"
- state: absent
- ignore_errors: yes
-
- - name: remove iam role for lambda
- iam_role:
- name: '{{ lambda_role_name }}'
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/elb_target/playbooks/roles/elb_target/defaults/main.yml b/test/integration/targets/elb_target/playbooks/roles/elb_target/defaults/main.yml
deleted file mode 100644
index 731c84d618..0000000000
--- a/test/integration/targets/elb_target/playbooks/roles/elb_target/defaults/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-ec2_ami_name: 'amzn2-ami-hvm-2.0.20190612-x86_64-gp2'
-
-resource_shortprefix: 'ansible-test-{{ resource_prefix | regex_search("([0-9]+)$") }}'
-tg_name: "{{ resource_shortprefix }}-tg"
-tg_tcpudp_name: "{{ resource_shortprefix }}-tgtcpudp"
-lb_name: "{{ resource_shortprefix }}-lb"
-
-healthy_state:
- state: 'healthy'
diff --git a/test/integration/targets/elb_target/playbooks/roles/elb_target/tasks/main.yml b/test/integration/targets/elb_target/playbooks/roles/elb_target/tasks/main.yml
deleted file mode 100644
index 9f57070f13..0000000000
--- a/test/integration/targets/elb_target/playbooks/roles/elb_target/tasks/main.yml
+++ /dev/null
@@ -1,482 +0,0 @@
----
- - name: set up elb_target test prerequisites
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
-
- block:
-
- # ============================================================
-
- - name: announce setup of elb_target test dependencies
- debug: msg="********** Setting up elb_target test dependencies **********"
-
- # ============================================================
- - name: Find AMI to use
- ec2_ami_info:
- owners: 'amazon'
- filters:
- name: '{{ ec2_ami_name }}'
- register: ec2_amis
- - set_fact:
- ec2_ami_image: '{{ ec2_amis.images[0].image_id }}'
-
-
- - name: set up testing VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: present
- cidr_block: 20.0.0.0/16
- tags:
- Name: "{{ resource_prefix }}-vpc"
- Description: "Created by ansible-test"
- register: vpc
-
- - name: set up testing internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- register: igw
-
- - name: set up testing subnet
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ vpc.vpc.id }}"
- cidr: 20.0.0.0/18
- az: "{{ aws_region }}a"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet"
- register: subnet_1
-
- - name: set up testing subnet
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ vpc.vpc.id }}"
- cidr: 20.0.64.0/18
- az: "{{ aws_region }}b"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet"
- register: subnet_2
-
- - name: create routing rules
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- created: "{{ resource_prefix }}-route"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- subnets:
- - "{{ subnet_1.subnet.id }}"
- - "{{ subnet_2.subnet.id }}"
- register: route_table
-
- - name: create testing security group
- ec2_group:
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ vpc.vpc.id }}"
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- register: sg
-
- - name: set up testing target group (type=instance)
- elb_target_group:
- name: "{{ tg_name }}"
- health_check_port: 80
- protocol: http
- port: 80
- vpc_id: '{{ vpc.vpc.id }}'
- state: present
- target_type: instance
- tags:
- Description: "Created by {{ resource_prefix }}"
-
- - name: set up testing target group (type=instance) with UDP protocol
- elb_target_group:
- name: "{{ tg_tcpudp_name }}"
- protocol: udp
- port: 53
- vpc_id: '{{ vpc.vpc.id }}'
- state: present
- target_type: instance
- tags:
- Protocol: "UDP"
- Description: "Created by {{ resource_prefix }}"
-
- - name: set up testing target group for ALB (type=instance)
- elb_target_group:
- name: "{{ tg_name }}-used"
- health_check_port: 80
- protocol: http
- port: 80
- vpc_id: '{{ vpc.vpc.id }}'
- state: present
- target_type: instance
- tags:
- Description: "Created by {{ resource_prefix }}"
-
- - name: set up ec2 instance to use as a target
- ec2:
- group_id: "{{ sg.group_id }}"
- instance_type: t3.micro
- image: "{{ ec2_ami_image }}"
- vpc_subnet_id: "{{ subnet_2.subnet.id }}"
- instance_tags:
- Name: "{{ resource_prefix }}-inst"
- exact_count: 1
- count_tag:
- Name: "{{ resource_prefix }}-inst"
- assign_public_ip: true
- volumes: []
- wait: true
- ebs_optimized: false
- user_data: |
- #cloud-config
- package_upgrade: true
- package_update: true
- packages:
- - httpd
- runcmd:
- - "service httpd start"
- - echo "HELLO ANSIBLE" > /var/www/html/index.html
- register: ec2
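- # The cloud-config above installs httpd and serves a page on port 80, which
- # is what lets this instance pass the target groups' HTTP health checks and
- # reach the 'healthy' state awaited further down.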
-
- - name: create an application load balancer
- elb_application_lb:
- name: "{{ lb_name }}"
- security_groups:
- - "{{ sg.group_id }}"
- subnets:
- - "{{ subnet_1.subnet.id }}"
- - "{{ subnet_2.subnet.id }}"
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}-used"
- state: present
-
- # ============================================================
-
- - name:
- debug: msg="********** Running elb_target integration tests **********"
-
- # ============================================================
-
- - name: register an instance to unused target group
- elb_target:
- target_group_name: "{{ tg_name }}"
- target_id: "{{ ec2.instance_ids[0] }}"
- state: present
- register: result
-
- - name: target is registered
- assert:
- that:
- - result.changed
- - result.target_group_arn
- - result.target_health_descriptions.target.id == ec2.instance_ids[0]
-
- # ============================================================
-
- - name: test idempotence
- elb_target:
- target_group_name: "{{ tg_name }}"
- target_id: "{{ ec2.instance_ids[0] }}"
- state: present
- register: result
-
- - name: target was already registered
- assert:
- that:
- - not result.changed
-
- # ============================================================
-
- - name: remove an unused target
- elb_target:
- target_group_name: "{{ tg_name }}"
- target_id: "{{ ec2.instance_ids[0] }}"
- state: absent
- deregister_unused: true
- register: result
-
- - name: target was deregistered
- assert:
- that:
- - result.changed
- - not result.target_health_descriptions
-
- # ============================================================
-
- - name: register an instance to used target group and wait until healthy
- elb_target:
- target_group_name: "{{ tg_name }}-used"
- target_id: "{{ ec2.instance_ids[0] }}"
- state: present
- target_status: healthy
- target_status_timeout: 400
- register: result
-
- - name: target is registered
- assert:
- that:
- - result.changed
- - result.target_group_arn
- - result.target_health_descriptions.target.id == ec2.instance_ids[0]
- - result.target_health_descriptions.target_health == healthy_state
-
- # ============================================================
-
- - name: remove a target from used target group
- elb_target:
- target_group_name: "{{ tg_name }}-used"
- target_id: "{{ ec2.instance_ids[0] }}"
- state: absent
- target_status: unused
- target_status_timeout: 400
- register: result
-
- - name: target was deregistered
- assert:
- that:
- - result.changed
-
- # ============================================================
-
- - name: test idempotence
- elb_target:
- target_group_name: "{{ tg_name }}-used"
- target_id: "{{ ec2.instance_ids[0] }}"
- state: absent
- register: result
-
- - name: target was already deregistered
- assert:
- that:
- - not result.changed
-
- # ============================================================
-
- - name: register an instance to used target group and wait until healthy again to test deregistering differently
- elb_target:
- target_group_name: "{{ tg_name }}-used"
- target_id: "{{ ec2.instance_ids[0] }}"
- state: present
- target_status: healthy
- target_status_timeout: 400
- register: result
-
- - name: target is registered
- assert:
- that:
- - result.changed
- - result.target_group_arn
- - result.target_health_descriptions.target.id == ec2.instance_ids[0]
- - result.target_health_descriptions.target_health == healthy_state
-
- - name: start deregistration but don't wait
- elb_target:
- target_group_name: "{{ tg_name }}-used"
- target_id: "{{ ec2.instance_ids[0] }}"
- state: absent
- register: result
-
- - name: target is starting to deregister
- assert:
- that:
- - result.changed
- - result.target_health_descriptions.target_health.reason == "Target.DeregistrationInProgress"
-
- - name: now wait for target to finish deregistering
- elb_target:
- target_group_name: "{{ tg_name }}-used"
- target_id: "{{ ec2.instance_ids[0] }}"
- state: absent
- target_status: unused
- target_status_timeout: 400
- register: result
-
- - name: target was deregistered already and now has finished
- assert:
- that:
- - not result.changed
- - not result.target_health_descriptions
-
- # ============================================================
-
- always:
-
- - name:
- debug: msg="********** Tearing down elb_target test dependencies **********"
-
- - name: remove ec2 instance
- ec2:
- group_id: "{{ sg.group_id }}"
- instance_type: t3.micro
- image: "{{ ec2_ami_image }}"
- vpc_subnet_id: "{{ subnet_2.subnet.id }}"
- instance_tags:
- Name: "{{ resource_prefix }}-inst"
- exact_count: 0
- count_tag:
- Name: "{{ resource_prefix }}-inst"
- assign_public_ip: true
- volumes: []
- wait: true
- ebs_optimized: false
- ignore_errors: true
-
- - name: remove testing target groups
- elb_target_group:
- name: "{{ item }}"
- health_check_port: 80
- protocol: http
- port: 80
- vpc_id: '{{ vpc.vpc.id }}'
- state: absent
- target_type: instance
- tags:
- Description: "Created by {{ resource_prefix }}"
- wait: true
- wait_timeout: 400
- register: removed
- retries: 10
- until: removed is not failed
- with_items:
- - "{{ tg_name }}"
- - "{{ tg_name }}-used"
- ignore_errors: true
-
- - name: remove udp testing target groups
- elb_target_group:
- name: "{{ item }}"
- protocol: udp
- port: 53
- vpc_id: '{{ vpc.vpc.id }}'
- state: absent
- target_type: instance
- tags:
- Description: "Created by {{ resource_prefix }}"
- Protocol: "UDP"
- wait: true
- wait_timeout: 400
- register: removed
- retries: 10
- until: removed is not failed
- with_items:
- - "{{ tg_tcpudp_name }}"
- ignore_errors: true
-
- - name: remove application load balancer
- elb_application_lb:
- name: "{{ lb_name }}"
- security_groups:
- - "{{ sg.group_id }}"
- subnets:
- - "{{ subnet_1.subnet.id }}"
- - "{{ subnet_2.subnet.id }}"
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}-used"
- state: absent
- wait: true
- wait_timeout: 400
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove testing security group
- ec2_group:
- state: absent
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ vpc.vpc.id }}"
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove routing rules
- ec2_vpc_route_table:
- state: absent
- lookup: id
- route_table_id: "{{ route_table.route_table.id }}"
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove testing subnet
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ vpc.vpc.id }}"
- cidr: 20.0.0.0/18
- az: "{{ aws_region }}a"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet"
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove testing subnet
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ vpc.vpc.id }}"
- cidr: 20.0.64.0/18
- az: "{{ aws_region }}b"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet"
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove testing internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove testing VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: absent
- cidr_block: 20.0.0.0/16
- tags:
- Name: "{{ resource_prefix }}-vpc"
- Description: "Created by ansible-test"
- register: removed
- retries: 10
- until: removed is not failed
-
- # ============================================================
diff --git a/test/integration/targets/elb_target/playbooks/version_fail.yml b/test/integration/targets/elb_target/playbooks/version_fail.yml
deleted file mode 100644
index 43cdba8500..0000000000
--- a/test/integration/targets/elb_target/playbooks/version_fail.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-- hosts: localhost
- connection: local
- environment: "{{ ansible_test.environment }}"
-
- tasks:
- - name: set up aws connection info
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- - name: set up testing target group (type=ip)
- elb_target_group:
- state: present
- #name: "{{ resource_shortprefix }}-tg"
- name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-tg"
- health_check_port: 80
- protocol: http
- port: 80
- vpc_id: 'vpc-abcd1234'
- target_type: ip
- tags:
- Description: "Created by {{ resource_prefix }}"
- register: elb_target_group_type_ip
- ignore_errors: yes
-
- - name: check that setting up target group with type=ip fails with friendly message
- assert:
- that:
- - elb_target_group_type_ip is failed
- - "'msg' in elb_target_group_type_ip"
-
- # On the off chance that this went (partially) through when it shouldn't...
- always:
- - name: Remove testing target group (type=ip)
- elb_target_group:
- state: absent
- #name: "{{ resource_shortprefix }}-tg"
- name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-tg"
diff --git a/test/integration/targets/elb_target/runme.sh b/test/integration/targets/elb_target/runme.sh
deleted file mode 100755
index e379f24b74..0000000000
--- a/test/integration/targets/elb_target/runme.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-# Test graceful failure for older versions of botocore
-source virtualenv.sh
-pip install 'botocore<=1.7.1' boto3
-ansible-playbook -i ../../inventory -v playbooks/version_fail.yml "$@"
-
-# Run full test suite
-source virtualenv.sh
-pip install 'botocore>=1.8.0' boto3
-ansible-playbook -i ../../inventory -v playbooks/full_test.yml "$@"
diff --git a/test/integration/targets/elb_target_info/aliases b/test/integration/targets/elb_target_info/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/elb_target_info/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/elb_target_info/playbooks/full_test.yml b/test/integration/targets/elb_target_info/playbooks/full_test.yml
deleted file mode 100644
index 2094252758..0000000000
--- a/test/integration/targets/elb_target_info/playbooks/full_test.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-- hosts: localhost
- connection: local
- environment: "{{ ansible_test.environment }}"
-
- roles:
- - elb_target_info
diff --git a/test/integration/targets/elb_target_info/playbooks/roles/elb_target_info/defaults/main.yml b/test/integration/targets/elb_target_info/playbooks/roles/elb_target_info/defaults/main.yml
deleted file mode 100644
index 4420a8d51b..0000000000
--- a/test/integration/targets/elb_target_info/playbooks/roles/elb_target_info/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-ec2_ami_name: 'amzn2-ami-hvm-2.0.20190612-x86_64-gp2'
-
-tg_name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-tg"
-lb_name: "ansible-test-{{ resource_prefix | regex_search('([0-9]+)$') }}-lb"
diff --git a/test/integration/targets/elb_target_info/playbooks/roles/elb_target_info/tasks/main.yml b/test/integration/targets/elb_target_info/playbooks/roles/elb_target_info/tasks/main.yml
deleted file mode 100644
index 6faa2abb00..0000000000
--- a/test/integration/targets/elb_target_info/playbooks/roles/elb_target_info/tasks/main.yml
+++ /dev/null
@@ -1,505 +0,0 @@
----
- - name: set up elb_target_info test prerequisites
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
-
- block:
-
- # ============================================================
-
- - name:
- debug: msg="********** Setting up elb_target_info test dependencies **********"
-
- - name: Find AMI to use
- ec2_ami_info:
- owners: 'amazon'
- filters:
- name: '{{ ec2_ami_name }}'
- register: ec2_amis
- - set_fact:
- ec2_ami_image: '{{ ec2_amis.images[0].image_id }}'
-
- # ============================================================
-
- - name: set up testing VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: present
- cidr_block: 20.0.0.0/16
- tags:
- Name: "{{ resource_prefix }}-vpc"
- Description: "Created by ansible-test"
- register: vpc
-
- - name: set up testing internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- state: present
- register: igw
-
- - name: set up testing subnet
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ vpc.vpc.id }}"
- cidr: 20.0.0.0/18
- az: "{{ aws_region }}a"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet"
- register: subnet_1
-
- - name: set up testing subnet
- ec2_vpc_subnet:
- state: present
- vpc_id: "{{ vpc.vpc.id }}"
- cidr: 20.0.64.0/18
- az: "{{ aws_region }}b"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet"
- register: subnet_2
-
- - name: create routing rules
- ec2_vpc_route_table:
- vpc_id: "{{ vpc.vpc.id }}"
- tags:
- created: "{{ resource_prefix }}-route"
- routes:
- - dest: 0.0.0.0/0
- gateway_id: "{{ igw.gateway_id }}"
- subnets:
- - "{{ subnet_1.subnet.id }}"
- - "{{ subnet_2.subnet.id }}"
- register: route_table
-
- - name: create testing security group
- ec2_group:
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ vpc.vpc.id }}"
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- register: sg
-
- - name: set up testing target group (type=instance)
- register: alb_target_group
- elb_target_group:
- name: "{{ tg_name }}-inst"
- health_check_port: 80
- protocol: http
- port: 80
- vpc_id: '{{ vpc.vpc.id }}'
- state: present
- target_type: instance
- # set this to 30 so deregistration takes a while, letting the test poll for changes instead of completing immediately
- deregistration_delay_timeout: 30
- tags:
- Description: "Created by {{ resource_prefix }}"
-
- - name: set up testing target group (type=ip)
- register: nlb_target_group
- elb_target_group:
- name: "{{ tg_name }}-ip"
- health_check_port: 80
- protocol: tcp
- port: 80
- vpc_id: '{{ vpc.vpc.id }}'
- state: present
- # set this to 30 so deregistration takes a while, letting the test poll for changes instead of completing immediately
- deregistration_delay_timeout: 30
- target_type: ip
- tags:
- Description: "Created by {{ resource_prefix }}"
-
- - name: set up testing target group which will not be associated with any load balancers
- register: idle_target_group
- elb_target_group:
- name: "{{ tg_name }}-idle"
- health_check_port: 80
- protocol: tcp
- port: 80
- vpc_id: '{{ vpc.vpc.id }}'
- state: present
- target_type: instance
- tags:
- Description: "Created by {{ resource_prefix }}"
-
- - name: set up ec2 instance to use as a target
- ec2:
- group_id: "{{ sg.group_id }}"
- instance_type: t2.micro
- image: "{{ ec2_ami_image }}"
- vpc_subnet_id: "{{ subnet_2.subnet.id }}"
- instance_tags:
- Name: "{{ resource_prefix }}-inst"
- exact_count: 1
- count_tag:
- Name: "{{ resource_prefix }}-inst"
- assign_public_ip: true
- volumes: []
- wait: true
- ebs_optimized: false
- user_data: |
- #cloud-config
- package_upgrade: true
- package_update: true
- packages:
- - httpd
- runcmd:
- - "service httpd start"
- - echo "HELLO ANSIBLE" > /var/www/html/index.html
- register: ec2
-
- - name: create an application load balancer
- elb_application_lb:
- name: "{{ lb_name }}-alb"
- security_groups:
- - "{{ sg.group_id }}"
- subnets:
- - "{{ subnet_1.subnet.id }}"
- - "{{ subnet_2.subnet.id }}"
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}-inst"
- state: present
-
-
- - name: create a network load balancer
- elb_network_lb:
- name: "{{ lb_name }}-nlb"
- subnets:
- - "{{ subnet_1.subnet.id }}"
- - "{{ subnet_2.subnet.id }}"
- listeners:
- - Protocol: TCP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}-ip"
- state: present
-
- - name: register with the ALB
- elb_target:
- target_group_name: "{{ tg_name }}-inst"
- target_id: "{{ ec2.instance_ids[0] }}"
- state: present
- target_status: "initial"
-
- - name: register with the NLB IP target group
- elb_target:
- target_group_name: "{{ tg_name }}-ip"
- target_id: "{{ ec2.instances[0].private_ip }}"
- state: present
- target_status: "initial"
-
- # ============================================================
-
- - debug: msg="********** Running elb_target_info integration tests **********"
-
- # ============================================================
- - name: gather facts
- elb_target_info:
- instance_id: "{{ ec2.instance_ids[0]}}"
- register: target_facts
-
- - assert:
- that:
- - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - (target_facts.instance_target_groups | length) == 2
- msg: "target facts showed the target in the right target groups"
-
-
- - name: register with unused target group
- elb_target:
- target_group_name: "{{ tg_name }}-idle"
- target_id: "{{ ec2.instance_ids[0]}}"
- state: present
- target_status: "unused"
-
- - name: gather facts again, including the idle group
- elb_target_info:
- instance_id: "{{ ec2.instance_ids[0]}}"
- register: target_facts
-
- - assert:
- that:
- - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ idle_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - (target_facts.instance_target_groups | length) == 3
- msg: "target facts reflected the addition of the target to the idle group"
-
- - name: gather facts again, this time excluding the idle group
- elb_target_info:
- instance_id: "{{ ec2.instance_ids[0]}}"
- get_unused_target_groups: false
- register: target_facts
-
- - assert:
- that:
- - "{{ alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - "{{ idle_target_group.target_group_arn not in (target_facts.instance_target_groups | map(attribute='target_group_arn')) }}"
- - (target_facts.instance_target_groups | length) == 2
- msg: "target_facts.instance_target_groups did not gather unused target groups when variable was set"
-
- - name: register twice in the same target group
- elb_target:
- target_group_name: "{{ tg_name }}-ip"
- target_port: 22
- target_id: "{{ ec2.instances[0].private_ip }}"
- state: present
- target_status: "healthy"
- target_status_timeout: 400
-
- - name: gather facts
- elb_target_info:
- instance_id: "{{ ec2.instance_ids[0] }}"
- get_unused_target_groups: false
- register: target_facts
-
- - assert:
- that:
- - alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))
- - nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))
- - (target_facts.instance_target_groups | length) == 2
- - (target_facts.instance_target_groups |
- selectattr('target_group_arn', 'equalto', nlb_target_group.target_group_arn) |
- map(attribute='targets') |
- flatten |
- list |
- length) == 2
- msg: "registering a target twice didn't affect the overall target group count, increased target count"
-
- - set_fact:
- original_target_groups: "{{ target_facts.instance_target_groups }}"
-
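- # with_subelements pairs each target group (item.0) with each of its
- # registered targets (item.1); illustrative shape, using only the keys
- # referenced below:
- #   item.0: { target_group_arn: 'arn:...', targets: [...] }
- #   item.1: { target_id: 'i-0123...', target_port: 80, target_az: 'us-east-1a' }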
- - name: Deregister instance from all target groups
- elb_target:
- target_group_arn: "{{ item.0.target_group_arn }}"
- target_port: "{{ item.1.target_port }}"
- target_az: "{{ item.1.target_az }}"
- target_id: "{{ item.1.target_id }}"
- state: absent
- target_status: "draining"
- with_subelements:
- - "{{ original_target_groups }}"
- - "targets"
-
- - name: wait for all targets to deregister simultaneously
- elb_target_info:
- get_unused_target_groups: false
- instance_id: "{{ ec2.instance_ids[0] }}"
- register: target_facts
- until: (target_facts.instance_target_groups | length) == 0
- retries: 60
- delay: 10
-
- - name: reregister in elbv2s
- elb_target:
- target_group_arn: "{{ item.0.target_group_arn }}"
- target_port: "{{ item.1.target_port }}"
- target_az: "{{ item.1.target_az }}"
- target_id: "{{ item.1.target_id }}"
- state: present
- target_status: "initial"
- with_subelements:
- - "{{ original_target_groups }}"
- - "targets"
-
- # wait until all groups associated with this instance are 'healthy' or
- # 'unused'
- - name: wait for registration
- elb_target_info:
- get_unused_target_groups: false
- instance_id: "{{ ec2.instance_ids[0] }}"
- register: target_facts
- until: >
- (target_facts.instance_target_groups |
- map(attribute='targets') |
- flatten |
- map(attribute='target_health') |
- rejectattr('state', 'equalto', 'healthy') |
- rejectattr('state', 'equalto', 'unused') |
- list |
- length) == 0
- retries: 61
- delay: 10
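- # Reading the until expression step by step (illustrative):
- #   map(attribute='targets') | flatten  -> every registration across groups
- #   map(attribute='target_health')      -> e.g. [{'state': 'healthy'}, {'state': 'initial'}]
- #   rejectattr('state', ...) twice      -> keep only targets still converging
- # Retries stop once that list is empty, i.e. every target has settled.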
-
- - assert:
- that:
- - alb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))
- - nlb_target_group.target_group_arn in (target_facts.instance_target_groups | map(attribute='target_group_arn'))
- - (target_facts.instance_target_groups | length) == 2
- - (target_facts.instance_target_groups |
- selectattr('target_group_arn', 'equalto', nlb_target_group.target_group_arn) |
- map(attribute='targets') |
- flatten |
- list |
- length) == 2
- msg: "reregistration completed successfully"
-
- always:
-
- - name:
- debug: msg="********** Tearing down elb_target_info test dependencies **********"
-
- - name: remove ec2 instance
- ec2:
- group_id: "{{ sg.group_id }}"
- instance_type: t2.micro
- image: "{{ ec2_ami_image }}"
- vpc_subnet_id: "{{ subnet_2.subnet.id }}"
- instance_tags:
- Name: "{{ resource_prefix }}-inst"
- exact_count: 0
- count_tag:
- Name: "{{ resource_prefix }}-inst"
- assign_public_ip: true
- volumes: []
- wait: true
- ebs_optimized: false
- ignore_errors: true
-
- - name: remove application load balancer
- elb_application_lb:
- name: "{{ lb_name }}-alb"
- security_groups:
- - "{{ sg.group_id }}"
- subnets:
- - "{{ subnet_1.subnet.id }}"
- - "{{ subnet_2.subnet.id }}"
- listeners:
- - Protocol: HTTP
- Port: 80
- DefaultActions:
- - Type: forward
- TargetGroupName: "{{ tg_name }}-inst"
- state: absent
- wait: true
- wait_timeout: 200
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove NLB
- ignore_errors: true
- elb_network_lb:
- name: "{{ lb_name }}-nlb"
- state: absent
-
- - name: remove testing target groups
- elb_target_group:
- name: "{{ item }}"
- health_check_port: 80
- protocol: http
- port: 80
- vpc_id: '{{ vpc.vpc.id }}'
- state: absent
- target_type: instance
- tags:
- Description: "Created by {{ resource_prefix }}"
- wait: true
- wait_timeout: 200
- register: removed
- retries: 10
- until: removed is not failed
- with_items:
- - "{{ tg_name }}-idle"
- - "{{ tg_name }}-ip"
- - "{{ tg_name }}-inst"
- ignore_errors: true
-
- - name: remove testing security group
- ec2_group:
- state: absent
- name: "{{ resource_prefix }}-sg"
- description: a security group for ansible tests
- vpc_id: "{{ vpc.vpc.id }}"
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove routing rules
- ec2_vpc_route_table:
- state: absent
- lookup: id
- route_table_id: "{{ route_table.route_table.id }}"
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove testing subnet
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ vpc.vpc.id }}"
- cidr: 20.0.0.0/18
- az: "{{ aws_region }}a"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet"
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove testing subnet
- ec2_vpc_subnet:
- state: absent
- vpc_id: "{{ vpc.vpc.id }}"
- cidr: 20.0.64.0/18
- az: "{{ aws_region }}b"
- resource_tags:
- Name: "{{ resource_prefix }}-subnet"
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove testing internet gateway
- ec2_vpc_igw:
- vpc_id: "{{ vpc.vpc.id }}"
- state: absent
- register: removed
- retries: 10
- until: removed is not failed
- ignore_errors: true
-
- - name: remove testing VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: absent
- cidr_block: 20.0.0.0/16
- tags:
- Name: "{{ resource_prefix }}-vpc"
- Description: "Created by ansible-test"
- register: removed
- retries: 10
- until: removed is not failed
-
- # ============================================================
diff --git a/test/integration/targets/elb_target_info/runme.sh b/test/integration/targets/elb_target_info/runme.sh
deleted file mode 100755
index 33d2b8d0fb..0000000000
--- a/test/integration/targets/elb_target_info/runme.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-ansible-playbook -i ../../inventory -v playbooks/full_test.yml "$@"
diff --git a/test/integration/targets/iam_group/aliases b/test/integration/targets/iam_group/aliases
deleted file mode 100644
index 67ae2cc73b..0000000000
--- a/test/integration/targets/iam_group/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-unsupported
-cloud/aws
diff --git a/test/integration/targets/iam_group/defaults/main.yml b/test/integration/targets/iam_group/defaults/main.yml
deleted file mode 100644
index f5112b1a42..0000000000
--- a/test/integration/targets/iam_group/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-test_user: '{{ resource_prefix }}-user'
-test_group: '{{ resource_prefix }}-group'
diff --git a/test/integration/targets/iam_group/meta/main.yml b/test/integration/targets/iam_group/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/iam_group/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/iam_group/tasks/main.yml b/test/integration/targets/iam_group/tasks/main.yml
deleted file mode 100644
index 328fd7dbd0..0000000000
--- a/test/integration/targets/iam_group/tasks/main.yml
+++ /dev/null
@@ -1,125 +0,0 @@
----
-- name: set up aws connection info
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- - name: ensure ansible user exists
- iam_user:
- name: '{{ test_user }}'
- state: present
-
- - name: ensure group exists
- iam_group:
- name: '{{ test_group }}'
- users:
- - '{{ test_user }}'
- state: present
- register: iam_group
-
- - assert:
- that:
- - iam_group.iam_group.users
- - iam_group is changed
-
- - name: add non-existent user to group
- iam_group:
- name: '{{ test_group }}'
- users:
- - '{{ test_user }}'
- - NonExistentUser
- state: present
- ignore_errors: yes
- register: iam_group
-
- - name: assert that adding non-existent user to group fails with helpful message
- assert:
- that:
- - iam_group is failed
- - iam_group.msg.startswith("Couldn't add user NonExistentUser to group {{ test_group }}")
-
- - name: remove a user
- iam_group:
- name: '{{ test_group }}'
- purge_users: True
- users: []
- state: present
- register: iam_group
-
- - assert:
- that:
- - iam_group is changed
- - not iam_group.iam_group.users
-
- - name: re-remove a user (no change)
- iam_group:
- name: '{{ test_group }}'
- purge_users: True
- users: []
- state: present
- register: iam_group
-
- - assert:
- that:
- - iam_group is not changed
- - not iam_group.iam_group.users
-
- - name: Add the user again
- iam_group:
- name: '{{ test_group }}'
- users:
- - '{{ test_user }}'
- state: present
- register: iam_group
-
- - assert:
- that:
- - iam_group is changed
- - iam_group.iam_group.users
-
- - name: Re-add the user
- iam_group:
- name: '{{ test_group }}'
- users:
- - '{{ test_user }}'
- state: present
- register: iam_group
-
- - assert:
- that:
- - iam_group is not changed
- - iam_group.iam_group.users
-
- - name: remove group
- iam_group:
- name: '{{ test_group }}'
- state: absent
- register: iam_group
-
- - assert:
- that:
- - iam_group is changed
-
- - name: re-remove group
- iam_group:
- name: '{{ test_group }}'
- state: absent
- register: iam_group
-
- - assert:
- that:
- - iam_group is not changed
-
- always:
- - name: remove group
- iam_group:
- name: '{{ test_group }}'
- state: absent
-
- - name: remove ansible user
- iam_user:
- name: '{{ test_user }}'
- state: absent
diff --git a/test/integration/targets/iam_password_policy/aliases b/test/integration/targets/iam_password_policy/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/iam_password_policy/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/iam_password_policy/tasks/main.yaml b/test/integration/targets/iam_password_policy/tasks/main.yaml
deleted file mode 100644
index 6cffea003a..0000000000
--- a/test/integration/targets/iam_password_policy/tasks/main.yaml
+++ /dev/null
@@ -1,105 +0,0 @@
-- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- - name: set iam password policy
- iam_password_policy:
- state: present
- min_pw_length: 8
- require_symbols: false
- require_numbers: true
- require_uppercase: true
- require_lowercase: true
- allow_pw_change: true
- pw_max_age: 60
- pw_reuse_prevent: 5
- pw_expire: false
- register: result
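- # Assumed mapping, for reference only: these options appear to line up with
- # the IAM UpdateAccountPasswordPolicy API, e.g. min_pw_length ->
- # MinimumPasswordLength, pw_reuse_prevent -> PasswordReusePrevention,
- # pw_expire -> HardExpiry.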
-
- - name: assert that changes were made
- assert:
- that:
- - result.changed
-
- - name: verify iam password policy has been created
- iam_password_policy:
- state: present
- min_pw_length: 8
- require_symbols: false
- require_numbers: true
- require_uppercase: true
- require_lowercase: true
- allow_pw_change: true
- pw_max_age: 60
- pw_reuse_prevent: 5
- pw_expire: false
- register: result
-
- - name: assert that no changes were made
- assert:
- that:
- - not result.changed
-
- - name: update iam password policy with different settings
- iam_password_policy:
- state: present
- min_pw_length: 15
- require_symbols: true
- require_numbers: true
- require_uppercase: true
- require_lowercase: true
- allow_pw_change: true
- pw_max_age: 30
- pw_reuse_prevent: 10
- pw_expire: true
- register: result
-
- - name: assert that updates were made
- assert:
- that:
- - result.changed
-
- # Test for regression of #59102
- - name: update iam password policy without expiry
- iam_password_policy:
- state: present
- min_pw_length: 15
- require_symbols: true
- require_numbers: true
- require_uppercase: true
- require_lowercase: true
- allow_pw_change: true
- register: result
-
- - name: assert that changes were made
- assert:
- that:
- - result.changed
-
- - name: remove iam password policy
- iam_password_policy:
- state: absent
- register: result
-
- - name: assert password policy has been removed
- assert:
- that:
- - result.changed
-
- - name: verify password policy has been removed
- iam_password_policy:
- state: absent
- register: result
-
- - name: assert no changes were made
- assert:
- that:
- - not result.changed
- always:
- - name: remove iam password policy
- iam_password_policy:
- state: absent
- register: result
diff --git a/test/integration/targets/iam_policy/aliases b/test/integration/targets/iam_policy/aliases
deleted file mode 100644
index 3f812e1914..0000000000
--- a/test/integration/targets/iam_policy/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-iam_policy_info
-cloud/aws
-unsupported
diff --git a/test/integration/targets/iam_policy/defaults/main.yml b/test/integration/targets/iam_policy/defaults/main.yml
deleted file mode 100644
index 93759404df..0000000000
--- a/test/integration/targets/iam_policy/defaults/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-iam_name: '{{resource_prefix}}'
-iam_policy_name_a: '{{resource_prefix}}-document-a'
-iam_policy_name_b: '{{resource_prefix}}-document-b'
-iam_policy_name_c: '{{resource_prefix}}-json-a'
-iam_policy_name_d: '{{resource_prefix}}-json-b'
diff --git a/test/integration/targets/iam_policy/files/no_access.json b/test/integration/targets/iam_policy/files/no_access.json
deleted file mode 100644
index a2f2997575..0000000000
--- a/test/integration/targets/iam_policy/files/no_access.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Deny",
- "Action": "*",
- "Resource": "*"
- }
- ]
-}
diff --git a/test/integration/targets/iam_policy/files/no_access_with_id.json b/test/integration/targets/iam_policy/files/no_access_with_id.json
deleted file mode 100644
index 9d40dd54a8..0000000000
--- a/test/integration/targets/iam_policy/files/no_access_with_id.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "Id": "MyId",
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Deny",
- "Action": "*",
- "Resource": "*"
- }
- ]
-}
diff --git a/test/integration/targets/iam_policy/files/no_access_with_second_id.json b/test/integration/targets/iam_policy/files/no_access_with_second_id.json
deleted file mode 100644
index 0efbc31d49..0000000000
--- a/test/integration/targets/iam_policy/files/no_access_with_second_id.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "Id": "MyOtherId",
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Deny",
- "Action": "*",
- "Resource": "*"
- }
- ]
-}
diff --git a/test/integration/targets/iam_policy/files/no_trust.json b/test/integration/targets/iam_policy/files/no_trust.json
deleted file mode 100644
index c36616187a..0000000000
--- a/test/integration/targets/iam_policy/files/no_trust.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Deny",
- "Principal": {"AWS": "*"},
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/iam_policy/tasks/main.yml b/test/integration/targets/iam_policy/tasks/main.yml
deleted file mode 100644
index b9f3542915..0000000000
--- a/test/integration/targets/iam_policy/tasks/main.yml
+++ /dev/null
@@ -1,97 +0,0 @@
----
-- name: 'Run integration tests for IAM (inline) Policy management'
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- # ============================================================
- - name: Create a temporary folder for the policies
- tempfile:
- state: directory
- register: tmpdir
- - name: Copy over policy
- copy:
- src: no_access.json
- dest: "{{ tmpdir.path }}"
- - name: Copy over policy with Id
- copy:
- src: no_access_with_id.json
- dest: "{{ tmpdir.path }}"
- - name: Copy over policy with second Id
- copy:
- src: no_access_with_second_id.json
- dest: "{{ tmpdir.path }}"
-
- # ============================================================
- - name: Create user for tests
- iam_user:
- state: present
- name: "{{ iam_name }}"
- register: result
- - name: Ensure user was created
- assert:
- that:
- - result is changed
-
- - name: Create role for tests
- iam_role:
- state: present
- name: "{{ iam_name }}"
- assume_role_policy_document: "{{ lookup('file','no_trust.json') }}"
- register: result
- - name: Ensure role was created
- assert:
- that:
- - result is changed
-
- - name: Create group for tests
- iam_group:
- state: present
- name: "{{ iam_name }}"
- register: result
- - name: Ensure group was created
- assert:
- that:
- - result is changed
-
- # ============================================================
-
- - name: Run tests for each type of object
- include_tasks: object.yml
- loop_control:
- loop_var: iam_type
- with_items:
- - user
- - group
- - role
-
- # ============================================================
-
- always:
- # ============================================================
- - name: Remove user
- iam_user:
- state: absent
- name: "{{ iam_name }}"
- ignore_errors: yes
-
- - name: Remove role
- iam_role:
- state: absent
- name: "{{ iam_name }}"
- ignore_errors: yes
-
- - name: Remove group
- iam_group:
- state: absent
- name: "{{ iam_name }}"
- ignore_errors: yes
-
- # ============================================================
- - name: Delete temporary folder containing the policies
- file:
- state: absent
- path: "{{ tmpdir.path }}/"
diff --git a/test/integration/targets/iam_policy/tasks/object.yml b/test/integration/targets/iam_policy/tasks/object.yml
deleted file mode 100644
index 79fcda42ca..0000000000
--- a/test/integration/targets/iam_policy/tasks/object.yml
+++ /dev/null
@@ -1,1065 +0,0 @@
----
-- name: 'Run integration tests for IAM (inline) Policy management on {{ iam_type }}s'
- vars:
- iam_object_key: '{{ iam_type }}_name'
- block:
- # ============================================================
- - name: 'Fetch policies from {{ iam_type }} before making changes'
- iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- register: iam_policy_info
-
- - name: 'Assert empty policy list'
- assert:
- that:
- - iam_policy_info is succeeded
- - iam_policy_info.policies | length == 0
- - iam_policy_info.all_policy_names | length == 0
- - iam_policy_info.policy_names | length == 0
-
- - name: 'Fetch policies from non-existent {{ iam_type }}'
- iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}-junk'
- register: iam_policy_info
-
- - name: 'Assert not failed'
- assert:
- that:
- - iam_policy_info is succeeded
-
- # ============================================================
- - name: 'Create policy using document for {{ iam_type }} (check mode)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- policy_document: '{{ tmpdir.path }}/no_access.json'
- skip_duplicates: yes
- register: result
-
- - name: 'Assert policy would be added for {{ iam_type }}'
- assert:
- that:
- - result is changed
-
- - name: 'Create policy using document for {{ iam_type }}'
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- policy_document: '{{ tmpdir.path }}/no_access.json'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- register: iam_policy_info
-
- - name: 'Assert policy was added for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - result.policies | length == 1
- - iam_policy_name_a in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_name_a in iam_policy_info.policy_names
- - iam_policy_info.policy_names | length == 1
- - iam_policy_info.policies | length == 1
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_info.all_policy_names | length == 1
- - iam_policy_info.policies[0].policy_name == iam_policy_name_a
- - '"Id" not in iam_policy_info.policies[0].policy_document'
-
- - name: 'Create policy using document for {{ iam_type }} (idempotency)'
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- policy_document: '{{ tmpdir.path }}/no_access.json'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- register: iam_policy_info
-
- - name: 'Assert no change'
- assert:
- that:
- - result is not changed
- - result.policies | length == 1
- - iam_policy_name_a in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.policies | length == 1
- - iam_policy_info.all_policy_names | length == 1
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_info.policies[0].policy_name == iam_policy_name_a
- - '"Id" not in iam_policy_info.policies[0].policy_document'
-
- # ============================================================
- - name: 'Create policy using document for {{ iam_type }} (check mode) (skip_duplicates)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- policy_document: '{{ tmpdir.path }}/no_access.json'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- register: iam_policy_info
-
- - name: 'Assert policy would not be added for {{ iam_type }} (skip_duplicates)'
- assert:
- that:
- - result is not changed
- - iam_policy_info.all_policy_names | length == 1
- - '"policies" not in iam_policy_info'
- - iam_policy_name_b not in iam_policy_info.all_policy_names
-
- - name: 'Create policy using document for {{ iam_type }} (skip_duplicates)'
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- policy_document: '{{ tmpdir.path }}/no_access.json'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- register: iam_policy_info
-
- - name: 'Assert policy was not added for {{ iam_type }} (skip_duplicates)'
- assert:
- that:
- - result is not changed
- - result.policies | length == 1
- - iam_policy_name_b not in result.policies
- - result[iam_object_key] == iam_name
- - '"policies" not in iam_policy_info'
- - '"policy_names" not in iam_policy_info'
- - iam_policy_info.all_policy_names | length == 1
- - iam_policy_name_b not in iam_policy_info.all_policy_names
-
- - name: 'Create policy using document for {{ iam_type }} (check mode) (skip_duplicates = no)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- policy_document: '{{ tmpdir.path }}/no_access.json'
- skip_duplicates: no
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- register: iam_policy_info
-
- - name: 'Assert policy would be added for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - '"policies" not in iam_policy_info'
- - iam_policy_info.all_policy_names | length == 1
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_name_b not in iam_policy_info.all_policy_names
-
- - name: 'Create policy using document for {{ iam_type }} (skip_duplicates = no)'
- iam_policy:
- state: present
- skip_duplicates: no
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- policy_document: '{{ tmpdir.path }}/no_access.json'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- register: iam_policy_info
-
- - name: 'Assert policy was added for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - result.policies | length == 2
- - iam_policy_name_b in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.policies | length == 1
- - iam_policy_info.all_policy_names | length == 2
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_name_b in iam_policy_info.all_policy_names
- - iam_policy_info.policies[0].policy_name == iam_policy_name_b
- - '"Id" not in iam_policy_info.policies[0].policy_document'
-
- - name: 'Create policy using document for {{ iam_type }} (idempotency) (skip_duplicates = no)'
- iam_policy:
- state: present
- skip_duplicates: no
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- policy_document: '{{ tmpdir.path }}/no_access.json'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- register: iam_policy_info
-
- - name: 'Assert no change'
- assert:
- that:
- - result is not changed
- - result.policies | length == 2
- - iam_policy_name_b in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.policies | length == 1
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_name_b in iam_policy_info.all_policy_names
- - iam_policy_info.all_policy_names | length == 2
- - iam_policy_info.policies[0].policy_name == iam_policy_name_b
- - '"Id" not in iam_policy_info.policies[0].policy_document'
-
- # ============================================================
- - name: 'Create policy using json for {{ iam_type }} (check mode)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- policy_json: '{{ lookup("file", "{{ tmpdir.path }}/no_access_with_id.json") }}'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- register: iam_policy_info
-
- - name: 'Assert policy would be added for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - '"policies" not in iam_policy_info'
- - iam_policy_info.all_policy_names | length == 2
- - iam_policy_name_c not in iam_policy_info.all_policy_names
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_name_b in iam_policy_info.all_policy_names
-
- - name: 'Create policy using json for {{ iam_type }}'
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- policy_json: '{{ lookup("file", "{{ tmpdir.path }}/no_access_with_id.json") }}'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- register: iam_policy_info
-
- - name: 'Assert policy was added for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - result.policies | length == 3
- - iam_policy_name_c in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.policies | length == 1
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_name_b in iam_policy_info.all_policy_names
- - iam_policy_name_c in iam_policy_info.all_policy_names
- - iam_policy_info.all_policy_names | length == 3
- - iam_policy_info.policies[0].policy_name == iam_policy_name_c
- - iam_policy_info.policies[0].policy_document.Id == 'MyId'
-
- - name: 'Create policy using json for {{ iam_type }} (idempotency)'
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- policy_json: '{{ lookup("file", "{{ tmpdir.path }}/no_access_with_id.json") }}'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- register: iam_policy_info
-
- - name: 'Assert no change'
- assert:
- that:
- - result is not changed
- - result.policies | length == 3
- - iam_policy_name_c in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_name_b in iam_policy_info.all_policy_names
- - iam_policy_name_c in iam_policy_info.all_policy_names
- - iam_policy_info.all_policy_names | length == 3
- - iam_policy_info.policies[0].policy_name == iam_policy_name_c
- - iam_policy_info.policies[0].policy_document.Id == 'MyId'
-
- # ============================================================
- - name: 'Create policy using json for {{ iam_type }} (check mode) (skip_duplicates)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- policy_json: '{{ lookup("file", "{{ tmpdir.path }}/no_access_with_id.json") }}'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert policy would not be added for {{ iam_type }} (skip_duplicates)'
- assert:
- that:
- - result is not changed
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_name_b in iam_policy_info.all_policy_names
- - iam_policy_name_c in iam_policy_info.all_policy_names
- - iam_policy_name_d not in iam_policy_info.all_policy_names
- - iam_policy_info.all_policy_names | length == 3
- - '"policies" not in iam_policy_info'
-
- - name: 'Create policy using json for {{ iam_type }} (skip_duplicates)'
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- policy_json: '{{ lookup("file", "{{ tmpdir.path }}/no_access_with_id.json") }}'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert policy was not added for {{ iam_type }} (skip_duplicates)'
- assert:
- that:
- - result is not changed
- - result.policies | length == 3
- - iam_policy_name_d not in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_name_b in iam_policy_info.all_policy_names
- - iam_policy_name_c in iam_policy_info.all_policy_names
- - iam_policy_name_d not in iam_policy_info.all_policy_names
- - iam_policy_info.all_policy_names | length == 3
- - '"policies" not in iam_policy_info'
-
- - name: 'Create policy using json for {{ iam_type }} (check mode) (skip_duplicates = no)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- policy_json: '{{ lookup("file", "{{ tmpdir.path }}/no_access_with_id.json") }}'
- skip_duplicates: no
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert policy would be added for {{ iam_type }}'
- assert:
- that:
- - result is changed
-
- - name: 'Create policy using json for {{ iam_type }} (skip_duplicates = no)'
- iam_policy:
- state: present
- skip_duplicates: no
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- policy_json: '{{ lookup("file", "{{ tmpdir.path }}/no_access_with_id.json") }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert policy was added for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - result.policies | length == 4
- - iam_policy_name_d in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_name_b in iam_policy_info.all_policy_names
- - iam_policy_name_c in iam_policy_info.all_policy_names
- - iam_policy_name_d in iam_policy_info.all_policy_names
- - iam_policy_name_a not in iam_policy_info.policy_names
- - iam_policy_name_b not in iam_policy_info.policy_names
- - iam_policy_name_c not in iam_policy_info.policy_names
- - iam_policy_name_d in iam_policy_info.policy_names
- - iam_policy_info.policy_names | length == 1
- - iam_policy_info.all_policy_names | length == 4
- - iam_policy_info.policies[0].policy_name == iam_policy_name_d
- - iam_policy_info.policies[0].policy_document.Id == 'MyId'
-
- - name: 'Create policy using json for {{ iam_type }} (idempotency) (skip_duplicates = no)'
- iam_policy:
- state: present
- skip_duplicates: no
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- policy_json: '{{ lookup("file", "{{ tmpdir.path }}/no_access_with_id.json") }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert no change'
- assert:
- that:
- - result is not changed
- - result.policies | length == 4
- - iam_policy_name_d in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_name_b in iam_policy_info.all_policy_names
- - iam_policy_name_c in iam_policy_info.all_policy_names
- - iam_policy_name_d in iam_policy_info.all_policy_names
- - iam_policy_info.all_policy_names | length == 4
- - iam_policy_info.policies[0].policy_name == iam_policy_name_d
- - iam_policy_info.policies[0].policy_document.Id == 'MyId'
-
- # ============================================================
- - name: 'Test fetching multiple policies from {{ iam_type }}'
- iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- register: iam_policy_info
-
- - name: 'Assert all policies returned'
- assert:
- that:
- - iam_policy_info is succeeded
- - iam_policy_info.policies | length == 4
- - iam_policy_info.all_policy_names | length == 4
- - iam_policy_name_a in iam_policy_info.all_policy_names
- - iam_policy_name_b in iam_policy_info.all_policy_names
- - iam_policy_name_c in iam_policy_info.all_policy_names
- - iam_policy_name_d in iam_policy_info.all_policy_names
- # Quick test that the policies are the ones we expect
- - iam_policy_info.policies | json_query('[*].policy_name') | length == 4
- - iam_policy_info.policies | json_query('[?policy_document.Id == `MyId`].policy_name') | length == 2
- - iam_policy_name_c in (iam_policy_info.policies | json_query('[?policy_document.Id == `MyId`].policy_name') | list)
- - iam_policy_name_d in (iam_policy_info.policies | json_query('[?policy_document.Id == `MyId`].policy_name') | list)
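- # Illustrative reading: the JMESPath filter '[?policy_document.Id == `MyId`]'
- # keeps just the policies whose document carries Id 'MyId' -- the two created
- # from no_access_with_id.json -- and '.policy_name' projects their names, so
- # the membership checks pin down exactly which two matched.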
-
- # ============================================================
- - name: 'Update policy using document for {{ iam_type }} (check mode) (skip_duplicates)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- policy_document: '{{ tmpdir.path }}/no_access_with_id.json'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- register: iam_policy_info
-
- - name: 'Assert policy would not be updated for {{ iam_type }} (skip_duplicates)'
- assert:
- that:
- - result is not changed
- - iam_policy_info.policies[0].policy_name == iam_policy_name_a
- - '"Id" not in iam_policy_info.policies[0].policy_document'
-
- - name: 'Update policy using document for {{ iam_type }} (skip_duplicates)'
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- policy_document: '{{ tmpdir.path }}/no_access_with_id.json'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- register: iam_policy_info
-
- - name: 'Assert policy was not updated for {{ iam_type }} (skip_duplicates)'
- assert:
- that:
- - result is not changed
- - result.policies | length == 4
- - iam_policy_name_a in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.all_policy_names | length == 4
- - iam_policy_info.policies[0].policy_name == iam_policy_name_a
- - '"Id" not in iam_policy_info.policies[0].policy_document'
-
- - name: 'Update policy using document for {{ iam_type }} (check mode) (skip_duplicates = no)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- policy_document: '{{ tmpdir.path }}/no_access_with_id.json'
- skip_duplicates: no
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- register: iam_policy_info
-
- - name: 'Assert policy would be updated for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - iam_policy_info.all_policy_names | length == 4
- - iam_policy_info.policies[0].policy_name == iam_policy_name_a
- - '"Id" not in iam_policy_info.policies[0].policy_document'
-
- - name: 'Update policy using document for {{ iam_type }} (skip_duplicates = no)'
- iam_policy:
- state: present
- skip_duplicates: no
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- policy_document: '{{ tmpdir.path }}/no_access_with_id.json'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- register: iam_policy_info
-
- - name: 'Assert policy was updated for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - result.policies | length == 4
- - iam_policy_name_a in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.policies[0].policy_document.Id == 'MyId'
-
- - name: 'Update policy using document for {{ iam_type }} (idempotency) (skip_duplicates = no)'
- iam_policy:
- state: present
- skip_duplicates: no
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- policy_document: '{{ tmpdir.path }}/no_access_with_id.json'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- register: iam_policy_info
-
- - name: 'Assert no change'
- assert:
- that:
- - result is not changed
- - result.policies | length == 4
- - iam_policy_name_a in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.policies[0].policy_document.Id == 'MyId'
-
- - name: 'Delete policy A'
- iam_policy:
- state: absent
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- register: iam_policy_info
-
- - name: 'Assert deleted'
- assert:
- that:
- - result is changed
- - result.policies | length == 3
- - iam_policy_name_a not in result.policies
- - result[iam_object_key] == iam_name
- - '"policies" not in iam_policy_info'
- - iam_policy_info.all_policy_names | length == 3
- - iam_policy_name_a not in iam_policy_info.all_policy_names
-
- # ============================================================
- # Update C with no_access.json
- # Delete C
-
- - name: 'Update policy using json for {{ iam_type }} (check mode) (skip_duplicates)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- policy_json: '{{ lookup("file", tmpdir.path + "/no_access.json") }}'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- register: iam_policy_info
-
- - name: 'Assert policy would not be updated for {{ iam_type }} (skip_duplicates)'
- assert:
- that:
- - result is not changed
- - iam_policy_info.policies[0].policy_document.Id == 'MyId'
-
- - name: 'Update policy using json for {{ iam_type }} (skip_duplicates)'
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- policy_json: '{{ lookup("file", tmpdir.path + "/no_access.json") }}'
- skip_duplicates: yes
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- register: iam_policy_info
-
- - name: 'Assert policy was not updated for {{ iam_type }} (skip_duplicates)'
- assert:
- that:
- - result is not changed
- - result.policies | length == 3
- - iam_policy_name_c in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.policies[0].policy_document.Id == 'MyId'
-
- - name: 'Update policy using json for {{ iam_type }} (check mode) (skip_duplicates = no)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- policy_json: '{{ lookup("file", tmpdir.path + "/no_access.json") }}'
- skip_duplicates: no
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- register: iam_policy_info
-
- - name: 'Assert policy would be updated for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - iam_policy_info.policies[0].policy_document.Id == 'MyId'
-
- - name: 'Update policy using json for {{ iam_type }} (skip_duplicates = no)'
- iam_policy:
- state: present
- skip_duplicates: no
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- policy_json: '{{ lookup("file", tmpdir.path + "/no_access.json") }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- register: iam_policy_info
-
- - name: 'Assert policy was updated for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - result.policies | length == 3
- - iam_policy_name_c in result.policies
- - result[iam_object_key] == iam_name
- - '"Id" not in iam_policy_info.policies[0].policy_document'
-
- - name: 'Update policy using json for {{ iam_type }} (idempotency) (skip_duplicates = no)'
- iam_policy:
- state: present
- skip_duplicates: no
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- policy_json: '{{ lookup("file", tmpdir.path + "/no_access.json") }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- register: iam_policy_info
-
- - name: 'Assert no change'
- assert:
- that:
- - result is not changed
- - result.policies | length == 3
- - iam_policy_name_c in result.policies
- - result[iam_object_key] == iam_name
- - '"Id" not in iam_policy_info.policies[0].policy_document'
-
- - name: 'Delete policy C'
- iam_policy:
- state: absent
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- register: iam_policy_info
-
- - name: 'Assert deleted'
- assert:
- that:
- - result is changed
- - result.policies | length == 2
- - iam_policy_name_c not in result.policies
- - result[iam_object_key] == iam_name
- - '"policies" not in iam_policy_info'
- - iam_policy_info.all_policy_names | length == 2
- - iam_policy_name_c not in iam_policy_info.all_policy_names
-
- # ============================================================
- - name: 'Update policy using document for {{ iam_type }} (check mode)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- policy_document: '{{ tmpdir.path }}/no_access_with_second_id.json'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- register: iam_policy_info
-
- - name: 'Assert policy would be updated for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - '"Id" not in iam_policy_info.policies[0].policy_document'
-
- - name: 'Update policy using document for {{ iam_type }}'
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- policy_document: '{{ tmpdir.path }}/no_access_with_second_id.json'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- register: iam_policy_info
-
- - name: 'Assert policy was updated for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - result.policies | length == 2
- - iam_policy_name_b in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
-
- - name: 'Update policy using document for {{ iam_type }} (idempotency)'
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- policy_document: '{{ tmpdir.path }}/no_access_with_second_id.json'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- register: iam_policy_info
-
- - name: 'Assert no change'
- assert:
- that:
- - result is not changed
- - result.policies | length == 2
- - iam_policy_name_b in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
-
- - name: 'Delete policy B'
- iam_policy:
- state: absent
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- register: iam_policy_info
-
- - name: 'Assert deleted'
- assert:
- that:
- - result is changed
- - result.policies | length == 1
- - iam_policy_name_b not in result.policies
- - result[iam_object_key] == iam_name
- - '"policies" not in iam_policy_info'
- - iam_policy_info.all_policy_names | length == 1
- - iam_policy_name_b not in iam_policy_info.all_policy_names
-
- # ============================================================
- - name: 'Update policy using json for {{ iam_type }} (check mode)'
- check_mode: yes
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_second_id.json") }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert policy would be updated for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - iam_policy_info.policies[0].policy_document.Id == 'MyId'
-
- - name: 'Update policy using json for {{ iam_type }}'
- iam_policy:
- state: present
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- policy_json: '{{ lookup("file", tmpdir.path + "/no_access_with_second_id.json") }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert policy was updated for {{ iam_type }}'
- assert:
- that:
- - result is changed
- - result.policies | length == 1
- - iam_policy_name_d in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
-
- - name: 'Update policy using json for {{ iam_type }} (idempotency)'
- iam_policy:
- state: present
- skip_duplicates: no
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- policy_json: '{{ lookup("file", "{{ tmpdir.path }}/no_access_with_second_id.json") }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert no change'
- assert:
- that:
- - result is not changed
- - result.policies | length == 1
- - iam_policy_name_d in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
-
- # ============================================================
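- # A delete in check_mode must report a change without actually removing the policy.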
- - name: 'Delete policy D (check_mode)'
- check_mode: yes
- iam_policy:
- state: absent
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert not deleted'
- assert:
- that:
- - result is changed
- - result.policies | length == 1
- - iam_policy_name_d in result.policies
- - result[iam_object_key] == iam_name
- - iam_policy_info.all_policy_names | length == 1
- - iam_policy_name_d in iam_policy_info.all_policy_names
- - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId'
-
- - name: 'Delete policy D'
- iam_policy:
- state: absent
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert deleted'
- assert:
- that:
- - result is changed
- - '"policies" not in iam_policy_info'
- - iam_policy_name_d not in result.policies
- - result[iam_object_key] == iam_name
- - '"policies" not in iam_policy_info'
- - iam_policy_info.all_policy_names | length == 0
-
- - name: 'Delete policy D (test idempotency)'
- iam_policy:
- state: absent
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert deleted'
- assert:
- that:
- - result is not changed
- - '"policies" not in iam_policy_info'
- - iam_policy_info.all_policy_names | length == 0
-
- - name: 'Delete policy D (check_mode) (test idempotency)'
- check_mode: yes
- iam_policy:
- state: absent
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: result
- - iam_policy_info:
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- register: iam_policy_info
-
- - name: 'Assert deleted'
- assert:
- that:
- - result is not changed
- - '"policies" not in iam_policy_info'
- - iam_policy_info.all_policy_names | length == 0
-
- always:
- # ============================================================
- - name: 'Delete policy A for {{ iam_type }}'
- iam_policy:
- state: absent
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_a }}'
- ignore_errors: yes
-
- - name: 'Delete policy B for {{ iam_type }}'
- iam_policy:
- state: absent
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_b }}'
- ignore_errors: yes
-
- - name: 'Delete policy C for {{ iam_type }}'
- iam_policy:
- state: absent
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_c }}'
- ignore_errors: yes
-
- - name: 'Delete policy D for {{ iam_type }}'
- iam_policy:
- state: absent
- iam_type: '{{ iam_type }}'
- iam_name: '{{ iam_name }}'
- policy_name: '{{ iam_policy_name_d }}'
- ignore_errors: yes
diff --git a/test/integration/targets/iam_role/aliases b/test/integration/targets/iam_role/aliases
deleted file mode 100644
index 3d7a2c9f14..0000000000
--- a/test/integration/targets/iam_role/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-iam_role_info
-unsupported
-cloud/aws
diff --git a/test/integration/targets/iam_role/defaults/main.yml b/test/integration/targets/iam_role/defaults/main.yml
deleted file mode 100644
index 46db605072..0000000000
--- a/test/integration/targets/iam_role/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-test_role: '{{ resource_prefix }}-role'
-test_path: '/{{ resource_prefix }}/'
-safe_managed_policy: 'AWSDenyAll'
-custom_policy_name: '{{ resource_prefix }}-denyall'
-boundary_policy: 'arn:aws:iam::aws:policy/AWSDenyAll'
-paranoid_pauses: no
-standard_pauses: no
diff --git a/test/integration/targets/iam_role/files/deny-all-a.json b/test/integration/targets/iam_role/files/deny-all-a.json
deleted file mode 100644
index ae62fd1975..0000000000
--- a/test/integration/targets/iam_role/files/deny-all-a.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Action": [
- "*"
- ],
- "Effect": "Deny",
- "Resource": "*",
- "Sid": "DenyA"
- }
- ]
-}
diff --git a/test/integration/targets/iam_role/files/deny-all-b.json b/test/integration/targets/iam_role/files/deny-all-b.json
deleted file mode 100644
index 3a4704a46a..0000000000
--- a/test/integration/targets/iam_role/files/deny-all-b.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Action": [
- "*"
- ],
- "Effect": "Deny",
- "Resource": "*",
- "Sid": "DenyB"
- }
- ]
-}
diff --git a/test/integration/targets/iam_role/files/deny-all.json b/test/integration/targets/iam_role/files/deny-all.json
deleted file mode 100644
index 3d324b9b9c..0000000000
--- a/test/integration/targets/iam_role/files/deny-all.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Action": [
- "*"
- ],
- "Effect": "Deny",
- "Resource": "*"
- }
- ]
-}
diff --git a/test/integration/targets/iam_role/files/deny-assume.json b/test/integration/targets/iam_role/files/deny-assume.json
deleted file mode 100644
index 73e8771586..0000000000
--- a/test/integration/targets/iam_role/files/deny-assume.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Action": "sts:AssumeRole",
- "Principal": { "Service": "ec2.amazonaws.com" },
- "Effect": "Deny"
- }
- ]
-}
diff --git a/test/integration/targets/iam_role/meta/main.yml b/test/integration/targets/iam_role/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/iam_role/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/iam_role/tasks/main.yml b/test/integration/targets/iam_role/tasks/main.yml
deleted file mode 100644
index 676179bd71..0000000000
--- a/test/integration/targets/iam_role/tasks/main.yml
+++ /dev/null
@@ -1,1519 +0,0 @@
----
-# Tests for iam_role and iam_role_info
-#
-# Tests:
-# - Minimal Role creation
-# - Role deletion
-# - Fetching a specific role
-# - Creating roles w/ and w/o instance profiles
-# - Creating roles w/ a path
-# - Updating Max Session Duration
-# - Updating Description
-# - Managing list of managed policies
-# - Managing list of inline policies (for testing _info)
-# - Managing boundary policy
-#
-# Notes:
-# - Only tests *documented* return values (RESULT.iam_role)
-# - There are some known timing issues with boto3 returning before actions
-#   complete. If you see problems with "changed" status, it's worth enabling
-#   the standard_pauses and paranoid_pauses options as a first step in debugging.
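-#   (Both pauses default to "no" in defaults/main.yml; one illustrative way to
-#   enable them is to pass "-e standard_pauses=yes -e paranoid_pauses=yes" on
-#   the command line.)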
-#
-# Possible Bugs:
-# - Fails to delete role if inline policies not removed first
-
-- name: 'Setup AWS connection info'
- module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region }}'
- iam_role:
- assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
- block:
- # ===================================================================
- # Parameter Checks
- - name: 'Friendly message when creating an instance profile and adding a boundary policy'
- iam_role:
- name: '{{ test_role }}'
- boundary: '{{ boundary_policy }}'
- register: iam_role
- ignore_errors: yes
- - assert:
- that:
- - iam_role is failed
- - '"boundary policy" in iam_role.msg'
- - '"create_instance_profile" in iam_role.msg'
- - '"false" in iam_role.msg'
-
- - name: 'Friendly message when boundary policy is not an ARN'
- iam_role:
- name: '{{ test_role }}'
- boundary: 'AWSDenyAll'
- create_instance_profile: no
- register: iam_role
- ignore_errors: yes
- - assert:
- that:
- - iam_role is failed
- - '"Boundary policy" in iam_role.msg'
- - '"ARN" in iam_role.msg'
-
- - name: 'Friendly message when "present" without assume_role_policy_document'
- module_defaults: { iam_role: {} }
- iam_role:
- name: '{{ test_role }}'
- register: iam_role
- ignore_errors: yes
- - assert:
- that:
- - iam_role is failed
- - 'iam_role.msg.startswith("state is present but all of the following are missing")'
- - '"assume_role_policy_document" in iam_role.msg'
-
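- # AWS accepts max_session_duration values of 3600-43200 seconds (1-12 hours);
- # 3599 and 43201 below sit just outside that range and should fail cleanly.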
- - name: 'Maximum Session Duration needs to be between 1 and 12 hours (too low)'
- iam_role:
- name: '{{ test_role }}'
- max_session_duration: 3599
- register: iam_role
- ignore_errors: yes
- - assert:
- that:
- - iam_role is failed
- - '"max_session_duration must be between" in iam_role.msg'
-
- - name: 'Maximum Session Duration needs to be between 1 and 12 hours (too high)'
- iam_role:
- name: '{{ test_role }}'
- max_session_duration: 43201
- register: iam_role
- ignore_errors: yes
- - assert:
- that:
- - iam_role is failed
- - '"max_session_duration must be between" in iam_role.msg'
-
- - name: 'Role Paths must start with /'
- iam_role:
- name: '{{ test_role }}'
- path: 'test/'
- register: iam_role
- ignore_errors: yes
- - assert:
- that:
- - iam_role is failed
- - '"path must begin and end with /" in iam_role.msg'
-
- - name: 'Role Paths must end with /'
- iam_role:
- name: '{{ test_role }}'
- path: '/test'
- register: iam_role
- ignore_errors: yes
- - assert:
- that:
- - iam_role is failed
- - '"path must begin and end with /" in iam_role.msg'
-
- # ===================================================================
- # Supplemental resource pre-creation
- - name: 'Create Safe IAM Managed Policy'
- iam_managed_policy:
- state: present
- policy_name: '{{ custom_policy_name }}'
- policy_description: "A safe (deny-all) managed policy"
- policy: "{{ lookup('file', 'deny-all.json') }}"
- register: create_managed_policy
- - assert:
- that:
- - create_managed_policy is succeeded
-
- # ===================================================================
- # Rapid Role Creation and deletion
- - name: Try running some rapid fire create/delete tests
- # We've previously seen issues with iam_role returning before creation's
- # actually complete. If we think the issue's gone, let's try creating and
- # deleting things in quick succession.
- when: not (standard_pauses | bool)
- block:
- - name: 'Minimal IAM Role without instance profile (rapid)'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: no
- register: iam_role
- - name: 'Minimal IAM Role without instance profile (rapid)'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: no
- register: iam_role_again
- - assert:
- that:
- - iam_role is changed
- - iam_role_again is not changed
- - name: 'Remove IAM Role (rapid)'
- iam_role:
- state: absent
- name: '{{ test_role }}'
- register: iam_role
- - name: 'Remove IAM Role (rapid)'
- iam_role:
- state: absent
- name: '{{ test_role }}'
- register: iam_role_again
- - assert:
- that:
- - iam_role is changed
- - iam_role_again is not changed
-
- - name: 'Minimal IAM Role without instance profile (rapid)'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: no
- register: iam_role
- - name: 'Remove IAM Role (rapid)'
- iam_role:
- state: absent
- name: '{{ test_role }}'
- register: iam_role_again
- - assert:
- that:
- - iam_role is changed
- - iam_role_again is changed
-
- # ===================================================================
- # Role Creation
- # (without Instance profile)
- - name: 'iam_role_info before Role creation (no args)'
- iam_role_info:
- register: role_info
- - assert:
- that:
- - role_info is succeeded
-
- - name: 'iam_role_info before Role creation (search for test role)'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 0
-
- - name: 'Minimal IAM Role (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: no
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- # Pause this first time, just in case we actually created something...
- - name: Short pause for role creation to finish
- pause:
- seconds: 10
- when: standard_pauses | bool
-
- - name: 'iam_role_info after Role creation in check_mode'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 0
-
- - name: 'Minimal IAM Role without instance profile'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: no
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
- - 'iam_role.iam_role.arn.startswith("arn")'
- - 'iam_role.iam_role.arn.endswith("role/" + test_role )'
- # Would be nice to test the contents...
- - '"assume_role_policy_document" in iam_role.iam_role'
- - iam_role.iam_role.attached_policies | length == 0
- - iam_role.iam_role.max_session_duration == 3600
- - iam_role.iam_role.path == '/'
- - '"create_date" in iam_role.iam_role'
- - '"role_id" in iam_role.iam_role'
- - name: Short pause for role creation to finish
- pause:
- seconds: 10
- when: standard_pauses | bool
-
- - name: 'Minimal IAM Role without instance profile (no change)'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: no
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'iam_role_info after Role creation'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - '"description" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 0
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 3600
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 0
-
- - name: 'Remove IAM Role'
- iam_role:
- state: absent
- name: '{{ test_role }}'
- delete_instance_profile: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - name: Short pause for role removal to finish
- pause:
- seconds: 10
- when: paranoid_pauses | bool
-
- - name: 'iam_role_info after Role deletion'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 0
-
- # (with path)
- - name: 'Minimal IAM Role with path (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- path: '{{ test_path }}'
- register: iam_role
- check_mode: yes
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Minimal IAM Role with path'
- iam_role:
- name: '{{ test_role }}'
- path: '{{ test_path }}'
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
- - 'iam_role.iam_role.arn.startswith("arn")'
- - 'iam_role.iam_role.arn.endswith("role" + test_path + test_role )'
- # Would be nice to test the contents...
- - '"assume_role_policy_document" in iam_role.iam_role'
- - iam_role.iam_role.attached_policies | length == 0
- - iam_role.iam_role.max_session_duration == 3600
- - iam_role.iam_role.path == '{{ test_path }}'
- - '"create_date" in iam_role.iam_role'
- - '"role_id" in iam_role.iam_role'
- - name: Short pause for role creation to finish
- pause:
- seconds: 10
- when: standard_pauses | bool
-
- - name: 'Minimal IAM Role with path (no change)'
- iam_role:
- name: '{{ test_role }}'
- path: '{{ test_path }}'
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'iam_role_info after Role creation'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - '"description" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile" + test_path + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 3600
- - role_info.iam_roles[0].path == '{{ test_path }}'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 0
-
- - name: 'iam_role_info after Role creation (searching a path)'
- iam_role_info:
- path_prefix: '{{ test_path }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - '"description" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile" + test_path + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 3600
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].path == '{{ test_path }}'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 0
-
- - name: 'Remove IAM Role'
- iam_role:
- state: absent
- name: '{{ test_role }}'
- path: '{{ test_path }}'
- # If we don't delete the existing profile it'll be reused (with the path)
- # by the test below.
- delete_instance_profile: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - name: Short pause for role removal to finish
- pause:
- seconds: 10
- when: paranoid_pauses | bool
-
- - name: 'iam_role_info after Role deletion'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 0
-
- # (with Instance profile)
- - name: 'Minimal IAM Role with instance profile'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
- - 'iam_role.iam_role.arn.startswith("arn")'
- - 'iam_role.iam_role.arn.endswith("role/" + test_role )'
- # Would be nice to test the contents...
- - '"assume_role_policy_document" in iam_role.iam_role'
- - iam_role.iam_role.attached_policies | length == 0
- - iam_role.iam_role.max_session_duration == 3600
- - iam_role.iam_role.path == '/'
- - '"create_date" in iam_role.iam_role'
- - '"role_id" in iam_role.iam_role'
- - name: Short pause for role creation to finish
- pause:
- seconds: 10
- when: standard_pauses | bool
-
- - name: 'Minimal IAM Role with instance profile (no change)'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: yes
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'iam_role_info after Role creation'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - '"description" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 3600
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 0
-
- # ===================================================================
- # Max Session Duration Manipulation
-
- - name: 'Update Max Session Duration (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- max_session_duration: 43200
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Update Max Session Duration'
- iam_role:
- name: '{{ test_role }}'
- max_session_duration: 43200
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
- - iam_role.iam_role.max_session_duration == 43200
-
- - name: 'Update Max Session Duration (no change)'
- iam_role:
- name: '{{ test_role }}'
- max_session_duration: 43200
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'iam_role_info after updating Max Session Duration'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - '"description" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 0
-
- # ===================================================================
- # Description Manipulation
-
- - name: 'Add Description (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- description: 'Ansible Test Role {{ resource_prefix }}'
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Add Description'
- iam_role:
- name: '{{ test_role }}'
- description: 'Ansible Test Role {{ resource_prefix }}'
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
- - iam_role.iam_role.description == 'Ansible Test Role {{ resource_prefix }}'
-
- - name: 'Add Description (no change)'
- iam_role:
- name: '{{ test_role }}'
- description: 'Ansible Test Role {{ resource_prefix }}'
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
- - iam_role.iam_role.description == 'Ansible Test Role {{ resource_prefix }}'
-
- - name: 'iam_role_info after adding Description'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - 'role_info.iam_roles[0].description == "Ansible Test Role {{ resource_prefix }}"'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 0
-
- - name: 'Update Description (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- description: 'Ansible Test Role (updated) {{ resource_prefix }}'
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Update Description'
- iam_role:
- name: '{{ test_role }}'
- description: 'Ansible Test Role (updated) {{ resource_prefix }}'
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
- - iam_role.iam_role.description == 'Ansible Test Role (updated) {{ resource_prefix }}'
-
- - name: 'Update Description (no change)'
- iam_role:
- name: '{{ test_role }}'
- description: 'Ansible Test Role (updated) {{ resource_prefix }}'
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
- - iam_role.iam_role.description == 'Ansible Test Role (updated) {{ resource_prefix }}'
-
- - name: 'iam_role_info after updating Description'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 0
-
-
- # ===================================================================
- # Tag Manipulation
-
- - name: 'Add Tag (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- tags:
- TagA: ValueA
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Add Tag'
- iam_role:
- name: '{{ test_role }}'
- tags:
- TagA: ValueA
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
- - iam_role.iam_role.tags | length == 1
- - '"TagA" in iam_role.iam_role.tags'
- - iam_role.iam_role.tags.TagA == "ValueA"
-
- - name: 'Add Tag (no change)'
- iam_role:
- name: '{{ test_role }}'
- tags:
- TagA: ValueA
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
- - '"TagA" in iam_role.iam_role.tags'
- - iam_role.iam_role.tags.TagA == "ValueA"
-
- - name: 'iam_role_info after adding Tags'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 1
- - '"TagA" in role_info.iam_roles[0].tags'
- - role_info.iam_roles[0].tags.TagA == "ValueA"
-
- - name: 'Update Tag (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- tags:
- TagA: AValue
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Update Tag'
- iam_role:
- name: '{{ test_role }}'
- tags:
- TagA: AValue
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
- - '"TagA" in iam_role.iam_role.tags'
- - iam_role.iam_role.tags.TagA == "AValue"
-
- - name: 'Update Tag (no change)'
- iam_role:
- name: '{{ test_role }}'
- tags:
- TagA: AValue
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
- - '"TagA" in iam_role.iam_role.tags'
- - iam_role.iam_role.tags.TagA == "AValue"
-
- - name: 'iam_role_info after updating Tag'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 1
- - '"TagA" in role_info.iam_roles[0].tags'
- - role_info.iam_roles[0].tags.TagA == "AValue"
-
- - name: 'Add second Tag without purge (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- purge_tags: no
- tags:
- TagB: ValueB
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Add second Tag without purge'
- iam_role:
- name: '{{ test_role }}'
- purge_tags: no
- tags:
- TagB: ValueB
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
- - '"TagB" in iam_role.iam_role.tags'
- - iam_role.iam_role.tags.TagB == "ValueB"
-
- - name: 'Add second Tag without purge (no change)'
- iam_role:
- name: '{{ test_role }}'
- purge_tags: no
- tags:
- TagB: ValueB
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
- - '"TagB" in iam_role.iam_role.tags'
- - iam_role.iam_role.tags.TagB == "ValueB"
-
- - name: 'iam_role_info after adding second Tag without purge'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 2
- - '"TagA" in role_info.iam_roles[0].tags'
- - role_info.iam_roles[0].tags.TagA == "AValue"
- - '"TagB" in role_info.iam_roles[0].tags'
- - role_info.iam_roles[0].tags.TagB == "ValueB"
-
- - name: 'Purge first tag (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- purge_tags: yes
- tags:
- TagB: ValueB
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Purge first tag'
- iam_role:
- name: '{{ test_role }}'
- purge_tags: yes
- tags:
- TagB: ValueB
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
- - '"TagB" in iam_role.iam_role.tags'
- - iam_role.iam_role.tags.TagB == "ValueB"
-
- - name: 'Purge first tag (no change)'
- iam_role:
- name: '{{ test_role }}'
- purge_tags: yes
- tags:
- TagB: ValueB
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
- - '"TagB" in iam_role.iam_role.tags'
- - iam_role.iam_role.tags.TagB == "ValueB"
-
- - name: 'iam_role_info after purging first Tag'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 1
- - '"TagA" not in role_info.iam_roles[0].tags'
- - '"TagB" in role_info.iam_roles[0].tags'
- - role_info.iam_roles[0].tags.TagB == "ValueB"
-
-
- # ===================================================================
- # Policy Manipulation
-
- - name: 'Add Managed Policy (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- purge_policies: no
- managed_policy:
- - '{{ safe_managed_policy }}'
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Add Managed Policy'
- iam_role:
- name: '{{ test_role }}'
- purge_policies: no
- managed_policy:
- - '{{ safe_managed_policy }}'
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'Add Managed Policy (no change)'
- iam_role:
- name: '{{ test_role }}'
- purge_policies: no
- managed_policy:
- - '{{ safe_managed_policy }}'
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'iam_role_info after adding Managed Policy'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 1
- - safe_managed_policy in ( role_info | json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
- - custom_policy_name not in ( role_info | json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 1
- - '"TagB" in role_info.iam_roles[0].tags'
- - role_info.iam_roles[0].tags.TagB == "ValueB"
-
- - name: 'Update Managed Policy without purge (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- purge_policies: no
- managed_policy:
- - '{{ custom_policy_name }}'
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Update Managed Policy without purge'
- iam_role:
- name: '{{ test_role }}'
- purge_policies: no
- managed_policy:
- - '{{ custom_policy_name }}'
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'Update Managed Policy without purge (no change)'
- iam_role:
- name: '{{ test_role }}'
- purge_policies: no
- managed_policy:
- - '{{ custom_policy_name }}'
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'iam_role_info after updating Managed Policy without purge'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 2
- - safe_managed_policy in ( role_info | json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
- - custom_policy_name in ( role_info | json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 1
- - '"TagB" in role_info.iam_roles[0].tags'
- - role_info.iam_roles[0].tags.TagB == "ValueB"
-
- # Managed Policies are purged by default
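- # (purge_policies defaults to yes; the tasks above passed purge_policies: no
- # to preserve the existing attachments)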
- - name: 'Update Managed Policy with purge (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- managed_policy:
- - '{{ custom_policy_name }}'
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Update Managed Policy with purge'
- iam_role:
- name: '{{ test_role }}'
- managed_policy:
- - '{{ custom_policy_name }}'
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'Update Managed Policy with purge (no change)'
- iam_role:
- name: '{{ test_role }}'
- managed_policy:
- - '{{ custom_policy_name }}'
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'iam_role_info after updating Managed Policy with purge'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 1
- - safe_managed_policy not in ( role_info | json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
- - custom_policy_name in ( role_info | json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 1
- - '"TagB" in role_info.iam_roles[0].tags'
- - role_info.iam_roles[0].tags.TagB == "ValueB"
-
- # ===================================================================
- # Inline Policy (test _info behaviour)
-
- # XXX Not sure if it's a bug in Ansible or a "quirk" of AWS, but these two
- # policies need to have different Sids, or the second one doesn't show up...
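- # A minimal sketch of the distinction (assumption: the real deny-all-a.json
- # and deny-all-b.json may differ in other details too):
- #   {"Version": "2012-10-17",
- #    "Statement": [{"Sid": "DenyA", "Effect": "Deny", "Action": "*", "Resource": "*"}]}
- #   {"Version": "2012-10-17",
- #    "Statement": [{"Sid": "DenyB", "Effect": "Deny", "Action": "*", "Resource": "*"}]}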
-
- - name: 'Attach inline policy a'
- iam_policy:
- state: present
- iam_type: 'role'
- iam_name: '{{ test_role }}'
- policy_name: 'inline-policy-a'
- policy_json: '{{ lookup("file", "deny-all-a.json") }}'
-
- - name: 'Attach inline policy b'
- iam_policy:
- state: present
- iam_type: 'role'
- iam_name: '{{ test_role }}'
- policy_name: 'inline-policy-b'
- policy_json: '{{ lookup("file", "deny-all-b.json") }}'
-
- - name: 'iam_role_info after attaching inline policies (using iam_policy)'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - 'role_info.iam_roles[0].description == "Ansible Test Role (updated) {{ resource_prefix }}"'
- - role_info.iam_roles[0].inline_policies | length == 2
- - '"inline-policy-a" in role_info.iam_roles[0].inline_policies'
- - '"inline-policy-b" in role_info.iam_roles[0].inline_policies'
- - role_info.iam_roles[0].instance_profiles | length == 1
- - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
- - 'role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)'
- - role_info.iam_roles[0].managed_policies | length == 1
- - safe_managed_policy not in ( role_info | json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
- - custom_policy_name in ( role_info | json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == '/'
- - '"permissions_boundary" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - role_info.iam_roles[0].tags | length == 1
- - '"TagB" in role_info.iam_roles[0].tags'
- - role_info.iam_roles[0].tags.TagB == "ValueB"
-
- # XXX iam_role fails to remove inline policies before deleting the role
- - name: 'Detach inline policy a'
- iam_policy:
- state: absent
- iam_type: 'role'
- iam_name: '{{ test_role }}'
- policy_name: 'inline-policy-a'
-
- - name: 'Detach inline policy b'
- iam_policy:
- state: absent
- iam_type: 'role'
- iam_name: '{{ test_role }}'
- policy_name: 'inline-policy-b'
-
- # ===================================================================
- # Role Removal
- - name: 'Remove IAM Role (CHECK MODE)'
- iam_role:
- state: absent
- name: '{{ test_role }}'
- delete_instance_profile: yes
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - name: 'Short pause for role removal to finish'
- pause:
- seconds: 10
- when: paranoid_pauses | bool
-
- - name: 'iam_role_info after deleting role in check mode'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
-
- - name: 'Remove IAM Role'
- iam_role:
- state: absent
- name: '{{ test_role }}'
- delete_instance_profile: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - name: 'Short pause for role removal to finish'
- pause:
- seconds: 10
- when: paranoid_pauses | bool
-
- - name: 'iam_role_info after deleting role'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 0
-
- - name: 'Remove IAM Role (should be gone already)'
- iam_role:
- state: absent
- name: '{{ test_role }}'
- delete_instance_profile: yes
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - name: 'Short pause for role removal to finish'
- pause:
- seconds: 10
- when: paranoid_pauses | bool
-
- # ===================================================================
- # Boundary Policy (requires create_instance_profile: no)
- - name: 'Create minimal role with no boundary policy'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: no
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'Configure Boundary Policy (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: no
- boundary: '{{ boundary_policy }}'
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
-
- - name: 'Configure Boundary Policy'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: no
- boundary: '{{ boundary_policy }}'
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'Configure Boundary Policy (no change)'
- iam_role:
- name: '{{ test_role }}'
- create_instance_profile: no
- boundary: '{{ boundary_policy }}'
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'iam_role_info after adding boundary policy'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role/" + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - '"description" not in role_info.iam_roles[0]'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 0
- - role_info.iam_roles[0].managed_policies | length == 0
- - role_info.iam_roles[0].max_session_duration == 3600
- - role_info.iam_roles[0].path == '/'
- - role_info.iam_roles[0].permissions_boundary.permissions_boundary_arn == boundary_policy
- - role_info.iam_roles[0].permissions_boundary.permissions_boundary_type == 'Policy'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
-
- - name: 'Remove IAM Role'
- iam_role:
- state: absent
- name: '{{ test_role }}'
- delete_instance_profile: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - name: Short pause for role removal to finish
- pause:
- seconds: 10
- when: paranoid_pauses | bool
-
- # ===================================================================
- # Complex role Creation
- - name: 'Complex IAM Role (CHECK MODE)'
- iam_role:
- name: '{{ test_role }}'
- assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
- boundary: '{{ boundary_policy }}'
- create_instance_profile: no
- description: 'Ansible Test Role {{ resource_prefix }}'
- managed_policy:
- - '{{ safe_managed_policy }}'
- - '{{ custom_policy_name }}'
- max_session_duration: 43200
- path: '{{ test_path }}'
- tags:
- TagA: 'ValueA'
- check_mode: yes
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - name: Short pause for role creation to finish
- pause:
- seconds: 10
- when: standard_pauses | bool
-
- - name: 'iam_role_info after Complex Role creation in check_mode'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 0
-
- - name: 'Complex IAM Role'
- iam_role:
- name: '{{ test_role }}'
- assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
- boundary: '{{ boundary_policy }}'
- create_instance_profile: no
- description: 'Ansible Test Role {{ resource_prefix }}'
- managed_policy:
- - '{{ safe_managed_policy }}'
- - '{{ custom_policy_name }}'
- max_session_duration: 43200
- path: '{{ test_path }}'
- tags:
- TagA: 'ValueA'
- register: iam_role
- - assert:
- that:
- - iam_role is changed
- - iam_role.iam_role.role_name == test_role
- - 'iam_role.iam_role.arn.startswith("arn")'
- - 'iam_role.iam_role.arn.endswith("role" + test_path + test_role )'
- # Would be nice to test the contents...
- - '"assume_role_policy_document" in iam_role.iam_role'
- - iam_role.iam_role.attached_policies | length == 2
- - iam_role.iam_role.max_session_duration == 43200
- - iam_role.iam_role.path == test_path
- - iam_role.iam_role.role_name == test_role
- - '"create_date" in iam_role.iam_role'
- - '"role_id" in iam_role.iam_role'
- - name: Short pause for role creation to finish
- pause:
- seconds: 10
- when: standard_pauses | bool
-
- - name: 'Complex IAM role (no change)'
- iam_role:
- name: '{{ test_role }}'
- assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
- boundary: '{{ boundary_policy }}'
- create_instance_profile: no
- description: 'Ansible Test Role {{ resource_prefix }}'
- managed_policy:
- - '{{ safe_managed_policy }}'
- - '{{ custom_policy_name }}'
- max_session_duration: 43200
- path: '{{ test_path }}'
- tags:
- TagA: 'ValueA'
- register: iam_role
- - assert:
- that:
- - iam_role is not changed
- - iam_role.iam_role.role_name == test_role
-
- - name: 'iam_role_info after Role creation'
- iam_role_info:
- name: '{{ test_role }}'
- register: role_info
- - assert:
- that:
- - role_info is succeeded
- - role_info.iam_roles | length == 1
- - 'role_info.iam_roles[0].arn.startswith("arn")'
- - 'role_info.iam_roles[0].arn.endswith("role" + test_path + test_role )'
- - '"assume_role_policy_document" in role_info.iam_roles[0]'
- - '"create_date" in role_info.iam_roles[0]'
- - 'role_info.iam_roles[0].description == "Ansible Test Role {{ resource_prefix }}"'
- - role_info.iam_roles[0].inline_policies | length == 0
- - role_info.iam_roles[0].instance_profiles | length == 0
- - role_info.iam_roles[0].managed_policies | length == 2
- - safe_managed_policy in ( role_info | json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
- - custom_policy_name in ( role_info | json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten )
- - role_info.iam_roles[0].max_session_duration == 43200
- - role_info.iam_roles[0].path == test_path
- - role_info.iam_roles[0].permissions_boundary.permissions_boundary_arn == boundary_policy
- - role_info.iam_roles[0].permissions_boundary.permissions_boundary_type == 'Policy'
- - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
- - role_info.iam_roles[0].role_name == test_role
- - '"TagA" in role_info.iam_roles[0].tags'
- - role_info.iam_roles[0].tags.TagA == "ValueA"
-
- always:
- # ===================================================================
- # Cleanup
-
- # XXX iam_role fails to remove inline policies before deleting the role
- - name: 'Detach inline policy a'
- iam_policy:
- state: absent
- iam_type: 'role'
- iam_name: '{{ test_role }}'
- policy_name: 'inline-policy-a'
- ignore_errors: true
-
- - name: 'Detach inline policy b'
- iam_policy:
- state: absent
- iam_type: 'role'
- iam_name: '{{ test_role }}'
- policy_name: 'inline-policy-b'
- ignore_errors: true
-
- - name: 'Remove IAM Role'
- iam_role:
- state: absent
- name: '{{ test_role }}'
- delete_instance_profile: yes
- ignore_errors: true
-
- - name: 'Remove IAM Role (with path)'
- iam_role:
- state: absent
- name: '{{ test_role }}'
- path: '{{ test_path }}'
- delete_instance_profile: yes
- ignore_errors: true
-
- - name: 'iam_role_info after Role deletion'
- iam_role_info:
- name: '{{ test_role }}'
- ignore_errors: true
-
- - name: 'Remove test managed policy'
- iam_managed_policy:
- state: absent
- policy_name: '{{ custom_policy_name }}'
diff --git a/test/integration/targets/iam_saml_federation/aliases b/test/integration/targets/iam_saml_federation/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/iam_saml_federation/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/iam_saml_federation/files/example1.xml b/test/integration/targets/iam_saml_federation/files/example1.xml
deleted file mode 100644
index fa2130a5e3..0000000000
--- a/test/integration/targets/iam_saml_federation/files/example1.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" validUntil="2019-08-24T20:37:21Z" cacheDuration="PT1567111041S" entityID="AnsibleSAMLTest1">
- <md:IDPSSODescriptor WantAuthnRequestsSigned="false" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
- <md:KeyDescriptor use="signing">
- <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
- <ds:X509Data>
- <ds:X509Certificate>MIIDJjCCAg4CCQCiwst2XYH7fTANBgkqhkiG9w0BAQsFADBVMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMREwDwYDVQQDDAhleGFtcGxlMTAeFw0xOTA4MjIyMDM2NTFaFw0yMDA4MjEyMDM2NTFaMFUxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxETAPBgNVBAMMCGV4YW1wbGUxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArLbBVE6E28bfvB/gUGjOmY2lxxxLZ9Fls4fOH9js/MhGG+hh4diyj/Kb7Coo6HehXMp93TXkYYbiKGAoykT6ULEACZnYi1V9XdUs619ibumi9pRSFygBrbyN+n9peMJxf4jvM1QS/DTPWxdkgeMkqb2SARJChd3azCHd0cdGwcsx1pTkYp34SL0gP79m6W8N3TIxyJmqi0Kc7mntPQUCVH/wFSyg59JXo8SUQDQNap/yd9UwLzxP9MhH8G3DBatwQj3ijYOPnAeUPbsw7GYiKQBh/SIH5DGzW4TNHo0PiQJqzymNp0mI0eKjRO98vfnsXkeQwotzeKVbkmJ63h3PHQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBvm+zYchto1NESDxCVDK96QKObklWrfiAgKDLb49Loox+pyWTvs2mu5DOgDe0rrgEDfxngbbupo9eSu5w7OPVfip8W9rsB8k6ak+P4G8MltqkYv5A0aXbka1da1NenbIXAC3/YbMjLnsidDWQiKYZ0i0HxjuhguW3lvOFd3Dzp2rNDydzA6ilSmBXFrAcKm0RHAfP4NGy3ECdU6SQ5OBSUcJprKADMODIykuds1qh0Gz8a0ukKKmp2yJvz9bIuC4+TRXKKZtgDZKPcN0MgtqYZJ2rttoFqkCWrNBCZSUgJEASUJ78NSC3Wy8WQr3NjZvQ86KG2/mcVQ3Lm1ci82Uue</ds:X509Certificate>
- </ds:X509Data>
- </ds:KeyInfo>
- </md:KeyDescriptor>
- <md:KeyDescriptor use="encryption">
- <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
- <ds:X509Data>
- <ds:X509Certificate>MIIDJjCCAg4CCQCiwst2XYH7fTANBgkqhkiG9w0BAQsFADBVMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMREwDwYDVQQDDAhleGFtcGxlMTAeFw0xOTA4MjIyMDM2NTFaFw0yMDA4MjEyMDM2NTFaMFUxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxETAPBgNVBAMMCGV4YW1wbGUxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArLbBVE6E28bfvB/gUGjOmY2lxxxLZ9Fls4fOH9js/MhGG+hh4diyj/Kb7Coo6HehXMp93TXkYYbiKGAoykT6ULEACZnYi1V9XdUs619ibumi9pRSFygBrbyN+n9peMJxf4jvM1QS/DTPWxdkgeMkqb2SARJChd3azCHd0cdGwcsx1pTkYp34SL0gP79m6W8N3TIxyJmqi0Kc7mntPQUCVH/wFSyg59JXo8SUQDQNap/yd9UwLzxP9MhH8G3DBatwQj3ijYOPnAeUPbsw7GYiKQBh/SIH5DGzW4TNHo0PiQJqzymNp0mI0eKjRO98vfnsXkeQwotzeKVbkmJ63h3PHQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBvm+zYchto1NESDxCVDK96QKObklWrfiAgKDLb49Loox+pyWTvs2mu5DOgDe0rrgEDfxngbbupo9eSu5w7OPVfip8W9rsB8k6ak+P4G8MltqkYv5A0aXbka1da1NenbIXAC3/YbMjLnsidDWQiKYZ0i0HxjuhguW3lvOFd3Dzp2rNDydzA6ilSmBXFrAcKm0RHAfP4NGy3ECdU6SQ5OBSUcJprKADMODIykuds1qh0Gz8a0ukKKmp2yJvz9bIuC4+TRXKKZtgDZKPcN0MgtqYZJ2rttoFqkCWrNBCZSUgJEASUJ78NSC3Wy8WQr3NjZvQ86KG2/mcVQ3Lm1ci82Uue</ds:X509Certificate>
- </ds:X509Data>
- </ds:KeyInfo>
- </md:KeyDescriptor>
- <md:SingleLogoutService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="http://example.com/saml/logout"/>
- <md:NameIDFormat>urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified</md:NameIDFormat>
- <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="http://example.com/saml/"/>
- </md:IDPSSODescriptor>
-</md:EntityDescriptor>
diff --git a/test/integration/targets/iam_saml_federation/files/example2.xml b/test/integration/targets/iam_saml_federation/files/example2.xml
deleted file mode 100644
index 76a86c7a76..0000000000
--- a/test/integration/targets/iam_saml_federation/files/example2.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata" validUntil="2019-08-24T20:38:34Z" cacheDuration="PT1567111114S" entityID="AnsibleSAMLTest2">
- <md:IDPSSODescriptor WantAuthnRequestsSigned="false" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
- <md:KeyDescriptor use="signing">
- <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
- <ds:X509Data>
- <ds:X509Certificate>MIIDADCCAegCCQCgxBiDM2muazANBgkqhkiG9w0BAQsFADBCMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMB4XDTE5MDgyMjIwMzY1OFoXDTIwMDgyMTIwMzY1OFowQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMUJ3J1tzqoAgQwaJHx/MGl5yVTNpJLPfx8YCS0Z+RQWXIazZrssy/tpZcfgnek4+xvqrzRXR4nell31VTojIGItqR70lPhrsPES70SrN8egi+MLTZ4iddG5hjK4bn4wss88/3johi8/J85wc26/bkRz66lOvTaJ8k1pncQ3NekT9zZzWlW1LQk3uMbaPrVVocjFBEZyTsYUE9wZG+ggRBJlOMGEdhGsgPuR8Aj7OXO7X8/RolV8lB3GTzellX2GxiWnOhjnabSPBUUv5iVKcDOb2lIqxr5DScIvX1PcJSUCAGGLcd8wYK/lh3k+PFH9QNDLY6F5WHkoZq9LS46+8lkCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAWjX7E/BYAHaOKOXc5RAD9zwAaMxLMTSK5Cnq32TGIh1P4ap8jTNVaiCs9UJXHJpKwXUN+3DdVBIGMT17DzFwAeruZOzNBN0VJVl0yZ6dARgss4gpOBGvBD8blLidnVxEd5VRGldx5R5+I441ms6ASkohcHhGlF4WGbnabEZ/MtxhDIWUX2w4naOfFg6vOiPsE1C/ZXJeLDNP+dnjfueTN5DD38d+ND2mHweB7u0Qjpkd2K0TuCp0z4kXRuTgPzlfkPORNkgyU1hA3YClpT57aeUsHgO23sr/4d04jzI+hYeleGqjNM+3vDQYsOQyXx61/nANeF0Sp9ZIv3eJSTMXNw==</ds:X509Certificate>
- </ds:X509Data>
- </ds:KeyInfo>
- </md:KeyDescriptor>
- <md:KeyDescriptor use="encryption">
- <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
- <ds:X509Data>
- <ds:X509Certificate>MIIDADCCAegCCQCgxBiDM2muazANBgkqhkiG9w0BAQsFADBCMQswCQYDVQQGEwJYWDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBhbnkgTHRkMB4XDTE5MDgyMjIwMzY1OFoXDTIwMDgyMTIwMzY1OFowQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMUJ3J1tzqoAgQwaJHx/MGl5yVTNpJLPfx8YCS0Z+RQWXIazZrssy/tpZcfgnek4+xvqrzRXR4nell31VTojIGItqR70lPhrsPES70SrN8egi+MLTZ4iddG5hjK4bn4wss88/3johi8/J85wc26/bkRz66lOvTaJ8k1pncQ3NekT9zZzWlW1LQk3uMbaPrVVocjFBEZyTsYUE9wZG+ggRBJlOMGEdhGsgPuR8Aj7OXO7X8/RolV8lB3GTzellX2GxiWnOhjnabSPBUUv5iVKcDOb2lIqxr5DScIvX1PcJSUCAGGLcd8wYK/lh3k+PFH9QNDLY6F5WHkoZq9LS46+8lkCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAWjX7E/BYAHaOKOXc5RAD9zwAaMxLMTSK5Cnq32TGIh1P4ap8jTNVaiCs9UJXHJpKwXUN+3DdVBIGMT17DzFwAeruZOzNBN0VJVl0yZ6dARgss4gpOBGvBD8blLidnVxEd5VRGldx5R5+I441ms6ASkohcHhGlF4WGbnabEZ/MtxhDIWUX2w4naOfFg6vOiPsE1C/ZXJeLDNP+dnjfueTN5DD38d+ND2mHweB7u0Qjpkd2K0TuCp0z4kXRuTgPzlfkPORNkgyU1hA3YClpT57aeUsHgO23sr/4d04jzI+hYeleGqjNM+3vDQYsOQyXx61/nANeF0Sp9ZIv3eJSTMXNw==</ds:X509Certificate>
- </ds:X509Data>
- </ds:KeyInfo>
- </md:KeyDescriptor>
- <md:SingleLogoutService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="http://example.com/saml/logout"/>
- <md:NameIDFormat>urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified</md:NameIDFormat>
- <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="http://example.com/saml/"/>
- </md:IDPSSODescriptor>
-</md:EntityDescriptor>
diff --git a/test/integration/targets/iam_saml_federation/meta/main.yml b/test/integration/targets/iam_saml_federation/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/iam_saml_federation/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/iam_saml_federation/tasks/main.yml b/test/integration/targets/iam_saml_federation/tasks/main.yml
deleted file mode 100644
index 2ee7daef97..0000000000
--- a/test/integration/targets/iam_saml_federation/tasks/main.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-- module_defaults:
- group/aws:
- region: "{{ aws_region }}"
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- block:
- # ============================================================
- # TESTS
- - name: Create the identity provider
- iam_saml_federation:
- name: '{{ resource_prefix }}-saml'
- state: present
- saml_metadata_document: '{{ lookup("file", "example1.xml") }}'
- register: create_result
-
- - name: assert idp created
- assert:
- that:
- - create_result is changed
-
- - name: Test that nothing changes when we retry
- iam_saml_federation:
- name: '{{ resource_prefix }}-saml'
- state: present
- saml_metadata_document: '{{ lookup("file", "example1.xml") }}'
- register: create_result
-
- - name: assert the idp doesn't change when we retry
- assert:
- that:
- - create_result is not changed
-
- - name: Change the identity provider
- iam_saml_federation:
- name: '{{ resource_prefix }}-saml'
- state: present
- saml_metadata_document: '{{ lookup("file", "example2.xml") }}'
- register: change_result
-
- - name: assert idp changed
- assert:
- that:
- - change_result is changed
-
- - name: Test that nothing changes when we retry
- iam_saml_federation:
- name: '{{ resource_prefix }}-saml'
- state: present
- saml_metadata_document: '{{ lookup("file", "example2.xml") }}'
- register: change_result
-
- - name: assert the idp doesn't change when we retry
- assert:
- that:
- - change_result is not changed
-
- - name: Delete the identity provider
- iam_saml_federation:
- name: '{{ resource_prefix }}-saml'
- state: absent
- register: destroy_result
-
- - name: assert deleted
- assert:
- that:
- - destroy_result is changed
-
- - name: Attempt to re-delete the identity provider
- iam_saml_federation:
- name: '{{ resource_prefix }}-saml'
- state: absent
- register: destroy_result
-
- - name: assert no change when re-deleting
- assert:
- that:
- - destroy_result is not changed
-
- always:
- # ============================================================
- # CLEAN-UP
- - name: finish off by deleting the identity provider
- iam_saml_federation:
- name: '{{ resource_prefix }}-saml'
- state: absent
- register: destroy_result
diff --git a/test/integration/targets/iam_user/aliases b/test/integration/targets/iam_user/aliases
deleted file mode 100644
index c7a4b8abe0..0000000000
--- a/test/integration/targets/iam_user/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-iam_user_info
-unsupported
diff --git a/test/integration/targets/iam_user/defaults/main.yml b/test/integration/targets/iam_user/defaults/main.yml
deleted file mode 100644
index 8a69ca0931..0000000000
--- a/test/integration/targets/iam_user/defaults/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-test_group: '{{ resource_prefix }}-group'
-test_path: '/'
-test_user: '{{ test_users[0] }}'
-test_users:
- - '{{ resource_prefix }}-user-a'
- - '{{ resource_prefix }}-user-b'
diff --git a/test/integration/targets/iam_user/meta/main.yml b/test/integration/targets/iam_user/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/iam_user/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/iam_user/tasks/main.yml b/test/integration/targets/iam_user/tasks/main.yml
deleted file mode 100644
index e5b9a21e84..0000000000
--- a/test/integration/targets/iam_user/tasks/main.yml
+++ /dev/null
@@ -1,480 +0,0 @@
----
-- name: set up aws connection info
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- - name: ensure improper usage of parameters fails gracefully
- iam_user_info:
- path: '{{ test_path }}'
- group: '{{ test_group }}'
- ignore_errors: yes
- register: iam_user_info_path_group
- - assert:
- that:
- - iam_user_info_path_group is failed
- - 'iam_user_info_path_group.msg == "parameters are mutually exclusive: group|path"'
-
- - name: ensure a bogus region fails gracefully when listing users by path
- iam_user_info:
- region: 'bogus'
- path: ''
- ignore_errors: yes
- register: iam_user_info
- - assert:
- that:
- - iam_user_info is failed
- - '"user" in iam_user_info.msg'
-
- - name: ensure a bogus region fails gracefully when listing users by group
- iam_user_info:
- region: 'bogus'
- group: '{{ test_group }}'
- ignore_errors: yes
- register: iam_user_info
- - assert:
- that:
- - iam_user_info is failed
- - '"group" in iam_user_info.msg'
-
- - name: ensure a bogus region fails gracefully with the default path
- iam_user_info:
- region: 'bogus'
- ignore_errors: yes
- register: iam_user_info
- - assert:
- that:
- - iam_user_info is failed
- - '"path" in iam_user_info.msg'
-
- - name: create test user (check mode)
- iam_user:
- name: '{{ test_user }}'
- state: present
- check_mode: yes
- register: iam_user
-
- - name: assert that the user would be created
- assert:
- that:
- - iam_user is changed
-
- - name: create test user
- iam_user:
- name: '{{ test_user }}'
- state: present
- register: iam_user
-
- - name: assert that the user is created
- assert:
- that:
- - iam_user is changed
-
- - name: ensure test user exists (no change)
- iam_user:
- name: '{{ test_user }}'
- state: present
- register: iam_user
-
- - name: assert that the user wasn't changed
- assert:
- that:
- - iam_user is not changed
-
- - name: ensure the info used to validate other tests is valid
- set_fact:
- test_iam_user: '{{ iam_user.iam_user.user }}'
- - assert:
- that:
- - 'test_iam_user.arn.startswith("arn:aws:iam")'
- - 'test_iam_user.arn.endswith("user/" + test_user )'
- - test_iam_user.create_date is not none
- - test_iam_user.path == '{{ test_path }}'
- - test_iam_user.user_id is not none
- - test_iam_user.user_name == '{{ test_user }}'
-
- - name: get info on IAM user(s)
- iam_user_info:
- register: iam_user_info
- - assert:
- that:
- - iam_user_info.iam_users | length != 0
-
- - name: get info on IAM user(s) with name
- iam_user_info:
- name: '{{ test_user }}'
- register: iam_user_info
- - debug: var=iam_user_info
- - assert:
- that:
- - iam_user_info.iam_users | length == 1
- - iam_user_info.iam_users[0].arn == test_iam_user.arn
- - iam_user_info.iam_users[0].create_date == test_iam_user.create_date
- - iam_user_info.iam_users[0].path == test_iam_user.path
- - iam_user_info.iam_users[0].user_id == test_iam_user.user_id
- - iam_user_info.iam_users[0].user_name == test_iam_user.user_name
-
- - name: get info on IAM user(s) on path
- iam_user_info:
- path: '{{ test_path }}'
- name: '{{ test_user }}'
- register: iam_user_info
- - assert:
- that:
- - iam_user_info.iam_users | length == 1
- - iam_user_info.iam_users[0].arn == test_iam_user.arn
- - iam_user_info.iam_users[0].create_date == test_iam_user.create_date
- - iam_user_info.iam_users[0].path == test_iam_user.path
- - iam_user_info.iam_users[0].user_id == test_iam_user.user_id
- - iam_user_info.iam_users[0].user_name == test_iam_user.user_name
-
- # ===========================================
- # Test Managed Policy management
- #
- # Use a couple of benign policies for testing:
- # - AWSDenyAll
- # - ServiceQuotasReadOnlyAccess
- #
- - name: attach managed policy to user (check mode)
- check_mode: yes
- iam_user:
- name: '{{ test_user }}'
- state: present
- managed_policy:
- - arn:aws:iam::aws:policy/AWSDenyAll
- register: iam_user
-
- - name: assert that the user is changed
- assert:
- that:
- - iam_user is changed
-
- - name: attach managed policy to user
- iam_user:
- name: '{{ test_user }}'
- state: present
- managed_policy:
- - arn:aws:iam::aws:policy/AWSDenyAll
- register: iam_user
-
- - name: assert that the user is changed
- assert:
- that:
- - iam_user is changed
-
- - name: ensure managed policy is attached to user (no change)
- iam_user:
- name: '{{ test_user }}'
- state: present
- managed_policy:
- - arn:aws:iam::aws:policy/AWSDenyAll
- register: iam_user
-
- - name: assert that the user hasn't changed
- assert:
- that:
- - iam_user is not changed
-
- - name: attach different managed policy to user (check mode)
- check_mode: yes
- iam_user:
- name: '{{ test_user }}'
- state: present
- managed_policy:
- - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
- purge_policy: no
- register: iam_user
-
- - name: assert that the user changed
- assert:
- that:
- - iam_user is changed
-
- - name: attach different managed policy to user
- iam_user:
- name: '{{ test_user }}'
- state: present
- managed_policy:
- - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
- purge_policy: no
- register: iam_user
-
- - name: assert that the user changed
- assert:
- that:
- - iam_user is changed
-
- - name: Check first policy wasn't purged
- iam_user:
- name: '{{ test_user }}'
- state: present
- managed_policy:
- - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
- - arn:aws:iam::aws:policy/AWSDenyAll
- purge_policy: no
- register: iam_user
-
- - name: assert that the user hasn't changed
- assert:
- that:
- - iam_user is not changed
-
- - name: Check that managed policy order doesn't matter
- iam_user:
- name: '{{ test_user }}'
- state: present
- managed_policy:
- - arn:aws:iam::aws:policy/AWSDenyAll
- - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
- purge_policy: no
- register: iam_user
-
- - name: assert that the user hasn't changed
- assert:
- that:
- - iam_user is not changed
-
- - name: Check that policy doesn't require full ARN path
- iam_user:
- name: '{{ test_user }}'
- state: present
- managed_policy:
- - AWSDenyAll
- - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
- purge_policy: no
- register: iam_user
-
- - name: assert that the user hasn't changed
- assert:
- that:
- - iam_user is not changed
-
- - name: Remove one of the managed policies - with purge (check mode)
- check_mode: yes
- iam_user:
- name: '{{ test_user }}'
- state: present
- managed_policy:
- - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
- purge_policy: yes
- register: iam_user
-
- - name: assert that the user changed
- assert:
- that:
- - iam_user is changed
-
- - name: Remove one of the managed policies - with purge
- iam_user:
- name: '{{ test_user }}'
- state: present
- managed_policy:
- - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
- purge_policy: yes
- register: iam_user
-
- - name: assert that the user changed
- assert:
- that:
- - iam_user is changed
-
- - name: Check we only have the one policy attached
- iam_user:
- name: '{{ test_user }}'
- state: present
- managed_policy:
- - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
- purge_policy: yes
- register: iam_user
-
- - name: assert that the user hasn't changed
- assert:
- that:
- - iam_user is not changed
-
- - name: ensure group exists
- iam_group:
- name: '{{ test_group }}'
- users:
- - '{{ test_user }}'
- state: present
- register: iam_group
-
- - assert:
- that:
- - iam_group.changed
- - iam_group.iam_group.users
-
- - name: get info on IAM user(s) in group
- iam_user_info:
- group: '{{ test_group }}'
- name: '{{ test_user }}'
- register: iam_user_info
-
- - assert:
- that:
- - iam_user_info.iam_users | length == 1
- - iam_user_info.iam_users[0].arn == test_iam_user.arn
- - iam_user_info.iam_users[0].create_date == test_iam_user.create_date
- - iam_user_info.iam_users[0].path == test_iam_user.path
- - iam_user_info.iam_users[0].user_id == test_iam_user.user_id
- - iam_user_info.iam_users[0].user_name == test_iam_user.user_name
-
- - name: remove user from group
- iam_group:
- name: '{{ test_group }}'
- purge_users: True
- users: []
- state: present
- register: iam_group
-
- - name: get info on IAM user(s) after removing from group
- iam_user_info:
- group: '{{ test_group }}'
- name: '{{ test_user }}'
- register: iam_user_info
-
- - name: assert an empty list of users is returned for the group
- assert:
- that:
- - iam_user_info.iam_users | length == 0
-
- - name: ensure ansible users exist
- iam_user:
- name: '{{ item }}'
- state: present
- with_items: '{{ test_users }}'
-
- - name: get info on multiple IAM user(s)
- iam_user_info:
- register: iam_user_info
- - assert:
- that:
- - iam_user_info.iam_users | length != 0
-
- - name: ensure the multi-user group exists with a single user
- iam_group:
- name: '{{ test_group }}'
- users:
- - '{{ test_user }}'
- state: present
- register: iam_group
-
- - name: get info on IAM user(s) in group
- iam_user_info:
- group: '{{ test_group }}'
- register: iam_user_info
- - assert:
- that:
- - iam_user_info.iam_users | length == 1
-
- - name: add all users to group
- iam_group:
- name: '{{ test_group }}'
- users: '{{ test_users }}'
- state: present
- register: iam_group
-
- - name: get info on multiple IAM user(s) in group
- iam_user_info:
- group: '{{ test_group }}'
- register: iam_user_info
- - assert:
- that:
- - iam_user_info.iam_users | length == test_users | length
-
- - name: purge users from group
- iam_group:
- name: '{{ test_group }}'
- purge_users: True
- users: []
- state: present
- register: iam_group
-
- - name: ensure info is empty for empty group
- iam_user_info:
- group: '{{ test_group }}'
- register: iam_user_info
- - assert:
- that:
- - iam_user_info.iam_users | length == 0
-
- - name: get info on IAM user(s) after removing from group
- iam_user_info:
- group: '{{ test_group }}'
- register: iam_user_info
-
- - name: assert an empty list of users is returned for the group
- assert:
- that:
- - iam_user_info.iam_users | length == 0
-
- - name: remove group
- iam_group:
- name: '{{ test_group }}'
- state: absent
- register: iam_group
-
- - name: assert that group was removed
- assert:
- that:
- - iam_group.changed
- - iam_group
-
- - name: Test remove group again (idempotency)
- iam_group:
- name: "{{ test_group }}"
- state: absent
- register: iam_group
-
- - name: assert that group remove is not changed
- assert:
- that:
- - not iam_group.changed
-
- - name: Remove user with attached policy
- iam_user:
- name: "{{ test_user }}"
- state: absent
- register: iam_user
-
- - name: get info on IAM user(s) after deleting
- iam_user_info:
- group: '{{ test_user }}'
- ignore_errors: yes
- register: iam_user_info
-
- - name: Assert user was removed
- assert:
- that:
- - iam_user.changed
- - "'cannot be found' in iam_user_info.msg"
-
- - name: Remove user with attached policy (idempotent)
- iam_user:
- name: "{{ test_user }}"
- state: absent
- ignore_errors: yes
- register: iam_user
-
- - name: Assert user was removed
- assert:
- that:
- - not iam_user.changed
-
- always:
- - name: remove group
- iam_group:
- name: '{{ test_group }}'
- state: absent
- ignore_errors: yes
-
- - name: remove ansible users
- iam_user:
- name: '{{ item }}'
- state: absent
- with_items: '{{ test_users }}'
- ignore_errors: yes
diff --git a/test/integration/targets/lambda_policy/aliases b/test/integration/targets/lambda_policy/aliases
deleted file mode 100644
index a112c3d1bb..0000000000
--- a/test/integration/targets/lambda_policy/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group1
diff --git a/test/integration/targets/lambda_policy/defaults/main.yml b/test/integration/targets/lambda_policy/defaults/main.yml
deleted file mode 100644
index db22fd7b75..0000000000
--- a/test/integration/targets/lambda_policy/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# defaults file for aws_lambda test
-lambda_function_name: '{{resource_prefix}}-api-endpoint'
diff --git a/test/integration/targets/lambda_policy/files/mini_http_lambda.py b/test/integration/targets/lambda_policy/files/mini_http_lambda.py
deleted file mode 100644
index 5ac0bf5e8d..0000000000
--- a/test/integration/targets/lambda_policy/files/mini_http_lambda.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from __future__ import print_function
-import json
-
-
-def handler(event, context):
- """
- The handler is the function that gets called each time
- the lambda is run.
- """
- # Printed output goes to the CloudWatch log, which lets us debug the
- # lambda, provided we can find the log entry.
- print("got event:\n" + json.dumps(event))
-
- # If the name parameter isn't present this can throw an exception,
- # which results in an Amazon-chosen failure response from the lambda;
- # that is completely fine for this test.
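- # A sketched defensive alternative (not used here, so the failure path
- # below stays exercisable) would be:
- #   name = (event.get("pathParameters") or {}).get("greet_name", "stranger")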
-
- name = event["pathParameters"]["greet_name"]
-
- return {"statusCode": 200,
- "body": 'hello: "' + name + '"',
- "headers": {}}
-
-
-def main():
- """
- This main function is never called during normal lambda use.
- It exists only for testing the lambda program.
- """
- event = {"name": "james"}
- context = None
- print(handler(event, context))
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/integration/targets/lambda_policy/files/minimal_trust_policy.json b/test/integration/targets/lambda_policy/files/minimal_trust_policy.json
deleted file mode 100644
index fb84ae9de1..0000000000
--- a/test/integration/targets/lambda_policy/files/minimal_trust_policy.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "Service": "lambda.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/lambda_policy/meta/main.yml b/test/integration/targets/lambda_policy/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/lambda_policy/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/lambda_policy/tasks/main.yml b/test/integration/targets/lambda_policy/tasks/main.yml
deleted file mode 100644
index 5ea0f2012f..0000000000
--- a/test/integration/targets/lambda_policy/tasks/main.yml
+++ /dev/null
@@ -1,206 +0,0 @@
----
-#
-# Author: Michael De La Rue
-# based on ec2_key.yml + lambda.py
-
-- block:
-
- - name: set up AWS credentials
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_region: '{{ aws_region }}'
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- no_log: yes
-
- # ============================================================
- - name: test with no parameters
- lambda_policy:
- register: result
- ignore_errors: true
-
- - name: assert failure when called with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("missing required arguments: ")'
-
- # ============================================================
- - name: test with all required dummy parameters but no region
- lambda_policy:
- statement_id: dummy
- principal: api_fakeway
- action: fake:do_something_fake
- function_name: dummy_fake_function
- ignore_errors: true
- register: result
-
- - name: assert failure and appropriate message when called without region
- assert:
- that:
- - 'result.failed'
- - '"requires a region and none was found" in result.msg'
-
- # ============================================================
- - name: test exceptions generated by forcing bad ec2 url
- lambda_policy:
- function_name: "{{ lambda_function_name }}"
- state: present
- statement_id: api-gateway-invoke-lambdas
- action: lambda:InvokeFunction
- principal: apigateway.amazonaws.com
- source_arn: "arn:aws:execute-api:no-north-0:1234567:*/*"
- ec2_url: https://noexist.example.com
- ec2_region: 'no-north-0'
- ec2_access_key: 'iamnotreallyanaccesskey'
- ec2_secret_key: 'thisisabadsecretkey'
- security_token: 'andthisisabadsecuritytoken'
- register: result
- ignore_errors: true
-
- - name: assert the module fails cleanly rather than crashing
- assert:
- that:
- - 'result is failed'
- - 'result.msg != "MODULE FAILURE"'
- - 'result.changed == False'
-
- # ============================================================
- # direct zip file upload
- - name: move lambda into place for archive module
- copy:
- src: "mini_http_lambda.py"
- dest: "{{output_dir}}/mini_http_lambda.py"
-
- - name: bundle lambda into a zip
- archive:
- format: zip
- path: "{{output_dir}}/mini_http_lambda.py"
- dest: "{{output_dir}}/mini_http_lambda.zip"
- register: zip_res
-
- # This role should already exist; the test user isn't expected to be able to
- # create/update it, merely to validate that it's there.
- # Use ansible -m iam_role -a 'name=ansible_lambda_role
- # assume_role_policy_document={{ lookup("file", "test/integration/targets/lambda_policy/files/minimal_trust_policy.json", convert_data=False) }}
- # ' -vvv localhost
- # to create this through more privileged credentials before running this test suite.
- - name: create minimal lambda role
- iam_role:
- name: ansible_lambda_role
- assume_role_policy_document: "{{ lookup('file', 'minimal_trust_policy.json', convert_data=False) }}"
- create_instance_profile: no
- <<: *aws_connection_info
- register: iam_role
-
- - name: wait 10 seconds for role to become available
- pause:
- seconds: 10
- when: iam_role.changed
-
- - name: test state=present - upload the lambda
- lambda:
- name: "{{lambda_function_name}}"
- runtime: "python2.7"
- handler: "mini_http_lambda.handler"
- role: "ansible_lambda_role"
- zip_file: "{{zip_res.dest}}"
- <<: *aws_connection_info
- register: lambda_result
-
- - name: get the aws account ID for use in future commands
- aws_caller_info:
- <<: *aws_connection_info
- register: aws_caller_info
-
- - name: register lambda uri for use in template
- set_fact:
- mini_lambda_uri: "arn:aws:apigateway:{{ aws_region }}:lambda:path/2015-03-31/functions/arn:aws:lambda:{{ aws_region }}:{{ aws_caller_info.account }}:function:{{ lambda_result.configuration.function_name }}/invocations"
-
- - name: build API file
- template:
- src: endpoint-test-swagger-api.yml.j2
- dest: "{{output_dir}}/endpoint-test-swagger-api.yml.j2"
-
- - name: deploy new API
- aws_api_gateway:
- api_file: "{{output_dir}}/endpoint-test-swagger-api.yml.j2"
- stage: "lambdabased"
- <<: *aws_connection_info
- register: create_result
-
- - name: register api id for later
- set_fact:
- api_id: "{{ create_result.api_id }}"
-
- - name: check API fails with permissions failure
- uri:
- url: "https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester"
- register: unauth_uri_result
- ignore_errors: true
-
- - name: assert internal server error due to permissions
- assert:
- that:
- - unauth_uri_result is failed
- - 'unauth_uri_result.status == 500'
-
- - name: give api gateway execute permissions on lambda
- lambda_policy:
- function_name: "{{ lambda_function_name }}"
- state: present
- statement_id: api-gateway-invoke-lambdas
- action: lambda:InvokeFunction
- principal: apigateway.amazonaws.com
- source_arn: "arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/*"
- <<: *aws_connection_info
-
- - name: try again but with ARN
- lambda_policy:
- function_name: "{{ lambda_result.configuration.function_arn }}"
- state: present
- statement_id: api-gateway-invoke-lambdas
- action: lambda:InvokeFunction
- principal: apigateway.amazonaws.com
- source_arn: "arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/*"
- <<: *aws_connection_info
-
- - name: check API works with execute permissions
- uri:
- url: "https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester"
- register: uri_result
-
- - name: assert the API call succeeds
- assert:
- that:
- - 'uri_result'
-
-
- - name: deploy new API
- aws_api_gateway:
- api_file: "{{output_dir}}/endpoint-test-swagger-api.yml.j2"
- stage: "lambdabased"
- <<: *aws_connection_info
- register: create_result
- ignore_errors: true
-
-
- always:
-
- # ============================================================
- - name: destroy lambda for test cleanup if created
- lambda:
- name: "{{lambda_function_name}}"
- <<: *aws_connection_info
- state: absent
- register: result
- ignore_errors: yes
-
- - name: destroy API for test cleanup if created
- aws_api_gateway:
- state: absent
- api_id: '{{api_id}}'
- <<: *aws_connection_info
- register: destroy_result
- ignore_errors: yes
diff --git a/test/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2 b/test/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2
deleted file mode 100644
index d621884773..0000000000
--- a/test/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2
+++ /dev/null
@@ -1,39 +0,0 @@
----
-swagger: "2.0"
-info:
- version: "2017-05-11T12:14:59Z"
- title: "{{resource_prefix}}LambdaBased_API"
-host: "fakeexample.execute-api.us-east-1.amazonaws.com"
-basePath: "/lambdabased"
-schemes:
-- "https"
-paths:
- /mini/{greet_name}:
- get:
- produces:
- - "application/json"
- parameters:
- - name: "greet_name"
- in: "path"
- required: true
- type: "string"
- responses:
- 200:
- description: "200 response"
- schema:
- $ref: "#/definitions/Empty"
- x-amazon-apigateway-integration:
- responses:
- default:
- statusCode: "200"
- uri: "{{mini_lambda_uri}}"
- requestTemplates:
- application/json: "{\"statusCode\": 200}"
- passthroughBehavior: "when_no_match"
- httpMethod: "POST"
- contentHandling: "CONVERT_TO_TEXT"
- type: "aws_proxy"
-definitions:
- Empty:
- type: "object"
- title: "Empty Schema"
diff --git a/test/integration/targets/lightsail/aliases b/test/integration/targets/lightsail/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/lightsail/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/lightsail/defaults/main.yml b/test/integration/targets/lightsail/defaults/main.yml
deleted file mode 100644
index 46f5b34e01..0000000000
--- a/test/integration/targets/lightsail/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-instance_name: "{{ resource_prefix }}_instance"
-zone: "{{ aws_region }}a"
diff --git a/test/integration/targets/lightsail/tasks/main.yml b/test/integration/targets/lightsail/tasks/main.yml
deleted file mode 100644
index 91f13a8bab..0000000000
--- a/test/integration/targets/lightsail/tasks/main.yml
+++ /dev/null
@@ -1,122 +0,0 @@
----
-
-- module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key | default(omit) }}'
- aws_secret_key: '{{ aws_secret_key | default(omit) }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region | default(omit) }}'
-
- block:
-
- # ==== Tests ===================================================
-
- - name: Create a new instance
- lightsail:
- name: "{{ instance_name }}"
- zone: "{{ zone }}"
- blueprint_id: amazon_linux
- bundle_id: nano_2_0
- wait: yes
- register: result
-
- - assert:
- that:
- - result.changed == True
- - "'instance' in result and result.instance.name == instance_name"
- - "result.instance.state.name == 'running'"
-
- - name: Make sure create is idempotent
- lightsail:
- name: "{{ instance_name }}"
- zone: "{{ zone }}"
- blueprint_id: amazon_linux
- bundle_id: nano_2_0
- register: result
-
- - assert:
- that:
- - result.changed == False
-
- - name: Start the running instance
- lightsail:
- name: "{{ instance_name }}"
- state: running
- register: result
-
- - assert:
- that:
- - result.changed == False
-
- - name: Stop the instance
- lightsail:
- name: "{{ instance_name }}"
- state: stopped
- wait: yes
- register: result
-
- - assert:
- that:
- - result.changed == True
- - "result.instance.state.name == 'stopped'"
-
- - name: Stop the stopped instance
- lightsail:
- name: "{{ instance_name }}"
- state: stopped
- register: result
-
- - assert:
- that:
- - result.changed == False
-
- - name: Start the instance
- lightsail:
- name: "{{ instance_name }}"
- state: running
- register: result
-
- - assert:
- that:
- - result.changed == True
- - "result.instance.state.name == 'running'"
-
- - name: Restart the instance
- lightsail:
- name: "{{ instance_name }}"
- state: restarted
- register: result
-
- - assert:
- that:
- - result.changed == True
-
- - name: Delete the instance
- lightsail:
- name: "{{ instance_name }}"
- state: absent
- register: result
-
- - assert:
- that:
- - result.changed == True
-
- - name: Make sure instance deletion is idempotent
- lightsail:
- name: "{{ instance_name }}"
- state: absent
- register: result
-
- - assert:
- that:
- - result.changed == False
-
- # ==== Cleanup ====================================================
-
- always:
-
- - name: Cleanup - delete instance
- lightsail:
- name: "{{ instance_name }}"
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/rds_instance/aliases b/test/integration/targets/rds_instance/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/rds_instance/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/rds_instance/defaults/main.yml b/test/integration/targets/rds_instance/defaults/main.yml
deleted file mode 100644
index a2d215ba8a..0000000000
--- a/test/integration/targets/rds_instance/defaults/main.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-instance_id: "{{ resource_prefix }}"
-modified_instance_id: "{{ resource_prefix }}-updated"
-username: test
-password: test12345678
-db_instance_class: db.t2.micro
-storage_encrypted_db_instance_class: db.t2.small
-modified_db_instance_class: db.t2.medium
-allocated_storage: 20
-modified_allocated_storage: 30
-
-# For aurora tests
-cluster_id: "{{ resource_prefix }}-cluster"
-aurora_db_instance_class: db.t2.medium
-
-# For oracle tests
-oracle_ee_db_instance_class: db.r3.xlarge
-processor_features:
- coreCount: 1
- threadsPerCore: 1
-modified_processor_features:
- coreCount: 2
- threadsPerCore: 2
diff --git a/test/integration/targets/rds_instance/tasks/credential_tests.yml b/test/integration/targets/rds_instance/tasks/credential_tests.yml
deleted file mode 100644
index 1aa1c3a23d..0000000000
--- a/test/integration/targets/rds_instance/tasks/credential_tests.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: test without credentials
- rds_instance:
- db_instance_identifier: test-rds-instance
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - result.failed
- - 'result.msg == "The rds_instance module requires a region and none was found in configuration, environment variables or module parameters"'
-
-- name: test with a region but without credentials
- rds_instance:
- db_instance_identifier: test-rds-instance
- region: us-east-1
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - result.failed
- - '"Unable to locate credentials" in result.msg'
-
-- name: test with invalid credentials
- rds_instance:
- db_instance_identifier: test-rds-instance
- region: us-east-1
- profile: doesnotexist
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - result.failed
- - 'result.msg == "The config profile (doesnotexist) could not be found"'
diff --git a/test/integration/targets/rds_instance/tasks/main.yml b/test/integration/targets/rds_instance/tasks/main.yml
deleted file mode 100644
index bb368c47d0..0000000000
--- a/test/integration/targets/rds_instance/tasks/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- block:
-
- - include: ./credential_tests.yml
- tags: credentials
- - include: ./test_states.yml
- tags: states
- - include: ./test_tags.yml
- tags: tags
- - include: ./test_modification.yml # TODO: test availability_zone and multi_az
- tags: modification
- - include: ./test_bad_options.yml
- tags: bad_options
- - include: ./test_processor_features.yml
- tags: processor_features
- - include: ./test_encryption.yml
- tags: encryption
- - include: ./test_final_snapshot.yml
- tags: final_snapshot
- - include: ./test_read_replica.yml
- tags: read_replica
- - include: ./test_vpc_security_groups.yml
- tags: vpc_security_groups
- - include: ./test_restore_instance.yml # TODO: snapshot, s3
- tags: restore
- - include: ./test_snapshot.yml
- tags: snapshot
- # TODO: uncomment after adding rds_cluster module
- #- include: ./test_aurora.yml
diff --git a/test/integration/targets/rds_instance/tasks/test_aurora.yml b/test/integration/targets/rds_instance/tasks/test_aurora.yml
deleted file mode 100644
index 14d28b248d..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_aurora.yml
+++ /dev/null
@@ -1,144 +0,0 @@
----
- - block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Create minimal aurora cluster in default VPC and default subnet group
- rds_cluster:
- state: present
- engine: aurora
- cluster_id: "{{ cluster_id }}"
- username: "{{ username }}"
- password: "{{ password }}"
- <<: *aws_connection_info
-
- - name: Create an Aurora instance
- rds_instance:
- id: "{{ instance_id }}"
- cluster_id: "{{ cluster_id }}"
- engine: aurora
- state: present
- db_instance_class: "{{ aurora_db_instance_class }}"
- tags:
- CreatedBy: rds_instance integration tests
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - "result.db_instance_identifier == '{{ instance_id }}'"
- - "result.tags | length == 1"
-
- - name: Modify tags
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- tags:
- Test: rds_instance
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - result.tags | length == 1
- - "result.tags.Test == 'rds_instance'"
-
- - name: Test idempotence
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
-
- - name: Attempt to modify password (a cluster-managed attribute)
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- password: "{{ password }}"
- force_update_password: True
- apply_immediately: True
- <<: *aws_connection_info
- register: result
- ignore_errors: yes
-
- - assert:
- that:
- - result.failed
- - "'Modify master user password for the DB Cluster using the ModifyDbCluster API' in result.msg"
- - "'Please see rds_cluster' in result.msg"
-
- - name: Modify aurora instance port (a cluster-managed attribute)
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- port: 1150
- <<: *aws_connection_info
- register: result
- ignore_errors: yes
-
- - assert:
- that:
- - not result.changed
- - "'Modify database endpoint port number for the DB Cluster using the ModifyDbCluster API' in result.msg"
- - "'Please see rds_cluster' in result.msg"
-
- - name: Modify Aurora instance identifier
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- purge_tags: False
- new_id: "{{ modified_instance_id }}"
- apply_immediately: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - "result.db_instance_identifier == '{{ modified_instance_id }}'"
-
- always:
-
- - name: Delete the instance
- rds_instance:
- id: "{{ item }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- loop:
- - "{{ instance_id }}"
- - "{{ modified_instance_id }}"
- ignore_errors: yes
-
- - name: Delete the cluster
- rds_cluster:
- cluster_id: "{{ cluster_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/rds_instance/tasks/test_bad_options.yml b/test/integration/targets/rds_instance/tasks/test_bad_options.yml
deleted file mode 100644
index 21de862d22..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_bad_options.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
- - block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Create a DB instance with an invalid engine
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: thisisnotavalidengine
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- <<: *aws_connection_info
- register: result
- ignore_errors: True
-
- - assert:
- that:
- - result.failed
- - '"DB engine thisisnotavalidengine should be one of" in result.msg'
diff --git a/test/integration/targets/rds_instance/tasks/test_encryption.yml b/test/integration/targets/rds_instance/tasks/test_encryption.yml
deleted file mode 100644
index dc9a8d9646..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_encryption.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
- - block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Create a mariadb instance
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: mariadb
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ storage_encrypted_db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- storage_encrypted: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - "result.db_instance_identifier == '{{ instance_id }}'"
- - result.kms_key_id
- - result.storage_encrypted == true
-
- always:
-
- - name: Delete DB instance
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
diff --git a/test/integration/targets/rds_instance/tasks/test_final_snapshot.yml b/test/integration/targets/rds_instance/tasks/test_final_snapshot.yml
deleted file mode 100644
index bbada4207c..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_final_snapshot.yml
+++ /dev/null
@@ -1,75 +0,0 @@
----
- - block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Create a mariadb instance
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: mariadb
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- <<: *aws_connection_info
- register: result
-
- - name: Delete the DB instance
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- final_snapshot_identifier: "{{ instance_id }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - "result.final_snapshot.db_instance_identifier == '{{ instance_id }}'"
-
- - name: Check that snapshot exists
- rds_snapshot_info:
- db_snapshot_identifier: "{{ instance_id }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - "result.snapshots | length == 1"
- - "result.snapshots.0.engine == 'mariadb'"
-
- always:
- - name: Remove the snapshot
- rds_snapshot:
- db_snapshot_identifier: "{{ instance_id }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: Remove the DB instance
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/rds_instance/tasks/test_modification.yml b/test/integration/targets/rds_instance/tasks/test_modification.yml
deleted file mode 100644
index ff874447de..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_modification.yml
+++ /dev/null
@@ -1,195 +0,0 @@
----
- - block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Create a mariadb instance
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: mariadb
- engine_version: "10.1.26"
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - "result.db_instance_identifier == '{{ instance_id }}'"
-
- - name: Modify the instance name without immediate application
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- new_id: "{{ modified_instance_id }}"
- apply_immediately: False
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - 'result.db_instance_identifier == "{{ instance_id }}"'
-
- - name: Immediately apply the pending update
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- new_id: "{{ modified_instance_id }}"
- apply_immediately: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - 'result.db_instance_identifier == "{{ modified_instance_id }}"'
-
- - name: Modify the instance immediately
- rds_instance:
- id: '{{ modified_instance_id }}'
- state: present
- new_id: '{{ instance_id }}'
- apply_immediately: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - 'result.db_instance_identifier == "{{ instance_id }}"'
-
- - name: Check mode - modify the password
- rds_instance:
- id: '{{ instance_id }}'
- state: present
- password: '{{ password }}'
- force_update_password: True
- apply_immediately: True
- <<: *aws_connection_info
- register: result
- check_mode: True
-
- - assert:
- that:
- - result.changed
-
- - name: Modify the password
- rds_instance:
- id: '{{ instance_id }}'
- state: present
- password: '{{ password }}'
- force_update_password: True
- apply_immediately: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- # TODO: test modifying db_subnet_group_name, db_security_groups, db_parameter_group_name, option_group_name,
- # monitoring_role_arn, monitoring_interval, domain, domain_iam_role_name, cloudwatch_logs_export_configuration
-
- - name: Modify several attributes
- rds_instance:
- id: '{{ instance_id }}'
- state: present
- allocated_storage: 30
- db_instance_class: "{{ modified_db_instance_class }}"
- backup_retention_period: 2
- preferred_backup_window: "05:00-06:00"
- preferred_maintenance_window: "mon:06:20-mon:06:50"
- engine_version: "10.2.21"
- allow_major_version_upgrade: true
- auto_minor_version_upgrade: false
- port: 1150
- max_allocated_storage: 100
- apply_immediately: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - '"allocated_storage" in result.pending_modified_values or result.allocated_storage == 30'
- - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage == 100'
- - '"port" in result.pending_modified_values or result.endpoint.port == 1150'
- - '"db_instance_class" in result.pending_modified_values or result.db_instance_class == "db.t2.medium"'
- - '"engine_version" in result.pending_modified_values or result.engine_version == "10.2.21"'
-
- - name: Idempotence modifying several pending attributes
- rds_instance:
- id: '{{ instance_id }}'
- state: present
- allocated_storage: 30
- db_instance_class: "{{ modified_db_instance_class }}"
- backup_retention_period: 2
- preferred_backup_window: "05:00-06:00"
- preferred_maintenance_window: "mon:06:20-mon:06:50"
- engine_version: "10.2.21"
- allow_major_version_upgrade: true
- auto_minor_version_upgrade: false
- port: 1150
- max_allocated_storage: 100
- <<: *aws_connection_info
- register: result
- retries: 30
- delay: 10
- until: result is not failed
-
- - assert:
- that:
- - not result.changed
- - '"allocated_storage" in result.pending_modified_values or result.allocated_storage == 30'
- - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage == 100'
- - '"port" in result.pending_modified_values or result.endpoint.port == 1150'
- - '"db_instance_class" in result.pending_modified_values or result.db_instance_class == "db.t2.medium"'
- - '"engine_version" in result.pending_modified_values or result.engine_version == "10.2.21"'
-
- - name: Delete the instance
- rds_instance:
- id: '{{ instance_id }}'
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - '"pending_modified_values" not in result'
-
- always:
-
- - name: Delete the instance
- rds_instance:
- id: '{{ item }}'
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- loop: ['{{ instance_id }}', '{{ modified_instance_id }}']
- ignore_errors: yes
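RDS may defer a modification to the next maintenance window even when apply_immediately is set, so the assertions above accept either outcome: the attribute shows up in pending_modified_values or it is already live. The pattern reduced to one attribute (port value illustrative):

    - name: request an immediate port change
      rds_instance:
        id: example-instance
        state: present
        port: 1150
        apply_immediately: True
      register: result
      retries: 30
      delay: 10
      until: result is not failed

    - assert:
        that:
          - '"port" in result.pending_modified_values or result.endpoint.port == 1150'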
diff --git a/test/integration/targets/rds_instance/tasks/test_processor_features.yml b/test/integration/targets/rds_instance/tasks/test_processor_features.yml
deleted file mode 100644
index 2fb3d8951c..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_processor_features.yml
+++ /dev/null
@@ -1,126 +0,0 @@
----
- - block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Create an oracle-ee DB instance
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: oracle-ee
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ oracle_ee_db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- storage_encrypted: True
- processor_features: "{{ processor_features }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - 'result.processor_features.coreCount == "{{ processor_features.coreCount }}"'
- - 'result.processor_features.threadsPerCore == "{{ processor_features.threadsPerCore }}"'
-
- - name: Check mode - modify the processor features
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: oracle-ee
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ oracle_ee_db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- storage_encrypted: True
- processor_features: "{{ modified_processor_features }}"
- apply_immediately: true
- <<: *aws_connection_info
- register: result
- check_mode: True
-
- - assert:
- that:
- - result.changed
-
- - name: Modify the processor features
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: oracle-ee
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ oracle_ee_db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- storage_encrypted: True
- processor_features: "{{ modified_processor_features }}"
- apply_immediately: true
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - 'result.pending_modified_values.processor_features.coreCount == "{{ modified_processor_features.coreCount }}"'
- - 'result.pending_modified_values.processor_features.threadsPerCore == "{{ modified_processor_features.threadsPerCore }}"'
-
- - name: Check mode - use the default processor features
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- processor_features: {}
- apply_immediately: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- - name: Use the default processor features
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- processor_features: {}
- apply_immediately: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - 'result.pending_modified_values.processor_features.coreCount == "DEFAULT"'
- - 'result.pending_modified_values.processor_features.threadsPerCore == "DEFAULT"'
-
- always:
-
- - name: Delete the DB instance
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
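processor_features passes the coreCount/threadsPerCore keys through verbatim, and an empty dict reverts the instance to the engine defaults, which is what the final pair of tasks relies on. Reduced to its essentials (instance name and values illustrative):

    - name: pin CPU options on an oracle-ee instance
      rds_instance:
        id: example-instance
        state: present
        processor_features:
          coreCount: 1
          threadsPerCore: 2
        apply_immediately: True

    - name: revert to the default processor features
      rds_instance:
        id: example-instance
        state: present
        processor_features: {}
        apply_immediately: True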
diff --git a/test/integration/targets/rds_instance/tasks/test_read_replica.yml b/test/integration/targets/rds_instance/tasks/test_read_replica.yml
deleted file mode 100644
index 157fd10597..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_read_replica.yml
+++ /dev/null
@@ -1,142 +0,0 @@
----
- - block:
-
- - name: set the two regions for the source DB and the replica
- set_fact:
- region_src: "{{ aws_region }}"
- region_dest: "{{ aws_region }}"
-
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- no_log: yes
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- region: "{{ region_src }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Create a source DB instance
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: mysql
- backup_retention_period: 1
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- region: "{{ region_src }}"
- <<: *aws_connection_info
- register: source_db
-
- - assert:
- that:
- - source_db.changed
- - "source_db.db_instance_identifier == '{{ instance_id }}'"
-
- - name: Create a read replica in the destination region
- rds_instance:
- id: "{{ instance_id }}-replica"
- state: present
- source_db_instance_identifier: "{{ instance_id }}"
- engine: mysql
- username: "{{ username }}"
- password: "{{ password }}"
- read_replica: True
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- region: "{{ region_dest }}"
- <<: *aws_connection_info
- register: result
-
- - name: Test idempotence with a read replica
- rds_instance:
- id: "{{ instance_id }}-replica"
- state: present
- source_db_instance_identifier: "{{ instance_id }}"
- engine: mysql
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- region: "{{ region_dest }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
-
- - name: Test idempotence with read_replica=True
- rds_instance:
- id: "{{ instance_id }}-replica"
- state: present
- read_replica: True
- source_db_instance_identifier: "{{ instance_id }}"
- engine: mysql
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- region: "{{ region_dest }}"
- <<: *aws_connection_info
- register: result
-
- - name: Promote the read replica
- rds_instance:
- id: "{{ instance_id }}-replica"
- state: present
- read_replica: False
- region: "{{ region_dest }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- - name: Test idempotence
- rds_instance:
- id: "{{ instance_id }}-replica"
- state: present
- read_replica: False
- region: "{{ region_dest }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
-
- always:
-
- - name: Remove the DB instance
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- region: "{{ region_src }}"
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: Remove the DB replica
- rds_instance:
- id: "{{ instance_id }}-replica"
- state: absent
- skip_final_snapshot: True
- region: "{{ region_dest }}"
- <<: *aws_connection_info
- ignore_errors: yes
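Note that region_src and region_dest are both set to aws_region at the top of this file, so the replica is actually created alongside its source. A genuinely cross-region variant would need distinct regions and, per the AWS API, the source identified by its ARN; a hedged sketch (regions, account id, and names all illustrative):

    - name: create a cross-region read replica
      rds_instance:
        id: example-instance-replica
        state: present
        read_replica: True
        # cross-region replication requires the full ARN of the source instance
        source_db_instance_identifier: arn:aws:rds:us-east-1:123456789012:db:example-instance
        region: us-east-2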
diff --git a/test/integration/targets/rds_instance/tasks/test_restore_instance.yml b/test/integration/targets/rds_instance/tasks/test_restore_instance.yml
deleted file mode 100644
index b40e487dd8..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_restore_instance.yml
+++ /dev/null
@@ -1,95 +0,0 @@
----
- - block:
-
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Create a source DB instance
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: mysql
- backup_retention_period: 1
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- <<: *aws_connection_info
- register: source_db
-
- - assert:
- that:
- - source_db.changed
- - "source_db.db_instance_identifier == '{{ instance_id }}'"
-
- - name: Create a point in time DB instance
- rds_instance:
- id: "{{ instance_id }}-point-in-time"
- state: present
- source_db_instance_identifier: "{{ instance_id }}"
- creation_source: instance
- engine: mysql
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- use_latest_restorable_time: True
- <<: *aws_connection_info
- register: result
-
- - name: Test idempotence with a point in time replica
- rds_instance:
- id: "{{ instance_id }}-point-in-time"
- state: present
- source_db_instance_identifier: "{{ instance_id }}"
- creation_source: instance
- engine: mysql
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- restore_time: "{{ result.latest_restorable_time }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
-
- always:
-
- - name: Remove the DB instance
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- ignore_errors: yes
-
-
- rds_instance:
- id: "{{ instance_id }}-point-in-time"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/rds_instance/tasks/test_snapshot.yml b/test/integration/targets/rds_instance/tasks/test_snapshot.yml
deleted file mode 100644
index 7e88db4371..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_snapshot.yml
+++ /dev/null
@@ -1,85 +0,0 @@
----
- - block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Getting shared snapshots
- rds_snapshot_info:
- snapshot_type: "shared"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- - result.cluster_snapshots is defined
- - result.snapshots is defined
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- db_instance_identifier: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Create a mariadb instance
- rds_instance:
- db_instance_identifier: "{{ instance_id }}"
- state: present
- engine: mariadb
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- tags:
- Name: "{{ instance_id }}"
- Created_by: Ansible rds_instance tests
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - "result.db_instance_identifier == '{{ instance_id }}'"
- - "result.tags | length == 2"
- - "result.tags.Name == '{{ instance_id }}'"
- - "result.tags.Created_by == 'Ansible rds_instance tests'"
-
- - name: Getting public snapshots
- rds_snapshot_info:
- db_instance_identifier: "{{ instance_id }}"
- snapshot_type: "public"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- - result.cluster_snapshots is not defined
- - result.snapshots is defined
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- db_instance_identifier: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- # TODO ideally we test with an actual shared snapshot - but we'd need a second account - making tests fairly complicated?
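rds_snapshot_info takes snapshot_type values of automated, manual, shared, and public, and a db_instance_identifier narrows any of them to a single instance, as the public lookup above does. A minimal targeted query (identifier illustrative):

    - name: list manual snapshots of a single instance
      rds_snapshot_info:
        db_instance_identifier: example-instance
        snapshot_type: manual
      register: result

    - assert:
        that:
          - result.snapshots is defined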
diff --git a/test/integration/targets/rds_instance/tasks/test_states.yml b/test/integration/targets/rds_instance/tasks/test_states.yml
deleted file mode 100644
index f55ffe70ce..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_states.yml
+++ /dev/null
@@ -1,277 +0,0 @@
----
- - block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Check Mode - Create a mariadb instance
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: mariadb
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - assert:
- that:
- - result.changed
-
- - name: Create a mariadb instance
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: mariadb
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - "result.db_instance_identifier == '{{ instance_id }}'"
-
- - name: Idempotence
- rds_instance:
- id: '{{ instance_id }}'
- state: present
- engine: mariadb
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- - result.db_instance_identifier
-
- - name: Idempotence with minimal options
- rds_instance:
- id: '{{ instance_id }}'
- state: present
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- - result.db_instance_identifier
-
- - name: Check Mode - stop the instance
- rds_instance:
- id: '{{ instance_id }}'
- state: stopped
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - assert:
- that:
- - result.changed
-
- - name: Stop the instance
- rds_instance:
- id: '{{ instance_id }}'
- state: stopped
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- - name: Check Mode - idempotence
- rds_instance:
- id: '{{ instance_id }}'
- state: stopped
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - assert:
- that:
- - not result.changed
-
- - name: Idempotence
- rds_instance:
- id: '{{ instance_id }}'
- state: stopped
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
-
- - name: Check mode - reboot a stopped instance
- rds_instance:
- id: '{{ instance_id }}'
- state: rebooted
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - assert:
- that:
- - result.changed
-
- - name: Reboot a stopped instance
- rds_instance:
- id: '{{ instance_id }}'
- state: rebooted
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- - name: Check Mode - start the instance
- rds_instance:
- id: '{{ instance_id }}'
- state: started
- <<: *aws_connection_info
- register: result
- check_mode: yes
-
- - assert:
- that:
- - not result.changed
-
- - name: Stop the instance
- rds_instance:
- id: '{{ instance_id }}'
- state: stopped
- <<: *aws_connection_info
-
- - name: Start the instance
- rds_instance:
- id: '{{ instance_id }}'
- state: started
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- - name: take a snapshot
- rds_snapshot:
- db_instance_identifier: '{{ instance_id }}'
- db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
- state: present
- wait: yes
- <<: *aws_connection_info
-
- - name: take a snapshot - idempotence
- rds_snapshot:
- db_instance_identifier: '{{ instance_id }}'
- db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
- state: present
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
-
- - name: check snapshot is ok
- rds_snapshot_info:
- db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - (result.snapshots | length) == 1
-
- - name: remove a snapshot without wait
- rds_snapshot:
- db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
- state: absent
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- - name: remove a snapshot without wait - idempotence
- rds_snapshot:
- db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
- state: absent
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
-
- - name: remove a snapshot with wait - idempotence
- rds_snapshot:
- db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
- state: absent
- wait: yes
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
-
- - name: check snapshot is removed
- rds_snapshot_info:
- db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.snapshots
-
- always:
-
- - name: remove snapshot
- rds_snapshot:
- db_snapshot_identifier: '{{ resource_prefix }}-test-snapshot'
- state: absent
- wait: yes
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: Remove DB instance
- rds_instance:
- id: '{{ instance_id }}'
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- ignore_errors: yes
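Each state transition above is preceded by a check_mode dry run that must predict the same changed outcome as the real task. The pairing in isolation (instance name illustrative):

    - name: stop the instance (dry run)
      rds_instance:
        id: example-instance
        state: stopped
      register: check_result
      check_mode: yes

    - name: stop the instance
      rds_instance:
        id: example-instance
        state: stopped
      register: result

    - assert:
        that:
          - check_result.changed == result.changed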
diff --git a/test/integration/targets/rds_instance/tasks/test_tags.yml b/test/integration/targets/rds_instance/tasks/test_tags.yml
deleted file mode 100644
index f5003ad7a9..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_tags.yml
+++ /dev/null
@@ -1,265 +0,0 @@
----
- - block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- db_instance_identifier: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Create a mariadb instance
- rds_instance:
- db_instance_identifier: "{{ instance_id }}"
- state: present
- engine: mariadb
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- tags:
- Name: "{{ instance_id }}"
- Created_by: Ansible rds_instance tests
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - "result.db_instance_identifier == '{{ instance_id }}'"
- - "result.tags | length == 2"
- - "result.tags.Name == '{{ instance_id }}'"
- - "result.tags.Created_by == 'Ansible rds_instance tests'"
-
- - name: Test idempotence omitting tags
- rds_instance:
- db_instance_identifier: "{{ instance_id }}"
- state: present
- engine: mariadb
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- - "result.tags | length == 2"
-
- - name: Test tags are not purged if purge_tags is False
- rds_instance:
- db_instance_identifier: "{{ instance_id }}"
- state: present
- engine: mariadb
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- tags: {}
- purge_tags: False
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- - "result.tags | length == 2"
-
- - name: Add a tag and remove a tag
- rds_instance:
- db_instance_identifier: "{{ instance_id }}"
- state: present
- tags:
- Name: "{{ instance_id }}-new"
- Created_by: Ansible rds_instance tests
- purge_tags: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - "result.tags | length == 2"
- - "result.tags.Name == '{{ instance_id }}-new'"
-
- - name: Remove all tags
- rds_instance:
- db_instance_identifier: "{{ instance_id }}"
- state: present
- engine: mariadb
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- tags: {}
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - not result.tags
-
- - name: snapshot instance without tags
- rds_snapshot:
- db_instance_identifier: "{{ instance_id }}"
- db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
- state: present
- wait: yes
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - not result.tags
-
- - name: add tags to snapshot
- rds_snapshot:
- db_instance_identifier: "{{ instance_id }}"
- db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
- state: present
- tags:
- one: hello
- two: world
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - result.tags | length == 2
-
- - name: add tags to snapshot - idempotence
- rds_snapshot:
- db_instance_identifier: "{{ instance_id }}"
- db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
- state: present
- tags:
- one: hello
- two: world
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- - result.tags | length == 2
-
- - name: add tag to snapshot using purge_tags False
- rds_snapshot:
- db_instance_identifier: "{{ instance_id }}"
- db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
- state: present
- tags:
- one: hello
- three: another
- purge_tags: False
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - result.tags | length == 3
-
- - name: rerun tags but not setting purge_tags
- rds_snapshot:
- db_instance_identifier: "{{ instance_id }}"
- db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
- state: present
- tags:
- one: hello
- three: another
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - result.tags | length == 2
-
- - name: rerun tags but not setting purge_tags - idempotence
- rds_snapshot:
- db_instance_identifier: "{{ instance_id }}"
- db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
- state: present
- tags:
- one: hello
- three: another
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- - result.tags | length == 2
-
- - name: remove snapshot
- rds_snapshot:
- db_instance_identifier: "{{ instance_id }}"
- db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
- state: absent
- wait: yes
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- - name: create snapshot with tags
- rds_snapshot:
- db_instance_identifier: "{{ instance_id }}"
- db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
- state: present
- tags:
- one: hello
- three: another
- purge_tags: yes
- wait: yes
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - result.tags | length == 2
-
- always:
-
- - name: tidy up snapshot
- rds_snapshot:
- db_instance_identifier: "{{ instance_id }}"
- db_snapshot_identifier: "{{ resource_prefix }}-test-tags"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- db_instance_identifier: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
diff --git a/test/integration/targets/rds_instance/tasks/test_vpc_security_groups.yml b/test/integration/targets/rds_instance/tasks/test_vpc_security_groups.yml
deleted file mode 100644
index 4da38069b3..0000000000
--- a/test/integration/targets/rds_instance/tasks/test_vpc_security_groups.yml
+++ /dev/null
@@ -1,166 +0,0 @@
----
- - block:
- - name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- - name: create a VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: present
- cidr_block: "10.122.122.128/26"
- tags:
- Name: "{{ resource_prefix }}-vpc"
- Description: "created by rds_instance integration tests"
- <<: *aws_connection_info
- register: vpc_result
-
- - name: create subnets
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- az: "{{ item.zone }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: "{{ resource_prefix }}-subnet"
- Description: "created by rds_instance integration tests"
- state: present
- <<: *aws_connection_info
- register: subnets_result
- loop:
- - {"cidr": "10.122.122.128/28", "zone": "{{ aws_region }}a"}
- - {"cidr": "10.122.122.144/28", "zone": "{{ aws_region }}b"}
- - {"cidr": "10.122.122.160/28", "zone": "{{ aws_region }}c"}
- - {"cidr": "10.122.122.176/28", "zone": "{{ aws_region }}d"}
-
- - name: Create security groups
- ec2_group:
- name: "{{ item }}"
- description: "created by rds_instance integration tests"
- state: present
- <<: *aws_connection_info
- register: sgs_result
- loop:
- - "{{ resource_prefix }}-sg-1"
- - "{{ resource_prefix }}-sg-2"
- - "{{ resource_prefix }}-sg-3"
-
- - debug: var=sgs_result
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - not result.changed
- ignore_errors: yes
-
- - name: Create a DB instance in the VPC with two security groups
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- engine: mariadb
- username: "{{ username }}"
- password: "{{ password }}"
- db_instance_class: "{{ db_instance_class }}"
- allocated_storage: "{{ allocated_storage }}"
- vpc_security_group_ids:
- - "{{ sgs_result.results.0.group_id }}"
- - "{{ sgs_result.results.1.group_id }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
- - "result.db_instance_identifier == '{{ instance_id }}'"
-
- - name: Add a new security group
- rds_instance:
- id: "{{ instance_id }}"
- state: present
- vpc_security_group_ids:
- - "{{ sgs_result.results.2.group_id }}"
- <<: *aws_connection_info
- register: result
-
- - assert:
- that:
- - result.changed
-
- always:
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
- ignore_errors: yes
-
- - name: Remove security groups
- ec2_group:
- name: "{{ item }}"
- description: "created by rds_instance integration tests"
- state: absent
- <<: *aws_connection_info
- register: sgs_result
- loop:
- - "{{ resource_prefix }}-sg-1"
- - "{{ resource_prefix }}-sg-2"
- - "{{ resource_prefix }}-sg-3"
-
- - name: remove subnets
- ec2_vpc_subnet:
- cidr: "{{ item.cidr }}"
- az: "{{ item.zone }}"
- vpc_id: "{{ vpc_result.vpc.id }}"
- tags:
- Name: "{{ resource_prefix }}-subnet"
- Description: "created by rds_instance integration tests"
- state: absent
- <<: *aws_connection_info
- register: subnets
- ignore_errors: yes
- retries: 30
- until: subnets is not failed
- delay: 10
- loop:
- - {"cidr": "10.122.122.128/28", "zone": "{{ aws_region }}a"}
- - {"cidr": "10.122.122.144/28", "zone": "{{ aws_region }}b"}
- - {"cidr": "10.122.122.160/28", "zone": "{{ aws_region }}c"}
- - {"cidr": "10.122.122.176/28", "zone": "{{ aws_region }}d"}
-
- - name: remove the VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- state: absent
- cidr_block: "10.122.122.128/26"
- tags:
- Name: "{{ resource_prefix }}-vpc"
- Description: "created by rds_instance integration tests"
- <<: *aws_connection_info
- register: vpc_result
- ignore_errors: yes
- retries: 30
- until: vpc_result is not failed
- delay: 10
-
- - name: Ensure the resource doesn't exist
- rds_instance:
- id: "{{ instance_id }}"
- state: absent
- skip_final_snapshot: True
- <<: *aws_connection_info
- register: result
- ignore_errors: yes
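Deleting the subnets and VPC can race against RDS releasing its network interfaces, hence the retries/until loops in the always block above. The retry idiom by itself (names and CIDR illustrative):

    - name: remove the VPC, retrying while dependencies drain
      ec2_vpc_net:
        name: example-vpc
        state: absent
        cidr_block: 10.122.122.128/26
      register: vpc_result
      retries: 30
      delay: 10
      until: vpc_result is not failed
      ignore_errors: yes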
diff --git a/test/integration/targets/rds_param_group/aliases b/test/integration/targets/rds_param_group/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/rds_param_group/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/rds_param_group/defaults/main.yml b/test/integration/targets/rds_param_group/defaults/main.yml
deleted file mode 100644
index 8f9de71fbb..0000000000
--- a/test/integration/targets/rds_param_group/defaults/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-rds_param_group:
- name: "{{ resource_prefix}}rds-param-group"
- description: "Test group for rds_param_group Ansible module"
- engine: postgres9.6
-
-rds_long_param_list:
- application_name: Test
- logging_collector: on
- log_directory: /var/log/postgresql
- log_filename: postgresql.log.%Y-%m-%d-%H
- log_file_mode: 0600
- event_source: RDS
- log_min_messages: INFO
- log_min_duration_statement: 500
- log_rotation_age: 60
- debug_print_parse: on
- debug_print_rewritten: on
- debug_print_plan: on
- debug_pretty_print: on
- log_checkpoints: on
- log_connections: on
- log_disconnections: on
- log_duration: on
- log_error_verbosity: VERBOSE
- log_lock_waits: on
- log_temp_files: 10K
- log_timezone: UTC
- log_statement: 'all'
- log_replication_commands: on
diff --git a/test/integration/targets/rds_param_group/meta/main.yml b/test/integration/targets/rds_param_group/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/rds_param_group/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/rds_param_group/tasks/main.yml b/test/integration/targets/rds_param_group/tasks/main.yml
deleted file mode 100644
index 9af2776b3e..0000000000
--- a/test/integration/targets/rds_param_group/tasks/main.yml
+++ /dev/null
@@ -1,321 +0,0 @@
----
-# A Note about ec2 environment variable name preference:
-# - EC2_URL -> AWS_URL
-# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
-# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
-# - EC2_REGION -> AWS_REGION
-#
-# TODO - name: test 'region' parameter
-# TODO - name: test 'state=absent' parameter for existing key
-# TODO - name: test 'state=absent' parameter for missing key
-# TODO - name: test 'validate_certs' parameter
-
-# ============================================================
-# - include: ../../setup_ec2/tasks/common.yml module_name=rds_param_group
-
-- block:
-
- # ============================================================
- - name: test empty parameter group
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- description: "{{ rds_param_group.description }}"
- state: present
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert rds parameter group changed
- assert:
- that:
- - 'result.changed'
- - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name | lower }}'"
- - 'result.tags == {}'
-
- # ============================================================
- - name: test empty parameter group with no arguments changes nothing
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- description: "{{ rds_param_group.description }}"
- state: present
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert no change when running empty parameter group a second time
- assert:
- that:
- - 'not result.changed'
-
- # ============================================================
- - name: test adding numeric tag
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- description: "{{ rds_param_group.description }}"
- state: present
- tags:
- Environment: test
- Test: 123
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert adding a numeric tag silently converts it to a string
- assert:
- that:
- - 'result.changed'
- - 'result.tags.Test == "123"'
-
- # ============================================================
- - name: test tagging existing group
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- description: "{{ rds_param_group.description }}"
- state: present
- tags:
- Environment: test
- Test: "123"
- NewTag: "hello"
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert tagging existing group changes it and adds tags
- assert:
- that:
- - 'result.changed'
- - 'result.tags.NewTag == "hello"'
-
- # ============================================================
- - name: test repeating tagging existing group
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- description: "{{ rds_param_group.description }}"
- state: present
- tags:
- Environment: test
- Test: "123"
- NewTag: "hello"
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert repeating tagging existing group does not change it
- assert:
- that:
- - 'not result.changed'
- - 'result.tags.Test == "123"'
-
- # ============================================================
- - name: test deleting tags from existing group
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- description: "{{ rds_param_group.description }}"
- state: present
- tags:
- Environment: test
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- purge_tags: yes
- register: result
- ignore_errors: true
-
- - name: assert removing tags from existing group changes it
- assert:
- that:
- - 'result.changed'
- - 'result.tags.Environment == "test"'
- - '"NewTag" not in result.tags'
-
- # ============================================================
- - name: test state=absent with engine defined (expect changed=true)
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- state: absent
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert state=absent with engine defined (expect changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test creating group with parameters
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- description: "{{ rds_param_group.description }}"
- state: present
- params:
- log_directory: /var/log/postgresql
- log_statement: 'all'
- log_duration: on
- this_param_does_not_exist: oh_no
- tags:
- Environment: test
- Test: "123"
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert creating a new group with parameters changes it
- assert:
- that:
- - 'result.changed'
- - 'result.tags.Test == "123"'
- - 'result.errors|length == 2'
-
- # ============================================================
- - name: test repeating group with parameters
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- description: "{{ rds_param_group.description }}"
- state: present
- params:
- log_directory: /var/log/postgresql
- log_statement: 'all'
- log_duration: on
- this_param_does_not_exist: oh_no
- tags:
- Environment: test
- Test: "123"
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert repeating group with parameters does not change it
- assert:
- that:
- - 'not result.changed'
- - 'result.tags.Test == "123"'
- - 'result.errors|length == 2'
-
- # ============================================================
- - name: test state=absent with engine defined (expect changed=true)
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- state: absent
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert state=absent with engine defined (expect changed=true)
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test repeating state=absent (expect changed=false)
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- state: absent
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert repeating state=absent (expect changed=false)
- assert:
- that:
- - 'not result.changed'
-
- # ============================================================
- - name: test creating group with more than 20 parameters
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- description: "{{ rds_param_group.description }}"
- params: "{{ rds_long_param_list }}"
- state: present
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert creating a new group with lots of parameters changes it
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
- - name: test repeating group with more than 20 parameters
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- engine: "{{ rds_param_group.engine }}"
- description: "{{ rds_param_group.description }}"
- params: "{{ rds_long_param_list }}"
- region: "{{ ec2_region }}"
- state: present
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert repeating a group with lots of parameters does not change it
- assert:
- that:
- - 'not result.changed'
-
- always:
- # ============================================================
- - name: test state=absent (expect changed=true)
- rds_param_group:
- name: "{{ rds_param_group.name }}"
- state: absent
- region: "{{ ec2_region }}"
- ec2_access_key: '{{ aws_access_key }}'
- ec2_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token }}'
- register: result
- ignore_errors: true
-
- - name: assert state=absent (expect changed=true)
- assert:
- that:
- - 'result.changed'
diff --git a/test/integration/targets/rds_subnet_group/aliases b/test/integration/targets/rds_subnet_group/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/rds_subnet_group/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/rds_subnet_group/defaults/main.yml b/test/integration/targets/rds_subnet_group/defaults/main.yml
deleted file mode 100644
index 07e0fe93f8..0000000000
--- a/test/integration/targets/rds_subnet_group/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16'
-subnet_a: '10.{{ 256 | random(seed=resource_prefix) }}.10.0/24'
-subnet_b: '10.{{ 256 | random(seed=resource_prefix) }}.11.0/24'
-subnet_c: '10.{{ 256 | random(seed=resource_prefix) }}.12.0/24'
-subnet_d: '10.{{ 256 | random(seed=resource_prefix) }}.13.0/24'
-
-group_description: 'Created by integration test : {{ resource_prefix }}'
-group_description_changed: 'Created by integration test : {{ resource_prefix }} - changed'
diff --git a/test/integration/targets/rds_subnet_group/meta/main.yml b/test/integration/targets/rds_subnet_group/meta/main.yml
deleted file mode 100644
index 9d91be1705..0000000000
--- a/test/integration/targets/rds_subnet_group/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
-- prepare_tests
-- setup_ec2
diff --git a/test/integration/targets/rds_subnet_group/tasks/main.yml b/test/integration/targets/rds_subnet_group/tasks/main.yml
deleted file mode 100644
index 44184e302d..0000000000
--- a/test/integration/targets/rds_subnet_group/tasks/main.yml
+++ /dev/null
@@ -1,113 +0,0 @@
----
-# Tests for rds_subnet_group
-#
-# Note: (From Amazon's documentation)
-# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.modify_db_subnet_group
-# DB subnet groups must contain at least one subnet in at least two AZs in the
-# AWS Region.
-
-- module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key }}'
- aws_secret_key: '{{ aws_secret_key }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region }}'
- block:
-
- # ============================================================
-
- - name: 'Fetch AZ availability'
- aws_az_info:
- register: az_info
-
- - name: 'Assert that we have multiple AZs available to us'
- assert:
- that: az_info.availability_zones | length >= 2
-
- - name: 'Pick AZs'
- set_fact:
- az_one: '{{ az_info.availability_zones[0].zone_name }}'
- az_two: '{{ az_info.availability_zones[1].zone_name }}'
-
- # ============================================================
-
- - name: 'Create a VPC'
- ec2_vpc_net:
- state: present
- cidr_block: '{{ vpc_cidr }}'
- name: '{{ resource_prefix }}'
- register: vpc
-
- - name: 'Create subnets'
- ec2_vpc_subnet:
- state: present
- cidr: '{{ item.cidr }}'
- az: '{{ item.az }}'
- vpc_id: '{{ vpc.vpc.id }}'
- tags:
- Name: '{{ item.name }}'
- with_items:
- - cidr: '{{ subnet_a }}'
- az: '{{ az_one }}'
- name: '{{ resource_prefix }}-subnet-a'
- - cidr: '{{ subnet_b }}'
- az: '{{ az_two }}'
- name: '{{ resource_prefix }}-subnet-b'
- - cidr: '{{ subnet_c }}'
- az: '{{ az_one }}'
- name: '{{ resource_prefix }}-subnet-c'
- - cidr: '{{ subnet_d }}'
- az: '{{ az_two }}'
- name: '{{ resource_prefix }}-subnet-d'
- register: subnets
-
- - set_fact:
- subnet_ids: '{{ subnets | json_query("results[].subnet.id") | list }}'
-
- # ============================================================
-
- - include_tasks: 'params.yml'
-
- - include_tasks: 'tests.yml'
-
- # ============================================================
-
- always:
- - name: 'Remove subnet group'
- rds_subnet_group:
- state: absent
- name: '{{ resource_prefix }}'
- ignore_errors: yes
-
- - name: 'Remove subnets'
- ec2_vpc_subnet:
- state: absent
- cidr: '{{ item.cidr }}'
- vpc_id: '{{ vpc.vpc.id }}'
- with_items:
- - cidr: '{{ subnet_a }}'
- name: '{{ resource_prefix }}-subnet-a'
- - cidr: '{{ subnet_b }}'
- name: '{{ resource_prefix }}-subnet-b'
- - cidr: '{{ subnet_c }}'
- name: '{{ resource_prefix }}-subnet-c'
- - cidr: '{{ subnet_d }}'
- name: '{{ resource_prefix }}-subnet-d'
- ignore_errors: yes
- register: removed_subnets
- until: removed_subnets is succeeded
- retries: 5
- delay: 5
-
- - name: 'Remove the VPC'
- ec2_vpc_net:
- state: absent
- cidr_block: '{{ vpc_cidr }}'
- name: '{{ resource_prefix }}'
- ignore_errors: yes
- register: removed_vpc
- until: removed_vpc is success
- retries: 5
- delay: 5
-
- # ============================================================
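Unlike the rds_instance target, this file centralizes credentials with module_defaults on the group/aws action group, so individual tasks carry no connection boilerplate and no YAML anchors are needed. The construct reduced to its shape:

    - module_defaults:
        group/aws:
          aws_access_key: "{{ aws_access_key }}"
          aws_secret_key: "{{ aws_secret_key }}"
          security_token: "{{ security_token | default(omit) }}"
          region: "{{ aws_region }}"
      block:
        - name: every AWS task in the block inherits the defaults
          aws_az_info: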
diff --git a/test/integration/targets/rds_subnet_group/tasks/params.yml b/test/integration/targets/rds_subnet_group/tasks/params.yml
deleted file mode 100644
index 74da381ff7..0000000000
--- a/test/integration/targets/rds_subnet_group/tasks/params.yml
+++ /dev/null
@@ -1,62 +0,0 @@
----
-# Try creating without a description
-- name: 'Create a subnet group (no description)'
- rds_subnet_group:
- state: present
- name: '{{ resource_prefix }}'
- subnets:
- - '{{ subnet_ids[0] }}'
- - '{{ subnet_ids[1] }}'
- ignore_errors: yes
- register: create_missing_param
-- assert:
- that:
- - create_missing_param is failed
- - "'description' in create_missing_param.msg"
- - "\"required for state='present'\" in create_missing_param.msg"
-
-# Try creating without subnets
-- name: 'Create a subnet group (no subnets)'
- rds_subnet_group:
- state: present
- name: '{{ resource_prefix }}'
- description: '{{ group_description }}'
- ignore_errors: yes
- register: create_missing_param
-- assert:
- that:
- - create_missing_param is failed
- - "'subnets' in create_missing_param.msg"
- - "\"required for state='present'\" in create_missing_param.msg"
-
-# XXX This feels like a bad pattern
-# Try deleting with subnets
-- name: 'Delete a subnet group (with subnets)'
- rds_subnet_group:
- state: absent
- name: '{{ resource_prefix }}'
- subnets:
- - '{{ subnet_ids[0] }}'
- - '{{ subnet_ids[1] }}'
- ignore_errors: yes
- register: delete_extra_param
-- assert:
- that:
- - delete_extra_param is failed
- - "'subnets' in delete_extra_param.msg"
- - "\"not allowed for state='absent'\" in delete_extra_param.msg"
-
-# XXX This feels like a bad pattern
-# Try deleting with a description
- - name: 'Delete a subnet group (with description)'
- rds_subnet_group:
- state: absent
- name: '{{ resource_prefix }}'
- description: '{{ group_description }}'
- ignore_errors: yes
- register: delete_extra_param
-- assert:
- that:
- - delete_extra_param is failed
- - "'description' in delete_extra_param.msg"
- - "\"not allowed for state='absent'\" in delete_extra_param.msg"
diff --git a/test/integration/targets/rds_subnet_group/tasks/tests.yml b/test/integration/targets/rds_subnet_group/tasks/tests.yml
deleted file mode 100644
index 0b4e3d1b52..0000000000
--- a/test/integration/targets/rds_subnet_group/tasks/tests.yml
+++ /dev/null
@@ -1,221 +0,0 @@
----
-# XXX rds_subnet_group doesn't support check_mode yet
-
-# ============================================================
-# Basic creation
-- name: 'Create a subnet group'
- rds_subnet_group:
- state: present
- name: '{{ resource_prefix }}'
- description: '{{ group_description }}'
- subnets:
- - '{{ subnet_ids[0] }}'
- - '{{ subnet_ids[1] }}'
- register: result
-
-- assert:
- that:
- - result is changed
- - result.subnet_group.description == group_description
- - result.subnet_group.name == resource_prefix
- - result.subnet_group.vpc_id == vpc.vpc.id
- - result.subnet_group.subnet_ids | length == 2
- - subnet_ids[0] in result.subnet_group.subnet_ids
- - subnet_ids[1] in result.subnet_group.subnet_ids
-
-- name: 'Create a subnet group (idempotency)'
- rds_subnet_group:
- state: present
- name: '{{ resource_prefix }}'
- description: '{{ group_description }}'
- subnets:
- - '{{ subnet_ids[0] }}'
- - '{{ subnet_ids[1] }}'
- register: result
-
-- assert:
- that:
- - result is not changed
- - result.subnet_group.description == group_description
- - result.subnet_group.name == resource_prefix
- - result.subnet_group.vpc_id == vpc.vpc.id
- - result.subnet_group.subnet_ids | length == 2
- - subnet_ids[0] in result.subnet_group.subnet_ids
- - subnet_ids[1] in result.subnet_group.subnet_ids
-
-# ============================================================
-# Update description
-
-- name: 'Update subnet group description'
- rds_subnet_group:
- state: present
- name: '{{ resource_prefix }}'
- description: '{{ group_description_changed }}'
- subnets:
- - '{{ subnet_ids[0] }}'
- - '{{ subnet_ids[1] }}'
- register: result
-
-- assert:
- that:
- - result is changed
- - result.subnet_group.description == group_description_changed
- - result.subnet_group.name == resource_prefix
- - result.subnet_group.vpc_id == vpc.vpc.id
- - result.subnet_group.subnet_ids | length == 2
- - subnet_ids[0] in result.subnet_group.subnet_ids
- - subnet_ids[1] in result.subnet_group.subnet_ids
-
-- name: 'Update subnet group description (idempotency)'
- rds_subnet_group:
- state: present
- name: '{{ resource_prefix }}'
- description: '{{ group_description_changed }}'
- subnets:
- - '{{ subnet_ids[0] }}'
- - '{{ subnet_ids[1] }}'
- register: result
-
-- assert:
- that:
- - result is not changed
- - result.subnet_group.description == group_description_changed
- - result.subnet_group.name == resource_prefix
- - result.subnet_group.vpc_id == vpc.vpc.id
- - result.subnet_group.subnet_ids | length == 2
- - subnet_ids[0] in result.subnet_group.subnet_ids
- - subnet_ids[1] in result.subnet_group.subnet_ids
-
-- name: 'Restore subnet group description'
- rds_subnet_group:
- state: present
- name: '{{ resource_prefix }}'
- description: '{{ group_description }}'
- subnets:
- - '{{ subnet_ids[0] }}'
- - '{{ subnet_ids[1] }}'
- register: result
-
-- assert:
- that:
- - result is changed
- - result.subnet_group.description == group_description
- - result.subnet_group.name == resource_prefix
- - result.subnet_group.vpc_id == vpc.vpc.id
- - result.subnet_group.subnet_ids | length == 2
- - subnet_ids[0] in result.subnet_group.subnet_ids
- - subnet_ids[1] in result.subnet_group.subnet_ids
-
-# ============================================================
-# Update subnets
-
-- name: 'Update subnet group list'
- rds_subnet_group:
- state: present
- name: '{{ resource_prefix }}'
- description: '{{ group_description }}'
- subnets:
- - '{{ subnet_ids[2] }}'
- - '{{ subnet_ids[3] }}'
- register: result
-
-- assert:
- that:
- - result is changed
- - result.subnet_group.description == group_description
- - result.subnet_group.name == resource_prefix
- - result.subnet_group.vpc_id == vpc.vpc.id
- - result.subnet_group.subnet_ids | length == 2
- - subnet_ids[2] in result.subnet_group.subnet_ids
- - subnet_ids[3] in result.subnet_group.subnet_ids
-
-- name: 'Update subnet group list (idempotency)'
- rds_subnet_group:
- state: present
- name: '{{ resource_prefix }}'
- description: '{{ group_description }}'
- subnets:
- - '{{ subnet_ids[2] }}'
- - '{{ subnet_ids[3] }}'
- register: result
-
-- assert:
- that:
- - result is not changed
- - result.subnet_group.description == group_description
- - result.subnet_group.name == resource_prefix
- - result.subnet_group.vpc_id == vpc.vpc.id
- - result.subnet_group.subnet_ids | length == 2
- - subnet_ids[2] in result.subnet_group.subnet_ids
- - subnet_ids[3] in result.subnet_group.subnet_ids
-
-- name: 'Add more subnets to subnet group list'
- rds_subnet_group:
- state: present
- name: '{{ resource_prefix }}'
- description: '{{ group_description }}'
- subnets:
- - '{{ subnet_ids[0] }}'
- - '{{ subnet_ids[1] }}'
- - '{{ subnet_ids[2] }}'
- - '{{ subnet_ids[3] }}'
- register: result
-
-- assert:
- that:
- - result is changed
- - result.subnet_group.description == group_description
- - result.subnet_group.name == resource_prefix
- - result.subnet_group.vpc_id == vpc.vpc.id
- - result.subnet_group.subnet_ids | length == 4
- - subnet_ids[0] in result.subnet_group.subnet_ids
- - subnet_ids[1] in result.subnet_group.subnet_ids
- - subnet_ids[2] in result.subnet_group.subnet_ids
- - subnet_ids[3] in result.subnet_group.subnet_ids
-
-- name: 'Add more members to subnet group list (idempotency)'
- rds_subnet_group:
- state: present
- name: '{{ resource_prefix }}'
- description: '{{ group_description }}'
- subnets:
- - '{{ subnet_ids[0] }}'
- - '{{ subnet_ids[1] }}'
- - '{{ subnet_ids[2] }}'
- - '{{ subnet_ids[3] }}'
- register: result
-
-- assert:
- that:
- - result is not changed
- - result.subnet_group.description == group_description
- - result.subnet_group.name == resource_prefix
- - result.subnet_group.vpc_id == vpc.vpc.id
- - result.subnet_group.subnet_ids | length == 4
- - subnet_ids[0] in result.subnet_group.subnet_ids
- - subnet_ids[1] in result.subnet_group.subnet_ids
- - subnet_ids[2] in result.subnet_group.subnet_ids
- - subnet_ids[3] in result.subnet_group.subnet_ids
-
-# ============================================================
-# Deletion
-
-- name: 'Delete a subnet group'
- rds_subnet_group:
- state: absent
- name: '{{ resource_prefix }}'
- register: result
-
-- assert:
- that:
- - result is changed
-
-- name: 'Delete a subnet group (idempotency)'
- rds_subnet_group:
- state: absent
- name: '{{ resource_prefix }}'
- register: result
-
-- assert:
- that:
- - result is not changed
diff --git a/test/integration/targets/redshift/aliases b/test/integration/targets/redshift/aliases
deleted file mode 100644
index a112c3d1bb..0000000000
--- a/test/integration/targets/redshift/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group1
diff --git a/test/integration/targets/redshift/defaults/main.yml b/test/integration/targets/redshift/defaults/main.yml
deleted file mode 100644
index f1cd2cb12a..0000000000
--- a/test/integration/targets/redshift/defaults/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# defaults file for test_redshift
-redshift_cluster_name: '{{ resource_prefix }}'
-redshift_master_password: "th1s_is_A_test"
-redshift_master_username: "master_user"
-node_type: "dc2.large"
diff --git a/test/integration/targets/redshift/meta/main.yml b/test/integration/targets/redshift/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/redshift/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/redshift/tasks/main.yml b/test/integration/targets/redshift/tasks/main.yml
deleted file mode 100644
index 591ebc8c8d..0000000000
--- a/test/integration/targets/redshift/tasks/main.yml
+++ /dev/null
@@ -1,276 +0,0 @@
----
-# A note about EC2 environment variable name preference:
-# - EC2_URL -> AWS_URL
-# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
-# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
-# - EC2_REGION -> AWS_REGION
-#
-
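The note above maps the legacy EC2_* environment variable names onto their AWS_* equivalents. As a hedged sketch, the same credentials can also be supplied through the task environment rather than module arguments; aws_caller_info is used here only because it needs no other parameters:

    - name: call AWS with credentials taken from environment variables
      aws_caller_info:
      environment:
        AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
        AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
        AWS_REGION: "{{ aws_region }}"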
-- block:
-
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- no_log: yes
-
- # ============================================================
- - name: test failure with no parameters
- redshift:
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
-
- - name: assert failure with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "missing required arguments: command, identifier"'
-
- # ============================================================
- - name: test failure with only identifier
- redshift:
- identifier: '{{ redshift_cluster_name }}'
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: assert failure with only identifier
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "missing required arguments: command"'
-
- # ============================================================
- - name: test create with no identifier
- redshift:
- command: create
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: assert failure with no identifier
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "missing required arguments: identifier"'
-
- # ============================================================
- - name: test create with missing node_type
- redshift:
- command: create
- identifier: "{{ redshift_cluster_name }}"
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: assert failure with missing node_type
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "command is create but all of the following are missing: node_type, username, password"'
-
- # ============================================================
-
- - name: test create with missing password
- redshift:
- command: create
- identifier: "{{ redshift_cluster_name }}"
- username: "{{ redshift_master_username }}"
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: assert create failure with missing password
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "command is create but all of the following are missing: node_type, password"'
-
- # ============================================================
-
- - name: test create with missing username
- redshift:
- command: create
- identifier: "{{ redshift_cluster_name }}"
- password: "{{ reshift_master_password }}"
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: assert create failure with missing username
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "command is create but all of the following are missing: node_type, username"'
-
- # ============================================================
-
- - name: test create with default params
- redshift:
- command: create
- identifier: "{{ redshift_cluster_name }}"
- username: "{{ redshift_master_username }}"
- password: "{{ reshift_master_password }}"
- node_type: "{{ node_type }}"
- wait: yes
- wait_timeout: 1000
- <<: *aws_connection_info
- register: result
- - debug:
- msg: "{{ result }}"
- verbosity: 1
- - name: assert create success
- assert:
- that:
- - 'result.changed'
- - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
-
- # ============================================================
-
- - name: test create again with default params
- redshift:
- command: create
- identifier: "{{ redshift_cluster_name }}"
- username: "{{ redshift_master_username }}"
- password: "{{ reshift_master_password }}"
- node_type: "{{ node_type }}"
- <<: *aws_connection_info
- register: result
-
- - name: assert no change gets made to the existing cluster
- assert:
- that:
- - 'not result.changed'
- - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
- # ============================================================
-
- - name: test modify cluster
- redshift:
- command: modify
- identifier: "{{ redshift_cluster_name }}"
- new_cluster_identifier: "{{ redshift_cluster_name }}-modified"
- enhanced_vpc_routing: True
- wait: yes
- wait_timeout: 1000
- <<: *aws_connection_info
- register: result
-
- - name: assert cluster was modified
- assert:
- that:
- - 'result.changed'
- - 'result.cluster.identifier == "{{ redshift_cluster_name }}-modified"'
- - 'result.cluster.enhanced_vpc_routing == True'
-
-
- # ============================================================
- - name: test delete with no cluster identifier
- redshift:
- command: delete
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: assert failure with no identifier
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "missing required arguments: identifier"'
-
- # ============================================================
- - name: test delete with no snapshot id
- redshift:
- command: delete
- identifier: "{{ redshift_cluster_name }}"
- <<: *aws_connection_info
- register: result
- ignore_errors: true
-
- - name: assert failure for no snapshot identifier
- assert:
- that:
- - 'result.failed'
- - 'result.msg == "Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False"'
-
-
- # ============================================================
- - name: test successful delete
- redshift:
- command: delete
- identifier: "{{ redshift_cluster_name }}-modified"
- skip_final_cluster_snapshot: true
- wait: yes
- wait_timeout: 1200
- <<: *aws_connection_info
- register: result
-
- - name: assert delete
- assert:
- that:
- - 'result.changed'
-
- # ============================================================
-
- - name: test create multi-node cluster with custom db-name
- redshift:
- command: create
- identifier: "{{ redshift_cluster_name }}"
- username: "{{ redshift_master_username }}"
- password: "{{ reshift_master_password }}"
- node_type: "{{ node_type }}"
- cluster_type: multi-node
- number_of_nodes: 3
- wait: yes
- db_name: "integration_test"
- wait_timeout: 1800
- <<: *aws_connection_info
- register: result
-
-
- - name: assert create
- assert:
- that:
- - 'result.changed'
- - 'result.cluster.identifier == "{{ redshift_cluster_name }}"'
- - 'result.cluster.db_name == "integration_test"'
-
- # ============================================================
-
- - name: test successful delete of multi-node cluster
- redshift:
- command: delete
- identifier: "{{ redshift_cluster_name }}"
- skip_final_cluster_snapshot: true
- wait: yes
- wait_timeout: 1200
- <<: *aws_connection_info
- register: result
-
- - name: assert delete
- assert:
- that:
- - 'result.changed'
-
- always:
-
- - name: Remove cluster if tests failed
- redshift:
- command: delete
- identifier: "{{ item }}"
- skip_final_cluster_snapshot: true
- wait: yes
- wait_timeout: 1200
- <<: *aws_connection_info
- register: cleanup
- ignore_errors: yes
- retries: 10
- delay: 10
- until: cleanup is success
- loop:
- - "{{ redshift_cluster_name }}"
- - "{{ redshift_cluster_name }}-modified"
diff --git a/test/integration/targets/route53/aliases b/test/integration/targets/route53/aliases
deleted file mode 100644
index f6cc7ad00c..0000000000
--- a/test/integration/targets/route53/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-route53_info
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/route53/defaults/main.yml b/test/integration/targets/route53/defaults/main.yml
deleted file mode 100644
index cc0d3b78d0..0000000000
--- a/test/integration/targets/route53/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for route53 tests
diff --git a/test/integration/targets/route53/tasks/main.yml b/test/integration/targets/route53/tasks/main.yml
deleted file mode 100644
index de332a7ba0..0000000000
--- a/test/integration/targets/route53/tasks/main.yml
+++ /dev/null
@@ -1,252 +0,0 @@
----
-# tasks file for Route53 integration tests
-
-- set_fact:
- zone_one: '{{ resource_prefix | replace("-", "") }}.one.fakeansible.com.'
- zone_two: '{{ resource_prefix | replace("-", "") }}.two.fakeansible.com.'
-- debug: msg='Set zones {{ zone_one }} and {{ zone_two }}'
-
-- name: Test basics (new zone, A and AAAA records)
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- route53:
- region: null
- block:
- - route53_zone:
- zone: '{{ zone_one }}'
- comment: Created in Ansible test {{ resource_prefix }}
- register: z1
-
- - assert:
- that:
- - z1 is success
- - z1 is changed
- - "z1.comment == 'Created in Ansible test {{ resource_prefix }}'"
-
- - name: Get zone details
- route53_info:
- query: hosted_zone
- hosted_zone_id: '{{ z1.zone_id }}'
- hosted_zone_method: details
- register: hosted_zones
-
- - name: Assert newly created hosted zone only has NS and SOA records
- assert:
- that:
- - hosted_zones.HostedZone.ResourceRecordSetCount == 2
-
- - name: Create A record using zone fqdn
- route53:
- state: present
- zone: '{{ zone_one }}'
- record: 'qdn_test.{{ zone_one }}'
- type: A
- value: 1.2.3.4
- register: qdn
- - assert:
- that:
- - qdn is not failed
- - qdn is changed
-
- - name: Create same A record using zone non-qualified domain
- route53:
- state: present
- zone: '{{ zone_one[:-1] }}'
- record: 'qdn_test.{{ zone_one[:-1] }}'
- type: A
- value: 1.2.3.4
- register: non_qdn
- - assert:
- that:
- - non_qdn is not failed
- - non_qdn is not changed
-
- - name: Create A record using zone ID
- route53:
- state: present
- hosted_zone_id: '{{ z1.zone_id }}'
- record: 'zid_test.{{ zone_one }}'
- type: A
- value: 1.2.3.4
- register: zid
- - assert:
- that:
- - zid is not failed
- - zid is changed
-
- - name: Create a multi-value A record with values in different order
- route53:
- state: present
- zone: '{{ zone_one }}'
- record: 'order_test.{{ zone_one }}'
- type: A
- value:
- - 4.5.6.7
- - 1.2.3.4
- register: mv_a_record
- - assert:
- that:
- - mv_a_record is not failed
- - mv_a_record is changed
-
- - name: Create same multi-value A record with values in different order
- route53:
- state: present
- zone: '{{ zone_one }}'
- record: 'order_test.{{ zone_one }}'
- type: A
- value:
- - 4.5.6.7
- - 1.2.3.4
- register: mv_a_record
- - assert:
- that:
- - mv_a_record is not failed
- - mv_a_record is not changed
-
- - name: get Route53 A record information
- route53_info:
- type: A
- query: record_sets
- hosted_zone_id: '{{ z1.zone_id }}'
- start_record_name: 'order_test.{{ zone_one }}'
- max_items: 50
- register: records
- - assert:
- that:
- - records.ResourceRecordSets|length == 3
- - records.ResourceRecordSets[0].ResourceRecords|length == 2
- - records.ResourceRecordSets[0].ResourceRecords[0].Value == "4.5.6.7"
- - records.ResourceRecordSets[0].ResourceRecords[1].Value == "1.2.3.4"
-
- - name: Remove a member from multi-value A record without overwrite
- route53:
- state: present
- zone: '{{ zone_one }}'
- record: 'order_test.{{ zone_one }}'
- type: A
- value:
- - 4.5.6.7
- register: del_a_record
- ignore_errors: true
- - name: This should fail, because `overwrite` is false
- assert:
- that:
- - del_a_record is failed
-
- - name: Remove a member from multi-value A record with overwrite
- route53:
- state: present
- zone: '{{ zone_one }}'
- record: 'order_test.{{ zone_one }}'
- overwrite: true
- type: A
- value:
- - 4.5.6.7
- register: del_a_record
- ignore_errors: true
- - name: This should not fail, because `overwrite` is true
- assert:
- that:
- - del_a_record is not failed
- - del_a_record is changed
-
- - name: get Route53 zone A record information
- route53_info:
- type: A
- query: record_sets
- hosted_zone_id: '{{ z1.zone_id }}'
- start_record_name: 'order_test.{{ zone_one }}'
- max_items: 50
- register: records
- - assert:
- that:
- - records.ResourceRecordSets|length == 3
- - records.ResourceRecordSets[0].ResourceRecords|length == 1
- - records.ResourceRecordSets[0].ResourceRecords[0].Value == "4.5.6.7"
-
- - name: Create a LetsEncrypt CAA record
- route53:
- state: present
- zone: '{{ zone_one }}'
- record: '{{ zone_one }}'
- type: CAA
- value:
- - 0 issue "letsencrypt.org;"
- - 0 issuewild "letsencrypt.org;"
- overwrite: true
- register: caa
- - assert:
- that:
- - caa is not failed
- - caa is changed
-
- - name: Re-create the same LetsEncrypt CAA record
- route53:
- state: present
- zone: '{{ zone_one }}'
- record: '{{ zone_one }}'
- type: CAA
- value:
- - 0 issue "letsencrypt.org;"
- - 0 issuewild "letsencrypt.org;"
- overwrite: true
- register: caa
- - assert:
- that:
- - caa is not failed
- - caa is not changed
-
- - name: Re-create the same LetsEncrypt CAA record in opposite-order
- route53:
- state: present
- zone: '{{ zone_one }}'
- record: '{{ zone_one }}'
- type: CAA
- value:
- - 0 issuewild "letsencrypt.org;"
- - 0 issue "letsencrypt.org;"
- overwrite: true
- register: caa
- - name: This should not be changed, as CAA records are not order sensitive
- assert:
- that:
- - caa is not failed
- - caa is not changed
-
-
- always:
- - route53_info:
- query: record_sets
- hosted_zone_id: '{{ z1.zone_id }}'
- register: z1_records
- - debug: var=z1_records
- - name: Loop over A/AAAA/CNAME records and delete them
- route53:
- state: absent
- zone: '{{ zone_one }}'
- record: '{{ item.Name }}'
- type: '{{ item.Type }}'
- value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}'
- loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", "CNAME", "CAA"]) | list }}'
- - name: Delete test zone one '{{ zone_one }}'
- route53_zone:
- state: absent
- zone: '{{ zone_one }}'
- register: delete_one
- ignore_errors: yes
- retries: 10
- until: delete_one is not failed
- - name: Delete test zone two '{{ zone_two }}'
- route53_zone:
- state: absent
- zone: '{{ zone_two }}'
- register: delete_two
- ignore_errors: yes
- retries: 10
- until: delete_two is not failed
- when: false
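The module_defaults block at the top of this file uses the group/aws key to inject the same parameters into every AWS module inside the block, while the per-module route53 entry sets region back to null, since Route 53 is a global service and these tests deliberately leave its region unset. A minimal sketch of the same layering:

    - module_defaults:
        group/aws:
          region: "{{ aws_region }}"   # default for every AWS module in the block
        route53:
          region: null                 # per-module entry overrides the group default
      block:
        - route53_info:
            query: hosted_zone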
diff --git a/test/integration/targets/route53/vars/main.yml b/test/integration/targets/route53/vars/main.yml
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/route53/vars/main.yml
+++ /dev/null
diff --git a/test/integration/targets/route53_zone/aliases b/test/integration/targets/route53_zone/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/route53_zone/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/route53_zone/tasks/main.yml b/test/integration/targets/route53_zone/tasks/main.yml
deleted file mode 100644
index 132d58c5d2..0000000000
--- a/test/integration/targets/route53_zone/tasks/main.yml
+++ /dev/null
@@ -1,393 +0,0 @@
----
-- block:
-
- # ============================================================
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: true
-
- - name: Create VPC for use in testing
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.22.32.0/23
- tags:
- Name: Ansible ec2_instance Testing VPC
- tenancy: default
- <<: *aws_connection_info
- register: testing_vpc
-
- # ============================================================
- - name: Create a public zone
- route53_zone:
- zone: "{{ resource_prefix }}.public"
- comment: original comment
- state: present
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output.changed
- - output.comment == 'original comment'
- - output.name == '{{ resource_prefix }}.public.'
- - not output.private_zone
-
- # ============================================================
- - name: Create a public zone (CHECK MODE)
- route53_zone:
- zone: "{{ resource_prefix }}.check.public"
- comment: original comment
- state: present
- <<: *aws_connection_info
- register: output
- check_mode: yes
-
- - assert:
- that:
- - output.changed
- - output.comment == 'original comment'
- - output.name == '{{ resource_prefix }}.check.public.'
- - not output.private_zone
-
- # ============================================================
- - name: Do an idempotent update of a public zone
- route53_zone:
- zone: "{{ resource_prefix }}.public"
- comment: original comment
- state: present
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - not output.changed
- - output.comment == 'original comment'
- - output.name == '{{ resource_prefix }}.public.'
- - not output.private_zone
-
- - name: Do an idempotent update of a public zone (CHECK MODE)
- route53_zone:
- zone: "{{ resource_prefix }}.public"
- comment: original comment
- state: present
- <<: *aws_connection_info
- register: output
- check_mode: yes
-
- - assert:
- that:
- - not output.changed
- - output.comment == 'original comment'
- - output.name == '{{ resource_prefix }}.public.'
- - not output.private_zone
-
- # ============================================================
- - name: Update comment of a public zone
- route53_zone:
- zone: "{{ resource_prefix }}.public"
- comment: updated comment
- state: present
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output.changed
- - output.result.comment == "updated comment"
-
- - name: Update comment of a public zone (CHECK MODE)
- route53_zone:
- zone: "{{ resource_prefix }}.public"
- comment: updated comment for check
- state: present
- <<: *aws_connection_info
- register: output
- check_mode: yes
-
- - assert:
- that:
- - output.changed
- - output.result.comment == "updated comment for check"
-
- # ============================================================
- - name: Delete public zone (CHECK MODE)
- route53_zone:
- zone: "{{ resource_prefix }}.public"
- state: absent
- <<: *aws_connection_info
- register: output
- check_mode: yes
-
- - assert:
- that:
- - output.changed
- - "'Successfully deleted' in output.result"
-
- - name: Delete public zone
- route53_zone:
- zone: "{{ resource_prefix }}.public"
- state: absent
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output.changed
- - "'Successfully deleted' in output.result"
-
- # ============================================================
- - name: Create a private zone (CHECK MODE)
- route53_zone:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- vpc_region: "{{ aws_region }}"
- zone: "{{ resource_prefix }}.private"
- comment: original comment
- state: present
- <<: *aws_connection_info
- register: output
- check_mode: yes
-
- - assert:
- that:
- - output.changed
-
- - name: Create a private zone
- route53_zone:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- vpc_region: "{{ aws_region }}"
- zone: "{{ resource_prefix }}.private"
- comment: original comment
- state: present
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output.changed
- # ============================================================
- - name: Idempotent update of a private zone
- route53_zone:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- vpc_region: "{{ aws_region }}"
- zone: "{{ resource_prefix }}.private"
- comment: original comment
- state: present
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - not output.changed
- - "'There is already a private hosted zone in the same region with the same VPC' in output.msg"
-
- - name: Idempotent update of a private zone (CHECK MODE)
- route53_zone:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- vpc_region: "{{ aws_region }}"
- zone: "{{ resource_prefix }}.private"
- comment: original comment
- state: present
- <<: *aws_connection_info
- register: output
- check_mode: yes
-
- - assert:
- that:
- - not output.changed
- - "'There is already a private hosted zone in the same region with the same VPC' in output.msg"
-
- # ============================================================
- - name: Update private zone comment
- route53_zone:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- vpc_region: "{{ aws_region }}"
- zone: "{{ resource_prefix }}.private"
- comment: updated_comment
- state: present
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output.changed
- - output.result.comment == "updated_comment"
-
- - name: Update private zone comment (CHECK MODE)
- route53_zone:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- vpc_region: "{{ aws_region }}"
- zone: "{{ resource_prefix }}.private"
- comment: updated_comment check
- state: present
- <<: *aws_connection_info
- register: output
- check_mode: yes
-
- - assert:
- that:
- - output.changed
- - output.result.comment == "updated_comment check"
-
- # ============================================================
- - name: Try to delete private zone without setting vpc_id and vpc_region
- route53_zone:
- zone: "{{ resource_prefix }}.private"
- state: absent
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - not output.changed
- - "output.result == 'No zone to delete.'"
-
- - name: Try to delete private zone without setting vpc_id and vpc_region (CHECK MODE)
- route53_zone:
- zone: "{{ resource_prefix }}.private"
- state: absent
- <<: *aws_connection_info
- register: output
- check_mode: yes
-
- - assert:
- that:
- - not output.changed
- - "output.result == 'No zone to delete.'"
-
- # ============================================================
- - name: Try to delete a public zone that does not exist
- route53_zone:
- zone: "{{ resource_prefix }}.publicfake"
- comment: original comment
- state: absent
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - not output.changed
- - "output.result == 'No zone to delete.'"
-
- - name: Try to delete a public zone that does not exist (CHECK MODE)
- route53_zone:
- zone: "{{ resource_prefix }}.publicfake"
- comment: original comment
- state: absent
- <<: *aws_connection_info
- register: output
- check_mode: yes
-
- - assert:
- that:
- - not output.changed
- - "output.result == 'No zone to delete.'"
-
- # ============================================================
- - name: Delete private zone (CHECK MODE)
- route53_zone:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- vpc_region: "{{ aws_region }}"
- zone: "{{ resource_prefix }}.private"
- state: absent
- <<: *aws_connection_info
- register: output
- check_mode: yes
-
- - assert:
- that:
- - output.changed
- - "'Successfully deleted' in output.result"
-
- - name: Delete private zone
- route53_zone:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- vpc_region: "{{ aws_region }}"
- zone: "{{ resource_prefix }}.private"
- state: absent
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output.changed
- - "'Successfully deleted' in output.result"
-
- # ============================================================
- - name: Create a public zone
- route53_zone:
- zone: "{{ resource_prefix }}.public2"
- comment: this is an example
- state: present
- <<: *aws_connection_info
- register: new_zone
-
- # Delete zone using its id
- - name: Delete zone using attribute hosted_zone_id (CHECK MODE)
- route53_zone:
- zone: "{{ resource_prefix }}.public2"
- hosted_zone_id: "{{new_zone.zone_id}}"
- state: absent
- <<: *aws_connection_info
- register: output
- check_mode: yes
-
- - assert:
- that:
- - output.changed
- - "'Successfully deleted' in output.result"
-
- - name: Delete zone using attribute hosted_zone_id
- route53_zone:
- zone: "{{ resource_prefix }}.public2"
- hosted_zone_id: "{{new_zone.zone_id}}"
- state: absent
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output.changed
- - "'Successfully deleted' in output.result"
-
- # ============================================================
- always:
- - name: Ensure public zone is deleted
- route53_zone:
- zone: "{{ item }}"
- state: absent
- <<: *aws_connection_info
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
- with_items:
- - "{{ resource_prefix }}.public"
- - "{{ resource_prefix }}.public2"
-
- - name: Ensure private zone is deleted
- route53_zone:
- vpc_id: "{{ testing_vpc.vpc.id }}"
- vpc_region: "{{ aws_region }}"
- zone: "{{ resource_prefix }}.private"
- state: absent
- <<: *aws_connection_info
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
-
- - name: remove the VPC
- ec2_vpc_net:
- name: "{{ resource_prefix }}-vpc"
- cidr_block: 10.22.32.0/23
- state: absent
- <<: *aws_connection_info
- register: removed
- until: removed is not failed
- ignore_errors: yes
- retries: 10
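Each mutating step in this file is exercised twice: once with check_mode: yes, asserting that the module reports the change it would make without touching AWS, and once for real. The bare pattern, sketched with a hypothetical zone name:

    - route53_zone:
        zone: example.invalid        # hypothetical zone, for illustration only
        state: present
      check_mode: yes
      register: dry_run

    - assert:
        that:
          - dry_run is changed       # the change is predicted, not applied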
diff --git a/test/integration/targets/s3_bucket_notification/aliases b/test/integration/targets/s3_bucket_notification/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/s3_bucket_notification/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/s3_bucket_notification/defaults/main.yml b/test/integration/targets/s3_bucket_notification/defaults/main.yml
deleted file mode 100644
index d227210344..0000000000
--- a/test/integration/targets/s3_bucket_notification/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# defaults file for aws_lambda test
-lambda_function_name: '{{resource_prefix}}'
diff --git a/test/integration/targets/s3_bucket_notification/files/mini_lambda.py b/test/integration/targets/s3_bucket_notification/files/mini_lambda.py
deleted file mode 100644
index 0ba9e0d300..0000000000
--- a/test/integration/targets/s3_bucket_notification/files/mini_lambda.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import json
-
-
-def lambda_handler(event, context):
- return {
- 'statusCode': 200,
- 'body': json.dumps('Hello from Lambda!')
- }
diff --git a/test/integration/targets/s3_bucket_notification/meta/main.yml b/test/integration/targets/s3_bucket_notification/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/s3_bucket_notification/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/s3_bucket_notification/tasks/main.yml b/test/integration/targets/s3_bucket_notification/tasks/main.yml
deleted file mode 100644
index 873c80d184..0000000000
--- a/test/integration/targets/s3_bucket_notification/tasks/main.yml
+++ /dev/null
@@ -1,335 +0,0 @@
----
-# ============================================================
-- name: set up aws connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: yes
-# ============================================================
-- name: test add s3 bucket notification
- block:
- - name: move lambda into place for archive module
- copy:
- src: "mini_lambda.py"
- dest: "{{output_dir}}/mini_lambda.py"
-
- - name: bundle lambda into a zip
- archive:
- format: zip
- path: "{{output_dir}}/mini_lambda.py"
- dest: "{{output_dir}}/mini_lambda.zip"
- register: function_res
-
- - name: register bucket
- s3_bucket:
- name: "{{resource_prefix}}-bucket"
- state: present
- <<: *aws_connection_info
- register: bucket_info
-
- - name: register lambda
- lambda:
- name: "{{resource_prefix}}-lambda"
- state: present
- role: "ansible_lambda_role"
- runtime: "python3.7"
- zip_file: "{{function_res.dest}}"
- handler: "lambda_function.lambda_handler"
- memory_size: "128"
- timeout: "30"
- <<: *aws_connection_info
- register: lambda_info
-
- - name: register notification without invoke permissions
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
- prefix: images/
- suffix: .jpg
- <<: *aws_connection_info
- register: result
- ignore_errors: true
- - name: assert nice message returned
- assert:
- that:
- - result is failed
- - result.msg != 'MODULE FAILURE'
-
- - name: Add invocation permission of Lambda function on AWS
- lambda_policy:
- function_name: "{{ lambda_info.configuration.function_arn }}"
- statement_id: allow_lambda_invoke
- action: lambda:InvokeFunction
- principal: "s3.amazonaws.com"
- source_arn: "arn:aws:s3:::{{bucket_info.name}}"
- <<: *aws_connection_info
-
- - name: register s3 bucket notification
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
- prefix: images/
- suffix: .jpg
- <<: *aws_connection_info
- register: result
- - name: assert result.changed == True
- assert:
- that:
- - result.changed == True
-
- # ============================================================
- - name: test check_mode without change
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
- prefix: images/
- suffix: .jpg
- <<: *aws_connection_info
- register: result
- check_mode: yes
- - name: assert result.changed == False
- assert:
- that:
- - result.changed == False
-
- - name: test check_mode change events
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:*"]
- prefix: images/
- suffix: .jpg
- <<: *aws_connection_info
- register: result
- check_mode: yes
- - name: assert result.changed == True
- assert:
- that:
- - result.changed == True
-
- - name: test that check_mode didn't change events
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
- prefix: images/
- suffix: .jpg
- <<: *aws_connection_info
- register: result
- - name: assert result.changed == False
- assert:
- that:
- - result.changed == False
-
- # ============================================================
- - name: test mutually exclusive parameters
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:Post"]
- prefix: photos/
- suffix: .gif
- lambda_version: 0
- lambda_alias: 0
- <<: *aws_connection_info
- register: result
- ignore_errors: true
- - name: assert task failed
- assert:
- that:
- - result is failed
- - "result.msg == 'parameters are mutually exclusive: lambda_alias|lambda_version'"
-
- # ============================================================
- # Test configuration changes
- - name: test configuration change on suffix
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
- prefix: images/
- suffix: .gif
- <<: *aws_connection_info
- register: result
- - name: assert result.changed == True
- assert:
- that:
- - result.changed == True
-
- - name: test configuration change on prefix
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"]
- prefix: photos/
- suffix: .gif
- <<: *aws_connection_info
- register: result
- - name: assert result.changed == True
- assert:
- that:
- - result.changed == True
-
- - name: test configuration change on new events added
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*", "s3:ObjectRestore:Post"]
- prefix: photos/
- suffix: .gif
- <<: *aws_connection_info
- register: result
- - name: assert result.changed == True
- assert:
- that:
- - result.changed == True
-
- - name: test configuration change on events removed
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:Post"]
- prefix: photos/
- suffix: .gif
- <<: *aws_connection_info
- register: result
- - name: assert result.changed == True
- assert:
- that:
- - result.changed == True
-
- # ============================================================
- # Test idempotency of CRUD
-
- - name: change events
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*", "s3:ObjectRestore:Post"]
- prefix: photos/
- suffix: .gif
- <<: *aws_connection_info
- register: result
-
- - name: test that event order does not matter
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectRestore:Post", "s3:ObjectRemoved:*", "s3:ObjectCreated:*"]
- prefix: photos/
- suffix: .gif
- <<: *aws_connection_info
- register: result
- - name: assert result.changed == False
- assert:
- that:
- - result.changed == False
-
- - name: test that configuration is the same as previous task
- s3_bucket_notification:
- state: present
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- lambda_function_arn: "{{ lambda_info.configuration.function_arn }}"
- events: ["s3:ObjectCreated:*", "s3:ObjectRemoved:*", "s3:ObjectRestore:Post"]
- prefix: photos/
- suffix: .gif
- <<: *aws_connection_info
- register: result
- - name: assert result.changed == False
- assert:
- that:
- - result.changed == False
-
- - name: test remove notification
- s3_bucket_notification:
- state: absent
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- <<: *aws_connection_info
- register: result
- - name: assert result.changed == True
- assert:
- that:
- - result.changed == True
-
- - name: test that the notification is already removed
- s3_bucket_notification:
- state: absent
- event_name: "{{resource_prefix}}-on_file_add_or_remove"
- bucket_name: "{{resource_prefix}}-bucket"
- <<: *aws_connection_info
- register: result
- - name: assert result.changed == False
- assert:
- that:
- - result.changed == False
-
- always:
- - name: clean-up bucket
- s3_bucket:
- name: "{{resource_prefix}}-bucket"
- state: absent
- <<: *aws_connection_info
-
- - name: clean-up lambda
- lambda:
- name: "{{resource_prefix}}-lambda"
- state: absent
- <<: *aws_connection_info
-# ============================================================
-- block:
- # ============================================================
- - name: test with no parameters except state absent
- s3_bucket_notification:
- state: absent
- register: result
- ignore_errors: true
- - name: assert failure when called with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("missing required arguments: event_name, bucket_name")'
-
- # ============================================================
- - name: test absent
- s3_bucket_notification:
- state: absent
- register: result
- ignore_errors: true
- - name: assert failure when called with no parameters
- assert:
- that:
- - 'result.failed'
- - 'result.msg.startswith("missing required arguments: event_name, bucket_name")' \ No newline at end of file
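The first notification attempt in this file fails by design: S3 can only deliver events to a Lambda function that has granted s3.amazonaws.com permission to invoke it, which is what the lambda_policy task adds. Distilled to its minimum, with hypothetical my-func and my-bucket names:

    - lambda_policy:
        state: present
        function_name: my-func                # hypothetical function name
        statement_id: allow-s3-invoke
        action: lambda:InvokeFunction
        principal: s3.amazonaws.com
        source_arn: arn:aws:s3:::my-bucket    # hypothetical bucket ARN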
diff --git a/test/integration/targets/s3_lifecycle/aliases b/test/integration/targets/s3_lifecycle/aliases
deleted file mode 100644
index 0cc87f1a38..0000000000
--- a/test/integration/targets/s3_lifecycle/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/aws
-shippable/aws/group1
-disabled
diff --git a/test/integration/targets/s3_lifecycle/tasks/main.yml b/test/integration/targets/s3_lifecycle/tasks/main.yml
deleted file mode 100644
index f6dc2ab386..0000000000
--- a/test/integration/targets/s3_lifecycle/tasks/main.yml
+++ /dev/null
@@ -1,435 +0,0 @@
----
-
-- block:
-
- # ============================================================
- - name: set connection information for all tasks
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: true
-
- # ============================================================
- - name: Create simple s3_bucket
- s3_bucket:
- name: "{{ resource_prefix }}-testbucket-ansible"
- state: present
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output.changed
- - output.name == '{{ resource_prefix }}-testbucket-ansible'
- - not output.requester_pays
- # ============================================================
- - name: Create a lifecycle policy
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- expiration_days: 300
- prefix: ''
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is changed
- # ============================================================
- - name: Create a lifecycle policy (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- expiration_days: 300
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- - name: Create a second lifecycle policy
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- transition_days: 30
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is changed
- # ============================================================
- - name: Create a second lifecycle policy (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- transition_days: 30
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- - name: Disable the second lifecycle policy
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- status: disabled
- transition_days: 30
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is changed
- # ============================================================
- - name: Disable the second lifecycle policy (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- status: disabled
- transition_days: 30
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- - name: Re-enable the second lifecycle policy
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- status: enabled
- transition_days: 300
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is changed
- # ============================================================
- - name: Re-enable the second lifecycle policy (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- status: enabled
- transition_days: 300
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- - name: Delete the second lifecycle policy
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- state: absent
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is changed
- # ============================================================
- - name: Delete the second lifecycle policy (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- state: absent
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- - name: Create a second lifecycle policy, with infrequent access
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- transition_days: 30
- storage_class: standard_ia
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is changed
- # ============================================================
- - name: Create a second lifecycle policy, with infrequent access (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- storage_class: standard_ia
- transition_days: 30
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- - name: Create a second lifecycle policy, with glacier
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- transition_days: 300
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is changed
- # ============================================================
- - name: Create a second lifecycle policy, with glacier (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- transition_days: 300
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- - name: Create a lifecycle policy with infrequent access
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- transition_days: 30
- storage_class: standard_ia
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - name: Create a second lifecycle policy, with glacier
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- transition_days: 300
- prefix: /something
- purge_transitions: false
- <<: *aws_connection_info
- register: output
-
- - name: Create a lifecycle policy with infrequent access (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- storage_class: standard_ia
- transition_days: 30
- prefix: /something
- purge_transitions: false
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
-
- - name: Create a second lifecycle policy, with glacier (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- transition_days: 300
- prefix: /something
- purge_transitions: false
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- - name: Create a lifecycle policy, with noncurrent expiration
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- noncurrent_version_expiration_days: 300
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is changed
- # ============================================================
- - name: Create a lifecycle policy, with noncurrent expiration (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- noncurrent_version_expiration_days: 300
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- - name: Create a lifecycle policy, with noncurrent transition
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- noncurrent_version_transition_days: 300
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is changed
- # ============================================================
- - name: Create a lifecycle policy, with noncurrent transition (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- noncurrent_version_transition_days: 300
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- - name: Create a lifecycle policy, with noncurrent transition storage class
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- noncurrent_version_transition_days: 300
- noncurrent_version_storage_class: standard_ia
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is changed
- # ============================================================
- - name: Create a lifecycle policy, with noncurrent transition storage class (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- noncurrent_version_storage_class: standard_ia
- noncurrent_version_transition_days: 300
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- - name: Create a lifecycle policy, with noncurrent transitions
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- noncurrent_version_transitions:
- - transition_days: 30
- storage_class: standard_ia
- - transition_days: 60
- storage_class: onezone_ia
- - transition_days: 90
- storage_class: glacier
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is changed
- # ============================================================
- - name: Create a lifecycle policy, with noncurrent transitions (idempotency)
- s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- noncurrent_version_transitions:
- - transition_days: 30
- storage_class: standard_ia
- - transition_days: 60
- storage_class: onezone_ia
- - transition_days: 90
- storage_class: glacier
- prefix: /something
- <<: *aws_connection_info
- register: output
-
- - assert:
- that:
- - output is not changed
- # ============================================================
- # test all the examples
- # Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
- - s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- expiration_days: 30
- prefix: /logs/
- status: enabled
- <<: *aws_connection_info
- state: present
-
- # Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
- - s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- transition_days: 7
- expiration_days: 90
- prefix: /logs/
- status: enabled
- <<: *aws_connection_info
- state: present
-
- # Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 30 Dec 2020 and then delete on 30 Dec 2030.
- # Note that midnight GMT must be specified.
- # Be sure to quote your date strings
- - s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- transition_date: "2020-12-30T00:00:00.000Z"
- expiration_date: "2030-12-30T00:00:00.000Z"
- prefix: /logs/
- status: enabled
- <<: *aws_connection_info
- state: present
-
- # Disable the rule created above
- - s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- prefix: /logs/
- status: disabled
- <<: *aws_connection_info
- state: present
-
- # Delete the lifecycle rule created above
- - s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- <<: *aws_connection_info
- prefix: /logs/
- state: absent
-
- # Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
- - s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- prefix: /backups/
- storage_class: standard_ia
- transition_days: 31
- state: present
- <<: *aws_connection_info
- status: enabled
-
- # Configure a lifecycle rule to transition files to infrequent access after 30 days and glacier after 90
- - s3_lifecycle:
- name: "{{ resource_prefix }}-testbucket-ansible"
- prefix: /other_logs/
- state: present
- <<: *aws_connection_info
- status: enabled
- transitions:
- - transition_days: 30
- storage_class: standard_ia
- - transition_days: 90
- storage_class: glacier
- # ============================================================
- always:
- - name: Ensure all buckets are deleted
- s3_bucket:
- name: "{{item}}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
- with_items:
- - "{{ resource_prefix }}-testbucket-ansible"
diff --git a/test/integration/targets/s3_logging/aliases b/test/integration/targets/s3_logging/aliases
deleted file mode 100644
index 3431a6a542..0000000000
--- a/test/integration/targets/s3_logging/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/aws
-#shippable/aws/group1
-# when running tests we saw an ~20% failure rate
-unsupported
diff --git a/test/integration/targets/s3_logging/defaults/main.yml b/test/integration/targets/s3_logging/defaults/main.yml
deleted file mode 100644
index a0f9b7b359..0000000000
--- a/test/integration/targets/s3_logging/defaults/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-test_bucket: '{{ resource_prefix }}-testbucket'
-log_bucket_1: '{{ resource_prefix }}-logs-1'
-log_bucket_2: '{{ resource_prefix }}-logs-2'
diff --git a/test/integration/targets/s3_logging/tasks/main.yml b/test/integration/targets/s3_logging/tasks/main.yml
deleted file mode 100644
index 3fc5a919b9..0000000000
--- a/test/integration/targets/s3_logging/tasks/main.yml
+++ /dev/null
@@ -1,203 +0,0 @@
----
-# Integration tests for s3_logging
-#
-# Notes:
-# - s3_logging doesn't support check_mode and the only output is 'changed'
-# - During initial testing we hit issues with boto reporting
-# "You must give the log-delivery group WRITE and READ_ACP permissions
-# to the target bucket"
-# a long term solution might be to port s3_logging to AnsibleAWSModule
-# so we can add retries
-#
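-# a minimal sketch of such a port (hypothetical, for illustration only;
-# AnsibleAWSModule and AWSRetry are the existing helpers in
-# ansible.module_utils, the rest of the module is elided):
-#
-#   from ansible.module_utils.aws.core import AnsibleAWSModule
-#   from ansible.module_utils.ec2 import AWSRetry
-#
-#   module = AnsibleAWSModule(argument_spec=dict(name=dict(required=True)))
-#   s3 = module.client('s3', retry_decorator=AWSRetry.jittered_backoff(retries=10))
-#   # calls made with aws_retry=True are retried with jittered backoff
-#   s3.put_bucket_logging(aws_retry=True, Bucket=module.params['name'],
-#                         BucketLoggingStatus={'LoggingEnabled': {
-#                             'TargetBucket': 'my-log-bucket',  # hypothetical bucket
-#                             'TargetPrefix': ''}})
-#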
-- module_defaults:
- group/aws:
- aws_access_key: '{{ aws_access_key | default(omit) }}'
- aws_secret_key: '{{ aws_secret_key | default(omit) }}'
- security_token: '{{ security_token | default(omit) }}'
- region: '{{ aws_region | default(omit) }}'
- block:
-
- # ============================================================
-
- - name: Try to enable logging without providing target_bucket
- s3_logging:
- state: present
- name: '{{ test_bucket }}'
- register: result
- ignore_errors: yes
-
- - assert:
- that:
- - result is failed
-
- # ============================================================
- - name: Create simple s3_bucket to be logged
- s3_bucket:
- state: present
- name: '{{ test_bucket }}'
- register: output
-
- - assert:
- that:
- - output is changed
- - output.name == test_bucket
-
- - name: Create simple s3_bucket as target for logs
- s3_bucket:
- state: present
- name: '{{ log_bucket_1 }}'
- register: output
-
- - assert:
- that:
- - output is changed
- - output.name == log_bucket_1
-
- - name: Create simple s3_bucket as second target for logs
- s3_bucket:
- state: present
- name: '{{ log_bucket_2 }}'
- register: output
-
- - assert:
- that:
- - output is changed
- - output.name == log_bucket_2
-
-# ============================================================
-
- - name: Enable logging
- s3_logging:
- state: present
- name: '{{ test_bucket }}'
- target_bucket: '{{ log_bucket_1 }}'
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: Enable logging idempotency
- s3_logging:
- state: present
- name: '{{ test_bucket }}'
- target_bucket: '{{ log_bucket_1 }}'
- register: result
-
- - assert:
- that:
- - result is not changed
-
-# ============================================================
-
- - name: Change logging bucket
- s3_logging:
- state: present
- name: '{{ test_bucket }}'
- target_bucket: '{{ log_bucket_2 }}'
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: Change logging bucket idempotency
- s3_logging:
- state: present
- name: '{{ test_bucket }}'
- target_bucket: '{{ log_bucket_2 }}'
- register: result
-
- - assert:
- that:
- - result is not changed
-
-# ============================================================
-
- - name: Change logging prefix
- s3_logging:
- state: present
- name: '{{ test_bucket }}'
- target_bucket: '{{ log_bucket_2 }}'
- target_prefix: '/{{ resource_prefix }}/'
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: Change logging prefix idempotency
- s3_logging:
- state: present
- name: '{{ test_bucket }}'
- target_bucket: '{{ log_bucket_2 }}'
- target_prefix: '/{{ resource_prefix }}/'
- register: result
-
- - assert:
- that:
- - result is not changed
-
-# ============================================================
-
- - name: Remove logging prefix
- s3_logging:
- state: present
- name: '{{ test_bucket }}'
- target_bucket: '{{ log_bucket_2 }}'
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: Remove logging prefix idempotency
- s3_logging:
- state: present
- name: '{{ test_bucket }}'
- target_bucket: '{{ log_bucket_2 }}'
- register: result
-
- - assert:
- that:
- - result is not changed
-
-# ============================================================
-
- - name: Disable logging
- s3_logging:
- state: absent
- name: '{{ test_bucket }}'
- register: result
-
- - assert:
- that:
- - result is changed
-
- - name: Disable logging idempotency
- s3_logging:
- state: absent
- name: '{{ test_bucket }}'
- register: result
-
- - assert:
- that:
- - result is not changed
-
-# ============================================================
- always:
- - name: Delete bucket being logged
- s3_bucket:
- name: '{{ test_bucket }}'
- state: absent
- ignore_errors: yes
- - name: Delete first bucket containing logs
- s3_bucket:
- name: '{{ log_bucket_1 }}'
- state: absent
- ignore_errors: yes
- - name: Delete second bucket containing logs
- s3_bucket:
- name: '{{ log_bucket_2 }}'
- state: absent
- ignore_errors: yes
diff --git a/test/integration/targets/script_inventory_ec2/aliases b/test/integration/targets/script_inventory_ec2/aliases
deleted file mode 100644
index 092d6ac64b..0000000000
--- a/test/integration/targets/script_inventory_ec2/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-shippable/posix/group2
-needs/file/contrib/inventory/ec2.py
diff --git a/test/integration/targets/script_inventory_ec2/ec2.sh b/test/integration/targets/script_inventory_ec2/ec2.sh
deleted file mode 100755
index 9ae9dee58a..0000000000
--- a/test/integration/targets/script_inventory_ec2/ec2.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-# Wrapper to use the correct Python interpreter and support code coverage.
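-# NB: "python.py" below is the python wrapper that ansible-test's injector puts on PATH; not a typo.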
-ABS_SCRIPT=$(python -c "import os; print(os.path.abspath('../../../../contrib/inventory/ec2.py'))")
-cd "${OUTPUT_DIR}"
-python.py "${ABS_SCRIPT}" "$@"
diff --git a/test/integration/targets/script_inventory_ec2/inventory_diff.py b/test/integration/targets/script_inventory_ec2/inventory_diff.py
deleted file mode 100755
index 3aaeff50b4..0000000000
--- a/test/integration/targets/script_inventory_ec2/inventory_diff.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-import sys
-
-
-def check_hosts(contrib, plugin):
- contrib_hosts = sorted(contrib['_meta']['hostvars'].keys())
- plugin_hosts = sorted(plugin['_meta']['hostvars'].keys())
- assert contrib_hosts == plugin_hosts
- return contrib_hosts, plugin_hosts
-
-
-def check_groups(contrib, plugin):
- contrib_groups = set(contrib.keys())
- plugin_groups = set(plugin.keys())
- missing_groups = contrib_groups.difference(plugin_groups)
- if missing_groups:
- print("groups: %s are missing from the plugin" % missing_groups)
- assert not missing_groups
- return contrib_groups, plugin_groups
-
-
-def check_host_vars(key, value, plugin, host):
- # tags are a dict in the plugin
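- # e.g. the script's ec2_tag_Name == 'foo' must appear as tags['Name'] == 'foo' in the plugin output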
- if key.startswith('ec2_tag'):
- print('assert tag', key, value)
- assert 'tags' in plugin['_meta']['hostvars'][host], 'b file does not have tags in host'
- btags = plugin['_meta']['hostvars'][host]['tags']
- tagkey = key.replace('ec2_tag_', '')
- assert tagkey in btags, '%s tag not in b file host tags' % tagkey
- assert value == btags[tagkey], '%s != %s' % (value, btags[tagkey])
- else:
- print('assert var', key, value, key in plugin['_meta']['hostvars'][host], plugin['_meta']['hostvars'][host].get(key))
- assert key in plugin['_meta']['hostvars'][host], "%s not in b's %s hostvars" % (key, host)
- assert value == plugin['_meta']['hostvars'][host][key], "%s != %s" % (value, plugin['_meta']['hostvars'][host][key])
-
-
-def main():
- # a should be the source of truth (the script output)
- a = sys.argv[1]
- # b should be the thing to check (the plugin output)
- b = sys.argv[2]
-
- with open(a, 'r') as f:
- adata = json.loads(f.read())
- with open(b, 'r') as f:
- bdata = json.loads(f.read())
-
- # all hosts should be present obviously
- ahosts, bhosts = check_hosts(adata, bdata)
-
- # all groups should be present obviously
- agroups, bgroups = check_groups(adata, bdata)
-
- # check host vars can be reconstructed
- for ahost in ahosts:
- contrib_host_vars = adata['_meta']['hostvars'][ahost]
- for key, value in contrib_host_vars.items():
- check_host_vars(key, value, bdata, ahost)
-
-
-if __name__ == "__main__":
- main()
diff --git a/test/integration/targets/script_inventory_ec2/lib/__init__.py b/test/integration/targets/script_inventory_ec2/lib/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/__init__.py
+++ /dev/null
diff --git a/test/integration/targets/script_inventory_ec2/lib/boto/__init__.py b/test/integration/targets/script_inventory_ec2/lib/boto/__init__.py
deleted file mode 100644
index ede07eeae5..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/boto/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import boto.exceptions as exceptions # pylint: disable=useless-import-alias
-import boto.session as session # pylint: disable=useless-import-alias
diff --git a/test/integration/targets/script_inventory_ec2/lib/boto/ec2/__init__.py b/test/integration/targets/script_inventory_ec2/lib/boto/ec2/__init__.py
deleted file mode 100644
index e590be6395..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/boto/ec2/__init__.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# boto2
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from boto.mocks.instances import BotoInstance, Reservation
-
-
-class Region(object):
- name = None
-
- def __init__(self, name):
- self.name = name
-
-
-class Connection(object):
- region = None
- instances = None
-
- def __init__(self, **kwargs):
- self.reservations = [Reservation(
- owner_id='123456789012',
- instance_ids=['i-0678e70402c0b434c', 'i-16a83b42f01c082a1'],
- region=kwargs['region']
- )]
-
- def get_all_instances(self, *args, **kwargs):
- return self.reservations
-
- def describe_cache_clusters(self, *args, **kwargs):
- return {}
-
- def get_all_tags(self, *args, **kwargs):
- tags = []
- resid = kwargs['filters']['resource-id'][0]
- for instance in self.reservations[0].instances:
- if instance.id == resid:
- tags = instance._tags[:]
- break
- return tags
-
-
-def connect_to_region(*args, **kwargs):
- return Connection(region=args[0])
-
-
-def regions():
- return [Region('us-east-1')]
diff --git a/test/integration/targets/script_inventory_ec2/lib/boto/elasticache/__init__.py b/test/integration/targets/script_inventory_ec2/lib/boto/elasticache/__init__.py
deleted file mode 100644
index 4da41601a4..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/boto/elasticache/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class Connection(object):
- def __init__(self):
- pass
-
- def get_all_instances(self, *args, **kwargs):
- return []
-
- def describe_cache_clusters(self, *args, **kwargs):
- return {
- 'DescribeCacheClustersResponse': {
- 'DescribeCacheClustersResult': {
- 'Marker': None,
- 'CacheClusters': []
- }
- }
- }
-
- def describe_replication_groups(self, *args, **kwargs):
- return {
- 'DescribeReplicationGroupsResponse': {
- 'DescribeReplicationGroupsResult': {
- 'ReplicationGroups': []
- }
- }
- }
-
-
-def connect_to_region(*args, **kwargs):
- return Connection()
diff --git a/test/integration/targets/script_inventory_ec2/lib/boto/exception.py b/test/integration/targets/script_inventory_ec2/lib/boto/exception.py
deleted file mode 100644
index 0179d5d75f..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/boto/exception.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class BotoServerError(Exception):
- pass
-
-
-class ClientError(Exception):
- pass
-
-
-class PartialCredentialsError(Exception):
- pass
-
-
-class ProfileNotFound(Exception):
- pass
-
-
-class BotoCoreError(Exception):
- pass
diff --git a/test/integration/targets/script_inventory_ec2/lib/boto/exceptions.py b/test/integration/targets/script_inventory_ec2/lib/boto/exceptions.py
deleted file mode 100644
index 0179d5d75f..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/boto/exceptions.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class BotoServerError(Exception):
- pass
-
-
-class ClientError(Exception):
- pass
-
-
-class PartialCredentialsError(Exception):
- pass
-
-
-class ProfileNotFound(Exception):
- pass
-
-
-class BotoCoreError(Exception):
- pass
diff --git a/test/integration/targets/script_inventory_ec2/lib/boto/mocks/__init__.py b/test/integration/targets/script_inventory_ec2/lib/boto/mocks/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/boto/mocks/__init__.py
+++ /dev/null
diff --git a/test/integration/targets/script_inventory_ec2/lib/boto/mocks/instances.py b/test/integration/targets/script_inventory_ec2/lib/boto/mocks/instances.py
deleted file mode 100644
index 9511a1dbaa..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/boto/mocks/instances.py
+++ /dev/null
@@ -1,348 +0,0 @@
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.module_utils.common._collections_compat import MutableMapping
-
-import datetime
-from dateutil.tz import tzutc
-import sys
-
-try:
- from ansible.parsing.yaml.objects import AnsibleUnicode
-except ImportError:
- AnsibleUnicode = str
-
-
-if sys.version_info[0] >= 3:
- unicode = str
-
-DNSDOMAIN = "ansible.amazon.com"
-
-
-class Reservation(object):
- def __init__(self, owner_id, instance_ids, region):
- stopped_instance = instance_ids[-1] if len(instance_ids) > 1 else None
- self.instances = []
- for instance_id in instance_ids:
- stopped = bool(instance_id == stopped_instance)
- self.instances.append(BotoInstance(instance_id=instance_id, owner_id=owner_id, region=region, stopped=stopped))
- self.owner_id = owner_id
-
-
-class Tag(object):
- res_id = None
- name = None
- value = None
-
- def __init__(self, res_id, name, value):
- self.res_id = res_id
- self.name = name
- self.value = value
-
-
-class SecurityGroup(object):
- name = 'sg_default'
- group_id = 'sg-00000'
- id = 'sg-00000'
-
- def __init__(self, group_id, group_name):
- self.name = group_name
- self.group_id = group_id
- self.id = self.group_id
-
- def __str__(self):
- return self.name
-
-
-class NetworkInterfaceBase(list):
-
- def __init__(self, owner_id=None, private_ip=None, subnet_id=None, vpc_id=None):
- self.description = 'Primary network interface'
- self.mac_address = '06:32:7e:30:3a:20'
- self.owner_id = owner_id
- self.private_ip_address = private_ip
- self.status = 'in-use'
- self.subnet_id = subnet_id
- self.vpc_id = vpc_id
-
- super(NetworkInterfaceBase, self).__init__([self.to_dict()])
-
- def to_dict(self):
-
- data = {}
- for attr in dir(self):
- if attr.startswith('__') or attr == 'boto3':
- continue
-
- val = getattr(self, attr)
-
- if callable(val):
- continue
-
- if self.boto3:
- attr = ''.join(x.capitalize() or '_' for x in attr.split('_'))
-
- data[attr] = val
-
- return data
-
-
-class Boto3NetworkInterface(NetworkInterfaceBase):
-
- boto3 = True
-
- def __init__(self, owner_id=None, public_ip=None, public_dns=None, private_ip=None, security_groups=None, subnet_id=None, vpc_id=None):
- self.association = {
- 'IpOwnerId': 'amazon',
- 'PublicDnsName': public_dns,
- 'PublicIp': public_ip
- }
- self.attachment = {
- 'AttachTime': datetime.datetime(2019, 2, 27, 19, 41, 49, tzinfo=tzutc()),
- 'AttachmentId': 'eni-attach-008fda539bfd1877d',
- 'DeleteOnTermination': True,
- 'DeviceIndex': 0,
- 'Status': 'attached'
- }
- self.groups = security_groups
- self.ipv6_addresses = [{'Ipv6Address': '2600:1f18:1af:f6a1:2c8d:7cf:3d14:1224'}]
- self.network_interface_id = 'eni-00abc58b929197984'
- self.private_ip_addresses = [{
- 'Association': {
- 'IpOwnerId': 'amazon',
- 'PublicDnsName': public_dns,
- 'PublicIp': public_ip
- },
- 'Primary': True,
- 'PrivateIpAddress': private_ip
- }]
- self.source_dest_check = True
-
- super(Boto3NetworkInterface, self).__init__(
- owner_id=owner_id,
- private_ip=private_ip,
- subnet_id=subnet_id,
- vpc_id=vpc_id
- )
-
-
-class BotoNetworkInterface(NetworkInterfaceBase):
-
- boto3 = False
-
- def __init__(self, owner_id=None, public_ip=None, public_dns=None, private_ip=None, subnet_id=None, vpc_id=None):
- self.tags = {}
- self.id = 'eni-00abc58b929197984'
- self.availability_zone = None
- self.requester_managed = False
- self.publicIp = public_ip
- self.publicDnsName = public_dns
- self.ipOwnerId = 'amazon'
- self.association = '\n '
- self.item = '\n '
-
- super(BotoNetworkInterface, self).__init__(
- owner_id=owner_id,
- private_ip=private_ip,
- subnet_id=subnet_id,
- vpc_id=vpc_id
- )
-
-
-class Volume(object):
- def __init__(self, volume_id):
- self.volume_id = volume_id
-
-
-class BlockDeviceMapping(MutableMapping):
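- # dict-like mock of boto's BlockDeviceMapping: maps device names to Volume objects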
- def __init__(self, devices):
- self.devices = {}  # per-instance mapping; a class-level dict would be shared across instances
- for device, volume_id in devices.items():
- self.devices[device] = Volume(volume_id)
-
- def __getitem__(self, key):
- return self.devices[key]
-
- def __setitem__(self, key, value):
- self.devices[key] = Volume(value)
-
- def __delitem__(self, key):
- del self.devices[key]
-
- def __iter__(self):
- return iter(self.devices)
-
- def __len__(self):
- return len(self.devices)
-
-
-class InstanceBase(object):
- def __init__(self, stopped=False):
- # set common ignored attribute to make sure instances have identical tags and security groups
- self._ignore_security_groups = {
- 'sg-0e1d2bd02b45b712e': 'a-sgname-with-hyphens',
- 'sg-ae5c262eb5c4d712e': 'name@with?invalid!chars'
- }
- self._ignore_tags = {
- 'tag-with-hyphens': 'value:with:colons',
- b'\xec\xaa\xb4'.decode('utf'): 'value1with@invalid:characters',
- 'tag;me': 'value@noplez',
- 'tag!notit': 'value<=ohwhy?'
- }
- if not stopped:
- self._ignore_state = {'Code': 16, 'Name': 'running'}
- else:
- self._ignore_state = {'Code': 80, 'Name': 'stopped'}
-
- # common attributes
- self.ami_launch_index = '0'
- self.architecture = 'x86_64'
- self.client_token = ''
- self.ebs_optimized = False
- self.hypervisor = 'xen'
- self.image_id = 'ami-0ac019f4fcb7cb7e6'
- self.instance_type = 't2.micro'
- self.key_name = 'k!y:2/-n@me'
- self.private_dns_name = 'ip-20-0-0-20.ec2.internal'
- self.private_ip_address = '20.0.0.20'
- self.product_codes = []
- if not stopped:
- self.public_dns_name = 'ec2-12-3-456-78.compute-1.amazonaws.com'
- else:
- self.public_dns_name = ''
- self.root_device_name = '/dev/sda1'
- self.root_device_type = 'ebs'
- self.subnet_id = 'subnet-09564ba2121bca7bd'
- self.virtualization_type = 'hvm'
- self.vpc_id = 'vpc-01ae527fabc81dd04'
-
- def to_dict(self):
-
- data = {}
- for attr in dir(self):
- if attr.startswith(('__', '_ignore')) or attr in ['to_dict', 'boto3']:
- continue
-
- val = getattr(self, attr)
-
- if self.boto3:
- attr = ''.join(x.capitalize() or '_' for x in attr.split('_'))
-
- data[attr] = val
-
- return data
-
-
-class BotoInstance(InstanceBase):
-
- boto3 = False
-
- def __init__(self, instance_id=None, owner_id=None, region=None, stopped=False):
- super(BotoInstance, self).__init__(stopped=stopped)
-
- self._in_monitoring_element = False
- self._tags = [Tag(instance_id, k, v) for k, v in self._ignore_tags.items()]
- self.block_device_mapping = BlockDeviceMapping({'/dev/sda1': 'vol-044a646a9292c82af'})
- self.dns_name = 'ec2-12-3-456-78.compute-1.amazonaws.com'
- self.eventsSet = None
- self.group_name = None
- self.groups = [SecurityGroup(k, v) for k, v in sorted(self._ignore_security_groups.items())]
- self.id = instance_id
- self.instance_profile = {
- 'arn': 'arn:aws:iam::{0}:instance-profile/developer'.format(owner_id),
- 'id': 'ABCDE2GHIJKLMN8PQRSTU'
- }
- if not stopped:
- self.ip_address = '12.3.456.7'
- else:
- self.ip_address = '' # variable is returned as empty by boto if the instance is stopped
- self.item = '\n '
- self.kernel = None
- self.launch_time = '2019-02-27T19:41:49.000Z'
- self.monitored = False
- self.monitoring = '\n '
- self.monitoring_state = 'disabled'
- self.persistent = False
- self.placement = region + 'e'
- self.platform = None
- self.ramdisk = None
- self.reason = ''
- self.region = region
- self.requester_id = None
- self.sourceDestCheck = 'true'
- self.spot_instance_request_id = None
- self.state = self._ignore_state['Name']
- self.state_code = self._ignore_state['Code']
- if not stopped:
- self.state_reason = None
- else:
- self.state_reason = {
- 'code': 'Client.UserInitiatedShutdown',
- 'message': 'Client.UserInitiatedShutdown: User initiated shutdown'
- }
- self.tags = dict(self._ignore_tags)
-
- self.interfaces = BotoNetworkInterface(
- owner_id=owner_id,
- public_ip=self.ip_address,
- public_dns=self.public_dns_name,
- private_ip=self.private_ip_address,
- subnet_id=self.subnet_id,
- vpc_id=self.vpc_id,
- )
-
-
-class Boto3Instance(InstanceBase):
-
- boto3 = True
-
- def __init__(self, instance_id=None, owner_id=None, region=None, stopped=False):
- super(Boto3Instance, self).__init__(stopped=stopped)
-
- self.block_device_mappings = [{
- 'DeviceName': '/dev/sda1',
- 'Ebs': {
- 'AttachTime': datetime.datetime(2019, 2, 27, 19, 41, 50, tzinfo=tzutc()),
- 'DeleteOnTermination': True,
- 'Status': 'attached',
- 'VolumeId': 'vol-044a646a9292c82af'
- }
- }]
- self.capacity_reservation_specification = {'CapacityReservationPreference': 'open'}
- self.cpu_options = {'CoreCount': 1, 'ThreadsPerCore': 1}
- self.ena_support = True
- self.hibernation_options = {'Configured': False}
- self.iam_instance_profile = {
- 'Arn': 'arn:aws:iam::{0}:instance-profile/developer'.format(owner_id),
- 'Id': 'ABCDE2GHIJKLMN8PQRSTU'
- }
- self.instance_id = instance_id
- self.launch_time = datetime.datetime(2019, 2, 27, 19, 41, 49, tzinfo=tzutc())
- self.monitoring = {'State': 'disabled'}
- self.placement = {'AvailabilityZone': region + 'e', 'GroupName': '', 'Tenancy': 'default'}
- if not stopped:
- self.public_ip_address = '12.3.456.7' # variable is not returned by boto3 if the instance is stopped
- self.security_groups = [{'GroupId': key, 'GroupName': value} for key, value in self._ignore_security_groups.items()]
- self.source_dest_check = True
- self.state = dict(self._ignore_state)
- if not stopped:
- self.state_transition_reason = ''
- else:
- self.state_transition_reason = 'User initiated (2019-02-11 12:49:13 GMT)'
- self.state_reason = { # this variable is only returned by AWS if the instance is stopped
- 'Code': 'Client.UserInitiatedShutdown',
- 'Message': 'Client.UserInitiatedShutdown: User initiated shutdown'
- }
- self.tags = [{'Key': k, 'Value': v} for k, v in self._ignore_tags.items()]
-
- self.network_interfaces = Boto3NetworkInterface(
- owner_id=owner_id,
- public_ip=getattr(self, 'public_ip_address', ''),
- public_dns=self.public_dns_name,
- private_ip=self.private_ip_address,
- security_groups=self.security_groups,
- subnet_id=self.subnet_id,
- vpc_id=self.vpc_id
- )
diff --git a/test/integration/targets/script_inventory_ec2/lib/boto/rds.py b/test/integration/targets/script_inventory_ec2/lib/boto/rds.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/boto/rds.py
+++ /dev/null
diff --git a/test/integration/targets/script_inventory_ec2/lib/boto/route53.py b/test/integration/targets/script_inventory_ec2/lib/boto/route53.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/boto/route53.py
+++ /dev/null
diff --git a/test/integration/targets/script_inventory_ec2/lib/boto/session.py b/test/integration/targets/script_inventory_ec2/lib/boto/session.py
deleted file mode 100644
index 8e3d79f660..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/boto/session.py
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/env python
-# boto3
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from boto.mocks.instances import Boto3Instance
-
-
-class Paginator(object):
- def __init__(self, datalist):
- self.datalist = datalist
-
- def paginate(self, *args, **kwargs):
- '''
- {'Filters': [{'Name': 'instance-state-name',
- 'Values': ['running', 'pending', 'stopping', 'stopped']}]}
- '''
- filters = kwargs.get('Filters', [])
- state_filters = [f['Values'] for f in filters if f['Name'] == 'instance-state-name']
- if not state_filters:
- self.instance_states = ['running', 'pending', 'stopping', 'stopped']
- else:
- self.instance_states = state_filters[0]
- return self
-
- def build_full_result(self):
- filtered_states = set([x.state['Name'] for x in self.datalist]).difference(set(self.instance_states))
- return {'Reservations': [{
- 'Instances': [x.to_dict() for x in self.datalist if x.state['Name'] not in filtered_states],
- 'OwnerId': '123456789012',
- 'RequesterId': 'AIDAIS3MMFPO53D2T3WWE',
- 'ReservationId': 'r-07889670a282de964'
- }]}
-
-
-class Client(object):
- cloud = None
- region = None
-
- def __init__(self, *args, **kwargs):
- self.cloud = args[0]
- self.region = args[1]
-
- def get_paginator(self, method):
- if method == 'describe_instances':
- return Paginator(
- [Boto3Instance(instance_id='i-0678e70402c0b434c', owner_id='123456789012', region=self.region),
- Boto3Instance(instance_id='i-16a83b42f01c082a1', owner_id='123456789012', region=self.region, stopped=True)]
- )
-
-
-class Session(object):
- profile_name = None
- region = None
-
- def __init__(self, *args, **kwargs):
- for k, v in kwargs.items():
- if hasattr(self, k):
- setattr(self, k, v)
-
- def client(self, *args, **kwargs):
- return Client(*args, **kwargs)
-
- def get_config_variables(self, key):
- if hasattr(self, key):
- return getattr(self, key)
-
- def get_available_regions(self, *args):
- return ['us-east-1']
-
- def get_credentials(self, *args, **kwargs):
- raise Exception('not implemented')
-
-
-def get_session(*args, **kwargs):
- return Session(*args, **kwargs)
diff --git a/test/integration/targets/script_inventory_ec2/lib/boto/sts.py b/test/integration/targets/script_inventory_ec2/lib/boto/sts.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/integration/targets/script_inventory_ec2/lib/boto/sts.py
+++ /dev/null
diff --git a/test/integration/targets/script_inventory_ec2/runme.sh b/test/integration/targets/script_inventory_ec2/runme.sh
deleted file mode 100755
index 0577295589..0000000000
--- a/test/integration/targets/script_inventory_ec2/runme.sh
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-source virtualenv.sh
-
-pip install "python-dateutil>=2.1,<2.7.0" jmespath "Jinja2==2.10"
-
-# create boto3 symlinks
-ln -s "$(pwd)/lib/boto" "$(pwd)/lib/boto3"
-ln -s "$(pwd)/lib/boto" "$(pwd)/lib/botocore"
-
-# override boto's import path(s) so the script and plugin import the mocks from ./lib instead of the real SDKs
-export PYTHONPATH
-PYTHONPATH="$(pwd)/lib:$PYTHONPATH"
-
-#################################################
-# RUN THE SCRIPT
-#################################################
-
-# run the script first
-cat << EOF > "$OUTPUT_DIR/ec2.ini"
-[ec2]
-regions = us-east-1
-cache_path = $(pwd)/.cache
-cache_max_age = 0
-group_by_tag_none = False
-
-[credentials]
-aws_access_key_id = FOO
-aws_secret_access_key = BAR
-EOF
-
-ANSIBLE_JINJA2_NATIVE=1 ansible-inventory -vvvv -i ./ec2.sh --list --output="$OUTPUT_DIR/script.out"
-RC=$?
-if [[ $RC != 0 ]]; then
- exit $RC
-fi
-
-#################################################
-# RUN THE PLUGIN
-#################################################
-
-# run the plugin second
-export ANSIBLE_INVENTORY_ENABLED=aws_ec2
-export ANSIBLE_INVENTORY=test.aws_ec2.yml
-export AWS_ACCESS_KEY_ID=FOO
-export AWS_SECRET_ACCESS_KEY=BAR
-export ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=never
-
-cat << EOF > "$OUTPUT_DIR/test.aws_ec2.yml"
-plugin: aws_ec2
-cache: False
-use_contrib_script_compatible_sanitization: True
-strict: True
-regions:
- - us-east-1
-hostnames:
- - network-interface.addresses.association.public-ip
- - dns-name
-filters:
- instance-state-name: running
-compose:
- # vars that don't exist anymore in any meaningful way
- ec2_item: undefined | default("")
- ec2_monitoring: undefined | default("")
- ec2_previous_state: undefined | default("")
- ec2_previous_state_code: undefined | default(0)
- ec2__in_monitoring_element: undefined | default(false)
- # the following three will be accessible again after #53645
- ec2_requester_id: undefined | default("")
- ec2_eventsSet: undefined | default("")
- ec2_persistent: undefined | default(false)
-
- # vars that change
- ansible_host: public_ip_address
- ec2_block_devices: dict(block_device_mappings | map(attribute='device_name') | map('basename') | list | zip(block_device_mappings | map(attribute='ebs.volume_id') | list))
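-  # e.g. yields {'sda1': 'vol-044a646a9292c82af'} for the mocked instance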
- ec2_dns_name: public_dns_name
- ec2_group_name: placement['group_name']
- ec2_id: instance_id
- ec2_instance_profile: iam_instance_profile | default("")
- ec2_ip_address: public_ip_address
- ec2_kernel: kernel_id | default("")
- ec2_monitored: monitoring['state'] in ['enabled', 'pending']
- ec2_monitoring_state: monitoring['state']
- ec2_account_id: owner_id
- ec2_placement: placement['availability_zone']
- ec2_ramdisk: ramdisk_id | default("")
- ec2_reason: state_transition_reason
- ec2_security_group_ids: security_groups | map(attribute='group_id') | list | sort | join(',')
- ec2_security_group_names: security_groups | map(attribute='group_name') | list | sort | join(',')
- ec2_state: state['name']
- ec2_state_code: state['code']
- ec2_state_reason: state_reason['message'] if state_reason is defined else ""
-  ec2_sourceDestCheck: source_dest_check | lower | string # butchered snake_case is deliberate, not a typo.
-
- # vars that just need ec2_ prefix
- ec2_ami_launch_index: ami_launch_index | string
- ec2_architecture: architecture
- ec2_client_token: client_token
- ec2_ebs_optimized: ebs_optimized
- ec2_hypervisor: hypervisor
- ec2_image_id: image_id
- ec2_instance_type: instance_type
- ec2_key_name: key_name
- ec2_launch_time: 'launch_time | regex_replace(" ", "T") | regex_replace("(\+)(\d\d):(\d)(\d)$", ".\g<2>\g<3>Z")'
- ec2_platform: platform | default("")
- ec2_private_dns_name: private_dns_name
- ec2_private_ip_address: private_ip_address
- ec2_public_dns_name: public_dns_name
- ec2_region: placement['region']
- ec2_root_device_name: root_device_name
- ec2_root_device_type: root_device_type
- ec2_spot_instance_request_id: spot_instance_request_id | default("")
- ec2_subnet_id: subnet_id
- ec2_virtualization_type: virtualization_type
- ec2_vpc_id: vpc_id
- tags: dict(tags.keys() | map('regex_replace', '[^A-Za-z0-9\_]', '_') | list | zip(tags.values() | list))
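-  # e.g. the mock tag key 'tag;me' is sanitized to 'tag_me' (values are left as-is)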
-
-keyed_groups:
- - key: '"ec2"'
- separator: ""
- - key: 'instance_id'
- separator: ""
- - key: tags
- prefix: tag
- - key: key_name | regex_replace('-', '_')
- prefix: key
- - key: placement['region']
- separator: ""
- - key: placement['availability_zone']
- separator: ""
- - key: platform | default('undefined')
- prefix: platform
- - key: vpc_id | regex_replace('-', '_')
- prefix: vpc_id
- - key: instance_type
- prefix: type
- - key: "image_id | regex_replace('-', '_')"
- separator: ""
- - key: security_groups | map(attribute='group_name') | map("regex_replace", "-", "_") | list
- prefix: security_group
-EOF
-
-ANSIBLE_JINJA2_NATIVE=1 ansible-inventory -vvvv -i "$OUTPUT_DIR/test.aws_ec2.yml" --list --output="$OUTPUT_DIR/plugin.out"
-
-#################################################
-# DIFF THE RESULTS
-#################################################
-
-./inventory_diff.py "$OUTPUT_DIR/script.out" "$OUTPUT_DIR/plugin.out"
diff --git a/test/integration/targets/sns/aliases b/test/integration/targets/sns/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/sns/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/sns/defaults/main.yml b/test/integration/targets/sns/defaults/main.yml
deleted file mode 100644
index 59ef656491..0000000000
--- a/test/integration/targets/sns/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
-sns_topic_name: "{{ resource_prefix }}-topic"
diff --git a/test/integration/targets/sns/tasks/main.yml b/test/integration/targets/sns/tasks/main.yml
deleted file mode 100644
index c8ba9abf77..0000000000
--- a/test/integration/targets/sns/tasks/main.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-- name: set up AWS connection info
- set_fact:
- aws_connection_info: &aws_connection_info
- aws_secret_key: "{{ aws_secret_key }}"
- aws_access_key: "{{ aws_access_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- no_log: true
-
-- block:
- - name: Create an SNS topic
- sns_topic:
- name: "{{ sns_topic_name }}"
- display_name: "Test topic"
- <<: *aws_connection_info
- register: sns_topic
-
- - name: Publish to the topic by name
- sns:
- topic: "{{ sns_topic_name }}"
- subject: Test message
- msg: Default test message
- http: Test message for HTTP
- https: Test message for HTTPS
- email: Test message for email
- email_json: Test message for email-json
- sms: Short test message for SMS
- sqs: Test message for SQS
- application: Test message for apps
- lambda: Test message for Lambda
- <<: *aws_connection_info
- register: result
-
- - name: Check for expected result structure
- assert:
- that:
- - result is not changed
- - "'message_id' in result"
-
- - name: Publish to the topic by ARN
- sns:
- topic: "{{ sns_topic.sns_arn }}"
- subject: Second test message
- msg: Simple test message
- <<: *aws_connection_info
-
- always:
- - name: Remove topic
- sns_topic:
- name: "{{ sns_topic_name }}"
- state: absent
- <<: *aws_connection_info
- ignore_errors: yes
diff --git a/test/integration/targets/sns_topic/aliases b/test/integration/targets/sns_topic/aliases
deleted file mode 100644
index 5692719518..0000000000
--- a/test/integration/targets/sns_topic/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-unsupported
diff --git a/test/integration/targets/sns_topic/defaults/main.yml b/test/integration/targets/sns_topic/defaults/main.yml
deleted file mode 100644
index afcc5dc8cb..0000000000
--- a/test/integration/targets/sns_topic/defaults/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-sns_topic_topic_name: "{{ resource_prefix }}-topic"
-sns_topic_subscriptions:
- - endpoint: "{{ sns_topic_subscriber_arn }}"
- protocol: "lambda"
-sns_topic_third_party_topic_arn: "arn:aws:sns:us-east-1:806199016981:AmazonIpSpaceChanged"
-sns_topic_third_party_region: "{{ sns_topic_third_party_topic_arn.split(':')[3] }}"
-sns_topic_lambda_function: "sns_topic_lambda"
-sns_topic_lambda_name: "{{ resource_prefix }}-{{ sns_topic_lambda_function }}"
diff --git a/test/integration/targets/sns_topic/files/lambda-policy.json b/test/integration/targets/sns_topic/files/lambda-policy.json
deleted file mode 100644
index ac1e64ac04..0000000000
--- a/test/integration/targets/sns_topic/files/lambda-policy.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "Version":"2012-10-17",
- "Statement":[
- {
- "Effect":"Allow",
- "Action":[
- "logs:CreateLogStream",
- "logs:CreateLogGroup",
- "logs:PutLogEvents"
- ],
- "Resource":"*"
- }
- ]
-}
diff --git a/test/integration/targets/sns_topic/files/lambda-trust-policy.json b/test/integration/targets/sns_topic/files/lambda-trust-policy.json
deleted file mode 100644
index fb84ae9de1..0000000000
--- a/test/integration/targets/sns_topic/files/lambda-trust-policy.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "Service": "lambda.amazonaws.com"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-}
diff --git a/test/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py b/test/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py
deleted file mode 100644
index c3d31c26eb..0000000000
--- a/test/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from __future__ import print_function
-
-
-def handler(event, context):
- print(event)
- return True
diff --git a/test/integration/targets/sns_topic/tasks/main.yml b/test/integration/targets/sns_topic/tasks/main.yml
deleted file mode 100644
index 58b89b2b14..0000000000
--- a/test/integration/targets/sns_topic/tasks/main.yml
+++ /dev/null
@@ -1,360 +0,0 @@
-- module_defaults:
- group/aws:
- aws_secret_key: "{{ aws_secret_key }}"
- aws_access_key: "{{ aws_access_key }}"
- security_token: "{{ security_token|default(omit) }}"
- region: "{{ aws_region }}"
- block:
- # This should exist, but there's no expectation that the test user should be able to
- # create/update this role, merely validate that it's there.
- # Use ansible -m iam_role -a 'name=ansible_lambda_role
- # assume_role_policy_document={{ lookup("file", "test/integration/targets/sns_topic/files/lambda-trust-policy.json", convert_data=False) }}
- # ' -vvv localhost
- # to create this through more privileged credentials before running this test suite.
- - name: create minimal lambda role
- iam_role:
- name: ansible_lambda_role
- assume_role_policy_document: "{{ lookup('file', 'lambda-trust-policy.json', convert_data=False) }}"
- create_instance_profile: no
- register: iam_role
-
- - name: pause if role was created
- pause:
- seconds: 10
- when: iam_role is changed
-
- - name: ensure lambda role policy exists
- iam_policy:
- policy_name: "ansible_lambda_role_policy"
- iam_name: ansible_lambda_role
- iam_type: role
- policy_json: "{{ lookup('file', 'lambda-policy.json') }}"
- state: present
- register: iam_policy
-
- - name: pause if policy was created
- pause:
- seconds: 10
- when: iam_policy is changed
-
- - name: create topic
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My topic name"
- register: sns_topic_create
-
- - name: assert that creation worked
- assert:
- that:
- - sns_topic_create.changed
-
- - name: set sns_arn fact
- set_fact:
- sns_arn: "{{ sns_topic_create.sns_arn }}"
-
- - name: create topic again (expect changed=False)
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My topic name"
- register: sns_topic_no_change
-
- - name: assert that recreation had no effect
- assert:
- that:
- - not sns_topic_no_change.changed
- - sns_topic_no_change.sns_arn == sns_topic_create.sns_arn
-
- - name: update display name
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My new topic name"
- register: sns_topic_update_name
-
- - name: assert that updating name worked
- assert:
- that:
- - sns_topic_update_name.changed
- - 'sns_topic_update_name.sns_topic.display_name == "My new topic name"'
-
- - name: add policy
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My new topic name"
- policy: "{{ lookup('template', 'initial-policy.json') }}"
- register: sns_topic_add_policy
-
- - name: assert that adding policy worked
- assert:
- that:
- - sns_topic_add_policy.changed
-
- - name: rerun same policy
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My new topic name"
- policy: "{{ lookup('template', 'initial-policy.json') }}"
- register: sns_topic_rerun_policy
-
- - name: assert that rerunning policy had no effect
- assert:
- that:
- - not sns_topic_rerun_policy.changed
-
- - name: update policy
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My new topic name"
- policy: "{{ lookup('template', 'updated-policy.json') }}"
- register: sns_topic_update_policy
-
- - name: assert that updating policy worked
- assert:
- that:
- - sns_topic_update_policy.changed
-
- - name: add delivery policy
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My new topic name"
- delivery_policy:
- http:
- defaultHealthyRetryPolicy:
- minDelayTarget: 20
- maxDelayTarget: 20
- numRetries: 3
- numMaxDelayRetries: 0
- numNoDelayRetries: 0
- numMinDelayRetries: 0
- backoffFunction: 'linear'
- register: sns_topic_add_delivery_policy
-
- - name: assert that adding delivery policy worked
- vars:
- delivery_policy: '{{ sns_topic_add_delivery_policy.sns_topic.delivery_policy | from_json }}'
- assert:
- that:
- - sns_topic_add_delivery_policy.changed
- - delivery_policy.http.defaultHealthyRetryPolicy.minDelayTarget == 20
- - delivery_policy.http.defaultHealthyRetryPolicy.maxDelayTarget == 20
- - delivery_policy.http.defaultHealthyRetryPolicy.numRetries == 3
-
- - name: rerun same delivery policy
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My new topic name"
- delivery_policy:
- http:
- defaultHealthyRetryPolicy:
- minDelayTarget: 20
- maxDelayTarget: 20
- numRetries: 3
- numMaxDelayRetries: 0
- numNoDelayRetries: 0
- numMinDelayRetries: 0
- backoffFunction: 'linear'
- register: sns_topic_rerun_delivery_policy
-
- - name: assert that rerunning delivery_policy had no effect
- vars:
- delivery_policy: '{{ sns_topic_rerun_delivery_policy.sns_topic.delivery_policy | from_json }}'
- assert:
- that:
- - not sns_topic_rerun_delivery_policy.changed
- - delivery_policy.http.defaultHealthyRetryPolicy.minDelayTarget == 20
- - delivery_policy.http.defaultHealthyRetryPolicy.maxDelayTarget == 20
- - delivery_policy.http.defaultHealthyRetryPolicy.numRetries == 3
-
- - name: rerun a slightly different delivery policy
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My new topic name"
- delivery_policy:
- http:
- defaultHealthyRetryPolicy:
- minDelayTarget: 40
- maxDelayTarget: 40
- numRetries: 6
- numMaxDelayRetries: 0
- numNoDelayRetries: 0
- numMinDelayRetries: 0
- backoffFunction: 'linear'
- register: sns_topic_rerun_delivery_policy
-
- - name: assert that rerunning delivery_policy worked
- vars:
- delivery_policy: '{{ sns_topic_rerun_delivery_policy.sns_topic.delivery_policy | from_json }}'
- assert:
- that:
- - sns_topic_rerun_delivery_policy.changed
- - delivery_policy.http.defaultHealthyRetryPolicy.minDelayTarget == 40
- - delivery_policy.http.defaultHealthyRetryPolicy.maxDelayTarget == 40
- - delivery_policy.http.defaultHealthyRetryPolicy.numRetries == 6
-
- - name: create temp dir
- tempfile:
- state: directory
- register: tempdir
-
- - name: ensure zip file exists
- archive:
- path: "{{ lookup('first_found', sns_topic_lambda_function) }}"
- dest: "{{ tempdir.path }}/{{ sns_topic_lambda_function }}.zip"
- format: zip
-
- - name: create lambda for subscribing (only auto-subscribing target available)
- lambda:
- name: '{{ sns_topic_lambda_name }}'
- state: present
- zip_file: '{{ tempdir.path }}/{{ sns_topic_lambda_function }}.zip'
- runtime: 'python2.7'
- role: ansible_lambda_role
- handler: '{{ sns_topic_lambda_function }}.handler'
- register: lambda_result
-
- - set_fact:
- sns_topic_subscriber_arn: "{{ lambda_result.configuration.function_arn }}"
-
- - name: subscribe to topic
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My new topic name"
- purge_subscriptions: no
- subscriptions: "{{ sns_topic_subscriptions }}"
- register: sns_topic_subscribe
-
- - name: assert that subscribing worked
- assert:
- that:
- - sns_topic_subscribe.changed
- - sns_topic_subscribe.sns_topic.subscriptions|length == 1
-
- - name: run again with purge_subscriptions set to false
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My new topic name"
- purge_subscriptions: no
- register: sns_topic_no_purge
-
- - name: assert that not purging subscriptions had no effect
- assert:
- that:
- - not sns_topic_no_purge.changed
- - sns_topic_no_purge.sns_topic.subscriptions|length == 1
-
- - name: run again with purge_subscriptions set to true
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- display_name: "My new topic name"
- purge_subscriptions: yes
- register: sns_topic_purge
-
- - name: assert that purging subscriptions worked
- assert:
- that:
- - sns_topic_purge.changed
- - sns_topic_purge.sns_topic.subscriptions|length == 0
-
- - name: delete topic
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- state: absent
-
- - name: no-op with third party topic (effectively get existing subscriptions)
- sns_topic:
- name: "{{ sns_topic_third_party_topic_arn }}"
- region: "{{ sns_topic_third_party_region }}"
- register: third_party_topic
-
- - name: subscribe to third party topic
- sns_topic:
- name: "{{ sns_topic_third_party_topic_arn }}"
- subscriptions: "{{ sns_topic_subscriptions }}"
- region: "{{ sns_topic_third_party_region }}"
- register: third_party_topic_subscribe
-
- - name: assert that subscribing worked
- assert:
- that:
- - third_party_topic_subscribe is changed
- - (third_party_topic_subscribe.sns_topic.subscriptions|length) - (third_party_topic.sns_topic.subscriptions|length) == 1
-
- - name: attempt to change name of third party topic
- sns_topic:
- name: "{{ sns_topic_third_party_topic_arn }}"
- display_name: "This should not work"
- subscriptions: "{{ sns_topic_subscriptions }}"
- region: "{{ sns_topic_third_party_region }}"
- ignore_errors: yes
- register: third_party_name_change
-
- - name: assert that attempting to change display name does not work
- assert:
- that:
- - third_party_name_change is failed
-
- - name: unsubscribe from third party topic (purge_subscriptions defaults to true)
- sns_topic:
- name: "{{ sns_topic_third_party_topic_arn }}"
- subscriptions: "{{ third_party_topic.sns_topic.subscriptions }}"
- region: "{{ sns_topic_third_party_region }}"
- register: third_party_unsubscribe
-
- - name: assert that unsubscribing from third party topic works
- assert:
- that:
- - third_party_unsubscribe.changed
- - third_party_topic.sns_topic.subscriptions|length == third_party_unsubscribe.sns_topic.subscriptions|length
-
- - name: attempt to delete third party topic
- sns_topic:
- name: "{{ sns_topic_third_party_topic_arn }}"
- state: absent
- subscriptions: "{{ sns_topic_subscriptions }}"
- region: "{{ sns_topic_third_party_region }}"
- ignore_errors: yes
- register: third_party_deletion
-
- - name: no-op after third party deletion
- sns_topic:
- name: "{{ sns_topic_third_party_topic_arn }}"
- region: "{{ sns_topic_third_party_region }}"
- register: third_party_deletion_facts
-
- - name: assert that attempting to delete third party topic does not work and preserves subscriptions
- assert:
- that:
- - third_party_deletion is failed
- - third_party_topic.sns_topic.subscriptions|length == third_party_deletion_facts.sns_topic.subscriptions|length
-
- always:
-
- - name: announce teardown start
- debug:
- msg: "************** TEARDOWN STARTS HERE *******************"
-
- - name: remove topic
- sns_topic:
- name: "{{ sns_topic_topic_name }}"
- state: absent
- ignore_errors: yes
-
- - name: unsubscribe from third party topic
- sns_topic:
- name: "{{ sns_topic_third_party_topic_arn }}"
- subscriptions: []
- purge_subscriptions: yes
- region: "{{ sns_topic_third_party_region }}"
- ignore_errors: yes
-
- - name: remove lambda
- lambda:
- name: '{{ sns_topic_lambda_name }}'
- state: absent
- ignore_errors: yes
-
- - name: remove tempdir
- file:
- path: "{{ tempdir.path }}"
- state: absent
- when: tempdir is defined
- ignore_errors: yes
diff --git a/test/integration/targets/sns_topic/templates/initial-policy.json b/test/integration/targets/sns_topic/templates/initial-policy.json
deleted file mode 100644
index 235c59952e..0000000000
--- a/test/integration/targets/sns_topic/templates/initial-policy.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "Version":"2012-10-17",
- "Id":"SomePolicyId",
- "Statement" :[
- {
- "Sid":"Statement1",
- "Effect":"Allow",
- "Principal" :{
- "AWS":"{{ sns_arn.split(':')[4] }}"
- },
- "Action":["sns:Subscribe"],
- "Resource": "{{ sns_arn }}",
- "Condition" :{
- "StringEquals" :{
- "sns:Protocol":"email"
- }
- }
- }
- ]
-}
diff --git a/test/integration/targets/sns_topic/templates/updated-policy.json b/test/integration/targets/sns_topic/templates/updated-policy.json
deleted file mode 100644
index c796bb4d10..0000000000
--- a/test/integration/targets/sns_topic/templates/updated-policy.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "Version":"2012-10-17",
- "Id":"SomePolicyId",
- "Statement" :[
- {
- "Sid":"ANewSid",
- "Effect":"Allow",
- "Principal" :{
- "AWS":"{{ sns_arn.split(':')[4] }}"
- },
- "Action":["sns:Subscribe"],
- "Resource": "{{ sns_arn }}",
- "Condition" :{
- "StringEquals" :{
- "sns:Protocol":"email"
- }
- }
- }
- ]
-}
diff --git a/test/integration/targets/sqs_queue/aliases b/test/integration/targets/sqs_queue/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/sqs_queue/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/sqs_queue/defaults/main.yml b/test/integration/targets/sqs_queue/defaults/main.yml
deleted file mode 100644
index ed97d539c0..0000000000
--- a/test/integration/targets/sqs_queue/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/test/integration/targets/sqs_queue/tasks/main.yml b/test/integration/targets/sqs_queue/tasks/main.yml
deleted file mode 100644
index b689c9eb2b..0000000000
--- a/test/integration/targets/sqs_queue/tasks/main.yml
+++ /dev/null
@@ -1,106 +0,0 @@
----
-- name: Main test block
- module_defaults:
- group/aws:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token | default(omit) }}"
- region: "{{ aws_region }}"
- block:
- - block:
- - name: Test creating SQS queue
- sqs_queue:
- name: "{{ resource_prefix }}{{ 1000 | random }}"
- register: create_result
- - name: Assert SQS queue created
- assert:
- that:
- - create_result.changed
- - create_result.region == "{{ aws_region }}"
- always:
- - name: Test deleting SQS queue
- sqs_queue:
- name: "{{ create_result.name }}"
- state: absent
- register: delete_result
- retries: 3
- delay: 3
- until: delete_result.changed
- - name: Assert SQS queue deleted
- assert:
- that:
- - delete_result.changed
- - name: Test delete SQS queue that doesn't exist
- sqs_queue:
- name: "{{ resource_prefix }}{{ 1000 | random }}"
- state: absent
- register: delete_result
- - name: Assert delete non-existent queue returns cleanly
- assert:
- that:
- - delete_result.changed == False
- - name: Test queue features
- block:
- - name: Test create queue with attributes
- sqs_queue:
- name: "{{ resource_prefix }}{{ 1000 | random }}"
- default_visibility_timeout: 900
- delivery_delay: 900
- maximum_message_size: 9009
- message_retention_period: 900
- receive_message_wait_time: 10
- policy:
- Version: "2012-10-17"
- Statement:
- Effect: Allow
- Action: "*"
- register: create_result
- - name: Assert queue created with configuration
- assert:
- that:
- - create_result.changed
- - create_result.default_visibility_timeout == 900
- - create_result.delivery_delay == 900
- - create_result.maximum_message_size == 9009
- - create_result.message_retention_period == 900
- - create_result.receive_message_wait_time == 10
- - create_result.policy.Version == "2012-10-17"
- - create_result.policy.Statement[0].Effect == "Allow"
- - create_result.policy.Statement[0].Action == "*"
- always:
- - name: Cleaning up queue
- sqs_queue:
- name: "{{ create_result.name }}"
- state: absent
- register: delete_result
- retries: 3
- delay: 3
- until: delete_result.changed
- - name: Test queue with redrive
- block:
- - name: Creating dead letter queue
- sqs_queue:
- name: "{{ resource_prefix }}{{ 1000 | random }}"
- register: dead_letter_queue
- - name: Test create queue with redrive_policy
- sqs_queue:
- name: "{{ resource_prefix }}{{ 1000 | random }}"
- redrive_policy:
- maxReceiveCount: 5
- deadLetterTargetArn: "{{ dead_letter_queue.queue_arn }}"
- register: create_result
- - name: Assert queue created with configuration
- assert:
- that:
- - create_result.changed
- always:
- - name: Cleaning up queue
- sqs_queue:
- name: "{{ item.name }}"
- state: absent
- register: delete_result
- retries: 3
- delay: 3
- with_items:
- - { name: "{{ create_result.name }}" }
- - { name: "{{ dead_letter_queue.name }}" }
diff --git a/test/integration/targets/sts_assume_role/aliases b/test/integration/targets/sts_assume_role/aliases
deleted file mode 100644
index 6e3860bee2..0000000000
--- a/test/integration/targets/sts_assume_role/aliases
+++ /dev/null
@@ -1,2 +0,0 @@
-cloud/aws
-shippable/aws/group2
diff --git a/test/integration/targets/sts_assume_role/meta/main.yml b/test/integration/targets/sts_assume_role/meta/main.yml
deleted file mode 100644
index 1f64f1169a..0000000000
--- a/test/integration/targets/sts_assume_role/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dependencies:
- - prepare_tests
- - setup_ec2
diff --git a/test/integration/targets/sts_assume_role/tasks/main.yml b/test/integration/targets/sts_assume_role/tasks/main.yml
deleted file mode 100644
index 345454932f..0000000000
--- a/test/integration/targets/sts_assume_role/tasks/main.yml
+++ /dev/null
@@ -1,384 +0,0 @@
----
-# tasks file for sts_assume_role
-
-- block:
-
- # ============================================================
- # TODO create simple ansible sts_get_caller_identity module
- - blockinfile:
- path: "{{ output_dir }}/sts.py"
- create: yes
- block: |
- #!/usr/bin/env python
- import boto3
- sts = boto3.client('sts')
- response = sts.get_caller_identity()
- print(response['Account'])
-
- - name: get the aws account id
- command: "{{ ansible_python.executable }} '{{ output_dir }}/sts.py'"
- environment:
- AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
- AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
- AWS_SESSION_TOKEN: "{{ security_token }}"
- register: result
-
- - name: register account id
- set_fact:
- aws_account: "{{ result.stdout | replace('\n', '') }}"
-
- # ============================================================
- - name: create test iam role
- iam_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- name: "ansible-test-sts-{{ resource_prefix }}"
- assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
- create_instance_profile: False
- managed_policy:
- - arn:aws:iam::aws:policy/IAMReadOnlyAccess
- state: present
- register: test_role
-
- # ============================================================
- - name: pause to ensure role exists before using
- pause:
- seconds: 30
-
- # ============================================================
- - name: test with no parameters
- sts_assume_role:
- register: result
- ignore_errors: true
-
- - name: assert with no parameters
- assert:
- that:
- - 'result.failed'
- - "'missing required arguments:' in result.msg"
-
- # ============================================================
- - name: test with empty parameters
- sts_assume_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region}}"
- role_arn:
- role_session_name:
- policy:
- duration_seconds:
- external_id:
- mfa_token:
- mfa_serial_number:
- register: result
- ignore_errors: true
-
- - name: assert with empty parameters
- assert:
- that:
- - 'result.failed'
- - "'Missing required parameter in input:' in result.msg"
- when: result.module_stderr is not defined
-
- - name: assert with empty parameters
- assert:
- that:
- - 'result.failed'
- - "'Member must have length greater than or equal to 20' in result.module_stderr"
- when: result.module_stderr is defined
-
- # ============================================================
- - name: test with only 'role_arn' parameter
- sts_assume_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- role_arn: "{{ test_role.iam_role.arn }}"
- register: result
- ignore_errors: true
-
- - name: assert with only 'role_arn' parameter
- assert:
- that:
- - 'result.failed'
- - "'missing required arguments: role_session_name' in result.msg"
-
- # ============================================================
- - name: test with only 'role_session_name' parameter
- sts_assume_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- role_session_name: "AnsibleTest"
- register: result
- ignore_errors: true
-
- - name: assert with only 'role_session_name' parameter
- assert:
- that:
- - 'result.failed'
- - "'missing required arguments: role_arn' in result.msg"
-
- # ============================================================
- - name: test assume role with invalid policy
- sts_assume_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- role_arn: "{{ test_role.iam_role.arn }}"
- role_session_name: "AnsibleTest"
- policy: "invalid policy"
- register: result
- ignore_errors: true
-
- - name: assert assume role with invalid policy
- assert:
- that:
- - 'result.failed'
- - "'The policy is not in the valid JSON format.' in result.msg"
- when: result.module_stderr is not defined
-
- - name: assert assume role with invalid policy
- assert:
- that:
- - 'result.failed'
- - "'The policy is not in the valid JSON format.' in result.module_stderr"
- when: result.module_stderr is defined
-
- # ============================================================
- - name: test assume role with invalid duration seconds
- sts_assume_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- role_arn: "{{ test_role.iam_role.arn }}"
- role_session_name: AnsibleTest
- duration_seconds: invalid duration
- register: result
- ignore_errors: true
-
- - name: assert assume role with invalid duration seconds
- assert:
- that:
- - result is failed
- - 'result.msg is search("argument \w+ is of type <.*> and we were unable to convert to int: <.*> cannot be converted to an int")'
-
- # ============================================================
- - name: test assume role with invalid external id
- sts_assume_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- role_arn: "{{ test_role.iam_role.arn }}"
- role_session_name: AnsibleTest
- external_id: invalid external id
- register: result
- ignore_errors: true
-
- - name: assert assume role with invalid external id
- assert:
- that:
- - 'result.failed'
- - "'Member must satisfy regular expression pattern:' in result.msg"
- when: result.module_stderr is not defined
-
- - name: assert assume role with invalid external id
- assert:
- that:
- - 'result.failed'
- - "'Member must satisfy regular expression pattern:' in result.module_stderr"
- when: result.module_stderr is defined
-
- # ============================================================
- - name: test assume role with invalid mfa serial number
- sts_assume_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- role_arn: "{{ test_role.iam_role.arn }}"
- role_session_name: AnsibleTest
- mfa_serial_number: invalid serial number
- register: result
- ignore_errors: true
-
- - name: assert assume role with invalid mfa serial number
- assert:
- that:
- - 'result.failed'
- - "'Member must satisfy regular expression pattern:' in result.msg"
- when: result.module_stderr is not defined
-
- - name: assert assume role with invalid mfa serial number
- assert:
- that:
- - 'result.failed'
- - "'Member must satisfy regular expression pattern:' in result.module_stderr"
- when: result.module_stderr is defined
-
- # ============================================================
- - name: test assume role with invalid mfa token code
- sts_assume_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- role_arn: "{{ test_role.iam_role.arn }}"
- role_session_name: AnsibleTest
- mfa_token: invalid token code
- register: result
- ignore_errors: true
-
- - name: assert assume role with invalid mfa token code
- assert:
- that:
- - 'result.failed'
- - "'Member must satisfy regular expression pattern:' in result.msg"
- when: result.module_stderr is not defined
-
- - name: assert assume role with invalid mfa token code
- assert:
- that:
- - 'result.failed'
- - "'Member must satisfy regular expression pattern:' in result.module_stderr"
- when: result.module_stderr is defined
-
- # ============================================================
- - name: test assume role with invalid role_arn
- sts_assume_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- role_arn: invalid role arn
- role_session_name: AnsibleTest
- register: result
- ignore_errors: true
-
- - name: assert assume role with invalid role_arn
- assert:
- that:
- - result.failed
- - "'Invalid length for parameter RoleArn' in result.msg"
- when: result.module_stderr is not defined
-
- - name: assert assume role with invalid role_arn
- assert:
- that:
- - 'result.failed'
- - "'Member must have length greater than or equal to 20' in result.module_stderr"
- when: result.module_stderr is defined
-
- # ============================================================
- name: test assuming a nonexistent sts role
- sts_assume_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- role_arn: "arn:aws:iam::123456789:role/non-existing-role"
- role_session_name: "AnsibleTest"
- register: result
- ignore_errors: true
-
- name: assert assuming a nonexistent sts role
- assert:
- that:
- - 'result.failed'
- - "'is not authorized to perform: sts:AssumeRole' in result.msg"
- when: result.module_stderr is not defined
-
- name: assert assuming a nonexistent sts role
- assert:
- that:
- - 'result.failed'
- "'is not authorized to perform: sts:AssumeRole' in result.module_stderr"
- when: result.module_stderr is defined
-
- # ============================================================
- - name: test assume role
- sts_assume_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- region: "{{ aws_region }}"
- role_arn: "{{ test_role.iam_role.arn }}"
- role_session_name: AnsibleTest
- register: assumed_role
-
- - name: assert assume role
- assert:
- that:
- - 'not assumed_role.failed'
- - "'sts_creds' in assumed_role"
- - "'access_key' in assumed_role.sts_creds"
- - "'secret_key' in assumed_role.sts_creds"
- - "'session_token' in assumed_role.sts_creds"
-
- # ============================================================
- - name: test that assumed credentials have IAM read-only access
- iam_role:
- aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
- aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
- security_token: "{{ assumed_role.sts_creds.session_token }}"
- region: "{{ aws_region }}"
- name: "ansible-test-sts-{{ resource_prefix }}"
- assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
- create_instance_profile: False
- state: present
- register: result
-
- - name: assert assumed role with privileged action (expect changed=false)
- assert:
- that:
- - 'not result.failed'
- - 'not result.changed'
- - "'iam_role' in result"
-
- # ============================================================
- - name: test assumed role with unprivileged action
- iam_role:
- aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
- aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
- security_token: "{{ assumed_role.sts_creds.session_token }}"
- region: "{{ aws_region }}"
- name: "ansible-test-sts-{{ resource_prefix }}-new"
- assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
- state: present
- register: result
- ignore_errors: true
-
- name: assert assumed role with unprivileged action (expect failure)
- assert:
- that:
- - 'result.failed'
- - "'is not authorized to perform: iam:CreateRole' in result.msg"
- # runs on Python2
- when: result.module_stderr is not defined
-
- name: assert assumed role with unprivileged action (expect failure)
- assert:
- that:
- - 'result.failed'
- - "'is not authorized to perform: iam:CreateRole' in result.module_stderr"
- # runs on Python3
- when: result.module_stderr is defined
-
- # ============================================================
- always:
-
- - name: delete test iam role
- iam_role:
- aws_access_key: "{{ aws_access_key }}"
- aws_secret_key: "{{ aws_secret_key }}"
- security_token: "{{ security_token }}"
- name: "ansible-test-sts-{{ resource_prefix }}"
- assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}"
- managed_policy:
- - arn:aws:iam::aws:policy/IAMReadOnlyAccess
- state: absent
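The removed tasks above boil down to calling STS AssumeRole and then using the returned temporary credentials. A minimal boto3 sketch of that flow, assuming a reachable role (the ARN below is a placeholder):

    import boto3

    ROLE_ARN = "arn:aws:iam::123456789012:role/example"  # placeholder ARN

    sts = boto3.client("sts")
    resp = sts.assume_role(
        RoleArn=ROLE_ARN,
        RoleSessionName="AnsibleTest",
        DurationSeconds=900,  # the STS minimum; the module validates this is an int
    )
    creds = resp["Credentials"]
    # sts_assume_role surfaces these as sts_creds.access_key / secret_key / session_token
    session = boto3.session.Session(
        aws_access_key_id=creds["AccessKeyId"],
        aws_secret_access_key=creds["SecretAccessKey"],
        aws_session_token=creds["SessionToken"],
    )
    print(session.client("sts").get_caller_identity()["Arn"])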
diff --git a/test/integration/targets/sts_assume_role/templates/policy.json.j2 b/test/integration/targets/sts_assume_role/templates/policy.json.j2
deleted file mode 100644
index 559562fd91..0000000000
--- a/test/integration/targets/sts_assume_role/templates/policy.json.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": "arn:aws:iam::{{ aws_account }}:root"
- },
- "Action": "sts:AssumeRole"
- }
- ]
-} \ No newline at end of file
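The template above renders the standard trust policy allowing principals in the same account to assume the role. The equivalent document can be built without Jinja2; a small sketch, assuming aws_account holds the 12-digit account id gathered earlier in the play:

    import json

    def trust_policy(aws_account):
        return json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {"AWS": "arn:aws:iam::%s:root" % aws_account},
                "Action": "sts:AssumeRole",
            }]
        })

    print(trust_policy("123456789012"))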
diff --git a/test/sanity/ignore.txt b/test/sanity/ignore.txt
index 25f26df9e7..c3ad80a517 100644
--- a/test/sanity/ignore.txt
+++ b/test/sanity/ignore.txt
@@ -18,8 +18,6 @@ contrib/inventory/consul_io.py future-import-boilerplate
contrib/inventory/consul_io.py metaclass-boilerplate
contrib/inventory/digital_ocean.py future-import-boilerplate
contrib/inventory/digital_ocean.py metaclass-boilerplate
-contrib/inventory/ec2.py future-import-boilerplate
-contrib/inventory/ec2.py metaclass-boilerplate
contrib/inventory/fleet.py future-import-boilerplate
contrib/inventory/fleet.py metaclass-boilerplate
contrib/inventory/foreman.py future-import-boilerplate
@@ -480,170 +478,36 @@ lib/ansible/modules/cloud/alicloud/ali_instance_info.py validate-modules:doc-mis
lib/ansible/modules/cloud/alicloud/ali_instance_info.py validate-modules:doc-required-mismatch
lib/ansible/modules/cloud/alicloud/ali_instance_info.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/alicloud/ali_instance_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/amazon/aws_acm_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_acm_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_batch_compute_environment.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_batch_compute_environment.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_batch_job_definition.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_batch_job_definition.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_batch_job_queue.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_batch_job_queue.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_codebuild.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_codebuild.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_codepipeline.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_codepipeline.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_config_aggregator.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_config_aggregator.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_direct_connect_virtual_interface.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_direct_connect_virtual_interface.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_eks_cluster.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_eks_cluster.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_glue_connection.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_glue_connection.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_glue_job.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_glue_job.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_kms.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_kms.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/aws_netapp_cvs_FileSystems.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/aws_s3.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/aws_s3.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_s3_cors.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_waf_condition.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_waf_condition.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_waf_rule.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_waf_rule.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/aws_waf_web_acl.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/aws_waf_web_acl.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/cloudformation.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/cloudformation.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/cloudformation_stack_set.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/cloudformation_stack_set.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/cloudfront_distribution.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/cloudfront_distribution.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/cloudfront_invalidation.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/cloudfront_invalidation.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/data_pipeline.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/data_pipeline.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/dynamodb_table.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/dynamodb_table.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2_ami.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2_ami.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2_ami_info.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2_ami_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_asg.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_asg.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_customer_gateway_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_customer_gateway_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_elb.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_elb_info.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2_elb_lb.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2_eni.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2_eni.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2_group.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2_group.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_instance.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_instance_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_launch_template.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_launch_template.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_lc.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_lc.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_lc_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_lc_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_placement_group_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_placement_group_info.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2_snapshot_info.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2_snapshot_info.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2_tag.py validate-modules:parameter-state-invalid-choice
-lib/ansible/modules/cloud/amazon/ec2_transit_gateway_info.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2_vol.py validate-modules:parameter-state-invalid-choice
lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option_info.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_option_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_endpoint_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_igw_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_igw_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway_info.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2_vpc_net.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2_vpc_net.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2_vpc_net_info.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2_vpc_net_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_peering_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_peering_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_info.py validate-modules:doc-elements-mismatch
lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_vpn.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_vpn.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ec2_vpc_vpn_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ec2_vpc_vpn_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ecs_attribute.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ecs_attribute.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ecs_service.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ecs_service.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ecs_service_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ecs_service_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ecs_task.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ecs_task.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/efs.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/efs.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/efs_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/efs_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/elasticache.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/elasticache.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/elasticache_subnet_group.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/elb_application_lb.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/elb_application_lb.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/elb_application_lb_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/elb_classic_lb.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/elb_classic_lb_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/elb_instance.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/elb_network_lb.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/elb_network_lb.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/elb_target_group.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/elb_target_group_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/iam.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/iam_group.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/iam_group.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/iam_role.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/iam_user.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/lambda.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/lambda.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/rds.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/rds.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/rds_instance.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/rds_subnet_group.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/redshift.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/redshift.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/redshift_subnet_group.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/redshift_subnet_group.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/route53.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/route53.py validate-modules:parameter-state-invalid-choice
-lib/ansible/modules/cloud/amazon/route53_info.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/route53_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/s3_bucket_notification.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/s3_bucket_notification.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/s3_lifecycle.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/amazon/sns_topic.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/amazon/sns_topic.py validate-modules:parameter-list-no-elements
lib/ansible/modules/cloud/atomic/atomic_container.py validate-modules:doc-missing-type
lib/ansible/modules/cloud/atomic/atomic_container.py validate-modules:doc-required-mismatch
lib/ansible/modules/cloud/atomic/atomic_container.py validate-modules:no-default-for-required-parameter
@@ -8045,8 +7909,6 @@ test/integration/targets/async/library/async_test.py future-import-boilerplate
test/integration/targets/async/library/async_test.py metaclass-boilerplate
test/integration/targets/async_fail/library/async_test.py future-import-boilerplate
test/integration/targets/async_fail/library/async_test.py metaclass-boilerplate
-test/integration/targets/aws_lambda/files/mini_lambda.py future-import-boilerplate
-test/integration/targets/aws_lambda/files/mini_lambda.py metaclass-boilerplate
test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/lookup/lookup_no_future_boilerplate.py future-import-boilerplate
test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util2.py pylint:relative-beyond-top-level
test/integration/targets/collections_relative_imports/collection_root/ansible_collections/my_ns/my_col/plugins/module_utils/my_util3.py pylint:relative-beyond-top-level
@@ -8076,8 +7938,6 @@ test/integration/targets/inventory_kubevirt/server.py future-import-boilerplate
test/integration/targets/inventory_kubevirt/server.py metaclass-boilerplate
test/integration/targets/jinja2_native_types/filter_plugins/native_plugins.py future-import-boilerplate
test/integration/targets/jinja2_native_types/filter_plugins/native_plugins.py metaclass-boilerplate
-test/integration/targets/lambda_policy/files/mini_http_lambda.py future-import-boilerplate
-test/integration/targets/lambda_policy/files/mini_http_lambda.py metaclass-boilerplate
test/integration/targets/lookup_ini/lookup-8859-15.ini no-smart-quotes
test/integration/targets/module_precedence/lib_with_extension/ping.py future-import-boilerplate
test/integration/targets/module_precedence/lib_with_extension/ping.py metaclass-boilerplate
@@ -8109,16 +7969,12 @@ test/integration/targets/pip/files/setup.py future-import-boilerplate
test/integration/targets/pip/files/setup.py metaclass-boilerplate
test/integration/targets/run_modules/library/test.py future-import-boilerplate
test/integration/targets/run_modules/library/test.py metaclass-boilerplate
-test/integration/targets/s3_bucket_notification/files/mini_lambda.py future-import-boilerplate
-test/integration/targets/s3_bucket_notification/files/mini_lambda.py metaclass-boilerplate
test/integration/targets/script/files/no_shebang.py future-import-boilerplate
test/integration/targets/script/files/no_shebang.py metaclass-boilerplate
test/integration/targets/service/files/ansible_test_service.py future-import-boilerplate
test/integration/targets/service/files/ansible_test_service.py metaclass-boilerplate
test/integration/targets/setup_rpm_repo/files/create-repo.py future-import-boilerplate
test/integration/targets/setup_rpm_repo/files/create-repo.py metaclass-boilerplate
-test/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py future-import-boilerplate
-test/integration/targets/sns_topic/files/sns_topic_lambda/sns_topic_lambda.py metaclass-boilerplate
test/integration/targets/supervisorctl/files/sendProcessStdin.py future-import-boilerplate
test/integration/targets/supervisorctl/files/sendProcessStdin.py metaclass-boilerplate
test/integration/targets/template/files/encoding_1252_utf-8.expected no-smart-quotes
diff --git a/test/units/modules/cloud/amazon/test_aws_acm.py b/test/units/modules/cloud/amazon/test_aws_acm.py
deleted file mode 100644
index d2fd87b8de..0000000000
--- a/test/units/modules/cloud/amazon/test_aws_acm.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# (c) 2019 Telstra Corporation Limited
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-from ansible.modules.cloud.amazon.aws_acm import pem_chain_split, chain_compare
-from ansible.module_utils._text import to_text
-from pprint import pprint
-
-
-def test_chain_compare():
-
- # The functions we're testing take module as an argument
- # Just so they can call module.fail_json
- # Let's just use None for the unit tests,
- # Because they shouldn't fail
- # And if they do, fail_json is not applicable
- module = None
-
- fixture_suffix = 'test/units/modules/cloud/amazon/fixtures/certs'
-
- # Test chain split function on super simple (invalid) certs
- expected = ['aaa', 'bbb', 'ccc']
-
- for fname in ['simple-chain-a.cert', 'simple-chain-b.cert']:
- path = fixture_suffix + '/' + fname
- with open(path, 'r') as f:
- pem = to_text(f.read())
- actual = pem_chain_split(module, pem)
- actual = [a.strip() for a in actual]
- if actual != expected:
- print("Expected:")
- pprint(expected)
- print("Actual:")
- pprint(actual)
- raise AssertionError("Failed to properly split %s" % fname)
-
- # Now test real chains
- # chains with the same 'same_as' value should be considered equal
- test_chains = [
- { # Original Cert chain
- 'path': fixture_suffix + '/chain-1.0.cert',
- 'same_as': 1,
- 'length': 3
- },
- { # Same as 1.0, but longer PEM lines
- 'path': fixture_suffix + '/chain-1.1.cert',
- 'same_as': 1,
- 'length': 3
- },
- { # Same as 1.0, but without the text before each '-----BEGIN' delimiter
- 'path': fixture_suffix + '/chain-1.2.cert',
- 'same_as': 1,
- 'length': 3
- },
- { # Same as 1.0, but in a different order, so should be considered different
- 'path': fixture_suffix + '/chain-1.3.cert',
- 'same_as': 2,
- 'length': 3
- },
- { # Same as 1.0, but with last link missing
- 'path': fixture_suffix + '/chain-1.4.cert',
- 'same_as': 3,
- 'length': 2
- },
- { # Completely different cert chain to all the others
- 'path': fixture_suffix + '/chain-4.cert',
- 'same_as': 4,
- 'length': 3
- },
- { # Single cert
- 'path': fixture_suffix + '/a.pem',
- 'same_as': 5,
- 'length': 1
- },
- { # a different, single cert
- 'path': fixture_suffix + '/b.pem',
- 'same_as': 6,
- 'length': 1
- }
- ]
-
- for chain in test_chains:
- with open(chain['path'], 'r') as f:
- chain['pem_text'] = to_text(f.read())
-
- # Test to make sure our regex isn't too greedy
- chain['split'] = pem_chain_split(module, chain['pem_text'])
- if len(chain['split']) != chain['length']:
- print("Cert before split")
- print(chain['pem_text'])
- print("Cert after split")
- pprint(chain['split'])
- print("path: %s" % chain['path'])
- print("Expected chain length: %d" % chain['length'])
- print("Actual chain length: %d" % len(chain['split']))
- raise AssertionError("Chain %s was not split properly" % chain['path'])
-
- for chain_a in test_chains:
- for chain_b in test_chains:
- expected = (chain_a['same_as'] == chain_b['same_as'])
-
- # Now test the comparison function
- actual = chain_compare(module, chain_a['pem_text'], chain_b['pem_text'])
- if expected != actual:
- print("Error, unexpected comparison result between \n%s\nand\n%s" % (chain_a['path'], chain_b['path']))
- print("Expected %s got %s" % (str(expected), str(actual)))
- assert(expected == actual)
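The removed test exercised pem_chain_split and chain_compare from aws_acm. A rough illustration of the splitting problem it guarded against; the regex here is an assumption, not necessarily the module's exact pattern:

    import re

    PEM_RE = re.compile(
        r"-----BEGIN [A-Z ]+-----.*?-----END [A-Z ]+-----",
        re.DOTALL,
    )

    def pem_chain_split(pem_text):
        # non-greedy matching keeps adjacent certificates separate
        return PEM_RE.findall(pem_text)

    chain = (
        "-----BEGIN CERTIFICATE-----\naaa\n-----END CERTIFICATE-----\n"
        "-----BEGIN CERTIFICATE-----\nbbb\n-----END CERTIFICATE-----\n"
    )
    assert len(pem_chain_split(chain)) == 2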
diff --git a/test/units/modules/cloud/amazon/test_aws_api_gateway.py b/test/units/modules/cloud/amazon/test_aws_api_gateway.py
deleted file mode 100644
index 30b0120a11..0000000000
--- a/test/units/modules/cloud/amazon/test_aws_api_gateway.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#
-# (c) 2016 Michael De La Rue
-#
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import sys
-
-import pytest
-
-from ansible.module_utils.ec2 import HAS_BOTO3
-from units.modules.utils import set_module_args
-
-if not HAS_BOTO3:
- pytestmark = pytest.mark.skip("test_aws_api_gateway.py requires the `boto3` and `botocore` modules")
-
-import ansible.modules.cloud.amazon.aws_api_gateway as agw
-from ansible.module_utils.aws import core
-
-
-exit_return_dict = {}
-
-
-def fake_exit_json(self, **kwargs):
- """ store the kwargs given to exit_json rather than putting them out to stdout"""
- global exit_return_dict
- exit_return_dict = kwargs
- sys.exit(0)
-
-
-def test_upload_api(monkeypatch):
- class FakeConnection:
-
- def put_rest_api(self, *args, **kwargs):
- assert kwargs["body"] == "the-swagger-text-is-fake"
- return {"msg": "success!"}
-
- def return_fake_connection(*args, **kwargs):
- return FakeConnection()
-
- monkeypatch.setattr(core, "boto3_conn", return_fake_connection)
- monkeypatch.setattr(core.AnsibleAWSModule, "exit_json", fake_exit_json)
-
- set_module_args({
- "api_id": "fred",
- "state": "present",
- "swagger_text": "the-swagger-text-is-fake",
- "region": 'mars-north-1',
- "_ansible_tmpdir": "/tmp/ansibl-abcdef",
- })
- with pytest.raises(SystemExit):
- agw.main()
- assert exit_return_dict["changed"]
-
-
-def test_warn_if_region_not_specified():
-
- set_module_args({
- "name": "aws_api_gateway",
- "state": "present",
- "runtime": 'python2.7',
- "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
- "handler": 'lambda_python.my_handler'})
- with pytest.raises(SystemExit):
- print(agw.main())
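The pattern in the removed test, monkeypatching exit_json to record its arguments and raise SystemExit, is reusable for any module-style entry point. A self-contained sketch under pytest; DemoModule and run are illustrative only:

    import sys

    import pytest

    captured = {}

    def fake_exit_json(self, **kwargs):
        # record what the module would have returned, then stop execution
        captured.update(kwargs)
        sys.exit(0)

    class DemoModule:
        def exit_json(self, **kwargs):
            print(kwargs)

    def run(module):
        module.exit_json(changed=True)

    def test_records_exit(monkeypatch):
        monkeypatch.setattr(DemoModule, "exit_json", fake_exit_json)
        with pytest.raises(SystemExit):
            run(DemoModule())
        assert captured["changed"]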
diff --git a/test/units/modules/cloud/amazon/test_aws_direct_connect_connection.py b/test/units/modules/cloud/amazon/test_aws_direct_connect_connection.py
deleted file mode 100644
index d232b51095..0000000000
--- a/test/units/modules/cloud/amazon/test_aws_direct_connect_connection.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# (c) 2017 Red Hat Inc.
-#
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from units.utils.amazon_placebo_fixtures import placeboify, maybe_sleep
-from ansible.modules.cloud.amazon import aws_direct_connect_connection
-
-
-class FakeModule(object):
- def __init__(self, **kwargs):
- self.params = kwargs
-
- def fail_json(self, *args, **kwargs):
- self.exit_args = args
- self.exit_kwargs = kwargs
- raise Exception('FAIL')
-
- def exit_json(self, *args, **kwargs):
- self.exit_args = args
- self.exit_kwargs = kwargs
-
-
- # When rerecording these tests, create a standalone connection with default values in us-west-2
-# with the name ansible-test-connection and set connection_id to the appropriate value
-connection_id = "dxcon-fgq9rgot"
-connection_name = 'ansible-test-connection'
-
-
-def test_connection_status(placeboify, maybe_sleep):
- client = placeboify.client('directconnect')
- status = aws_direct_connect_connection.connection_status(client, connection_id)['connection']
- assert status['connectionName'] == connection_name
- assert status['connectionId'] == connection_id
-
-
-def test_connection_exists_by_id(placeboify, maybe_sleep):
- client = placeboify.client('directconnect')
- exists = aws_direct_connect_connection.connection_exists(client, connection_id)
- assert exists == connection_id
-
-
-def test_connection_exists_by_name(placeboify, maybe_sleep):
- client = placeboify.client('directconnect')
- exists = aws_direct_connect_connection.connection_exists(client, None, connection_name)
- assert exists == connection_id
-
-
-def test_connection_does_not_exist(placeboify, maybe_sleep):
- client = placeboify.client('directconnect')
- exists = aws_direct_connect_connection.connection_exists(client, 'dxcon-notthere')
- assert exists is False
-
-
-def test_changed_properties(placeboify, maybe_sleep):
- client = placeboify.client('directconnect')
- status = aws_direct_connect_connection.connection_status(client, connection_id)['connection']
- location = "differentlocation"
- bandwidth = status['bandwidth']
- assert aws_direct_connect_connection.changed_properties(status, location, bandwidth) is True
-
-
-def test_associations_are_not_updated(placeboify, maybe_sleep):
- client = placeboify.client('directconnect')
- status = aws_direct_connect_connection.connection_status(client, connection_id)['connection']
- lag_id = status.get('lagId')
- assert aws_direct_connect_connection.update_associations(client, status, connection_id, lag_id) is False
-
-
-def test_create_and_delete(placeboify, maybe_sleep):
- client = placeboify.client('directconnect')
- created_conn = verify_create_works(placeboify, maybe_sleep, client)
- verify_delete_works(placeboify, maybe_sleep, client, created_conn)
-
-
-def verify_create_works(placeboify, maybe_sleep, client):
- created = aws_direct_connect_connection.create_connection(client=client,
- location="EqSE2",
- bandwidth="1Gbps",
- name="ansible-test-2",
- lag_id=None)
- assert created.startswith('dxcon')
- return created
-
-
-def verify_delete_works(placeboify, maybe_sleep, client, conn_id):
- changed = aws_direct_connect_connection.ensure_absent(client, conn_id)
- assert changed is True
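The placeboify fixture used above wraps the placebo library, which records botocore responses to JSON and replays them so these tests run without live AWS calls. The underlying mechanism, sketched directly (the data_path is a placeholder):

    import boto3
    import placebo

    session = boto3.Session()
    pill = placebo.attach(session, data_path="/path/to/recorded/responses")
    pill.playback()  # or pill.record() when capturing new responses

    # clients created from this session now serve canned responses
    client = session.client("directconnect")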
diff --git a/test/units/modules/cloud/amazon/test_aws_direct_connect_link_aggregation_group.py b/test/units/modules/cloud/amazon/test_aws_direct_connect_link_aggregation_group.py
deleted file mode 100644
index 1f733aeb4c..0000000000
--- a/test/units/modules/cloud/amazon/test_aws_direct_connect_link_aggregation_group.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# (c) 2017 Red Hat Inc.
-#
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-import os
-import collections
-from units.utils.amazon_placebo_fixtures import placeboify, maybe_sleep
-from ansible.modules.cloud.amazon import aws_direct_connect_link_aggregation_group as lag_module
-from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
-
-
-@pytest.fixture(scope="module")
-def dependencies():
-
- # each LAG dict will contain the keys: module, lag_id, name
- Dependencies = collections.namedtuple("Dependencies", ["lag_1", "lag_2"])
- lag_1 = dict()
- lag_2 = dict()
-
- vanilla_params = {"name": "ansible_lag_1",
- "location": "EqSe2",
- "num_connections": 1,
- "min_links": 0,
- "bandwidth": "1Gbps"}
-
- for lag in ("ansible_lag_1", "ansible_lag_2"):
- params = dict(vanilla_params)
- params["name"] = lag
- if lag == "ansible_lag_1":
- lag_1["module"] = FakeModule(**params)
- else:
- lag_2["module"] = FakeModule(**params)
-
- if os.getenv("PLACEBO_RECORD"):
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(lag_1["module"], boto3=True)
- client = boto3_conn(lag_1["module"], conn_type="client", resource="directconnect", region=region, endpoint=ec2_url, **aws_connect_kwargs)
- # Create the link aggregation groups used by these tests
- for name in ("ansible_lag_1", "ansible_lag_2"):
- lag_id = lag_module.create_lag(client, num_connections=1, location="EqSe2", bandwidth="1Gbps", name=name, connection_id=None)
- if name == "ansible_lag_1":
- lag_1["lag_id"] = lag_id
- lag_1["name"] = name
- else:
- lag_2["lag_id"] = lag_id
- lag_2["name"] = name
- yield Dependencies(lag_1=lag_1, lag_2=lag_2)
- else:
- lag_1.update(lag_id="dxlag-fgkk4dja", name="ansible_lag_1")
- lag_2.update(lag_id="dxlag-fgytkicv", name="ansible_lag_2")
- yield Dependencies(lag_1=lag_1, lag_2=lag_2)
-
- if os.getenv("PLACEBO_RECORD"):
- # clean up
- lag_module.ensure_absent(client, lag_1["lag_id"], lag_1["name"], True, True, True, 120)
- lag_module.ensure_absent(client, lag_2["lag_id"], lag_2["name"], True, True, True, 120)
-
-
-class FakeModule(object):
- def __init__(self, **kwargs):
- self.params = kwargs
-
- def fail_json(self, *args, **kwargs):
- self.exit_args = args
- self.exit_kwargs = kwargs
- raise Exception("FAIL")
-
- def exit_json(self, *args, **kwargs):
- self.exit_args = args
- self.exit_kwargs = kwargs
-
-
-def test_nonexistent_lag_status(placeboify, maybe_sleep):
- client = placeboify.client("directconnect")
- exists = lag_module.lag_exists(client=client,
- lag_id="doesntexist",
- lag_name="doesntexist",
- verify=True)
- assert not exists
-
-
-def test_lag_status(placeboify, maybe_sleep, dependencies):
- client = placeboify.client("directconnect")
- status = lag_module.lag_status(client, lag_id=dependencies.lag_1.get("lag_id"))
- assert status.get("lagId") == dependencies.lag_1.get("lag_id")
- assert status.get("lagName") == "ansible_lag_1"
-
-
-def test_lag_exists(placeboify, maybe_sleep, dependencies):
- client = placeboify.client("directconnect")
- exists = lag_module.lag_exists(client=client,
- lag_id=dependencies.lag_1.get("lag_id"),
- lag_name=None,
- verify=True)
- assert exists
-
-
-def test_lag_exists_using_name(placeboify, maybe_sleep, dependencies):
- client = placeboify.client("directconnect")
- exists = lag_module.lag_exists(client=client,
- lag_id=None,
- lag_name=dependencies.lag_1.get("name"),
- verify=True)
- assert exists
-
-
-def test_nonexistent_lag_does_not_exist(placeboify, maybe_sleep):
- client = placeboify.client("directconnect")
- exists = lag_module.lag_exists(client=client,
- lag_id="dxlag-XXXXXXXX",
- lag_name="doesntexist",
- verify=True)
- assert not exists
-
-
-def test_lag_changed_true(placeboify, maybe_sleep, dependencies):
- client = placeboify.client("directconnect")
- status = lag_module.lag_status(client=client, lag_id=dependencies.lag_1.get("lag_id"))
- assert lag_module.lag_changed(status, "new_name", 1)
-
-
-def test_lag_changed_true_no(placeboify, maybe_sleep, dependencies):
- client = placeboify.client("directconnect")
- status = lag_module.lag_status(client=client, lag_id=dependencies.lag_1.get("lag_id"))
- assert not lag_module.lag_changed(status, "ansible_lag_1", 0)
-
-
-def test_update_lag(placeboify, maybe_sleep, dependencies):
- client = placeboify.client("directconnect")
- status_before = lag_module.lag_status(client=client, lag_id=dependencies.lag_2.get("lag_id"))
- lag_module.update_lag(client,
- lag_id=dependencies.lag_2.get("lag_id"),
- lag_name="ansible_lag_2_update",
- min_links=0,
- wait=False,
- wait_timeout=0,
- num_connections=1)
- status_after = lag_module.lag_status(client=client, lag_id=dependencies.lag_2.get("lag_id"))
- assert status_before != status_after
-
- # remove the lag name from the statuses and verify it was the only thing changed
- del status_before['lagName']
- del status_after['lagName']
- assert status_before == status_after
-
-
-def test_delete_nonexistent_lag(placeboify, maybe_sleep):
- client = placeboify.client("directconnect")
- changed = lag_module.ensure_absent(client, "dxlag-XXXXXXXX", "doesntexist", True, True, True, 120)
- assert not changed
-
-
-def test_delete_lag_with_connections_without_force_delete(placeboify, maybe_sleep, dependencies):
- client = placeboify.client("directconnect")
- with pytest.raises(Exception) as error_message:
- lag_module.ensure_absent(client, dependencies.lag_1.get("lag_id"), "ansible_lag_1", False, True, True, 120)
- assert "To force deletion of the LAG use delete_force: True" in str(error_message.value)
-
-
-def test_delete_lag_with_connections(placeboify, maybe_sleep, dependencies):
- client = placeboify.client("directconnect")
- changed = lag_module.ensure_absent(client, dependencies.lag_1.get("lag_id"), "ansible_lag_1", True, True, True, 120)
- assert changed
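The dependencies fixture above follows a record/replay-aware shape: create real resources only when PLACEBO_RECORD is set, otherwise hand back the ids baked into the recorded responses, and clean up after the yield. A stripped-down sketch of that shape; make_lag and delete_lag are placeholders:

    import os

    import pytest

    def make_lag():
        return "dxlag-recorded"  # placeholder for a real create call

    def delete_lag(lag_id):
        pass  # placeholder for a real delete call

    @pytest.fixture(scope="module")
    def lag():
        if os.getenv("PLACEBO_RECORD"):
            lag_id = make_lag()
            yield lag_id
            delete_lag(lag_id)
        else:
            # canned id matching the recorded responses
            yield "dxlag-fgkk4dja"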
diff --git a/test/units/modules/cloud/amazon/test_data_pipeline.py b/test/units/modules/cloud/amazon/test_data_pipeline.py
deleted file mode 100644
index 7d821b3993..0000000000
--- a/test/units/modules/cloud/amazon/test_data_pipeline.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# (c) 2017 Red Hat Inc.
-#
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-import json
-import collections
-
-import pytest
-from units.utils.amazon_placebo_fixtures import placeboify, maybe_sleep
-
-from ansible.modules.cloud.amazon import data_pipeline
-from ansible.module_utils._text import to_text
-
- # test_data_pipeline.py requires the `boto3` module
-boto3 = pytest.importorskip('boto3')
-
-
-@pytest.fixture(scope='module')
-def dp_setup():
- """
- Yield a FakeModule object, data pipeline id of a vanilla data pipeline, and data pipeline objects
-
- This fixture is module-scoped, since this can be reused for multiple tests.
- """
- Dependencies = collections.namedtuple("Dependencies", ["module", "data_pipeline_id", "objects"])
-
- # get objects to use to test populating and activating the data pipeline
- if not os.getenv('PLACEBO_RECORD'):
- objects = [{"name": "Every 1 day",
- "id": "DefaultSchedule",
- "fields": []},
- {"name": "Default",
- "id": "Default",
- "fields": []}]
- else:
- s3 = boto3.client('s3')
- data = s3.get_object(Bucket="ansible-test-datapipeline", Key="pipeline-object/new.json")
- objects = json.loads(to_text(data['Body'].read()))
-
- # create a module with vanilla data pipeline parameters
- params = {'name': 'ansible-test-create-pipeline',
- 'description': 'ansible-datapipeline-unit-test',
- 'state': 'present',
- 'timeout': 300,
- 'objects': [],
- 'tags': {},
- 'parameters': [],
- 'values': []}
- module = FakeModule(**params)
-
- # yield a module, the data pipeline id, and the data pipeline objects (which have not yet been applied to the vanilla pipeline)
- if not os.getenv('PLACEBO_RECORD'):
- yield Dependencies(module=module, data_pipeline_id='df-0590406117G8DPQZY2HA', objects=objects)
- else:
- connection = boto3.client('datapipeline')
- changed, result = data_pipeline.create_pipeline(connection, module)
- data_pipeline_id = result['data_pipeline']['pipeline_id']
- yield Dependencies(module=module, data_pipeline_id=data_pipeline_id, objects=objects)
-
- # remove data pipeline
- if os.getenv('PLACEBO_RECORD'):
- module.params.update(state='absent')
- data_pipeline.delete_pipeline(connection, module)
-
-
-class FakeModule(object):
- def __init__(self, **kwargs):
- self.params = kwargs
-
- def fail_json(self, *args, **kwargs):
- self.exit_args = args
- self.exit_kwargs = kwargs
- raise Exception('FAIL')
-
- def exit_json(self, *args, **kwargs):
- self.exit_args = args
- self.exit_kwargs = kwargs
-
-
-def test_create_pipeline_already_exists(placeboify, maybe_sleep, dp_setup):
- connection = placeboify.client('datapipeline')
- changed, result = data_pipeline.create_pipeline(connection, dp_setup.module)
- assert changed is False
- assert "Data Pipeline ansible-test-create-pipeline is present" in result['msg']
-
-
-def test_pipeline_field(placeboify, maybe_sleep, dp_setup):
- connection = placeboify.client('datapipeline')
- pipeline_field_info = data_pipeline.pipeline_field(connection, dp_setup.data_pipeline_id, "@pipelineState")
- assert pipeline_field_info == "PENDING"
-
-
-def test_define_pipeline(placeboify, maybe_sleep, dp_setup):
- connection = placeboify.client('datapipeline')
- changed, result = data_pipeline.define_pipeline(connection, dp_setup.module, dp_setup.objects, dp_setup.data_pipeline_id)
- assert 'has been updated' in result
-
-
-def test_deactivate_pipeline(placeboify, maybe_sleep, dp_setup):
- connection = placeboify.client('datapipeline')
- changed, result = data_pipeline.deactivate_pipeline(connection, dp_setup.module)
- assert "Data Pipeline ansible-test-create-pipeline deactivated" in result['msg']
-
-
-def test_activate_without_population(placeboify, maybe_sleep, dp_setup):
- connection = placeboify.client('datapipeline')
- with pytest.raises(Exception) as error_message:
- changed, result = data_pipeline.activate_pipeline(connection, dp_setup.module)
- assert "You need to populate your pipeline before activation." in str(error_message.value)
-
-
-def test_create_pipeline(placeboify, maybe_sleep):
- connection = placeboify.client('datapipeline')
- params = {'name': 'ansible-unittest-create-pipeline',
- 'description': 'ansible-datapipeline-unit-test',
- 'state': 'present',
- 'timeout': 300,
- 'tags': {}}
- m = FakeModule(**params)
- changed, result = data_pipeline.create_pipeline(connection, m)
- assert changed is True
- assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline created."
-
- data_pipeline.delete_pipeline(connection, m)
-
-
-def test_create_pipeline_with_tags(placeboify, maybe_sleep):
- connection = placeboify.client('datapipeline')
- params = {'name': 'ansible-unittest-create-pipeline_tags',
- 'description': 'ansible-datapipeline-unit-test',
- 'state': 'present',
- 'tags': {'ansible': 'test'},
- 'timeout': 300}
- m = FakeModule(**params)
- changed, result = data_pipeline.create_pipeline(connection, m)
- assert changed is True
- assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline_tags created."
-
- data_pipeline.delete_pipeline(connection, m)
-
-
-def test_delete_nonexistent_pipeline(placeboify, maybe_sleep):
- connection = placeboify.client('datapipeline')
- params = {'name': 'ansible-test-nonexistent',
- 'description': 'ansible-test-nonexistent',
- 'state': 'absent',
- 'objects': [],
- 'tags': {'ansible': 'test'},
- 'timeout': 300}
- m = FakeModule(**params)
- changed, result = data_pipeline.delete_pipeline(connection, m)
- assert changed is False
-
-
-def test_delete_pipeline(placeboify, maybe_sleep):
- connection = placeboify.client('datapipeline')
- params = {'name': 'ansible-test-nonexistent',
- 'description': 'ansible-test-nonexistent',
- 'state': 'absent',
- 'objects': [],
- 'tags': {'ansible': 'test'},
- 'timeout': 300}
- m = FakeModule(**params)
- data_pipeline.create_pipeline(connection, m)
- changed, result = data_pipeline.delete_pipeline(connection, m)
- assert changed is True
-
-
-def test_build_unique_id_different():
- m = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id'})
- m2 = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id-different'})
- assert data_pipeline.build_unique_id(m) != data_pipeline.build_unique_id(m2)
-
-
-def test_build_unique_id_same():
- m = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id', 'tags': {'ansible': 'test'}})
- m2 = FakeModule(**{'name': 'ansible-unittest-1', 'description': 'test-unique-id', 'tags': {'ansible': 'test'}})
- assert data_pipeline.build_unique_id(m) == data_pipeline.build_unique_id(m2)
-
-
-def test_build_unique_id_obj():
- # objects can differ while the unique id stays the same, so pipeline objects can be modified
- m = FakeModule(**{'name': 'ansible-unittest-1', 'objects': [{'first': 'object'}]})
- m2 = FakeModule(**{'name': 'ansible-unittest-1', 'objects': [{'second': 'object'}]})
- assert data_pipeline.build_unique_id(m) == data_pipeline.build_unique_id(m2)
-
-
-def test_format_tags():
- unformatted_tags = {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}
- formatted_tags = data_pipeline.format_tags(unformatted_tags)
- for tag_set in formatted_tags:
- assert unformatted_tags[tag_set['key']] == tag_set['value']
-
-
-def test_format_empty_tags():
- unformatted_tags = {}
- formatted_tags = data_pipeline.format_tags(unformatted_tags)
- assert formatted_tags == []
-
-
-def test_pipeline_description(placeboify, maybe_sleep, dp_setup):
- connection = placeboify.client('datapipeline')
- dp_id = dp_setup.data_pipeline_id
- pipelines = data_pipeline.pipeline_description(connection, dp_id)
- assert dp_id == pipelines['pipelineDescriptionList'][0]['pipelineId']
-
-
-def test_pipeline_description_nonexistent(placeboify, maybe_sleep):
- hypothetical_pipeline_id = "df-015440025PF7YGLDK47C"
- connection = placeboify.client('datapipeline')
- with pytest.raises(Exception) as error:
- data_pipeline.pipeline_description(connection, hypothetical_pipeline_id)
- assert error.type == data_pipeline.DataPipelineNotFound
-
-
-def test_check_dp_exists_true(placeboify, maybe_sleep, dp_setup):
- connection = placeboify.client('datapipeline')
- exists = data_pipeline.check_dp_exists(connection, dp_setup.data_pipeline_id)
- assert exists is True
-
-
-def test_check_dp_exists_false(placeboify, maybe_sleep):
- hypothetical_pipeline_id = "df-015440025PF7YGLDK47C"
- connection = placeboify.client('datapipeline')
- exists = data_pipeline.check_dp_exists(connection, hypothetical_pipeline_id)
- assert exists is False
-
-
-def test_check_dp_status(placeboify, maybe_sleep, dp_setup):
- inactive_states = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING']
- connection = placeboify.client('datapipeline')
- state = data_pipeline.check_dp_status(connection, dp_setup.data_pipeline_id, inactive_states)
- assert state is True
-
-
-def test_activate_pipeline(placeboify, maybe_sleep, dp_setup):
- # use objects to define pipeline before activating
- connection = placeboify.client('datapipeline')
- data_pipeline.define_pipeline(connection,
- module=dp_setup.module,
- objects=dp_setup.objects,
- dp_id=dp_setup.data_pipeline_id)
- changed, result = data_pipeline.activate_pipeline(connection, dp_setup.module)
- assert changed is True
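The build_unique_id tests above pin down a useful property: the idempotency token is derived only from identifying fields, so pipeline objects can change without changing the pipeline's identity. A sketch of one way to get that behaviour; the exact fields hashed by the module are an assumption:

    import hashlib
    import json

    def build_unique_id(params):
        # hash only identifying fields, ignoring mutable pipeline objects
        identifying = {k: params.get(k) for k in ("name", "description", "tags")}
        blob = json.dumps(identifying, sort_keys=True)
        return hashlib.md5(blob.encode("utf-8")).hexdigest()

    a = build_unique_id({"name": "p1", "description": "d", "objects": [1]})
    b = build_unique_id({"name": "p1", "description": "d", "objects": [2]})
    assert a == b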
diff --git a/test/units/modules/cloud/amazon/test_ec2_vpc_nat_gateway.py b/test/units/modules/cloud/amazon/test_ec2_vpc_nat_gateway.py
deleted file mode 100644
index 53cd8c4c2c..0000000000
--- a/test/units/modules/cloud/amazon/test_ec2_vpc_nat_gateway.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-import unittest
-
-from mock import patch
-
-import ansible.modules.cloud.amazon.ec2_vpc_nat_gateway as ng
-
-
-boto3 = pytest.importorskip("boto3")
-botocore = pytest.importorskip("botocore")
-
-aws_region = 'us-west-2'
-
-
-class AnsibleEc2VpcNatGatewayFunctions(unittest.TestCase):
-
- def test_get_nat_gateways(self):
- client = boto3.client('ec2', region_name=aws_region)
-        success, err_msg, gws = (
-            ng.get_nat_gateways(client, 'subnet-123456789', check_mode=True)
-        )
-        should_return = ng.DRY_RUN_GATEWAYS
-        self.assertTrue(success)
-        self.assertEqual(gws, should_return)
-
- def test_get_nat_gateways_no_gateways_found(self):
- client = boto3.client('ec2', region_name=aws_region)
-        success, err_msg, gws = (
-            ng.get_nat_gateways(client, 'subnet-1234567', check_mode=True)
-        )
-        self.assertTrue(success)
-        self.assertEqual(gws, [])
-
- def test_wait_for_status(self):
- client = boto3.client('ec2', region_name=aws_region)
- success, err_msg, gws = (
- ng.wait_for_status(
- client, 5, 'nat-123456789', 'available', check_mode=True
- )
- )
- should_return = ng.DRY_RUN_GATEWAYS[0]
- self.assertTrue(success)
- self.assertEqual(gws, should_return)
-
- @patch('time.sleep')
- def test_wait_for_status_to_timeout(self, mock_sleep):
- client = boto3.client('ec2', region_name=aws_region)
- success, err_msg, gws = (
- ng.wait_for_status(
- client, 2, 'nat-12345678', 'available', check_mode=True
- )
- )
- self.assertFalse(success)
- self.assertEqual(gws, {})
-
- def test_gateway_in_subnet_exists_with_allocation_id(self):
- client = boto3.client('ec2', region_name=aws_region)
- gws, err_msg = (
- ng.gateway_in_subnet_exists(
- client, 'subnet-123456789', 'eipalloc-1234567', check_mode=True
- )
- )
- should_return = ng.DRY_RUN_GATEWAYS
- self.assertEqual(gws, should_return)
-
- def test_gateway_in_subnet_exists_with_allocation_id_does_not_exist(self):
- client = boto3.client('ec2', region_name=aws_region)
- gws, err_msg = (
- ng.gateway_in_subnet_exists(
- client, 'subnet-123456789', 'eipalloc-123', check_mode=True
- )
- )
- should_return = list()
- self.assertEqual(gws, should_return)
-
- def test_gateway_in_subnet_exists_without_allocation_id(self):
- client = boto3.client('ec2', region_name=aws_region)
- gws, err_msg = (
- ng.gateway_in_subnet_exists(
- client, 'subnet-123456789', check_mode=True
- )
- )
- should_return = ng.DRY_RUN_GATEWAYS
- self.assertEqual(gws, should_return)
-
- def test_get_eip_allocation_id_by_address(self):
- client = boto3.client('ec2', region_name=aws_region)
- allocation_id, error_msg = (
- ng.get_eip_allocation_id_by_address(
- client, '55.55.55.55', check_mode=True
- )
- )
- should_return = 'eipalloc-1234567'
- self.assertEqual(allocation_id, should_return)
-
- def test_get_eip_allocation_id_by_address_does_not_exist(self):
- client = boto3.client('ec2', region_name=aws_region)
- allocation_id, err_msg = (
- ng.get_eip_allocation_id_by_address(
- client, '52.52.52.52', check_mode=True
- )
- )
- self.assertEqual(err_msg, 'EIP 52.52.52.52 does not exist')
- self.assertTrue(allocation_id is None)
-
- def test_allocate_eip_address(self):
- client = boto3.client('ec2', region_name=aws_region)
- success, err_msg, eip_id = (
- ng.allocate_eip_address(
- client, check_mode=True
- )
- )
- self.assertTrue(success)
-
- def test_release_address(self):
- client = boto3.client('ec2', region_name=aws_region)
- success, err_msg = (
- ng.release_address(
- client, 'eipalloc-1234567', check_mode=True
- )
- )
- self.assertTrue(success)
-
- def test_create(self):
- client = boto3.client('ec2', region_name=aws_region)
- success, changed, err_msg, results = (
- ng.create(
- client, 'subnet-123456', 'eipalloc-1234567', check_mode=True
- )
- )
- self.assertTrue(success)
- self.assertTrue(changed)
-
- def test_pre_create(self):
- client = boto3.client('ec2', region_name=aws_region)
- success, changed, err_msg, results = (
- ng.pre_create(
- client, 'subnet-123456', check_mode=True
- )
- )
- self.assertTrue(success)
- self.assertTrue(changed)
-
-    def test_pre_create_idempotent_with_allocation_id(self):
- client = boto3.client('ec2', region_name=aws_region)
- success, changed, err_msg, results = (
- ng.pre_create(
- client, 'subnet-123456789', allocation_id='eipalloc-1234567', check_mode=True
- )
- )
- self.assertTrue(success)
- self.assertFalse(changed)
-
-    def test_pre_create_idempotent_with_eip_address(self):
- client = boto3.client('ec2', region_name=aws_region)
- success, changed, err_msg, results = (
- ng.pre_create(
- client, 'subnet-123456789', eip_address='55.55.55.55', check_mode=True
- )
- )
- self.assertTrue(success)
- self.assertFalse(changed)
-
-    def test_pre_create_idempotent_if_exist_do_not_create(self):
- client = boto3.client('ec2', region_name=aws_region)
- success, changed, err_msg, results = (
- ng.pre_create(
- client, 'subnet-123456789', if_exist_do_not_create=True, check_mode=True
- )
- )
- self.assertTrue(success)
- self.assertFalse(changed)
-
- def test_delete(self):
- client = boto3.client('ec2', region_name=aws_region)
- success, changed, err_msg, results = (
- ng.remove(
- client, 'nat-123456789', check_mode=True
- )
- )
- self.assertTrue(success)
- self.assertTrue(changed)
-
- def test_delete_and_release_ip(self):
- client = boto3.client('ec2', region_name=aws_region)
- success, changed, err_msg, results = (
- ng.remove(
- client, 'nat-123456789', release_eip=True, check_mode=True
- )
- )
- self.assertTrue(success)
- self.assertTrue(changed)
-
- def test_delete_if_does_not_exist(self):
- client = boto3.client('ec2', region_name=aws_region)
- success, changed, err_msg, results = (
- ng.remove(
- client, 'nat-12345', check_mode=True
- )
- )
- self.assertFalse(success)
- self.assertFalse(changed)
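The NAT gateway tests above never talk to AWS: every helper is called with check_mode=True and returns canned data such as ng.DRY_RUN_GATEWAYS. A minimal sketch of that dry-run pattern, with hypothetical names (only the check_mode short-circuit mirrors what the tests rely on):

DRY_RUN_GATEWAYS = [{'nat_gateway_id': 'nat-123456789', 'subnet_id': 'subnet-123456789'}]

def get_nat_gateways(client, subnet_id, check_mode=False):
    if check_mode:
        # return canned fixtures instead of calling AWS, as the tests expect
        gateways = [gw for gw in DRY_RUN_GATEWAYS if gw['subnet_id'] == subnet_id]
        return True, '', gateways
    response = client.describe_nat_gateways(
        Filters=[{'Name': 'subnet-id', 'Values': [subnet_id]}]
    )
    return True, '', response['NatGateways']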
diff --git a/test/units/modules/cloud/amazon/test_ec2_vpc_vpn.py b/test/units/modules/cloud/amazon/test_ec2_vpc_vpn.py
deleted file mode 100644
index 5bf3b40f91..0000000000
--- a/test/units/modules/cloud/amazon/test_ec2_vpc_vpn.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# (c) 2017 Red Hat Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-import os
-from units.utils.amazon_placebo_fixtures import placeboify, maybe_sleep
-from ansible.modules.cloud.amazon import ec2_vpc_vpn
-from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, boto3_tag_list_to_ansible_dict
-
-
-class FakeModule(object):
- def __init__(self, **kwargs):
- self.params = kwargs
-
- def fail_json(self, *args, **kwargs):
- self.exit_args = args
- self.exit_kwargs = kwargs
- raise Exception('FAIL')
-
- def exit_json(self, *args, **kwargs):
- self.exit_args = args
- self.exit_kwargs = kwargs
-
-
-def get_vgw(connection):
-    # return the IDs of two existing VGWs if present
- vgw = connection.describe_vpn_gateways(Filters=[{'Name': 'tag:Ansible_VPN', 'Values': ['Test']}])
- if len(vgw['VpnGateways']) >= 2:
- return [vgw['VpnGateways'][0]['VpnGatewayId'], vgw['VpnGateways'][1]['VpnGatewayId']]
- # otherwise create two and return them
- vgw_1 = connection.create_vpn_gateway(Type='ipsec.1')
- vgw_2 = connection.create_vpn_gateway(Type='ipsec.1')
- for resource in (vgw_1, vgw_2):
- connection.create_tags(Resources=[resource['VpnGateway']['VpnGatewayId']], Tags=[{'Key': 'Ansible_VPN', 'Value': 'Test'}])
- return [vgw_1['VpnGateway']['VpnGatewayId'], vgw_2['VpnGateway']['VpnGatewayId']]
-
-
-def get_cgw(connection):
-    # return the IDs of two existing CGWs if present
- cgw = connection.describe_customer_gateways(DryRun=False, Filters=[{'Name': 'state', 'Values': ['available']},
- {'Name': 'tag:Name', 'Values': ['Ansible-CGW']}])
- if len(cgw['CustomerGateways']) >= 2:
- return [cgw['CustomerGateways'][0]['CustomerGatewayId'], cgw['CustomerGateways'][1]['CustomerGatewayId']]
-    # otherwise create two and return them
- cgw_1 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='9.8.7.6', BgpAsn=65000)
- cgw_2 = connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp='5.4.3.2', BgpAsn=65000)
- for resource in (cgw_1, cgw_2):
- connection.create_tags(Resources=[resource['CustomerGateway']['CustomerGatewayId']], Tags=[{'Key': 'Ansible-CGW', 'Value': 'Test'}])
- return [cgw_1['CustomerGateway']['CustomerGatewayId'], cgw_2['CustomerGateway']['CustomerGatewayId']]
-
-
-def get_dependencies():
- if os.getenv('PLACEBO_RECORD'):
- module = FakeModule(**{})
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
- connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
- vgw = get_vgw(connection)
- cgw = get_cgw(connection)
- else:
- vgw = ["vgw-35d70c2b", "vgw-32d70c2c"]
- cgw = ["cgw-6113c87f", "cgw-9e13c880"]
-
- return cgw, vgw
-
-
-def setup_mod_conn(placeboify, params):
- conn = placeboify.client('ec2')
- m = FakeModule(**params)
- return m, conn
-
-
-def make_params(cgw, vgw, tags=None, filters=None, routes=None):
- tags = {} if tags is None else tags
- filters = {} if filters is None else filters
- routes = [] if routes is None else routes
-
- return {'customer_gateway_id': cgw,
- 'static_only': True,
- 'vpn_gateway_id': vgw,
- 'connection_type': 'ipsec.1',
- 'purge_tags': True,
- 'tags': tags,
- 'filters': filters,
- 'routes': routes,
- 'delay': 15,
- 'wait_timeout': 600}
-
-
-def make_conn(placeboify, module, connection):
- customer_gateway_id = module.params['customer_gateway_id']
- static_only = module.params['static_only']
- vpn_gateway_id = module.params['vpn_gateway_id']
- connection_type = module.params['connection_type']
-    check_mode = module.params.get('check_mode', False)
- changed = True
- vpn = ec2_vpc_vpn.create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type)
- return changed, vpn
-
-
-def tear_down_conn(placeboify, connection, vpn_connection_id):
- ec2_vpc_vpn.delete_connection(connection, vpn_connection_id, delay=15, max_attempts=40)
-
-
-def test_find_connection_vpc_conn_id(placeboify, maybe_sleep):
- # setup dependencies for 2 vpn connections
- dependencies = setup_req(placeboify, 2)
- dep1, dep2 = dependencies[0], dependencies[1]
- params1, vpn1, m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection']
- params2, vpn2, m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection']
-
- # find the connection with a vpn_connection_id and assert it is the expected one
- assert vpn1['VpnConnectionId'] == ec2_vpc_vpn.find_connection(conn1, params1, vpn1['VpnConnectionId'])['VpnConnectionId']
-
- tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId'])
- tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId'])
-
-
-def test_find_connection_filters(placeboify, maybe_sleep):
- # setup dependencies for 2 vpn connections
- dependencies = setup_req(placeboify, 2)
- dep1, dep2 = dependencies[0], dependencies[1]
- params1, vpn1, m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection']
- params2, vpn2, m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection']
-
- # update to different tags
- params1.update(tags={'Wrong': 'Tag'})
- params2.update(tags={'Correct': 'Tag'})
- ec2_vpc_vpn.ensure_present(conn1, params1)
- ec2_vpc_vpn.ensure_present(conn2, params2)
-
- # create some new parameters for a filter
- params = {'filters': {'tags': {'Correct': 'Tag'}}}
-
- # find the connection that has the parameters above
- found = ec2_vpc_vpn.find_connection(conn1, params)
-
- # assert the correct connection was found
- assert found['VpnConnectionId'] == vpn2['VpnConnectionId']
-
- # delete the connections
- tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId'])
- tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId'])
-
-
-def test_find_connection_insufficient_filters(placeboify, maybe_sleep):
- # get list of customer gateways and virtual private gateways
- cgw, vgw = get_dependencies()
-
- # create two connections with the same tags
- params = make_params(cgw[0], vgw[0], tags={'Correct': 'Tag'})
- params2 = make_params(cgw[1], vgw[1], tags={'Correct': 'Tag'})
- m, conn = setup_mod_conn(placeboify, params)
- m2, conn2 = setup_mod_conn(placeboify, params2)
- vpn1 = ec2_vpc_vpn.ensure_present(conn, m.params)[1]
- vpn2 = ec2_vpc_vpn.ensure_present(conn2, m2.params)[1]
-
- # reset the parameters so only filtering by tags will occur
- m.params = {'filters': {'tags': {'Correct': 'Tag'}}}
-
- # assert that multiple matching connections have been found
-    with pytest.raises(Exception) as error_message:
-        ec2_vpc_vpn.find_connection(conn, m.params)
-    assert "More than one matching VPN connection was found" in str(error_message.value)
-
- # delete the connections
- tear_down_conn(placeboify, conn, vpn1['VpnConnectionId'])
- tear_down_conn(placeboify, conn, vpn2['VpnConnectionId'])
-
-
-def test_find_connection_nonexistent(placeboify, maybe_sleep):
- # create parameters but don't create a connection with them
- params = {'filters': {'tags': {'Correct': 'Tag'}}}
- m, conn = setup_mod_conn(placeboify, params)
-
- # try to find a connection with matching parameters and assert None are found
- assert ec2_vpc_vpn.find_connection(conn, m.params) is None
-
-
-def test_create_connection(placeboify, maybe_sleep):
- # get list of customer gateways and virtual private gateways
- cgw, vgw = get_dependencies()
-
- # create a connection
- params = make_params(cgw[0], vgw[0])
- m, conn = setup_mod_conn(placeboify, params)
- changed, vpn = ec2_vpc_vpn.ensure_present(conn, m.params)
-
- # assert that changed is true and that there is a connection id
- assert changed is True
- assert 'VpnConnectionId' in vpn
-
- # delete connection
- tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
-
-
-def test_create_connection_that_exists(placeboify, maybe_sleep):
- # setup dependencies for 1 vpn connection
- dependencies = setup_req(placeboify, 1)
- params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
-
- # try to recreate the same connection
- changed, vpn2 = ec2_vpc_vpn.ensure_present(conn, params)
-
- # nothing should have changed
- assert changed is False
- assert vpn['VpnConnectionId'] == vpn2['VpnConnectionId']
-
- # delete connection
- tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
-
-
-def test_modify_deleted_connection(placeboify, maybe_sleep):
- # setup dependencies for 1 vpn connection
- dependencies = setup_req(placeboify, 1)
- params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
-
- # delete it
- tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
-
- # try to update the deleted connection
- m.params.update(vpn_connection_id=vpn['VpnConnectionId'])
-    with pytest.raises(Exception) as error_message:
-        ec2_vpc_vpn.ensure_present(conn, m.params)
-    assert "There is no VPN connection available or pending with that id" in str(error_message.value)
-
-
-def test_delete_connection(placeboify, maybe_sleep):
- # setup dependencies for 1 vpn connection
- dependencies = setup_req(placeboify, 1)
- params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
-
- # delete it
- changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params)
-
- assert changed is True
- assert vpn == {}
-
-
-def test_delete_nonexistent_connection(placeboify, maybe_sleep):
- # create parameters and ensure any connection matching (None) is deleted
- params = {'filters': {'tags': {'ThisConnection': 'DoesntExist'}}, 'delay': 15, 'wait_timeout': 600}
- m, conn = setup_mod_conn(placeboify, params)
- changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params)
-
- assert changed is False
- assert vpn == {}
-
-
-def test_check_for_update_tags(placeboify, maybe_sleep):
- # setup dependencies for 1 vpn connection
- dependencies = setup_req(placeboify, 1)
- params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
-
- # add and remove a number of tags
- m.params['tags'] = {'One': 'one', 'Two': 'two'}
- ec2_vpc_vpn.ensure_present(conn, m.params)
- m.params['tags'] = {'Two': 'two', 'Three': 'three', 'Four': 'four'}
- changes = ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId'])
-
- flat_dict_changes = boto3_tag_list_to_ansible_dict(changes['tags_to_add'])
- correct_changes = boto3_tag_list_to_ansible_dict([{'Key': 'Three', 'Value': 'three'}, {'Key': 'Four', 'Value': 'four'}])
- assert flat_dict_changes == correct_changes
- assert changes['tags_to_remove'] == ['One']
-
- # delete connection
- tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
-
-
-def test_check_for_update_nonmodifiable_attr(placeboify, maybe_sleep):
- # setup dependencies for 1 vpn connection
- dependencies = setup_req(placeboify, 1)
- params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
- current_vgw = params['vpn_gateway_id']
-
- # update a parameter that isn't modifiable
- m.params.update(vpn_gateway_id="invalidchange")
-
- err = 'You cannot modify vpn_gateway_id, the current value of which is {0}. Modifiable VPN connection attributes are tags.'.format(current_vgw)
-    with pytest.raises(Exception) as error_message:
-        ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId'])
-    assert err in str(error_message.value)
-
- # delete connection
- tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
-
-
-def test_add_tags(placeboify, maybe_sleep):
- # setup dependencies for 1 vpn connection
- dependencies = setup_req(placeboify, 1)
- params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
-
- # add a tag to the connection
- ec2_vpc_vpn.add_tags(conn, vpn['VpnConnectionId'], add=[{'Key': 'Ansible-Test', 'Value': 'VPN'}])
-
- # assert tag is there
- current_vpn = ec2_vpc_vpn.find_connection(conn, params)
- assert current_vpn['Tags'] == [{'Key': 'Ansible-Test', 'Value': 'VPN'}]
-
- # delete connection
- tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
-
-
-def test_remove_tags(placeboify, maybe_sleep):
- # setup dependencies for 1 vpn connection
- dependencies = setup_req(placeboify, 1)
- params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
-
- # remove a tag from the connection
- ec2_vpc_vpn.remove_tags(conn, vpn['VpnConnectionId'], remove=['Ansible-Test'])
-
- # assert the tag is gone
- current_vpn = ec2_vpc_vpn.find_connection(conn, params)
- assert 'Tags' not in current_vpn
-
- # delete connection
- tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
-
-
-def test_add_routes(placeboify, maybe_sleep):
- # setup dependencies for 1 vpn connection
- dependencies = setup_req(placeboify, 1)
- params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
-
- # create connection with a route
- ec2_vpc_vpn.add_routes(conn, vpn['VpnConnectionId'], ['195.168.2.0/24', '196.168.2.0/24'])
-
- # assert both routes are there
- current_vpn = ec2_vpc_vpn.find_connection(conn, params)
- assert set(each['DestinationCidrBlock'] for each in current_vpn['Routes']) == set(['195.168.2.0/24', '196.168.2.0/24'])
-
- # delete connection
- tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
-
-
-def setup_req(placeboify, number_of_results=1):
- ''' returns dependencies for VPN connections '''
- assert number_of_results in (1, 2)
- results = []
- cgw, vgw = get_dependencies()
- for each in range(0, number_of_results):
- params = make_params(cgw[each], vgw[each])
- m, conn = setup_mod_conn(placeboify, params)
- vpn = ec2_vpc_vpn.ensure_present(conn, params)[1]
-
- results.append({'module': m, 'connection': conn, 'vpn': vpn, 'params': params})
- if number_of_results == 1:
- return results[0]
- else:
- return results[0], results[1]
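The placeboify and maybe_sleep fixtures used throughout this file wrap the placebo library: when PLACEBO_RECORD is set, get_dependencies records real boto3 responses, and otherwise the canned gateway IDs are replayed. A bare-bones illustration of that record/replay mechanism (the data_path is illustrative):

import boto3
import placebo

session = boto3.Session()
# attach placebo to the session so every client call is intercepted
pill = placebo.attach(session, data_path='test/units/placebo_recordings')
pill.record()    # save live responses; call pill.playback() instead to replay them offline
client = session.client('ec2')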
diff --git a/test/units/modules/cloud/amazon/test_iam_password_policy.py b/test/units/modules/cloud/amazon/test_iam_password_policy.py
deleted file mode 100644
index 85b828a130..0000000000
--- a/test/units/modules/cloud/amazon/test_iam_password_policy.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-
-from units.modules.utils import set_module_args
-from ansible.module_utils.ec2 import HAS_BOTO3
-
-if not HAS_BOTO3:
- pytestmark = pytest.mark.skip("iam_password_policy.py requires the `boto3` and `botocore` modules")
-else:
- import boto3
- from ansible.modules.cloud.amazon import iam_password_policy
-
-
-def test_warn_if_state_not_specified():
- set_module_args({
- "min_pw_length": "8",
- "require_symbols": "false",
- "require_numbers": "true",
- "require_uppercase": "true",
- "require_lowercase": "true",
- "allow_pw_change": "true",
- "pw_max_age": "60",
- "pw_reuse_prevent": "5",
- "pw_expire": "false"
- })
- with pytest.raises(SystemExit):
- print(iam_password_policy.main())
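This file guards its boto-dependent imports with HAS_BOTO3 and a module-level pytestmark. The NAT gateway and Kinesis tests above achieve the same skip with pytest.importorskip, which aborts collection of the whole module when the dependency is missing:

import pytest

# skip the entire test module at collection time if boto3/botocore are absent
boto3 = pytest.importorskip("boto3")
botocore = pytest.importorskip("botocore")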
diff --git a/test/units/modules/cloud/amazon/test_kinesis_stream.py b/test/units/modules/cloud/amazon/test_kinesis_stream.py
deleted file mode 100644
index e549ae9d11..0000000000
--- a/test/units/modules/cloud/amazon/test_kinesis_stream.py
+++ /dev/null
@@ -1,330 +0,0 @@
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-import unittest
-
-boto3 = pytest.importorskip("boto3")
-botocore = pytest.importorskip("botocore")
-
-from ansible.modules.cloud.amazon import kinesis_stream
-
-aws_region = 'us-west-2'
-
-
-class AnsibleKinesisStreamFunctions(unittest.TestCase):
-
- def test_convert_to_lower(self):
- example = {
- 'HasMoreShards': True,
- 'RetentionPeriodHours': 24,
- 'StreamName': 'test',
- 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
- 'StreamStatus': 'ACTIVE'
- }
- converted_example = kinesis_stream.convert_to_lower(example)
- keys = list(converted_example.keys())
- keys.sort()
- for i in range(len(keys)):
- if i == 0:
- self.assertEqual(keys[i], 'has_more_shards')
- if i == 1:
- self.assertEqual(keys[i], 'retention_period_hours')
- if i == 2:
- self.assertEqual(keys[i], 'stream_arn')
- if i == 3:
- self.assertEqual(keys[i], 'stream_name')
- if i == 4:
- self.assertEqual(keys[i], 'stream_status')
-
- def test_make_tags_in_aws_format(self):
- example = {
- 'env': 'development'
- }
- should_return = [
- {
- 'Key': 'env',
- 'Value': 'development'
- }
- ]
- aws_tags = kinesis_stream.make_tags_in_aws_format(example)
- self.assertEqual(aws_tags, should_return)
-
- def test_make_tags_in_proper_format(self):
- example = [
- {
- 'Key': 'env',
- 'Value': 'development'
- },
- {
- 'Key': 'service',
- 'Value': 'web'
- }
- ]
- should_return = {
- 'env': 'development',
- 'service': 'web'
- }
- proper_tags = kinesis_stream.make_tags_in_proper_format(example)
- self.assertEqual(proper_tags, should_return)
-
- def test_recreate_tags_from_list(self):
- example = [('environment', 'development'), ('service', 'web')]
- should_return = [
- {
- 'Key': 'environment',
- 'Value': 'development'
- },
- {
- 'Key': 'service',
- 'Value': 'web'
- }
- ]
- aws_tags = kinesis_stream.recreate_tags_from_list(example)
- self.assertEqual(aws_tags, should_return)
-
- def test_get_tags(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, err_msg, tags = kinesis_stream.get_tags(client, 'test', check_mode=True)
- self.assertTrue(success)
- should_return = [
- {
- 'Key': 'DryRunMode',
- 'Value': 'true'
- }
- ]
- self.assertEqual(tags, should_return)
-
- def test_find_stream(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, err_msg, stream = (
- kinesis_stream.find_stream(client, 'test', check_mode=True)
- )
- should_return = {
- 'OpenShardsCount': 5,
- 'ClosedShardsCount': 0,
- 'ShardsCount': 5,
- 'HasMoreShards': True,
- 'RetentionPeriodHours': 24,
- 'StreamName': 'test',
- 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
- 'StreamStatus': 'ACTIVE',
- 'EncryptionType': 'NONE'
- }
- self.assertTrue(success)
- self.assertEqual(stream, should_return)
-
- def test_wait_for_status(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, err_msg, stream = (
- kinesis_stream.wait_for_status(
- client, 'test', 'ACTIVE', check_mode=True
- )
- )
- should_return = {
- 'OpenShardsCount': 5,
- 'ClosedShardsCount': 0,
- 'ShardsCount': 5,
- 'HasMoreShards': True,
- 'RetentionPeriodHours': 24,
- 'StreamName': 'test',
- 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
- 'StreamStatus': 'ACTIVE',
- 'EncryptionType': 'NONE'
- }
- self.assertTrue(success)
- self.assertEqual(stream, should_return)
-
- def test_tags_action_create(self):
- client = boto3.client('kinesis', region_name=aws_region)
- tags = {
- 'env': 'development',
- 'service': 'web'
- }
- success, err_msg = (
- kinesis_stream.tags_action(
- client, 'test', tags, 'create', check_mode=True
- )
- )
- self.assertTrue(success)
-
- def test_tags_action_delete(self):
- client = boto3.client('kinesis', region_name=aws_region)
- tags = {
- 'env': 'development',
- 'service': 'web'
- }
- success, err_msg = (
- kinesis_stream.tags_action(
- client, 'test', tags, 'delete', check_mode=True
- )
- )
- self.assertTrue(success)
-
- def test_tags_action_invalid(self):
- client = boto3.client('kinesis', region_name=aws_region)
- tags = {
- 'env': 'development',
- 'service': 'web'
- }
- success, err_msg = (
- kinesis_stream.tags_action(
- client, 'test', tags, 'append', check_mode=True
- )
- )
- self.assertFalse(success)
-
- def test_update_tags(self):
- client = boto3.client('kinesis', region_name=aws_region)
- tags = {
- 'env': 'development',
- 'service': 'web'
- }
- success, changed, err_msg = (
- kinesis_stream.update_tags(
- client, 'test', tags, check_mode=True
- )
- )
- self.assertTrue(success)
-
- def test_stream_action_create(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, err_msg = (
- kinesis_stream.stream_action(
- client, 'test', 10, 'create', check_mode=True
- )
- )
- self.assertTrue(success)
-
- def test_stream_action_delete(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, err_msg = (
- kinesis_stream.stream_action(
- client, 'test', 10, 'delete', check_mode=True
- )
- )
- self.assertTrue(success)
-
- def test_stream_action_invalid(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, err_msg = (
- kinesis_stream.stream_action(
- client, 'test', 10, 'append', check_mode=True
- )
- )
- self.assertFalse(success)
-
- def test_retention_action_increase(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, err_msg = (
- kinesis_stream.retention_action(
- client, 'test', 48, 'increase', check_mode=True
- )
- )
- self.assertTrue(success)
-
- def test_retention_action_decrease(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, err_msg = (
- kinesis_stream.retention_action(
- client, 'test', 24, 'decrease', check_mode=True
- )
- )
- self.assertTrue(success)
-
- def test_retention_action_invalid(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, err_msg = (
- kinesis_stream.retention_action(
- client, 'test', 24, 'create', check_mode=True
- )
- )
- self.assertFalse(success)
-
- def test_update_shard_count(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, err_msg = (
- kinesis_stream.update_shard_count(
- client, 'test', 5, check_mode=True
- )
- )
- self.assertTrue(success)
-
- def test_update(self):
- client = boto3.client('kinesis', region_name=aws_region)
- current_stream = {
- 'OpenShardsCount': 5,
- 'ClosedShardsCount': 0,
- 'ShardsCount': 1,
- 'HasMoreShards': True,
- 'RetentionPeriodHours': 24,
- 'StreamName': 'test',
- 'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/test',
- 'StreamStatus': 'ACTIVE',
- 'EncryptionType': 'NONE'
- }
- tags = {
- 'env': 'development',
- 'service': 'web'
- }
- success, changed, err_msg = (
- kinesis_stream.update(
- client, current_stream, 'test', number_of_shards=2, retention_period=48,
- tags=tags, check_mode=True
- )
- )
- self.assertTrue(success)
- self.assertTrue(changed)
- self.assertEqual(err_msg, 'Kinesis Stream test updated successfully.')
-
- def test_create_stream(self):
- client = boto3.client('kinesis', region_name=aws_region)
- tags = {
- 'env': 'development',
- 'service': 'web'
- }
- success, changed, err_msg, results = (
- kinesis_stream.create_stream(
- client, 'test', number_of_shards=10, retention_period=48,
- tags=tags, check_mode=True
- )
- )
- should_return = {
- 'open_shards_count': 5,
- 'closed_shards_count': 0,
- 'shards_count': 5,
- 'has_more_shards': True,
- 'retention_period_hours': 24,
- 'stream_name': 'test',
- 'stream_arn': 'arn:aws:kinesis:east-side:123456789:stream/test',
- 'stream_status': 'ACTIVE',
- 'encryption_type': 'NONE',
- 'tags': tags,
- }
- self.assertTrue(success)
- self.assertTrue(changed)
- self.assertEqual(results, should_return)
- self.assertEqual(err_msg, 'Kinesis Stream test updated successfully.')
-
-    def test_enable_stream_encryption(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, changed, err_msg, results = (
- kinesis_stream.start_stream_encryption(
- client, 'test', encryption_type='KMS', key_id='', wait=True, wait_timeout=60, check_mode=True
- )
- )
- self.assertTrue(success)
- self.assertTrue(changed)
- self.assertEqual(err_msg, 'Kinesis Stream test encryption started successfully.')
-
-    def test_disable_stream_encryption(self):
- client = boto3.client('kinesis', region_name=aws_region)
- success, changed, err_msg, results = (
- kinesis_stream.stop_stream_encryption(
- client, 'test', encryption_type='KMS', key_id='', wait=True, wait_timeout=60, check_mode=True
- )
- )
- self.assertTrue(success)
- self.assertTrue(changed)
- self.assertEqual(err_msg, 'Kinesis Stream test encryption stopped successfully.')
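test_convert_to_lower above fixes the expected CamelCase-to-snake_case mapping, including the acronym case 'StreamARN' -> 'stream_arn'. A sketch consistent with those five keys (a reconstruction; runs of more than three capitals would need extra handling):

import re

def convert_to_lower(data):
    converted = {}
    for key, value in data.items():
        # prefix each run of 1-3 capitals with '_', lowercase, then drop the leading '_'
        snake = re.sub(r'([A-Z]{1,3})', r'_\1', key).lower().lstrip('_')
        converted[snake] = value
    return converted

assert convert_to_lower({'StreamARN': 'x'}) == {'stream_arn': 'x'}
assert convert_to_lower({'HasMoreShards': True}) == {'has_more_shards': True}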
diff --git a/test/units/modules/cloud/amazon/test_lambda.py b/test/units/modules/cloud/amazon/test_lambda.py
deleted file mode 100644
index 14ea2b454c..0000000000
--- a/test/units/modules/cloud/amazon/test_lambda.py
+++ /dev/null
@@ -1,273 +0,0 @@
-#
-# (c) 2017 Michael De La Rue
-#
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import copy
-import pytest
-
-from units.compat.mock import MagicMock, Mock, patch
-from ansible.module_utils import basic
-from units.modules.utils import set_module_args
-
-
-boto3 = pytest.importorskip("boto3")
-
-# 'lambda' is a Python keyword, so the module cannot be imported with a normal import statement.
-_temp = __import__("ansible.modules.cloud.amazon.lambda")
-lda = getattr(_temp.modules.cloud.amazon, "lambda")
-
-
-base_lambda_config = {
- 'FunctionName': 'lambda_name',
- 'Role': 'arn:aws:iam::987654321012:role/lambda_basic_execution',
- 'Handler': 'lambda_python.my_handler',
- 'Description': 'this that the other',
- 'Timeout': 3,
- 'MemorySize': 128,
- 'Runtime': 'python2.7',
- 'CodeSha256': 'AqMZ+xptM7aC9VXu+5jyp1sqO+Nj4WFMNzQxtPMP2n8=',
-}
-
-one_change_lambda_config = copy.copy(base_lambda_config)
-one_change_lambda_config['Timeout'] = 4
-two_change_lambda_config = copy.copy(one_change_lambda_config)
-two_change_lambda_config['Role'] = 'arn:aws:iam::987654321012:role/lambda_advanced_execution'
-code_change_lambda_config = copy.copy(base_lambda_config)
-code_change_lambda_config['CodeSha256'] = 'P+Zy8U4T4RiiHWElhL10VBKj9jw4rSJ5bm/TiW+4Rts='
-
-base_module_args = {
- "region": "us-west-1",
- "name": "lambda_name",
- "state": "present",
- "zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
- "runtime": 'python2.7',
- "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
- "memory_size": 128,
- "timeout": 3,
- "handler": 'lambda_python.my_handler'
-}
-module_args_with_environment = dict(base_module_args, environment_variables={
- "variable_name": "variable_value"
-})
-
-
-def make_mock_no_connection_connection(config):
- """return a mock of ansible's boto3_conn ready to return a mock AWS API client"""
- lambda_client_double = MagicMock()
- lambda_client_double.get_function.configure_mock(
- return_value=False
- )
- lambda_client_double.update_function_configuration.configure_mock(
- return_value={
- 'Version': 1
- }
- )
- fake_boto3_conn = Mock(return_value=lambda_client_double)
- return (fake_boto3_conn, lambda_client_double)
-
-
-def make_mock_connection(config):
- """return a mock of ansible's boto3_conn ready to return a mock AWS API client"""
- lambda_client_double = MagicMock()
- lambda_client_double.get_function.configure_mock(
- return_value={
- 'Configuration': config
- }
- )
- lambda_client_double.update_function_configuration.configure_mock(
- return_value={
- 'Version': 1
- }
- )
- fake_boto3_conn = Mock(return_value=lambda_client_double)
- return (fake_boto3_conn, lambda_client_double)
-
-
-class AnsibleFailJson(Exception):
- pass
-
-
-def fail_json_double(*args, **kwargs):
- """works like fail_json but returns module results inside exception instead of stdout"""
- kwargs['failed'] = True
- raise AnsibleFailJson(kwargs)
-
-
-# TODO: def test_handle_different_types_in_config_params():
-
-
-def test_create_lambda_if_not_exist():
-
- set_module_args(base_module_args)
- (boto3_conn_double, lambda_client_double) = make_mock_no_connection_connection(code_change_lambda_config)
-
- with patch.object(lda, 'boto3_conn', boto3_conn_double):
- try:
- lda.main()
- except SystemExit:
- pass
-
- # guard against calling other than for a lambda connection (e.g. IAM)
- assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
- assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
-    assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
-        "unexpectedly updated lambda configuration when it should only have been created"
-    assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
-        "updated lambda function code when the function should only have been created"
-    assert(len(lambda_client_double.create_function.mock_calls) > 0), \
-        "failed to call create_function"
- (create_args, create_kwargs) = lambda_client_double.create_function.call_args
- assert (len(create_kwargs) > 0), "expected create called with keyword args, none found"
-
- try:
- # For now I assume that we should NOT send an empty environment. It might
- # be okay / better to explicitly send an empty environment. However `None'
- # is not acceptable - mikedlr
- create_kwargs["Environment"]
-        raise Exception("Environment sent to boto when none expected")
- except KeyError:
- pass # We are happy, no environment is fine
-
-
-def test_update_lambda_if_code_changed():
-
- set_module_args(base_module_args)
- (boto3_conn_double, lambda_client_double) = make_mock_connection(code_change_lambda_config)
-
- with patch.object(lda, 'boto3_conn', boto3_conn_double):
- try:
- lda.main()
- except SystemExit:
- pass
-
- # guard against calling other than for a lambda connection (e.g. IAM)
- assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
- assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
- assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
- "unexpectedly updatede lambda configuration when only code changed"
- assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
- "lambda function update called multiple times when only one time should be needed"
- assert(len(lambda_client_double.update_function_code.mock_calls) > 1), \
- "failed to update lambda function when code changed"
-    # The bound is 3 because, after uploading, the code inspects the mock's return value to find
-    # the new function version, so the MagicMock records two calls for a single update.
- assert(len(lambda_client_double.update_function_code.mock_calls) < 3), \
- "lambda function code update called multiple times when only one time should be needed"
-
-
-def test_update_lambda_if_config_changed():
-
- set_module_args(base_module_args)
- (boto3_conn_double, lambda_client_double) = make_mock_connection(two_change_lambda_config)
-
- with patch.object(lda, 'boto3_conn', boto3_conn_double):
- try:
- lda.main()
- except SystemExit:
- pass
-
- # guard against calling other than for a lambda connection (e.g. IAM)
- assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
- assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
- assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
- "failed to update lambda function when configuration changed"
- assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
- "lambda function update called multiple times when only one time should be needed"
- assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
- "updated lambda code when no change should have happened"
-
-
-def test_update_lambda_if_only_one_config_item_changed():
-
- set_module_args(base_module_args)
- (boto3_conn_double, lambda_client_double) = make_mock_connection(one_change_lambda_config)
-
- with patch.object(lda, 'boto3_conn', boto3_conn_double):
- try:
- lda.main()
- except SystemExit:
- pass
-
- # guard against calling other than for a lambda connection (e.g. IAM)
- assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
- assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
- assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
- "failed to update lambda function when configuration changed"
- assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
- "lambda function update called multiple times when only one time should be needed"
- assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
- "updated lambda code when no change should have happened"
-
-
-def test_update_lambda_if_added_environment_variable():
-
- set_module_args(module_args_with_environment)
- (boto3_conn_double, lambda_client_double) = make_mock_connection(base_lambda_config)
-
- with patch.object(lda, 'boto3_conn', boto3_conn_double):
- try:
- lda.main()
- except SystemExit:
- pass
-
- # guard against calling other than for a lambda connection (e.g. IAM)
- assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
- assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
- assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
- "failed to update lambda function when configuration changed"
- assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
- "lambda function update called multiple times when only one time should be needed"
- assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
- "updated lambda code when no change should have happened"
-
- (update_args, update_kwargs) = lambda_client_double.update_function_configuration.call_args
- assert (len(update_kwargs) > 0), "expected update configuration called with keyword args, none found"
- assert update_kwargs['Environment']['Variables'] == module_args_with_environment['environment_variables']
-
-
-def test_dont_update_lambda_if_nothing_changed():
- set_module_args(base_module_args)
- (boto3_conn_double, lambda_client_double) = make_mock_connection(base_lambda_config)
-
- with patch.object(lda, 'boto3_conn', boto3_conn_double):
- try:
- lda.main()
- except SystemExit:
- pass
-
- # guard against calling other than for a lambda connection (e.g. IAM)
- assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
- assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
- assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
- "updated lambda function when no configuration changed"
- assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
- "updated lambda code when no change should have happened"
-
-
-def test_warn_region_not_specified():
-
- set_module_args({
- "name": "lambda_name",
- "state": "present",
-        # The module is called without a region, which should cause an error
- # "region": "us-east-1",
- "zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
- "runtime": 'python2.7',
- "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
- "handler": 'lambda_python.my_handler'})
-
- get_aws_connection_info_double = Mock(return_value=(None, None, None))
-
- with patch.object(lda, 'get_aws_connection_info', get_aws_connection_info_double):
- with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
- try:
- lda.main()
- except AnsibleFailJson as e:
- result = e.args[0]
- assert("region must be specified" in result['msg'])
diff --git a/test/units/modules/cloud/amazon/test_lambda_policy.py b/test/units/modules/cloud/amazon/test_lambda_policy.py
deleted file mode 100644
index 5c32370469..0000000000
--- a/test/units/modules/cloud/amazon/test_lambda_policy.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#
-# (c) 2017 Michael De La Rue
-#
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import copy
-
-import pytest
-
-from ansible.module_utils.aws.core import HAS_BOTO3
-from units.compat.mock import MagicMock
-from units.modules.utils import set_module_args
-
-if not HAS_BOTO3:
-    pytestmark = pytest.mark.skip("test_lambda_policy.py requires the `boto3` and `botocore` modules")
-
-# these imports come after the skip marker because they depend on boto
-from ansible.modules.cloud.amazon import lambda_policy
-from ansible.modules.cloud.amazon.lambda_policy import setup_module_object
-try:
- from botocore.exceptions import ClientError
-except ImportError:
- pass
-
-
-base_module_args = {
- "region": "us-west-1",
- "function_name": "this_is_a_test_function",
- "state": "present",
- "statement_id": "test-allow-lambda",
- "principal": 123456,
- "action": "lambda:*"
-}
-
-
-def test_module_is_created_sensibly():
- set_module_args(base_module_args)
- module = setup_module_object()
- assert module.params['function_name'] == 'this_is_a_test_function'
-
-
-module_double = MagicMock()
-module_double.fail_json_aws.side_effect = Exception("unexpected call to fail_json_aws")
-module_double.check_mode = False
-
-fake_module_params_present = {
- "state": "present",
- "statement_id": "test-allow-lambda",
- "principal": "apigateway.amazonaws.com",
- "action": "lambda:InvokeFunction",
- "source_arn": u'arn:aws:execute-api:us-east-1:123456789:efghijklmn/authorizers/*',
- "version": 0,
- "alias": None
-}
-fake_module_params_different = copy.deepcopy(fake_module_params_present)
-fake_module_params_different["action"] = "lambda:api-gateway"
-fake_module_params_absent = copy.deepcopy(fake_module_params_present)
-fake_module_params_absent["state"] = "absent"
-
-fake_policy_return = {
- u'Policy': (
- u'{"Version":"2012-10-17","Id":"default","Statement":[{"Sid":"1234567890abcdef1234567890abcdef",'
- u'"Effect":"Allow","Principal":{"Service":"apigateway.amazonaws.com"},"Action":"lambda:InvokeFunction",'
- u'"Resource":"arn:aws:lambda:us-east-1:123456789:function:test_authorizer",'
- u'"Condition":{"ArnLike":{"AWS:SourceArn":"arn:aws:execute-api:us-east-1:123456789:abcdefghij/authorizers/1a2b3c"}}},'
- u'{"Sid":"2234567890abcdef1234567890abcdef","Effect":"Allow","Principal":{"Service":"apigateway.amazonaws.com"},'
- u'"Action":"lambda:InvokeFunction","Resource":"arn:aws:lambda:us-east-1:123456789:function:test_authorizer",'
- u'"Condition":{"ArnLike":{"AWS:SourceArn":"arn:aws:execute-api:us-east-1:123456789:klmnopqrst/authorizers/4d5f6g"}}},'
- u'{"Sid":"1234567890abcdef1234567890abcdef","Effect":"Allow","Principal":{"Service":"apigateway.amazonaws.com"},'
- u'"Action":"lambda:InvokeFunction","Resource":"arn:aws:lambda:us-east-1:123456789:function:test_authorizer",'
- u'"Condition":{"ArnLike":{"AWS:SourceArn":"arn:aws:execute-api:eu-west-1:123456789:uvwxyzabcd/authorizers/7h8i9j"}}},'
- u'{"Sid":"test-allow-lambda","Effect":"Allow","Principal":{"Service":"apigateway.amazonaws.com"},'
- u'"Action":"lambda:InvokeFunction","Resource":"arn:aws:lambda:us-east-1:123456789:function:test_authorizer",'
- u'"Condition":{"ArnLike":{"AWS:SourceArn":"arn:aws:execute-api:us-east-1:123456789:efghijklmn/authorizers/*"}}},'
- u'{"Sid":"1234567890abcdef1234567890abcdef","Effect":"Allow","Principal":{"Service":"apigateway.amazonaws.com"},'
- u'"Action":"lambda:InvokeFunction","Resource":"arn:aws:lambda:us-east-1:123456789:function:test_authorizer",'
- u'"Condition":{"ArnLike":{"AWS:SourceArn":"arn:aws:execute-api:us-east-1:123456789:opqrstuvwx/authorizers/0k1l2m"}}}]}'),
- 'ResponseMetadata': {
- 'RetryAttempts': 0,
- 'HTTPStatusCode': 200,
- 'RequestId': 'abcdefgi-1234-a567-b890-123456789abc',
- 'HTTPHeaders': {
- 'date': 'Sun, 13 Aug 2017 10:54:17 GMT',
- 'x-amzn-requestid': 'abcdefgi-1234-a567-b890-123456789abc',
- 'content-length': '1878',
- 'content-type': 'application/json',
- 'connection': 'keep-alive'}}}
-
-error_response = {'Error': {'Code': 'ResourceNotFoundException', 'Message': 'Fake Testing Error'}}
-operation_name = 'FakeOperation'
-
-
-def test_manage_state_adds_missing_permissions():
- lambda_client_double = MagicMock()
- # Policy actually: not present Requested State: present Should: create
- lambda_client_double.get_policy.side_effect = ClientError(error_response, operation_name)
- fake_module_params = copy.deepcopy(fake_module_params_present)
- module_double.params = fake_module_params
- lambda_policy.manage_state(module_double, lambda_client_double)
- assert lambda_client_double.get_policy.call_count > 0
- assert lambda_client_double.add_permission.call_count > 0
- lambda_client_double.remove_permission.assert_not_called()
-
-
-def test_manage_state_leaves_existing_permissions():
- lambda_client_double = MagicMock()
- # Policy actually: present Requested State: present Should: do nothing
- lambda_client_double.get_policy.return_value = fake_policy_return
- fake_module_params = copy.deepcopy(fake_module_params_present)
- module_double.params = fake_module_params
- lambda_policy.manage_state(module_double, lambda_client_double)
- assert lambda_client_double.get_policy.call_count > 0
- lambda_client_double.add_permission.assert_not_called()
- lambda_client_double.remove_permission.assert_not_called()
-
-
-def test_manage_state_updates_nonmatching_permissions():
- lambda_client_double = MagicMock()
-    # Policy actually: present but different  Requested State: present  Should: replace (remove and re-add)
- lambda_client_double.get_policy.return_value = fake_policy_return
- fake_module_params = copy.deepcopy(fake_module_params_different)
- module_double.params = fake_module_params
- lambda_policy.manage_state(module_double, lambda_client_double)
- assert lambda_client_double.get_policy.call_count > 0
- assert lambda_client_double.add_permission.call_count > 0
- assert lambda_client_double.remove_permission.call_count > 0
-
-
-def test_manage_state_removes_unwanted_permissions():
- lambda_client_double = MagicMock()
- # Policy actually: present Requested State: not present Should: remove
- lambda_client_double.get_policy.return_value = fake_policy_return
- fake_module_params = copy.deepcopy(fake_module_params_absent)
- module_double.params = fake_module_params
- lambda_policy.manage_state(module_double, lambda_client_double)
- assert lambda_client_double.get_policy.call_count > 0
- lambda_client_double.add_permission.assert_not_called()
- assert lambda_client_double.remove_permission.call_count > 0
-
-
-def test_manage_state_leaves_already_removed_permissions():
- lambda_client_double = MagicMock()
- # Policy actually: absent Requested State: absent Should: do nothing
- lambda_client_double.get_policy.side_effect = ClientError(error_response, operation_name)
- fake_module_params = copy.deepcopy(fake_module_params_absent)
- module_double.params = fake_module_params
- lambda_policy.manage_state(module_double, lambda_client_double)
- assert lambda_client_double.get_policy.call_count > 0
- lambda_client_double.add_permission.assert_not_called()
- lambda_client_double.remove_permission.assert_not_called()
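The stubbing pattern these lambda_policy tests use, shown stand-alone: a MagicMock client whose get_policy raises the same botocore ClientError a live API returns for a missing policy, so manage_state exercises its create path:

from unittest.mock import MagicMock
from botocore.exceptions import ClientError

error_response = {'Error': {'Code': 'ResourceNotFoundException', 'Message': 'Fake Testing Error'}}
client = MagicMock()
# any call to get_policy now raises, simulating an absent policy
client.get_policy.side_effect = ClientError(error_response, 'GetPolicy')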
diff --git a/test/units/modules/cloud/amazon/test_redshift_cross_region_snapshots.py b/test/units/modules/cloud/amazon/test_redshift_cross_region_snapshots.py
deleted file mode 100644
index 1891b5c890..0000000000
--- a/test/units/modules/cloud/amazon/test_redshift_cross_region_snapshots.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.modules.cloud.amazon import redshift_cross_region_snapshots as rcrs
-
-mock_status_enabled = {
- 'SnapshotCopyGrantName': 'snapshot-us-east-1-to-us-west-2',
- 'DestinationRegion': 'us-west-2',
- 'RetentionPeriod': 1,
-}
-
-mock_status_disabled = {}
-
-mock_request_illegal = {
- 'snapshot_copy_grant': 'changed',
- 'destination_region': 'us-west-2',
- 'snapshot_retention_period': 1
-}
-
-mock_request_update = {
- 'snapshot_copy_grant': 'snapshot-us-east-1-to-us-west-2',
- 'destination_region': 'us-west-2',
- 'snapshot_retention_period': 3
-}
-
-mock_request_no_update = {
- 'snapshot_copy_grant': 'snapshot-us-east-1-to-us-west-2',
- 'destination_region': 'us-west-2',
- 'snapshot_retention_period': 1
-}
-
-
-def test_fail_at_unsupported_operations():
- response = rcrs.requesting_unsupported_modifications(
- mock_status_enabled, mock_request_illegal
- )
- assert response is True
-
-
-def test_needs_update_true():
- response = rcrs.needs_update(mock_status_enabled, mock_request_update)
- assert response is True
-
-
-def test_no_change():
- response = rcrs.requesting_unsupported_modifications(
- mock_status_enabled, mock_request_no_update
- )
- needs_update_response = rcrs.needs_update(mock_status_enabled, mock_request_no_update)
- assert response is False
- assert needs_update_response is False
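The three tests above only pin down two predicates: changing the grant name or destination region is unsupported, while a different retention period is a legal update. A hypothetical sketch matching exactly those assertions:

def requesting_unsupported_modifications(status, request):
    # grant name and destination region cannot be modified in place
    return (status['SnapshotCopyGrantName'] != request['snapshot_copy_grant']
            or status['DestinationRegion'] != request['destination_region'])

def needs_update(status, request):
    # only the retention period may be changed on an existing configuration
    return status['RetentionPeriod'] != request['snapshot_retention_period']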
diff --git a/test/units/modules/cloud/amazon/test_route53_zone.py b/test/units/modules/cloud/amazon/test_route53_zone.py
deleted file mode 100644
index 283584a4dd..0000000000
--- a/test/units/modules/cloud/amazon/test_route53_zone.py
+++ /dev/null
@@ -1,610 +0,0 @@
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import functools
-
-from ansible.modules.cloud.amazon import route53_zone
-from units.compat import unittest
-from units.compat.mock import patch, call
-from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
-
-
-def parameterized(params_list):
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- for params_map in params_list:
- params_map.update(kwargs)
- func(*args, **params_map)
- return wrapper
- return decorator
-
-
-# Inline and replace with subdict.items() <= superdict.items(), when Python 2.6 compat can be dropped
-def is_subdict(subdict, superdict):
- return all(superdict[k] == v for k, v in subdict.items())
-
-
-@patch('ansible.module_utils.aws.core.HAS_BOTO3', new=True)
-@patch.object(route53_zone.AnsibleAWSModule, 'client')
-@patch.object(route53_zone.time, 'time', return_value=1)
-class TestRoute53Module(ModuleTestCase):
- def test_mutually_exclusive(self, *args):
- with self.assertRaises(AnsibleFailJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'eu-central-1',
- 'zone': 'example.com',
- 'vpc_id': 'vpc-94ccc2ff',
- 'vpc_region': 'eu-central-1',
- 'comment': 'foobar',
- 'delegation_set_id': 'A1BCDEF2GHIJKL',
- 'state': 'present',
- })
- route53_zone.main()
-
- self.assertEqual(
- exec_info.exception.args[0]['msg'],
- 'parameters are mutually exclusive: delegation_set_id|vpc_id, delegation_set_id|vpc_region',
- )
-
- @parameterized([
- {
- 'check_mode': False,
- 'response': {
- 'private_zone': False,
- 'vpc_id': None,
- 'vpc_region': None,
- 'comment': 'foobar',
- 'name': 'example.com.',
- 'delegation_set_id': '',
- 'zone_id': 'ZONE_ID',
- },
- },
- {
- 'check_mode': True,
- 'response': {
- 'private_zone': False,
- 'vpc_id': None,
- 'vpc_region': None,
- 'comment': 'foobar',
- 'name': 'example.com.',
- 'delegation_set_id': None,
- 'zone_id': None,
- },
- }
- ])
- @patch.object(route53_zone, 'find_zones', return_value=[])
- def test_create_public_zone(self, find_zones_mock, time_mock, client_mock, check_mode, response):
- client_mock.return_value.create_hosted_zone.return_value = {
- 'HostedZone': {
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {
- 'Comment': 'foobar',
- 'PrivateZone': False,
- },
- },
- }
-
- with self.assertRaises(AnsibleExitJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'eu-central-1',
- 'zone': 'example.com',
- 'comment': 'foobar',
- 'state': 'present',
- '_ansible_check_mode': check_mode,
- })
- route53_zone.main()
-
- if check_mode:
- client_mock.return_value.create_hosted_zone.assert_not_called()
- else:
- client_mock.return_value.create_hosted_zone.assert_called_once_with(**{
- 'HostedZoneConfig': {
- 'Comment': 'foobar',
- 'PrivateZone': False,
- },
- 'Name': 'example.com.',
- 'CallerReference': 'example.com.-1',
- })
-
- self.assertEqual(exec_info.exception.args[0]['changed'], True)
- self.assertTrue(is_subdict(response, exec_info.exception.args[0]))
-
- @parameterized([
- {
- 'check_mode': False,
- 'response': {
- 'private_zone': True,
- 'vpc_id': 'vpc-1',
- 'vpc_region': 'eu-central-1',
- 'comment': 'foobar',
- 'name': 'example.com.',
- 'delegation_set_id': None,
- 'zone_id': 'ZONE_ID',
- },
- },
- {
- 'check_mode': True,
- 'response': {
- 'private_zone': True,
- 'vpc_id': 'vpc-1',
- 'vpc_region': 'eu-central-1',
- 'comment': 'foobar',
- 'name': 'example.com.',
- 'delegation_set_id': None,
- 'zone_id': None,
- },
- }
- ])
- @patch.object(route53_zone, 'find_zones', return_value=[])
- def test_create_private_zone(self, find_zones_mock, time_mock, client_mock, check_mode, response):
- client_mock.return_value.create_hosted_zone.return_value = {
- 'HostedZone': {
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {
- 'Comment': 'foobar',
- 'PrivateZone': True
- },
- },
- }
-
- with self.assertRaises(AnsibleExitJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'eu-central-1',
- 'zone': 'example.com',
- 'comment': 'foobar',
- 'vpc_id': 'vpc-1',
- 'vpc_region': 'eu-central-1',
- 'state': 'present',
- '_ansible_check_mode': check_mode,
- })
- route53_zone.main()
-
- if check_mode:
- client_mock.return_value.create_hosted_zone.assert_not_called()
- else:
- client_mock.return_value.create_hosted_zone.assert_called_once_with(**{
- 'HostedZoneConfig': {
- 'Comment': 'foobar',
- 'PrivateZone': True,
- },
- 'Name': 'example.com.',
- 'CallerReference': 'example.com.-1',
- 'VPC': {
- 'VPCRegion': 'eu-central-1',
- 'VPCId': 'vpc-1',
- },
- })
-
- self.assertEqual(exec_info.exception.args[0]['changed'], True)
- self.assertTrue(is_subdict(response, exec_info.exception.args[0]))
-
- @parameterized([
- {
- 'check_mode': False,
- 'response': {
- 'private_zone': False,
- 'vpc_id': None,
- 'vpc_region': None,
- 'comment': 'new',
- 'name': 'example.com.',
- 'delegation_set_id': '',
- 'zone_id': 'ZONE_ID',
- },
- },
- {
- 'check_mode': True,
- 'response': {
- 'private_zone': False,
- 'vpc_id': None,
- 'vpc_region': None,
- 'comment': 'new',
- 'name': 'example.com.',
- 'delegation_set_id': None,
- 'zone_id': 'ZONE_ID',
- },
- }
- ])
- @patch.object(route53_zone, 'find_zones', return_value=[{
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': '', 'PrivateZone': False},
- }])
- def test_update_comment_public_zone(self, find_zones_mock, time_mock, client_mock, check_mode, response):
- client_mock.return_value.get_hosted_zone.return_value = {
- 'HostedZone': {
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': '', 'PrivateZone': False},
- },
- }
-
- with self.assertRaises(AnsibleExitJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'eu-central-1',
- 'zone': 'example.com',
- 'comment': 'new',
- 'state': 'present',
- '_ansible_check_mode': check_mode,
- })
- route53_zone.main()
-
- if check_mode:
- client_mock.return_value.update_hosted_zone_comment.assert_not_called()
- else:
- client_mock.return_value.update_hosted_zone_comment.assert_called_once_with(**{
- 'Id': '/hostedzone/ZONE_ID',
- 'Comment': 'new',
- })
-
- self.assertEqual(exec_info.exception.args[0]['changed'], True)
- self.assertTrue(is_subdict(response, exec_info.exception.args[0]))
-
- @patch.object(route53_zone, 'find_zones', return_value=[{
- 'Id': '/hostedzone/Z22OU4IUOVYM30',
- 'Name': 'example.com.',
- 'Config': {'Comment': '', 'PrivateZone': False},
- }])
- def test_update_public_zone_no_changes(self, find_zones_mock, time_mock, client_mock):
- client_mock.return_value.get_hosted_zone.return_value = {
- 'HostedZone': {
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': '', 'PrivateZone': False},
- },
- }
-
- with self.assertRaises(AnsibleExitJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'eu-central-1',
- 'zone': 'example.com',
- 'comment': '',
- 'state': 'present',
- })
- route53_zone.main()
-
- client_mock.return_value.update_hosted_zone_comment.assert_not_called()
- self.assertEqual(exec_info.exception.args[0]['changed'], False)
-
- @parameterized([
- {
- 'check_mode': False,
- 'response': {
- 'private_zone': True,
- 'vpc_id': 'vpc-1',
- 'vpc_region': 'eu-central-1',
- 'comment': 'new',
- 'name': 'example.com.',
- 'delegation_set_id': None,
- 'zone_id': 'ZONE_ID',
- },
- },
- {
- 'check_mode': True,
- 'response': {
- 'private_zone': True,
- 'vpc_id': 'vpc-1',
- 'vpc_region': 'eu-central-1',
- 'comment': 'new',
- 'name': 'example.com.',
- 'delegation_set_id': None,
- 'zone_id': 'ZONE_ID',
- },
- }
- ])
- @patch.object(route53_zone, 'find_zones', return_value=[{
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': 'foobar', 'PrivateZone': True},
- }])
- def test_update_comment_private_zone(self, find_zones_mock, time_mock, client_mock, check_mode, response):
- client_mock.return_value.get_hosted_zone.return_value = {
- 'HostedZone': {
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': 'foobar', 'PrivateZone': True},
- },
- 'VPCs': [{'VPCRegion': 'eu-central-1', 'VPCId': 'vpc-1'}],
- }
-
- with self.assertRaises(AnsibleExitJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'eu-central-1',
- 'zone': 'example.com',
- 'comment': 'new',
- 'vpc_id': 'vpc-1',
- 'vpc_region': 'eu-central-1',
- 'state': 'present',
- '_ansible_check_mode': check_mode,
- })
- route53_zone.main()
-
- if check_mode:
- client_mock.return_value.update_hosted_zone_comment.assert_not_called()
- else:
- client_mock.return_value.update_hosted_zone_comment.assert_called_once_with(**{
- 'Id': '/hostedzone/ZONE_ID',
- 'Comment': 'new',
- })
-
- self.assertEqual(exec_info.exception.args[0]['changed'], True)
- self.assertTrue(is_subdict(response, exec_info.exception.args[0]))
-
- @parameterized([
- {
- 'check_mode': False,
- 'response': {
- 'private_zone': True,
- 'vpc_id': 'vpc-2',
- 'vpc_region': 'us-east-2',
- 'comment': 'foobar',
- 'name': 'example.com.',
- 'delegation_set_id': None,
- 'zone_id': 'ZONE_ID_2',
- },
- },
- {
- 'check_mode': True,
- 'response': {
- 'private_zone': True,
- 'vpc_id': 'vpc-2',
- 'vpc_region': 'us-east-2',
- 'comment': 'foobar',
- 'name': 'example.com.',
- 'delegation_set_id': None,
- 'zone_id': None,
- },
- }
- ])
- @patch.object(route53_zone, 'find_zones', return_value=[{
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': 'foobar', 'PrivateZone': True},
- }])
- def test_update_vpc_private_zone(self, find_zones_mock, time_mock, client_mock, check_mode, response):
- client_mock.return_value.get_hosted_zone.return_value = {
- 'HostedZone': {
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': 'foobar', 'PrivateZone': True},
- },
- 'VPCs': [{'VPCRegion': 'eu-central-1', 'VPCId': 'vpc-1'}],
- }
- client_mock.return_value.create_hosted_zone.return_value = {
- 'HostedZone': {
- 'Id': '/hostedzone/ZONE_ID_2',
- 'Name': 'example.com.',
- 'Config': {
- 'Comment': 'foobar',
- 'PrivateZone': True
- },
- },
- }
-
- with self.assertRaises(AnsibleExitJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'us-east-2',
- 'zone': 'example.com',
- 'comment': 'foobar',
- 'vpc_id': 'vpc-2',
- 'vpc_region': 'us-east-2',
- 'state': 'present',
- '_ansible_check_mode': check_mode,
- })
- route53_zone.main()
-
- if check_mode:
- client_mock.return_value.create_hosted_zone.assert_not_called()
- else:
- client_mock.return_value.create_hosted_zone.assert_called_once_with(**{
- 'HostedZoneConfig': {
- 'Comment': 'foobar',
- 'PrivateZone': True,
- },
- 'Name': 'example.com.',
- 'CallerReference': 'example.com.-1',
- 'VPC': {
- 'VPCRegion': 'us-east-2',
- 'VPCId': 'vpc-2',
- },
- })
-
- self.assertEqual(exec_info.exception.args[0]['changed'], True)
- self.assertTrue(is_subdict(response, exec_info.exception.args[0]))
-
- @patch.object(route53_zone, 'find_zones', return_value=[{
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': 'foobar', 'PrivateZone': True},
- }])
- def test_update_private_zone_no_changes(self, find_zones_mock, time_mock, client_mock):
- client_mock.return_value.get_hosted_zone.return_value = {
- 'HostedZone': {
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': 'foobar', 'PrivateZone': True},
- },
- 'VPCs': [{'VPCRegion': 'eu-central-1', 'VPCId': 'vpc-1'}],
- }
-
- with self.assertRaises(AnsibleExitJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'eu-central-1',
- 'zone': 'example.com',
- 'comment': 'foobar',
- 'vpc_id': 'vpc-1',
- 'vpc_region': 'eu-central-1',
- 'state': 'present',
- })
- route53_zone.main()
-
- client_mock.return_value.update_hosted_zone_comment.assert_not_called()
- self.assertEqual(exec_info.exception.args[0]['changed'], False)
-
- response = {
- 'private_zone': True,
- 'vpc_id': 'vpc-1',
- 'vpc_region': 'eu-central-1',
- 'comment': 'foobar',
- 'name': 'example.com.',
- 'delegation_set_id': None,
- 'zone_id': 'ZONE_ID',
- }
- self.assertTrue(is_subdict(response, exec_info.exception.args[0]))
-
- @parameterized([
- {'check_mode': False},
- {'check_mode': True}
- ])
- @patch.object(route53_zone, 'find_zones', return_value=[{
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': '', 'PrivateZone': False},
- }])
- def test_delete_public_zone(self, find_zones_mock, time_mock, client_mock, check_mode):
- with self.assertRaises(AnsibleExitJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'eu-central-1',
- 'zone': 'example.com',
- 'state': 'absent',
- '_ansible_check_mode': check_mode,
- })
- route53_zone.main()
-
- if check_mode:
- client_mock.return_value.delete_hosted_zone.assert_not_called()
- else:
- client_mock.return_value.delete_hosted_zone.assert_called_once_with(**{
- 'Id': '/hostedzone/ZONE_ID',
- })
-
- self.assertEqual(exec_info.exception.args[0]['changed'], True)
-
- @parameterized([
- {'check_mode': False},
- {'check_mode': True}
- ])
- @patch.object(route53_zone, 'find_zones', return_value=[{
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': 'foobar', 'PrivateZone': True},
- }])
- def test_delete_private_zone(self, find_zones_mock, time_mock, client_mock, check_mode):
- client_mock.return_value.get_hosted_zone.return_value = {
- 'HostedZone': {
- 'Id': '/hostedzone/ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': 'foobar', 'PrivateZone': True},
- },
- 'VPCs': [{'VPCRegion': 'eu-central-1', 'VPCId': 'vpc-1'}],
- }
-
- with self.assertRaises(AnsibleExitJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'eu-central-1',
- 'zone': 'example.com',
- 'vpc_id': 'vpc-1',
- 'vpc_region': 'eu-central-1',
- 'state': 'absent',
- '_ansible_check_mode': check_mode,
- })
- route53_zone.main()
-
- if check_mode:
- client_mock.return_value.delete_hosted_zone.assert_not_called()
- else:
- client_mock.return_value.delete_hosted_zone.assert_called_once_with(**{
- 'Id': '/hostedzone/ZONE_ID',
- })
-
- self.assertEqual(exec_info.exception.args[0]['changed'], True)
-
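- # Stacking two @parameterized decorators runs every hosted_zone_id case in both
- # check modes, i.e. the cross-product of the two parameter lists.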
- @parameterized([
- {'check_mode': False},
- {'check_mode': True}
- ])
- @parameterized([
- {
- 'hosted_zone_id': 'PRIVATE_ZONE_ID',
- 'call_params': [call(**{
- 'Id': 'PRIVATE_ZONE_ID',
- })],
- }, {
- 'hosted_zone_id': 'all',
- 'call_params': [call(**{
- 'Id': '/hostedzone/PUBLIC_ZONE_ID',
- }), call(**{
- 'Id': '/hostedzone/PRIVATE_ZONE_ID',
- })],
- }
- ])
- @patch.object(route53_zone, 'find_zones', return_value=[{
- 'Id': '/hostedzone/PUBLIC_ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': '', 'PrivateZone': False},
- }, {
- 'Id': '/hostedzone/PRIVATE_ZONE_ID',
- 'Name': 'example.com.',
- 'Config': {'Comment': 'foobar', 'PrivateZone': True},
- }])
- def test_delete_by_zone_id(self, find_zones_mock, time_mock, client_mock, hosted_zone_id, call_params, check_mode):
- with self.assertRaises(AnsibleExitJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'eu-central-1',
- 'zone': 'example.com',
- 'hosted_zone_id': hosted_zone_id,
- 'state': 'absent',
- '_ansible_check_mode': check_mode,
- })
- route53_zone.main()
-
- if check_mode:
- client_mock.return_value.delete_hosted_zone.assert_not_called()
- else:
- client_mock.return_value.delete_hosted_zone.assert_has_calls(call_params)
-
- self.assertEqual(exec_info.exception.args[0]['changed'], True)
-
- @patch.object(route53_zone, 'find_zones', return_value=[])
- def test_delete_absent_zone(self, find_zones_mock, time_mock, client_mock):
- with self.assertRaises(AnsibleExitJson) as exec_info:
- set_module_args({
- 'secret_key': 'SECRET_KEY',
- 'access_key': 'ACCESS_KEY',
- 'region': 'eu-central-1',
- 'zone': 'example.com',
- 'state': 'absent',
- })
- route53_zone.main()
-
- client_mock.return_value.delete_hosted_zone.assert_not_called()
- self.assertEqual(exec_info.exception.args[0]['changed'], False)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/test/units/modules/cloud/amazon/test_s3_bucket_notification.py b/test/units/modules/cloud/amazon/test_s3_bucket_notification.py
deleted file mode 100644
index cf342064c0..0000000000
--- a/test/units/modules/cloud/amazon/test_s3_bucket_notification.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-
-from units.compat.mock import MagicMock, patch
-from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
-
-from ansible.modules.cloud.amazon.s3_bucket_notification import AmazonBucket, Config
-from ansible.modules.cloud.amazon import s3_bucket_notification
-try:
- from botocore.exceptions import ClientError
-except ImportError:
- pass
-
-
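- # These tests drive AmazonBucket against a MagicMock boto3 S3 client, so no
- # real AWS calls are made; assertions inspect the calls recorded on the mock.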
-class TestAmazonBucketOperations:
- def test_current_config(self):
- api_config = {
- 'Id': 'test-id',
- 'LambdaFunctionArn': 'test-arn',
- 'Events': [],
- 'Filter': {
- 'Key': {
- 'FilterRules': [{
- 'Name': 'Prefix',
- 'Value': ''
- }, {
- 'Name': 'Suffix',
- 'Value': ''
- }]
- }
- }
- }
- client = MagicMock()
- client.get_bucket_notification_configuration.return_value = {
- 'LambdaFunctionConfigurations': [api_config]
- }
- bucket = AmazonBucket(client, 'test-bucket')
- current = bucket.current_config('test-id')
- assert current.raw == api_config
- assert client.get_bucket_notification_configuration.call_count == 1
-
- def test_current_config_empty(self):
- client = MagicMock()
- client.get_bucket_notification_configuration.return_value = {
- 'LambdaFunctionConfigurations': []
- }
- bucket = AmazonBucket(client, 'test-bucket')
- current = bucket.current_config('test-id')
- assert current is None
- assert client.get_bucket_notification_configuration.call_count == 1
-
- def test_apply_invalid_config(self):
- client = MagicMock()
- client.get_bucket_notification_configuration.return_value = {
- 'LambdaFunctionConfigurations': []
- }
- client.put_bucket_notification_configuration.side_effect = ClientError({}, '')
- bucket = AmazonBucket(client, 'test-bucket')
- config = Config.from_params(**{
- 'event_name': 'test_event',
- 'lambda_function_arn': 'lambda_arn',
- 'lambda_version': 1,
- 'events': ['s3:ObjectRemoved:*', 's3:ObjectCreated:*'],
- 'prefix': '',
- 'suffix': ''
- })
- with pytest.raises(ClientError):
- bucket.apply_config(config)
-
- def test_apply_config(self):
- client = MagicMock()
- client.get_bucket_notification_configuration.return_value = {
- 'LambdaFunctionConfigurations': []
- }
-
- bucket = AmazonBucket(client, 'test-bucket')
- config = Config.from_params(**{
- 'event_name': 'test_event',
- 'lambda_function_arn': 'lambda_arn',
- 'lambda_version': 1,
- 'events': ['s3:ObjectRemoved:*', 's3:ObjectCreated:*'],
- 'prefix': '',
- 'suffix': ''
- })
- bucket.apply_config(config)
- assert client.get_bucket_notification_configuration.call_count == 1
- assert client.put_bucket_notification_configuration.call_count == 1
-
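- # Merging a new event into an existing notification keeps the configuration Id,
- # appends the lambda_version to the ARN and sorts the resulting Events list.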
- def test_apply_config_add_event(self):
- api_config = {
- 'Id': 'test-id',
- 'LambdaFunctionArn': 'test-arn',
- 'Events': ['s3:ObjectRemoved:*'],
- 'Filter': {
- 'Key': {
- 'FilterRules': [{
- 'Name': 'Prefix',
- 'Value': ''
- }, {
- 'Name': 'Suffix',
- 'Value': ''
- }]
- }
- }
- }
- client = MagicMock()
- client.get_bucket_notification_configuration.return_value = {
- 'LambdaFunctionConfigurations': [api_config]
- }
-
- bucket = AmazonBucket(client, 'test-bucket')
- config = Config.from_params(**{
- 'event_name': 'test-id',
- 'lambda_function_arn': 'test-arn',
- 'lambda_version': 1,
- 'events': ['s3:ObjectRemoved:*', 's3:ObjectCreated:*'],
- 'prefix': '',
- 'suffix': ''
- })
- bucket.apply_config(config)
- assert client.get_bucket_notification_configuration.call_count == 1
- assert client.put_bucket_notification_configuration.call_count == 1
- client.put_bucket_notification_configuration.assert_called_with(
- Bucket='test-bucket',
- NotificationConfiguration={
- 'LambdaFunctionConfigurations': [{
- 'Id': 'test-id',
- 'LambdaFunctionArn': 'test-arn:1',
- 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
- 'Filter': {
- 'Key': {
- 'FilterRules': [{
- 'Name': 'Prefix',
- 'Value': ''
- }, {
- 'Name': 'Suffix',
- 'Value': ''
- }]
- }
- }
- }]
- }
- )
-
- def test_delete_config(self):
- api_config = {
- 'Id': 'test-id',
- 'LambdaFunctionArn': 'test-arn',
- 'Events': [],
- 'Filter': {
- 'Key': {
- 'FilterRules': [{
- 'Name': 'Prefix',
- 'Value': ''
- }, {
- 'Name': 'Suffix',
- 'Value': ''
- }]
- }
- }
- }
- client = MagicMock()
- client.get_bucket_notification_configuration.return_value = {
- 'LambdaFunctionConfigurations': [api_config]
- }
- bucket = AmazonBucket(client, 'test-bucket')
- config = Config.from_params(**{
- 'event_name': 'test-id',
- 'lambda_function_arn': 'lambda_arn',
- 'lambda_version': 1,
- 'events': [],
- 'prefix': '',
- 'suffix': ''
- })
- bucket.delete_config(config)
- assert client.get_bucket_notification_configuration.call_count == 1
- assert client.put_bucket_notification_configuration.call_count == 1
- client.put_bucket_notification_configuration.assert_called_with(
- Bucket='test-bucket',
- NotificationConfiguration={'LambdaFunctionConfigurations': []}
- )
-
-
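- # Config.from_params should build the same raw AWS payload as wrapping the
- # API-style dict directly; note the ARN gains a ':10' suffix from lambda_version.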
-class TestConfig:
- def test_config_from_params(self):
- config = Config({
- 'Id': 'test-id',
- 'LambdaFunctionArn': 'test-arn:10',
- 'Events': [],
- 'Filter': {
- 'Key': {
- 'FilterRules': [{
- 'Name': 'Prefix',
- 'Value': ''
- }, {
- 'Name': 'Suffix',
- 'Value': ''
- }]
- }
- }
- })
- config_from_params = Config.from_params(**{
- 'event_name': 'test-id',
- 'lambda_function_arn': 'test-arn',
- 'lambda_version': 10,
- 'events': [],
- 'prefix': '',
- 'suffix': ''
- })
- assert config.raw == config_from_params.raw
- assert config == config_from_params
-
-
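- # Module-level tests: run s3_bucket_notification.main() against a patched AWS
- # client and inspect the AnsibleExitJson/AnsibleFailJson payload it raises.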
-class TestModule(ModuleTestCase):
- def test_module_fail_when_required_args_missing(self):
- with pytest.raises(AnsibleFailJson):
- set_module_args({})
- s3_bucket_notification.main()
-
- @patch('ansible.modules.cloud.amazon.s3_bucket_notification.AnsibleAWSModule.client')
- def test_add_s3_bucket_notification(self, aws_client):
- aws_client.return_value.get_bucket_notification_configuration.return_value = {
- 'LambdaFunctionConfigurations': []
- }
- set_module_args({
- 'region': 'us-east-2',
- 'lambda_function_arn': 'test-lambda-arn',
- 'bucket_name': 'test-lambda',
- 'event_name': 'test-id',
- 'events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
- 'state': 'present',
- 'prefix': '/images',
- 'suffix': '.jpg'
- })
- with pytest.raises(AnsibleExitJson) as context:
- s3_bucket_notification.main()
- result = context.value.args[0]
- assert result['changed'] is True
- assert aws_client.return_value.get_bucket_notification_configuration.call_count == 1
- aws_client.return_value.put_bucket_notification_configuration.assert_called_with(
- Bucket='test-lambda',
- NotificationConfiguration={
- 'LambdaFunctionConfigurations': [{
- 'Id': 'test-id',
- 'LambdaFunctionArn': 'test-lambda-arn',
- 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'],
- 'Filter': {
- 'Key': {
- 'FilterRules': [{
- 'Name': 'Prefix',
- 'Value': '/images'
- }, {
- 'Name': 'Suffix',
- 'Value': '.jpg'
- }]
- }
- }
- }]
- })
diff --git a/test/units/plugins/connection/test_aws_ssm.py b/test/units/plugins/connection/test_aws_ssm.py
deleted file mode 100644
index bcea207e78..0000000000
--- a/test/units/plugins/connection/test_aws_ssm.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from io import StringIO
-import pytest
-import sys
-from units.compat import unittest
-from units.compat.mock import patch, MagicMock
-from ansible.playbook.play_context import PlayContext
-from ansible.plugins.loader import connection_loader
-
-
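- # The aws_ssm connection plugin is exercised with boto3, subprocess and its own
- # helper methods replaced by MagicMock, so no real SSM session is started.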
-@pytest.mark.skipif(sys.version_info < (2, 7), reason="requires Python 2.7 or higher")
-class TestConnectionBaseClass(unittest.TestCase):
-
- @patch('os.path.exists')
- @patch('subprocess.Popen')
- @patch('select.poll')
- @patch('boto3.client')
- def test_plugins_connection_aws_ssm_start_session(self, boto_client, s_poll, s_popen, mock_ospe):
- pc = PlayContext()
- new_stdin = StringIO()
- conn = connection_loader.get('aws_ssm', pc, new_stdin)
- conn.get_option = MagicMock()
- conn.get_option.side_effect = ['i1234', 'executable', 'abcd', 'i1234']
- conn.host = 'abc'
- mock_ospe.return_value = True
- conn.start_session = MagicMock()
- conn._session_id = MagicMock()
- conn._session_id.return_value = 's1'
- s_popen.return_value.stdin.write = MagicMock()
- s_poll.return_value = MagicMock()
- s_poll.return_value.register = MagicMock()
- s_popen.return_value.poll = MagicMock()
- s_popen.return_value.poll.return_value = None
- conn._stdin_readline = MagicMock()
- conn._stdin_readline.return_value = 'abc123'
- conn.SESSION_START = 'abc'
- conn.start_session()
-
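- # random.choice is patched so the generated start/end command markers are the
- # predictable strings 'aaaaa' and 'bbbbb' fed back through the fake stdout.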
- @patch('random.choice')
- def test_plugins_connection_aws_ssm_exec_command(self, r_choice):
- pc = PlayContext()
- new_stdin = StringIO()
- conn = connection_loader.get('aws_ssm', pc, new_stdin)
- r_choice.side_effect = ['a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b']
- conn.MARK_LENGTH = 5
- conn._session = MagicMock()
- conn._session.stdin.write = MagicMock()
- conn._wrap_command = MagicMock()
- conn._wrap_command.return_value = 'cmd1'
- conn._flush_stderr = MagicMock()
- conn._windows = MagicMock()
- conn._windows.return_value = True
- conn._session.poll = MagicMock()
- conn._session.poll.return_value = None
- conn._timeout = MagicMock()
- conn._poll_stdout = MagicMock()
- conn._poll_stdout.poll = MagicMock()
- conn._poll_stdout.poll.return_value = True
- conn._session.stdout = MagicMock()
- conn._session.stdout.readline = MagicMock()
- conn._post_process = MagicMock()
- conn._post_process.return_value = 'test'
- conn._session.stdout.readline.side_effect = iter(['aaaaa\n', 'Hi\n', '0\n', 'bbbbb\n'])
- conn.get_option = MagicMock()
- conn.get_option.return_value = 1
-
- def test_plugins_connection_aws_ssm_prepare_terminal(self):
- pc = PlayContext()
- new_stdin = StringIO()
- conn = connection_loader.get('aws_ssm', pc, new_stdin)
- conn.is_windows = MagicMock()
- conn.is_windows.return_value = True
-
- def test_plugins_connection_aws_ssm_wrap_command(self):
- pc = PlayContext()
- new_stdin = StringIO()
- conn = connection_loader.get('aws_ssm', pc, new_stdin)
- conn.is_windows = MagicMock()
- conn.is_windows.return_value = True
-
- def test_plugins_connection_aws_ssm_post_process(self):
- pc = PlayContext()
- new_stdin = StringIO()
- conn = connection_loader.get('aws_ssm', pc, new_stdin)
- conn.is_windows = MagicMock()
- conn.is_windows.return_value = True
- conn.stdout = MagicMock()
-
- @patch('subprocess.Popen')
- def test_plugins_connection_aws_ssm_flush_stderr(self, s_popen):
- pc = PlayContext()
- new_stdin = StringIO()
- conn = connection_loader.get('aws_ssm', pc, new_stdin)
- conn.poll_stderr = MagicMock()
- conn.poll_stderr.register = MagicMock()
- conn.stderr = None
- s_popen.return_value.poll.return_value = 123
-
- @patch('boto3.client')
- def test_plugins_connection_aws_ssm_get_url(self, boto):
- pc = PlayContext()
- new_stdin = StringIO()
- conn = connection_loader.get('aws_ssm', pc, new_stdin)
- boto.return_value.generate_presigned_url.return_value = 'presigned-url'
-
- @patch('os.path.exists')
- def test_plugins_connection_aws_ssm_put_file(self, mock_ospe):
- pc = PlayContext()
- new_stdin = StringIO()
- conn = connection_loader.get('aws_ssm', pc, new_stdin)
- conn._connect = MagicMock()
- conn._file_transport_command = MagicMock()
- conn._file_transport_command.return_value = (0, 'stdout', 'stderr')
- res, stdout, stderr = conn.put_file('/in/file', '/out/file')
- self.assertEqual((res, stdout, stderr), (0, 'stdout', 'stderr'))
-
- def test_plugins_connection_aws_ssm_fetch_file(self):
- pc = PlayContext()
- new_stdin = StringIO()
- conn = connection_loader.get('aws_ssm', pc, new_stdin)
- conn._connect = MagicMock()
- conn._file_transport_command = MagicMock()
- conn._file_transport_command.return_value = (0, 'stdout', 'stderr')
- res, stdout, stderr = conn.fetch_file('/in/file', '/out/file')
- self.assertEqual((res, stdout, stderr), (0, 'stdout', 'stderr'))
-
- @patch('subprocess.check_output')
- @patch('boto3.client')
- def test_plugins_connection_file_transport_command(self, boto_client, s_check_output):
- pc = PlayContext()
- new_stdin = StringIO()
- conn = connection_loader.get('aws_ssm', pc, new_stdin)
- conn.get_option = MagicMock()
- conn.get_option.side_effect = ['1', '2', '3', '4', '5']
- conn._get_url = MagicMock()
- conn._get_url.side_effect = ['url1', 'url2']
- conn.get_option.return_value = 1
- get_command = MagicMock()
- put_command = MagicMock()
- conn.exec_command = MagicMock()
- conn.exec_command.return_value = (put_command, None, False)
- conn.download_fileobj = MagicMock()
- (returncode, stdout, stderr) = conn.exec_command(put_command, in_data=None, sudoable=False)
- (returncode, stdout, stderr) = conn.exec_command(get_command, in_data=None, sudoable=False)
- self.assertEqual(conn.exec_command.call_count, 2)
-
- @patch('subprocess.check_output')
- def test_plugins_connection_aws_ssm_close(self, s_check_output):
- pc = PlayContext()
- new_stdin = StringIO()
- conn = connection_loader.get('aws_ssm', pc, new_stdin)
- conn.instance_id = "i-12345"
- conn._session_id = True
- conn.get_option = MagicMock()
- conn.get_option.side_effect = ["/abc", "pqr"]
- conn._session = MagicMock()
- conn._session.terminate = MagicMock()
- conn._session.communicate = MagicMock()
- conn._terminate_session = MagicMock()
- conn._terminate_session.return_value = ''
- conn._session_id = MagicMock()
- conn._session_id.return_value = 'a'
- conn._client = MagicMock()
- conn.close()